code | repo_name | path | language | license | size
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Ryan Brown <[email protected]>
#
# This file is part of ipylogue.
#
# ipylogue is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""ipylogue
Backs the IPython notebook system with git.
"""
from ipylogue.gitmanager import GitNotebookManager
| ryansb/ipylogue | ipylogue/__init__.py | Python | agpl-3.0 | 898 |
# -*- coding: utf-8 -*-
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import errno
import functools
import logging
import os
from pathlib import Path
import threading
import time
from typing import Callable
from typing import Optional
from typing import Union
from fasteners import _utils
from fasteners.process_mechanism import _interprocess_mechanism
from fasteners.process_mechanism import _interprocess_reader_writer_mechanism
LOG = logging.getLogger(__name__)
def _ensure_tree(path):
"""Create a directory (and any ancestor directories required).
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
return False
elif e.errno == errno.EISDIR:
return False
else:
raise
else:
return True
class InterProcessLock:
"""An interprocess lock."""
MAX_DELAY = 0.1 # For backwards compatibility
DELAY_INCREMENT = 0.01 # For backwards compatibility
def __init__(self,
path: Union[Path, str],
sleep_func: Callable[[float], None] = time.sleep,
logger: Optional[logging.Logger] = None):
"""
        Args:
path:
Path to the file that will be used for locking.
sleep_func:
Optional function to use for sleeping.
logger:
Optional logger to use for logging.
"""
self.lockfile = None
self.path = _utils.canonicalize_path(path)
self.acquired = False
self.sleep_func = sleep_func
self.logger = _utils.pick_first_not_none(logger, LOG)
def _try_acquire(self, blocking, watch):
try:
self.trylock()
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
if not blocking or watch.expired():
return False
else:
raise _utils.RetryAgain()
else:
raise threading.ThreadError("Unable to acquire lock on"
" `%(path)s` due to"
" %(exception)s" %
{
'path': self.path,
'exception': e,
})
else:
return True
def _do_open(self):
basedir = os.path.dirname(self.path)
if basedir:
made_basedir = _ensure_tree(basedir)
if made_basedir:
self.logger.log(_utils.BLATHER,
'Created lock base path `%s`', basedir)
# Open in append mode so we don't overwrite any potential contents of
# the target file. This eliminates the possibility of an attacker
# creating a symlink to an important file in our lock path.
if self.lockfile is None or self.lockfile.closed:
self.lockfile = open(self.path, 'a')
def acquire(self,
blocking: bool = True,
delay: float = 0.01,
max_delay: float = 0.1,
timeout: Optional[float] = None) -> bool:
"""Attempt to acquire the lock.
Args:
blocking:
Whether to wait to try to acquire the lock.
delay:
When `blocking`, starting delay as well as the delay increment
(in seconds).
max_delay:
When `blocking` the maximum delay in between attempts to
acquire (in seconds).
timeout:
When `blocking`, maximal waiting time (in seconds).
Returns:
whether or not the acquisition succeeded
"""
if delay < 0:
raise ValueError("Delay must be greater than or equal to zero")
if timeout is not None and timeout < 0:
raise ValueError("Timeout must be greater than or equal to zero")
if delay >= max_delay:
max_delay = delay
self._do_open()
watch = _utils.StopWatch(duration=timeout)
r = _utils.Retry(delay, max_delay,
sleep_func=self.sleep_func, watch=watch)
with watch:
gotten = r(self._try_acquire, blocking, watch)
if not gotten:
return False
else:
self.acquired = True
self.logger.log(_utils.BLATHER,
"Acquired file lock `%s` after waiting %0.3fs [%s"
" attempts were required]", self.path,
watch.elapsed(), r.attempts)
return True
def _do_close(self):
if self.lockfile is not None:
self.lockfile.close()
self.lockfile = None
def __enter__(self):
gotten = self.acquire()
if not gotten:
# This shouldn't happen, but just in case...
raise threading.ThreadError("Unable to acquire a file lock"
" on `%s` (when used as a"
" context manager)" % self.path)
return self
def release(self):
"""Release the previously acquired lock."""
if not self.acquired:
raise threading.ThreadError("Unable to release an unaquired lock")
try:
self.unlock()
except Exception as e:
msg = "Could not unlock the acquired lock opened on `%s`", self.path
self.logger.exception(msg)
raise threading.ThreadError(msg) from e
else:
self.acquired = False
try:
self._do_close()
except IOError:
self.logger.exception("Could not close the file handle"
" opened on `%s`", self.path)
else:
self.logger.log(_utils.BLATHER,
"Unlocked and closed file lock open on"
" `%s`", self.path)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
return os.path.exists(self.path)
def trylock(self):
_interprocess_mechanism.trylock(self.lockfile)
def unlock(self):
_interprocess_mechanism.unlock(self.lockfile)
class InterProcessReaderWriterLock:
"""An interprocess readers writer lock."""
MAX_DELAY = 0.1 # for backwards compatibility
DELAY_INCREMENT = 0.01 # for backwards compatibility
def __init__(self,
path: Union[Path, str],
sleep_func: Callable[[float], None] = time.sleep,
logger: Optional[logging.Logger] = None):
"""
Args:
path:
Path to the file that will be used for locking.
sleep_func:
Optional function to use for sleeping.
logger:
Optional logger to use for logging.
"""
self.lockfile = None
self.path = _utils.canonicalize_path(path)
self.sleep_func = sleep_func
self.logger = _utils.pick_first_not_none(logger, LOG)
@contextmanager
def read_lock(self, delay=0.01, max_delay=0.1):
"""Context manager that grans a read lock"""
self.acquire_read_lock(blocking=True, delay=delay,
max_delay=max_delay, timeout=None)
try:
yield
finally:
self.release_read_lock()
@contextmanager
def write_lock(self, delay=0.01, max_delay=0.1):
"""Context manager that grans a write lock"""
gotten = self.acquire_write_lock(blocking=True, delay=delay,
max_delay=max_delay, timeout=None)
if not gotten:
# This shouldn't happen, but just in case...
raise threading.ThreadError("Unable to acquire a file lock"
" on `%s` (when used as a"
" context manager)" % self.path)
try:
yield
finally:
self.release_write_lock()
def _try_acquire(self, blocking, watch, exclusive):
try:
gotten = _interprocess_reader_writer_mechanism.trylock(self.lockfile, exclusive)
except Exception as e:
raise threading.ThreadError(
"Unable to acquire lock on {} due to {}!".format(self.path, e))
if gotten:
return True
if not blocking or watch.expired():
return False
raise _utils.RetryAgain()
def _do_open(self):
basedir = os.path.dirname(self.path)
if basedir:
made_basedir = _ensure_tree(basedir)
if made_basedir:
self.logger.log(_utils.BLATHER,
'Created lock base path `%s`', basedir)
if self.lockfile is None:
self.lockfile = _interprocess_reader_writer_mechanism.get_handle(self.path)
def acquire_read_lock(self,
blocking: bool = True,
delay: float = 0.01,
max_delay: float = 0.1,
                          timeout: Optional[float] = None) -> bool:
"""Attempt to acquire a reader's lock.
Args:
blocking:
Whether to wait to try to acquire the lock.
delay:
When `blocking`, starting delay as well as the delay increment
(in seconds).
max_delay:
When `blocking` the maximum delay in between attempts to
acquire (in seconds).
timeout:
When `blocking`, maximal waiting time (in seconds).
Returns:
whether or not the acquisition succeeded
"""
return self._acquire(blocking, delay, max_delay, timeout, exclusive=False)
def acquire_write_lock(self,
blocking: bool = True,
delay: float = 0.01,
max_delay: float = 0.1,
                           timeout: Optional[float] = None) -> bool:
"""Attempt to acquire a writer's lock.
Args:
blocking:
Whether to wait to try to acquire the lock.
delay:
When `blocking`, starting delay as well as the delay increment
(in seconds).
max_delay:
When `blocking` the maximum delay in between attempts to
acquire (in seconds).
timeout:
When `blocking`, maximal waiting time (in seconds).
Returns:
whether or not the acquisition succeeded
"""
return self._acquire(blocking, delay, max_delay, timeout, exclusive=True)
def _acquire(self, blocking=True,
delay=0.01, max_delay=0.1,
timeout=None, exclusive=True):
if delay < 0:
raise ValueError("Delay must be greater than or equal to zero")
if timeout is not None and timeout < 0:
raise ValueError("Timeout must be greater than or equal to zero")
if delay >= max_delay:
max_delay = delay
self._do_open()
watch = _utils.StopWatch(duration=timeout)
r = _utils.Retry(delay, max_delay,
sleep_func=self.sleep_func, watch=watch)
with watch:
gotten = r(self._try_acquire, blocking, watch, exclusive)
if not gotten:
return False
else:
self.logger.log(_utils.BLATHER,
"Acquired file lock `%s` after waiting %0.3fs [%s"
" attempts were required]", self.path,
watch.elapsed(), r.attempts)
return True
def _do_close(self):
if self.lockfile is not None:
_interprocess_reader_writer_mechanism.close_handle(self.lockfile)
self.lockfile = None
def release_write_lock(self):
"""Release the writer's lock."""
try:
_interprocess_reader_writer_mechanism.unlock(self.lockfile)
except IOError:
self.logger.exception("Could not unlock the acquired lock opened"
" on `%s`", self.path)
else:
try:
self._do_close()
except IOError:
self.logger.exception("Could not close the file handle"
" opened on `%s`", self.path)
else:
self.logger.log(_utils.BLATHER,
"Unlocked and closed file lock open on"
" `%s`", self.path)
def release_read_lock(self):
"""Release the reader's lock."""
try:
_interprocess_reader_writer_mechanism.unlock(self.lockfile)
except IOError:
self.logger.exception("Could not unlock the acquired lock opened"
" on `%s`", self.path)
else:
try:
self._do_close()
except IOError:
self.logger.exception("Could not close the file handle"
" opened on `%s`", self.path)
else:
self.logger.log(_utils.BLATHER,
"Unlocked and closed file lock open on"
" `%s`", self.path)
def interprocess_write_locked(path: Union[Path, str]):
"""Acquires & releases an interprocess **write** lock around the call into
the decorated function
Args:
path: Path to the file used for locking.
"""
lock = InterProcessReaderWriterLock(path)
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with lock.write_lock():
return f(*args, **kwargs)
return wrapper
return decorator
def interprocess_read_locked(path: Union[Path, str]):
"""Acquires & releases an interprocess **read** lock around the call into
the decorated function
Args:
path: Path to the file used for locking.
"""
lock = InterProcessReaderWriterLock(path)
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with lock.read_lock():
return f(*args, **kwargs)
return wrapper
return decorator
def interprocess_locked(path: Union[Path, str]):
"""Acquires & releases an interprocess lock around the call to the
decorated function.
Args:
path: Path to the file used for locking.
"""
lock = InterProcessLock(path)
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return wrapper
return decorator
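# Illustrative usage sketch (not part of the original module; the lock file paths
# below are hypothetical): the plain lock as a context manager and the decorator form.
if __name__ == '__main__':
    with InterProcessLock('/tmp/example.lock'):
        print('holding the exclusive lock')

    @interprocess_read_locked('/tmp/example_rw.lock')
    def read_something():
        print('holding a shared (read) lock')

    read_something()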
| harlowja/fasteners | fasteners/process_lock.py | Python | apache-2.0 | 15,792 |
# Generated by Django 2.2.12 on 2020-05-24 10:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0105_auto_20200517_1141'),
]
operations = [
migrations.AddField(
model_name='chapter',
name='new',
field=models.BooleanField(default=False, verbose_name='new'),
),
migrations.AddField(
model_name='chapter',
name='updated',
field=models.BooleanField(default=False, verbose_name='updated'),
),
]
| flavoi/diventi | diventi/ebooks/migrations/0106_auto_20200524_1227.py | Python | apache-2.0 | 585 |
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Albert SHENOUDA <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from datetime import datetime, timedelta
from odoo.addons.sponsorship_compassion.tests.test_sponsorship_compassion import (
BaseSponsorshipTest,
)
logger = logging.getLogger(__name__)
class TestCrmCompassion(BaseSponsorshipTest):
def test_crm(self):
"""
        This scenario consists of the creation of an opportunity,
        followed by the event.
Check if we can find the origin from the event in a sponsorship
contract.
"""
# Creation of a lead and an event
lead = self._create_lead("PlayoffsCompassion", 1)
lead2 = self._create_lead("JO_Compassion", 1)
self.assertTrue(lead.id)
event = self._create_event(lead, "sport")
event2 = self._create_event(lead2, "sport")
self.assertTrue(event.id)
event.write({"use_tasks": True, "partner_id": self.david.id})
event2.write({"use_tasks": True, "partner_id": self.david.id})
# Creation of the sponsorship contract
child = self.create_child("AB123456789")
sp_group = self.create_group({"partner_id": self.thomas.id})
sponsorship = self.create_contract(
{
"partner_id": self.thomas.id,
"group_id": sp_group.id,
"origin_id": event.origin_id.id,
"child_id": child.id,
"correspondent_id": sp_group.partner_id.id,
},
[{"amount": 50.0}],
)
sponsorship.write({"user_id": self.michel.id})
mark_origin = self.env["recurring.contract.origin"].search(
[("type", "=", "marketing")]
)
self.assertEqual(sponsorship.origin_id.name, event.full_name)
self.assertEqual(sponsorship.state, "draft")
sponsorship.write({"origin_id": mark_origin.id})
sponsorship.on_change_origin()
self.validate_sponsorship(sponsorship)
invoices = sponsorship.invoice_line_ids.mapped("invoice_id")
self.assertEqual(len(invoices), 2)
self.assertEqual(invoices[0].state, "open")
self.assertEqual(invoices[0].invoice_line_ids[0].user_id, sponsorship.user_id)
event_dico = self.david.open_events()
self.assertEqual(len(event_dico["domain"][0][2]), 2)
is_unlinked = event.unlink()
self.assertTrue(is_unlinked)
def test_calendar_event_synchronization(self):
lead = self._create_lead("MyLead", 1)
event = self._create_event(lead, "sport")
self.assertEqual(event.calendar_event_id.duration, 9)
in_two_days = datetime.today().date() + timedelta(days=2)
event.end_date = datetime.combine(in_two_days, datetime.min.time())
self.assertEqual(event.calendar_event_id.duration, 48)
# The event duration should have a lower bound of 3 hours
event.end_date = datetime.combine(datetime.today(), datetime.min.time())
self.assertEqual(event.calendar_event_id.duration, 3)
def _create_event(self, lead, event_type):
event_dico = lead.create_event()
now = datetime.today().date()
event = self.env["crm.event.compassion"].create(
{
"name": event_dico["context"]["default_name"],
"type": event_type,
"start_date": now,
"end_date": datetime.today().replace(hour=8, minute=43),
"hold_start_date": now,
"hold_end_date": now,
"number_allocate_children": 2,
"planned_sponsorships": 0,
"lead_id": lead.id,
"user_id": event_dico["context"]["default_user_id"],
}
)
return event
def _create_lead(self, name, user_id):
lead = self.env["crm.lead"].create({"name": name, "user_id": user_id, })
return lead
| CompassionCH/compassion-modules | crm_compassion/tests/test_crm_compassion.py | Python | agpl-3.0 | 4,236 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import os
from glumpy.ext.six.moves.urllib import request
import numpy as np
try:
from PIL import Image
except:
Image = None
from glumpy import gloo
from glumpy.log import log
def _fetch_file(filename):
"""
Fetch a font file from a remote data server
Available servers:
* https://github.com/glumpy/glumpy-font/raw/master/Fonts
* https://github.com/glumpy/glumpy-data/raw/master/Data
"""
local_directory = os.path.dirname(__file__) or '.'
local_file = os.path.join(local_directory, filename)
if os.path.isfile(local_file):
return local_file
extension = os.path.basename(filename).split('.')[-1]
# Font server
if extension in ['ttf', 'otf']:
server = "https://github.com/glumpy/glumpy-font/raw/master/Fonts"
# Data server
else:
server = "https://github.com/glumpy/glumpy-data/raw/master/Data"
filename = os.path.basename(filename)
remote = os.path.join(server, filename)
# Build url request
log.info('Requesting "%s" from remote server' % filename)
try:
response = request.urlopen(remote)
except:
log.warning('Data not available on remote server')
return None
# Fetch symlink data (font location)
symlink = response.read().decode()
remote = os.path.join(server, symlink)
response = request.urlopen(remote)
# Fetch data
size = response.headers['Content-Length'].strip()
log.info('Fetching data (%s bytes) to "%s"' % (size, local_file))
with open(local_file, 'wb') as fp:
fp.write(response.read())
return local_file
def objload(filename) :
V = [] #vertex
T = [] #texcoords
N = [] #normals
    F = [] #face indices
for line in open(filename):
if line[0] == '#':
continue
line = line.strip().split(' ')
if line[0] == 'v': #vertex
V.append([float(x) for x in line[1:]])
elif line[0] == 'vt' : # tex-coord
T.append([float(x) for x in line[1:]])
elif line[0] == 'vn' : # normal vector
N.append([float(x) for x in line[1:]])
elif line[0] == 'f' : # face
face = line[1:]
if len(face) != 3 :
raise Exception('not a triangle')
for i in range(0, len(face)) :
face[i] = face[i].split('/')
for j in range(0, len(face[i])):
face[i][j] = int(face[i][j]) - 1
F.append(face)
hashes = []
indices = []
vertices = []
for face in F:
for i in range(3):
h = hash(tuple(face[i]))
if h in hashes:
j = hashes.index(h)
else:
j = len(hashes)
vertices.append( (V[face[i][0]],
T[face[i][1]],
N[face[i][2]]) )
hashes.append(h)
indices.append(j)
vtype = [('position', np.float32, 3),
('texcoord', np.float32, 2),
('normal', np.float32, 3)]
itype = np.uint32
vertices = np.array(vertices, dtype=vtype).view(gloo.VertexBuffer)
indices = np.array(indices, dtype=itype).view(gloo.IndexBuffer)
return vertices, indices
def checkerboard(grid_num=8, grid_size=32):
row_even = grid_num // 2 * [0, 1]
row_odd = grid_num // 2 * [1, 0]
Z = np.row_stack(grid_num // 2 * (row_even, row_odd)).astype(np.uint8)
return 255 * Z.repeat(grid_size, axis=0).repeat(grid_size, axis=1)
def get(name, depth=0):
""" Retrieve data content from a name """
if name == "checkerboard":
return checkerboard(8,16)
extension = os.path.basename(name).split('.')[-1]
filename = _fetch_file(name)
if extension == 'npy':
return np.load(filename)
elif extension in ['ttf', 'otf']:
if filename is not None:
return filename
if depth == 0:
log.warning("Falling back to default font")
return get("SourceSansPro-Regular.otf", 1)
else:
log.critical("Default font not available")
raise RuntimeError
elif extension == 'obj':
return objload(filename)
elif extension == 'svg':
return filename
elif extension == 'json':
return filename
elif extension in ('png', 'jpg', 'jpeg', 'tif', 'tiff', 'tga'):
if Image is not None:
if filename is not None:
return np.array(Image.open(filename))
log.warning("File not found")
return checkerboard(16,32)
else:
log.warning("PIL/Pillow not installed, cannot load image")
return checkerboard(16,32)
log.warning("Data not found (%s)" % name)
raise RuntimeError
return None
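# Illustrative usage sketch (not part of the original module): the procedural
# "checkerboard" texture is generated locally, while any other name is fetched from
# the remote font/data servers on first use and cached next to this module.
if __name__ == '__main__':
    tex = get("checkerboard")
    print(tex.shape)  # (128, 128) for the default 8x16 grid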
| duyuan11/glumpy | glumpy/data/__init__.py | Python | bsd-3-clause | 5,100 |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.unversioned_status import UnversionedStatus
class TestUnversionedStatus(unittest.TestCase):
""" UnversionedStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testUnversionedStatus(self):
"""
Test UnversionedStatus
"""
model = lib_openshift.models.unversioned_status.UnversionedStatus()
if __name__ == '__main__':
unittest.main()
| detiber/lib_openshift | test/test_unversioned_status.py | Python | apache-2.0 | 1,300 |
# This file is part of Beneath a Binary Sky.
# Copyright (C) 2016, Aidin Gharibnavaz <[email protected]>
#
# Beneath a Binary Sky is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Beneath a Binary Sky is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beneath a Binary Sky. If not, see
# <http://www.gnu.org/licenses/>.
import math
class SquareInterator:
'''Iterates squares around a point.
    Note that the first returned point is the center point itself.
    Example usage:
    for x, y in SquareInterator((4, 3), (100, 100)):
'''
ITERATION_DIRECTIONS = [(1, 0), (0, 1), (-1, 0), (0, -1)]
def __init__(self, center_point, map_size, max_radios=None):
'''
@param center_point: Point to iterate around.
@param map_size: Size of the current map (world).
@keyword max_radios: If provided, it iterates to this maximum distance
from the center. For example, if center is on Y 3, and max_radios is
2, it will goes up to Y 5.
'''
self._center_x = center_point[0]
self._center_y = center_point[1]
self._map_size_x = map_size[0]
self._map_size_y = map_size[1]
self._max_raios = max_radios
def __iter__(self):
return next(self)
def __next__(self):
# First point is the center itself.
yield (self._center_x, self._center_y)
# The walking algorithm:
# It iterates points around the center, in a shape of square.
# First, it finds the upper left corner of the square. Then, it moves to the right.
# After reaching the right edge, it moves down. Then, left, then, up.
# After that, it increase the size of the square's side by one, and iterates again.
# How many squares to walk in each row? e.g. Square's side size.
length = 0
while True:
square_found = False
length += 2
corner_x = self._center_x - math.floor(length / 2)
corner_y = self._center_y - math.floor(length / 2)
current_x = corner_x
current_y = corner_y
for direction in SquareInterator.ITERATION_DIRECTIONS:
for i in range(length):
current_x += direction[0]
current_y += direction[1]
if (current_x < 0 or current_x > self._map_size_x - 1 or
current_y < 0 or current_y > self._map_size_y - 1):
# Out of map.
continue
square_found = True
yield (current_x, current_y)
if not square_found:
                # If nothing was found after a complete loop (i.e. we iterated all possible points).
                return
if self._max_raios is not None and (length / 2) >= self._max_raios:
# We iterated to the maximum requested radios.
                return
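# Illustrative sketch (not part of the original module): walking the neighbours of the
# centre cell of a 3x3 map, centre point first, limited to a radius of one square.
if __name__ == '__main__':
    for x, y in SquareInterator((1, 1), (3, 3), max_radios=1):
        print(x, y)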
| aidin36/beneath-a-binary-sky | src/world/square_iterator.py | Python | gpl-3.0 | 3,416 |
"""
2013 Paul Logston
This class defines methods for returning the next available Image instance for
use in a pestering. It also manages calling the image APIs in the event that more
ImageDatas are necessary.
"""
from django.db.utils import IntegrityError
import simplejson
from pester.models import API, APICall, ImageData, PesteringAttempt
from pester.pesterutils.bingapi import BingAPI
from pester.pesterutils.googleapi import GoogleAPI
class APIException(Exception):
"""Exception class for all API issues"""
pass
class NoAPIException(APIException):
"""Exception for no available APIs"""
def __init__(self, msg):
""" msg -- explination of error """
self.msg = msg
class NoAPIResultsException(APIException):
"""Exception for no when API returns no results"""
def __init__(self, api, msg):
""" msg -- explination of error"""
self.msg = api + ': ' + msg
class ImageManager(object):
"""Defines methods to manage Image instance acquisition and delivery"""
def __init__(self, pestering):
self.pestering = pestering
self._used_image_pk_list = None
self._unused_image_list = None
self._use_bing = True
self._bing_offset = 0
self._use_google = True
self._google_offset = 0
self._get_used_image_list()
self._get_unused_image_list()
def get_image(self):
"""Return unused ImageData instance or exception"""
if not self._unused_image_list:
self._get_more_images()
if self._unused_image_list:
return self._unused_image_list[0]
        raise NoAPIResultsException('ALL APIs', 'No API results received')
def _get_used_image_list(self):
"""
Get list of images from successful pestering attempts for pestering
"""
pa_list = PesteringAttempt.objects.filter(
pestering=self.pestering,
success=True)
self._used_image_pk_list = [pestering_attempt.image.pk
for pestering_attempt in pa_list
if pestering_attempt.image]
def _get_unused_image_list(self):
"""
Get list of images that fullfill the requirements of the pestering
that have not been sent.
"""
self._unused_image_list = ImageData.objects.exclude(
pk__in=self._used_image_pk_list
).filter(
search_term=self.pestering.search_term,
adult_safety_level=self.pestering.adult_safety_level
)
    def _get_api_offset(self, api):
        """Return the offset for the api, search_term, options combo"""
        if api == 'Bing':
            apic = APICall.objects.filter(
                api=api,
                pestering__search_term=self.pestering.search_term,
                pestering__adult_safety_level=self.pestering.adult_safety_level)
            self._bing_offset = len(apic) * 50
        elif api == 'Google':
            apic = APICall.objects.filter(
                api=api,
                pestering__search_term=self.pestering.search_term,
                pestering__adult_safety_level=self.pestering.adult_safety_level)
            self._google_offset = len(apic) * 10
def _get_more_images(self):
"""Query API for more images and load them into db"""
if self._use_bing:
api = API.objects.get(name='Bing')
bapi = BingAPI(api.key)
self._insert_images_into_db(
bapi.query(
search_terms=self.pestering.search_term,
offset=self._bing_offset,
adult=self.pestering.adult_safety_level))
            APICall.objects.create(api=api, pestering=self.pestering)
return
if self._use_google:
g = API.objects.get(name='Google')
key = g.key
params = simplejson.loads(g.params)
gapi = GoogleAPI(key, params['cx'])
self._insert_images_into_db(
gapi.query(
search_terms=self.pestering.search_term,
offset=self._google_offset,
adult=self.pestering.adult_safety_level))
            APICall.objects.create(api=g, pestering=self.pestering)
return
raise NoAPIException('No available APIs to query.')
def _insert_images_into_db(self, image_list):
"""Insert new images into db"""
for image in image_list:
try:
ImageData.objects.create(
search_term=self.pestering.search_term,
url=image[0],
file_type=image[1],
width=image[2],
height=image[3],
adult_safety_level=self.pestering.adult_safety_level)
except IntegrityError:
pass
self._get_unused_image_list()
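# Illustrative usage sketch (not part of the original module; assumes a configured
# Django project and an existing Pestering instance):
def _example_get_image(pestering):
    manager = ImageManager(pestering)
    try:
        return manager.get_image()
    except APIException as error:
        # Covers both NoAPIException and NoAPIResultsException.
        print(error.msg)
        return None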
| logston/pester | pesterutils/imagemanager.py | Python | gpl-2.0 | 4,984 |
# Copyright (C) 2006, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gettext import gettext as _
import uuid
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkX11
import dbus
import statvfs
import os
from sugar3.graphics.alert import ErrorAlert
from sugar3 import env
from sugar3.activity import activityfactory
from gi.repository import SugarExt
from jarabe.journal.journaltoolbox import MainToolbox
from jarabe.journal.journaltoolbox import DetailToolbox
from jarabe.journal.journaltoolbox import EditToolbox
from jarabe.journal.listview import ListView
from jarabe.journal.detailview import DetailView
from jarabe.journal.volumestoolbar import VolumesToolbar
from jarabe.journal import misc
from jarabe.journal.objectchooser import ObjectChooser
from jarabe.journal.modalalert import ModalAlert
from jarabe.journal import model
from jarabe.journal.journalwindow import JournalWindow
from jarabe.model import session
J_DBUS_SERVICE = 'org.laptop.Journal'
J_DBUS_INTERFACE = 'org.laptop.Journal'
J_DBUS_PATH = '/org/laptop/Journal'
_SPACE_TRESHOLD = 52428800
_BUNDLE_ID = 'org.laptop.JournalActivity'
_journal = None
class JournalActivityDBusService(dbus.service.Object):
def __init__(self, parent):
self._parent = parent
session_bus = dbus.SessionBus()
bus_name = dbus.service.BusName(J_DBUS_SERVICE,
bus=session_bus,
replace_existing=False,
allow_replacement=False)
logging.debug('bus_name: %r', bus_name)
dbus.service.Object.__init__(self, bus_name, J_DBUS_PATH)
@dbus.service.method(J_DBUS_INTERFACE,
in_signature='s', out_signature='')
def ShowObject(self, object_id):
"""Pop-up journal and show object with object_id"""
logging.debug('Trying to show object %s', object_id)
if self._parent.show_object(object_id):
self._parent.reveal()
def _chooser_response_cb(self, chooser, response_id, chooser_id):
logging.debug('JournalActivityDBusService._chooser_response_cb')
if response_id == Gtk.ResponseType.ACCEPT:
object_id = chooser.get_selected_object_id()
self.ObjectChooserResponse(chooser_id, object_id)
else:
self.ObjectChooserCancelled(chooser_id)
chooser.destroy()
del chooser
@dbus.service.method(J_DBUS_INTERFACE, in_signature='is',
out_signature='s')
def ChooseObject(self, parent_xid, what_filter=''):
"""
        This method is kept for backwards compatibility
"""
chooser_id = uuid.uuid4().hex
if parent_xid > 0:
display = Gdk.Display.get_default()
parent = GdkX11.X11Window.foreign_new_for_display(
display, parent_xid)
else:
parent = None
chooser = ObjectChooser(parent, what_filter)
chooser.connect('response', self._chooser_response_cb, chooser_id)
chooser.show()
return chooser_id
@dbus.service.method(J_DBUS_INTERFACE, in_signature='issb',
out_signature='s')
def ChooseObjectWithFilter(self, parent_xid, what_filter='',
filter_type=None, show_preview=False):
chooser_id = uuid.uuid4().hex
if parent_xid > 0:
display = Gdk.Display.get_default()
parent = GdkX11.X11Window.foreign_new_for_display(
display, parent_xid)
else:
parent = None
chooser = ObjectChooser(parent, what_filter, filter_type, show_preview)
chooser.connect('response', self._chooser_response_cb, chooser_id)
chooser.show()
return chooser_id
@dbus.service.signal(J_DBUS_INTERFACE, signature='ss')
def ObjectChooserResponse(self, chooser_id, object_id):
pass
@dbus.service.signal(J_DBUS_INTERFACE, signature='s')
def ObjectChooserCancelled(self, chooser_id):
pass
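# Illustrative sketch (not part of the original module): how a client process could
# invoke the journal's object chooser over D-Bus, assuming the journal is running.
def _example_choose_object():
    bus = dbus.SessionBus()
    journal = dbus.Interface(bus.get_object(J_DBUS_SERVICE, J_DBUS_PATH),
                             J_DBUS_INTERFACE)
    # A parent_xid of 0 means "no parent window"; the returned chooser id later
    # appears in the ObjectChooserResponse / ObjectChooserCancelled signals.
    return journal.ChooseObject(0, '')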
class JournalActivity(JournalWindow):
def __init__(self):
logging.debug('STARTUP: Loading the journal')
JournalWindow.__init__(self)
self.set_title(_('Journal'))
self._main_view = None
self._secondary_view = None
self._list_view = None
self._detail_view = None
self._main_toolbox = None
self._detail_toolbox = None
self._volumes_toolbar = None
self._mount_point = '/'
self._main_view_active = True
self._editing_mode = False
self._setup_main_view()
self._setup_secondary_view()
self.add_events(Gdk.EventMask.ALL_EVENTS_MASK)
self._realized_sid = self.connect('realize', self.__realize_cb)
self.connect('window-state-event', self.__window_state_event_cb)
self.connect('key-press-event', self._key_press_event_cb)
self.connect('focus-in-event', self._focus_in_event_cb)
self.connect('focus-out-event', self._focus_out_event_cb)
model.created.connect(self.__model_created_cb)
model.updated.connect(self.__model_updated_cb)
model.deleted.connect(self.__model_deleted_cb)
self._dbus_service = JournalActivityDBusService(self)
self.iconify()
self._critical_space_alert = None
self._check_available_space()
session.get_session_manager().shutdown_signal.connect(
self._session_manager_shutdown_cb)
def volume_error_cb(self, gobject, message, severity):
alert = ErrorAlert(title=severity, msg=message)
alert.connect('response', self.__alert_response_cb)
self.add_alert(alert)
alert.show()
def __alert_response_cb(self, alert, response_id):
self.remove_alert(alert)
def __realize_cb(self, window):
xid = window.get_window().get_xid()
SugarExt.wm_set_bundle_id(xid, _BUNDLE_ID)
activity_id = activityfactory.create_activity_id()
SugarExt.wm_set_activity_id(xid, str(activity_id))
self.disconnect(self._realized_sid)
self._realized_sid = None
def _session_manager_shutdown_cb(self, event):
self.destroy()
def can_close(self):
return False
def _setup_main_view(self):
self._main_toolbox = MainToolbox()
self._edit_toolbox = EditToolbox(self)
self._main_view = Gtk.VBox()
self._main_view.set_can_focus(True)
self._list_view = ListView(self, enable_multi_operations=True)
self._list_view.connect('detail-clicked', self.__detail_clicked_cb)
self._list_view.connect('clear-clicked', self.__clear_clicked_cb)
self._list_view.connect('volume-error', self.volume_error_cb)
self._list_view.connect('title-edit-started',
self.__title_edit_started_cb)
self._list_view.connect('title-edit-finished',
self.__title_edit_finished_cb)
self._list_view.connect('selection-changed',
self.__selection_changed_cb)
self._main_view.pack_start(self._list_view, True, True, 0)
self._list_view.show()
self._volumes_toolbar = VolumesToolbar()
self._volumes_toolbar.connect('volume-changed',
self.__volume_changed_cb)
self._volumes_toolbar.connect('volume-error', self.volume_error_cb)
self._main_view.pack_start(self._volumes_toolbar, False, True, 0)
self._main_toolbox.connect('query-changed', self._query_changed_cb)
self._main_toolbox.search_entry.connect('icon-press',
self.__search_icon_pressed_cb)
self._main_toolbox.set_mount_point(self._mount_point)
def _setup_secondary_view(self):
self._secondary_view = Gtk.VBox()
self._detail_toolbox = DetailToolbox(self)
self._detail_toolbox.connect('volume-error', self.volume_error_cb)
self._detail_view = DetailView(self)
self._detail_view.connect('go-back-clicked', self.__go_back_clicked_cb)
self._secondary_view.pack_end(self._detail_view, True, True, 0)
self._detail_view.show()
def _key_press_event_cb(self, widget, event):
if not self._main_toolbox.search_entry.has_focus():
self._main_toolbox.search_entry.grab_focus()
keyname = Gdk.keyval_name(event.keyval)
if keyname == 'Escape':
self.show_main_view()
def __detail_clicked_cb(self, list_view, object_id):
self._show_secondary_view(object_id)
def __clear_clicked_cb(self, list_view):
self._main_toolbox.clear_query()
def __selection_changed_cb(self, list_view, selected_items):
self._editing_mode = selected_items != 0
self._edit_toolbox.set_selected_entries(selected_items)
self._edit_toolbox.display_selected_entries_status()
self.show_main_view()
def update_selected_items_ui(self):
selected_items = \
len(self.get_list_view().get_model().get_selected_items())
self.__selection_changed_cb(None, selected_items)
def __go_back_clicked_cb(self, detail_view):
self.show_main_view()
def _query_changed_cb(self, toolbar, query):
self._list_view.update_with_query(query)
self.show_main_view()
def __search_icon_pressed_cb(self, entry, icon_pos, event):
self._main_view.grab_focus()
def __title_edit_started_cb(self, list_view):
self.disconnect_by_func(self._key_press_event_cb)
def __title_edit_finished_cb(self, list_view):
self.connect('key-press-event', self._key_press_event_cb)
def show_main_view(self):
self._main_view_active = True
if self._editing_mode:
self._toolbox = self._edit_toolbox
self._toolbox.set_total_number_of_entries(
self.get_total_number_of_entries())
else:
self._toolbox = self._main_toolbox
self.set_toolbar_box(self._toolbox)
self._toolbox.show()
if self.canvas != self._main_view:
self.set_canvas(self._main_view)
self._main_view.show()
def _show_secondary_view(self, object_id):
self._main_view_active = False
metadata = model.get(object_id)
try:
self._detail_toolbox.set_metadata(metadata)
except Exception:
logging.exception('Exception while displaying entry:')
self.set_toolbar_box(self._detail_toolbox)
self._detail_toolbox.show()
try:
self._detail_view.props.metadata = metadata
except Exception:
logging.exception('Exception while displaying entry:')
self.set_canvas(self._secondary_view)
self._secondary_view.show()
def show_object(self, object_id):
metadata = model.get(object_id)
if metadata is None:
return False
else:
self._show_secondary_view(object_id)
return True
def __volume_changed_cb(self, volume_toolbar, mount_point):
logging.debug('Selected volume: %r.', mount_point)
self._mount_point = mount_point
self.set_editing_mode(False)
self._main_toolbox.set_mount_point(mount_point)
self._edit_toolbox.batch_copy_button.update_mount_point()
def __model_created_cb(self, sender, **kwargs):
misc.handle_bundle_installation(model.get(kwargs['object_id']))
self._main_toolbox.refresh_filters()
self._check_available_space()
def __model_updated_cb(self, sender, **kwargs):
misc.handle_bundle_installation(model.get(kwargs['object_id']))
if self.canvas == self._secondary_view and \
kwargs['object_id'] == self._detail_view.props.metadata['uid']:
self._detail_view.refresh()
self._check_available_space()
def __model_deleted_cb(self, sender, **kwargs):
if self.canvas == self._secondary_view and \
kwargs['object_id'] == self._detail_view.props.metadata['uid']:
self.show_main_view()
def _focus_in_event_cb(self, window, event):
if not self._main_view_active:
return
self._list_view.set_is_visible(True)
def _focus_out_event_cb(self, window, event):
self._list_view.set_is_visible(False)
def __window_state_event_cb(self, window, event):
logging.debug('window_state_event_cb %r', self)
if not self._main_view_active:
return
if event.changed_mask & Gdk.WindowState.ICONIFIED:
state = event.new_window_state
visible = not state & Gdk.WindowState.ICONIFIED
self._list_view.set_is_visible(visible)
def _check_available_space(self):
"""Check available space on device
If the available space is below 50MB an alert will be
        shown which encourages the user to delete old journal entries.
"""
if self._critical_space_alert:
return
stat = os.statvfs(env.get_profile_path())
free_space = stat[statvfs.F_BSIZE] * stat[statvfs.F_BAVAIL]
if free_space < _SPACE_TRESHOLD:
self._critical_space_alert = ModalAlert()
self._critical_space_alert.connect('destroy',
self.__alert_closed_cb)
self._critical_space_alert.show()
def __alert_closed_cb(self, data):
self.show_main_view()
self.reveal()
self._critical_space_alert = None
def set_active_volume(self, mount):
self._volumes_toolbar.set_active_volume(mount)
def show_journal(self):
"""Become visible and show main view"""
self.reveal()
self.show_main_view()
def get_list_view(self):
return self._list_view
def get_total_number_of_entries(self):
list_view_model = self.get_list_view().get_model()
return len(list_view_model)
def get_editing_mode(self):
return self._editing_mode
def set_editing_mode(self, editing_mode):
if editing_mode == self._editing_mode:
return
self._editing_mode = editing_mode
if self._editing_mode:
self.get_list_view().disable_drag_and_copy()
else:
self.get_list_view().enable_drag_and_copy()
self.show_main_view()
def get_mount_point(self):
return self._mount_point
def _set_widgets_sensitive_state(self, sensitive_state):
self._toolbox.set_sensitive(sensitive_state)
self._list_view.set_sensitive(sensitive_state)
if sensitive_state:
self._list_view.enable_updates()
else:
self._list_view.disable_updates()
self._volumes_toolbar.set_sensitive(sensitive_state)
def freeze_ui(self):
self._set_widgets_sensitive_state(False)
def unfreeze_ui(self):
self._set_widgets_sensitive_state(True)
def get_journal():
global _journal
if _journal is None:
_journal = JournalActivity()
_journal.show()
return _journal
def start():
get_journal()
| puneetgkaur/backup_sugar_shell_for_cordova | src/jarabe/journal/journalactivity.py | Python | gpl-2.0 | 15,976 |
from PIL import Image
from os import system, remove
def extractor(name):
"""Goes through the captcha image and lines up
the individual numbers to increase the chance
of extracting the numbers"""
im = Image.open(name)
# prepare new cleaned image surface
im2 = Image.new("P", im.size, 255)
currentNumber = 0 # current number being scanned
# Measurements about the numbers in the captcha
# top bot left right
numbers = {"1": [500, 0, 0, 0],
"2": [500, 0, 0, 0],
"3": [500, 0, 0, 0],
"4": [500, 0, 0, 0],
"5": [500, 0, 0, 0],
"6": [500, 0, 0, 0]}
spaces = [0] * 6
gotBlackPixel = False
lastRowEmpty = True
emptyXSpace = []
# clean image and get measurements about numbers
for x in range(im.size[0]):
for y in range(im.size[1]):
pix = im.getpixel((x, y))
if 2 < x < im.size[0] - 2 and 2 < y < im.size[1] - 2:
if pix == 16:
im2.putpixel((x, y), 0)
if lastRowEmpty and currentNumber != 6:
currentNumber += 1
if not gotBlackPixel and lastRowEmpty: # get left most pixel of number
numbers[str(currentNumber)][2] = x
if y < numbers[str(currentNumber)][0]: # get top most pixel of number
numbers[str(currentNumber)][0] = y
if y > numbers[str(currentNumber)][1]: # new get lowest pixel of number
numbers[str(currentNumber)][1] = y
gotBlackPixel = True
lastRowEmpty = False
else:
if not lastRowEmpty and not gotBlackPixel: # get the far right x pixel of number
numbers[str(currentNumber)][3] = x - 1
if not gotBlackPixel:
lastRowEmpty = True
spaces[currentNumber - 1] += 1
emptyXSpace.append(x) # recording empty x coordinates
gotBlackPixel = False
# save image with newely cleaned image
im2.save("alignNumbers.gif")
# prepare the final image surface
im3 = Image.new("P", im.size, 255)
currentNumber = 0
gotBlackPixel = False
lastRowEmpty = True
dy = 0 # how many pixels to move up
dx = 0 # how many pixels to move left
line = 4 # line where all numbers to align up on
row = 10 # the row from where to start drawing numbers
newNumber = True
# create the new image
for x in range(im2.size[0]):
for y in range(im2.size[1]):
pix = im2.getpixel((x, y))
if pix == 0:
if lastRowEmpty and currentNumber != 6:
currentNumber += 1
dy = numbers[str(currentNumber)][0] - line
if newNumber:
newNumber = False
if currentNumber == 1:
dx = (im2.size[0] - row) - (im2.size[0] - x)
else:
dx += 3
im3.putpixel((dx, y - dy), 0)
gotBlackPixel = True
lastRowEmpty = False
if x in emptyXSpace:
newNumber = True
if gotBlackPixel:
dx += 1
if not gotBlackPixel:
lastRowEmpty = True
gotBlackPixel = False
# save the final image - cleaned and aligned
im3.save("output.gif")
# use tesseract to extract the numbers - saving to text file
system("tesseract output.gif result -psm 8")
# read in numbers from output file
result = open("result.txt", 'r').readline().strip()
if len(result) == 6 and result.isdigit():
# do some cleaning up
try:
remove("alignNumbers.gif")
remove("output.gif")
remove("result.txt")
except IOError, e:
print e.message
return result
else:
return -1
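# Illustrative usage sketch (not part of the original module; "captcha.gif" is a
# hypothetical input file): extractor() returns the six digits on success, or -1.
if __name__ == '__main__':
    digits = extractor("captcha.gif")
    if digits != -1:
        print "Recognised captcha:", digits
    else:
        print "Could not extract a six digit code"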
| thenewcoder/Acesse-iRewards-Automation-Tools | modules/captchas.py | Python | mit | 4,019 |
ID = "rank"
permission = 1
def execute(self, name, params, channel, userdata, rank):
if len(params) == 0:
rank = self.userGetRankNum(channel, name)
print rank
self.sendChatMessage(self.send, channel, "You "+{2 : "are OP", 1 : "are voiced", 0 : "do not have a special rank", 3 : "are Bot OP"}[rank])
else:
name = params[0]
rank = self.userGetRankNum(channel, name)
ranknames = ["User", "Voiced", "OP", "Bot OP"]
if rank == -1:
pass
else:
self.sendMessage(channel, "User {0} has the rank {1} ({2})".format(name, rank, ranknames[rank]))
#self.sendChatMessage(self.send, channel, "You "+{"@" : "are OP", "+" : "are voiced", "" : "do not have a special rank", "@@" : "are Bot OP"}[self.userGetRank(channel, name)]) | NightKev/Renol-IRC | commands/showRank.py | Python | mit | 837 |
import os
from celery import Celery
import ente_common as E
# monkeypatch threadpool threads
import threadpool
from __main__ import bootstrap_thread
try:
threadpool._WorkerThread
except AttributeError:
threadpool._WorkerThread = threadpool.WorkerThread
class WorkerThread(threadpool._WorkerThread):
def run(self):
return bootstrap_thread(lambda: threadpool._WorkerThread.run(self))
threadpool.WorkerThread = WorkerThread
class Config(object):
CELERYD_POOL = "prefork"
CELERYD_ACCEPT_CONTENT = ["pickle", "json", "msgpack", "yaml"]
app = Celery("tasks.tasks", backend="redis://redis", broker="amqp://guest@rabbitmq//")
app.config_from_object(Config)
@app.task
def add(x, y):
return x + y
@app.task
def sub(x, y):
return x - y
@app.task
def e_name(nid):
return _e_name(nid)
@E.tx_abort_encaps
def _e_name(nid):
return E.e_name(nid)
@app.task
def modify():
pid = os.getpid()
nid = E.tx_encaps(E.e_create_node)("NVAL", E.nb.root(), "H", name=str(pid))
return (pid, nid)
@E.tx_abort_encaps
def _list_node(nid=None, attribs=None):
if attribs is None:
attribs = {}
if nid is None:
nid = E.nb.root()
def mkn(nid):
d = dict(id=nid)
for k, f in attribs.items():
d[k] = f(nid)
return d
return (mkn(nid),
[mkn(iid) for iid in E.e_walk(nid, (E.UP, 1))],
[mkn(iid) for iid in E.e_walk(nid, (E.DOWN, 1))])
@app.task
def list_node(node_id=None):
import time
s = time.time()
try:
attribs = {"Name" : E.e_name,
"Type" : E.e_nti,
"Info" : E.e_info,
"Value" : E.e_val,
}
node, parents, kids = _list_node(node_id, attribs)
return dict(attribs=sorted(attribs), node=node, parents=parents, children=kids)
finally:
print "took %s for pid %s" % (time.time() - s, node_id)
| diresi/docker | ente/ente/src/tasks/tasks/tasks.py | Python | apache-2.0 | 1,931 |
#!/usr/bin/env python
import sys
import hyperdex.client
from hyperdex.client import LessEqual, GreaterEqual, Range, Regex, LengthEquals, LengthLessEqual, LengthGreaterEqual
c = hyperdex.client.Client(sys.argv[1], int(sys.argv[2]))
def to_objectset(xs):
return set([frozenset(x.items()) for x in xs])
assert c.put('kv', 'k', {}) == True
assert c.get('kv', 'k') == {'v': 0}
assert c.put('kv', 'k', {'v': 1}) == True
assert c.get('kv', 'k') == {'v': 1}
assert c.put('kv', 'k', {'v': -1}) == True
assert c.get('kv', 'k') == {'v': -1}
assert c.put('kv', 'k', {'v': 0}) == True
assert c.get('kv', 'k') == {'v': 0}
assert c.put('kv', 'k', {'v': 9223372036854775807}) == True
assert c.get('kv', 'k') == {'v': 9223372036854775807}
assert c.put('kv', 'k', {'v': -9223372036854775808}) == True
assert c.get('kv', 'k') == {'v': -9223372036854775808}
| hyc/HyperDex | test/python/DataTypeInt.py | Python | bsd-3-clause | 842 |
'''
cfn_manage
create, update, delete aws cloudformation stacks
pass parameters
enforce personal configuration conventions
'''
__version__ = '0.1.0'
| quagly/cfn-manage | cfn_manage/__init__.py | Python | apache-2.0 | 166 |
"""
Component that performs TensorFlow classification on images.
For a quick start, pick a pre-trained COCO model from:
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.tensorflow/
"""
import logging
import sys
import os
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_CONFIDENCE, CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['numpy==1.15.3', 'pillow==5.2.0', 'protobuf==3.6.1']
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_FILE_OUT = 'file_out'
CONF_MODEL = 'model'
CONF_GRAPH = 'graph'
CONF_LABELS = 'labels'
CONF_MODEL_DIR = 'model_dir'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_AREA = 'area'
CONF_TOP = 'top'
CONF_LEFT = 'left'
CONF_BOTTOM = 'bottom'
CONF_RIGHT = 'right'
AREA_SCHEMA = vol.Schema({
vol.Optional(CONF_TOP, default=0): cv.small_float,
vol.Optional(CONF_LEFT, default=0): cv.small_float,
vol.Optional(CONF_BOTTOM, default=1): cv.small_float,
vol.Optional(CONF_RIGHT, default=1): cv.small_float
})
CATEGORY_SCHEMA = vol.Schema({
vol.Required(CONF_CATEGORY): cv.string,
vol.Optional(CONF_AREA): AREA_SCHEMA
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_GRAPH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_AREA): AREA_SCHEMA,
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [vol.Any(
cv.string,
CATEGORY_SCHEMA
)])
})
})
def draw_box(draw, box, img_width,
img_height, text='', color=(255, 255, 0)):
"""Draw bounding box on image."""
ymin, xmin, ymax, xmax = box
(left, right, top, bottom) = (xmin * img_width, xmax * img_width,
ymin * img_height, ymax * img_height)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=5, fill=color)
if text:
draw.text((left, abs(top-15)), text, fill=color)
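# Illustrative sketch (not part of the original component; the file names are
# hypothetical): draw_box() expects a normalized (ymin, xmin, ymax, xmax) box plus the
# image's pixel dimensions.
def _example_draw_box(source_path, target_path):
    from PIL import Image, ImageDraw
    img = Image.open(source_path).convert('RGB')
    draw = ImageDraw.Draw(img)
    draw_box(draw, (0.1, 0.2, 0.6, 0.8), img.width, img.height, text='person 87.5%')
    img.save(target_path)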
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the TensorFlow image processing platform."""
model_config = config.get(CONF_MODEL)
model_dir = model_config.get(CONF_MODEL_DIR) \
or hass.config.path('tensorflow')
labels = model_config.get(CONF_LABELS) \
or hass.config.path('tensorflow', 'object_detection',
'data', 'mscoco_label_map.pbtxt')
# Make sure locations exist
if not os.path.isdir(model_dir) or not os.path.exists(labels):
_LOGGER.error("Unable to locate tensorflow models or label map.")
return
# append custom model path to sys.path
sys.path.append(model_dir)
try:
# Verify that the TensorFlow Object Detection API is pre-installed
# pylint: disable=unused-import,unused-variable
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf # noqa
from object_detection.utils import label_map_util # noqa
except ImportError:
# pylint: disable=line-too-long
_LOGGER.error(
"No TensorFlow Object Detection library found! Install or compile "
"for your system following instructions here: "
"https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md") # noqa
return
try:
# Display warning that PIL will be used if no OpenCV is found.
# pylint: disable=unused-import,unused-variable
import cv2 # noqa
except ImportError:
_LOGGER.warning("No OpenCV library found. "
"TensorFlow will process image with "
"PIL at reduced resolution.")
# setup tensorflow graph, session, and label map to pass to processor
# pylint: disable=no-member
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_config.get(CONF_GRAPH), 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
session = tf.Session(graph=detection_graph)
label_map = label_map_util.load_labelmap(labels)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=90, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
entities = []
for camera in config[CONF_SOURCE]:
entities.append(TensorFlowImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
session, detection_graph, category_index, config))
add_entities(entities)
class TensorFlowImageProcessor(ImageProcessingEntity):
"""Representation of an TensorFlow image processor."""
def __init__(self, hass, camera_entity, name, session, detection_graph,
category_index, config):
"""Initialize the TensorFlow entity."""
model_config = config.get(CONF_MODEL)
self.hass = hass
self._camera_entity = camera_entity
if name:
self._name = name
else:
self._name = "TensorFlow {0}".format(
split_entity_id(camera_entity)[1])
self._session = session
self._graph = detection_graph
self._category_index = category_index
self._min_confidence = config.get(CONF_CONFIDENCE)
self._file_out = config.get(CONF_FILE_OUT)
# handle categories and specific detection areas
categories = model_config.get(CONF_CATEGORIES)
self._include_categories = []
self._category_areas = {}
for category in categories:
if isinstance(category, dict):
category_name = category.get(CONF_CATEGORY)
category_area = category.get(CONF_AREA)
self._include_categories.append(category_name)
self._category_areas[category_name] = [0, 0, 1, 1]
if category_area:
self._category_areas[category_name] = [
category_area.get(CONF_TOP),
category_area.get(CONF_LEFT),
category_area.get(CONF_BOTTOM),
category_area.get(CONF_RIGHT)
]
else:
self._include_categories.append(category)
self._category_areas[category] = [0, 0, 1, 1]
# Handle global detection area
self._area = [0, 0, 1, 1]
area_config = model_config.get(CONF_AREA)
if area_config:
self._area = [
area_config.get(CONF_TOP),
area_config.get(CONF_LEFT),
area_config.get(CONF_BOTTOM),
area_config.get(CONF_RIGHT)
]
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
ATTR_SUMMARY: {category: len(values)
for category, values in self._matches.items()},
ATTR_TOTAL_MATCHES: self._total_matches
}
def _save_image(self, image, matches, paths):
from PIL import Image, ImageDraw
import io
img = Image.open(io.BytesIO(bytearray(image))).convert('RGB')
img_width, img_height = img.size
draw = ImageDraw.Draw(img)
# Draw custom global region/area
if self._area != [0, 0, 1, 1]:
draw_box(draw, self._area,
img_width, img_height,
"Detection Area", (0, 255, 255))
for category, values in matches.items():
# Draw custom category regions/areas
if (category in self._category_areas
and self._category_areas[category] != [0, 0, 1, 1]):
label = "{} Detection Area".format(category.capitalize())
draw_box(draw, self._category_areas[category], img_width,
img_height, label, (0, 255, 0))
# Draw detected objects
for instance in values:
label = "{0} {1:.1f}%".format(category, instance['score'])
draw_box(draw, instance['box'],
img_width, img_height,
label, (255, 255, 0))
for path in paths:
_LOGGER.info("Saving results image to %s", path)
img.save(path)
def process_image(self, image):
"""Process the image."""
import numpy as np
try:
import cv2 # pylint: disable=import-error
img = cv2.imdecode(
np.asarray(bytearray(image)), cv2.IMREAD_UNCHANGED)
inp = img[:, :, [2, 1, 0]] # BGR->RGB
inp_expanded = inp.reshape(1, inp.shape[0], inp.shape[1], 3)
except ImportError:
from PIL import Image
import io
img = Image.open(io.BytesIO(bytearray(image))).convert('RGB')
img.thumbnail((460, 460), Image.ANTIALIAS)
img_width, img_height = img.size
inp = np.array(img.getdata()).reshape(
(img_height, img_width, 3)).astype(np.uint8)
inp_expanded = np.expand_dims(inp, axis=0)
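        # Look up the standard TensorFlow Object Detection API output tensors
        # and run a single inference pass over the (1, height, width, 3) batch.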
image_tensor = self._graph.get_tensor_by_name('image_tensor:0')
boxes = self._graph.get_tensor_by_name('detection_boxes:0')
scores = self._graph.get_tensor_by_name('detection_scores:0')
classes = self._graph.get_tensor_by_name('detection_classes:0')
boxes, scores, classes = self._session.run(
[boxes, scores, classes],
feed_dict={image_tensor: inp_expanded})
boxes, scores, classes = map(np.squeeze, [boxes, scores, classes])
classes = classes.astype(int)
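        # Detection boxes are normalized [y_min, x_min, y_max, x_max] values,
        # matching the [top, left, bottom, right] ordering of the configured
        # detection areas.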
matches = {}
total_matches = 0
for box, score, obj_class in zip(boxes, scores, classes):
score = score * 100
boxes = box.tolist()
# Exclude matches below min confidence value
if score < self._min_confidence:
continue
# Exclude matches outside global area definition
if (boxes[0] < self._area[0] or boxes[1] < self._area[1]
or boxes[2] > self._area[2] or boxes[3] > self._area[3]):
continue
category = self._category_index[obj_class]['name']
# Exclude unlisted categories
if (self._include_categories
and category not in self._include_categories):
continue
# Exclude matches outside category specific area definition
if (self._category_areas
and (boxes[0] < self._category_areas[category][0]
or boxes[1] < self._category_areas[category][1]
or boxes[2] > self._category_areas[category][2]
or boxes[3] > self._category_areas[category][3])):
continue
# If we got here, we should include it
if category not in matches.keys():
matches[category] = []
matches[category].append({
'score': float(score),
'box': boxes
})
total_matches += 1
# Save Images
if total_matches and self._file_out:
paths = []
for path_template in self._file_out:
if isinstance(path_template, template.Template):
paths.append(path_template.render(
camera_entity=self._camera_entity))
else:
paths.append(path_template)
self._save_image(image, matches, paths)
self._matches = matches
        self._total_matches = total_matches
 | skalavala/smarthome | custom_components/image_processing/tensorflow.py | Python | mit | 13,028 |
# Written by John Hoffman
# derived from NATPortMapping.py by Yejun Yang
# and from example code by Myers Carpenter
# see LICENSE.txt for license information
import socket
from random import random
from hashlib import sha1
from traceback import print_exc
from subnetparse import IP_List
from clock import clock
try:
True
except:
True = 1
False = 0
#DEBUG = True
DEBUG = False
EXPIRE_CACHE = 30 # seconds
ID = "LF-" + sha1(str(random())).hexdigest()[-4:] # modify by Su-Jin
try:
import pythoncom, win32com.client
_supported = 1
except ImportError:
_supported = 0
class _UPnP1: # derived from Myers Carpenter's code
# seems to use the machine's local UPnP
# system for its operation. Runs fairly fast
def __init__(self):
self.map = None
self.last_got_map = -10e10
def _get_map(self):
if self.last_got_map + EXPIRE_CACHE < clock():
try:
dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP")
self.map = dispatcher.StaticPortMappingCollection
self.last_got_map = clock()
except:
self.map = None
return self.map
def test(self):
try:
assert self._get_map() # make sure a map was found
success = True
except:
success = False
return success
def open(self, ip, p):
map = self._get_map()
try:
map.Add(p,'TCP',p, ip, True, ID)
if DEBUG:
print 'port opened: '+ip+':'+str(p)
success = True
except:
if DEBUG:
print "COULDN'T OPEN "+str(p)
print_exc()
success = False
return success
def close(self, p):
map = self._get_map()
try:
map.Remove(p,'TCP')
success = True
if DEBUG:
print 'port closed: '+str(p)
except:
if DEBUG:
print 'ERROR CLOSING '+str(p)
print_exc()
success = False
return success
def clean(self, retry = False):
if not _supported:
return
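        # Remove any leftover TCP mappings created by this application
        # (identified by the 'lf-' description prefix), retrying once if some
        # removals fail.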
try:
map = self._get_map()
ports_in_use = []
for i in xrange(len(map)):
try:
mapping = map[i]
port = mapping.ExternalPort
prot = str(mapping.Protocol).lower()
desc = str(mapping.Description).lower()
except:
port = None
if port and prot == 'tcp' and desc[:3] == 'lf-':
ports_in_use.append(port)
success = True
for port in ports_in_use:
try:
map.Remove(port,'TCP')
except:
success = False
if not success and not retry:
self.clean(retry = True)
except:
pass
class _UPnP2: # derived from Yejun Yang's code
# apparently does a direct search for UPnP hardware
# may work in some cases where _UPnP1 won't, but is slow
# still need to implement "clean" method
def __init__(self):
self.services = None
self.last_got_services = -10e10
def _get_services(self):
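        # Ask the Windows UPnP device finder for WANIPConnection /
        # WANPPPConnection services and cache the result for EXPIRE_CACHE
        # seconds; this direct search is what makes _UPnP2 slow.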
if not self.services or self.last_got_services + EXPIRE_CACHE < clock():
self.services = []
try:
f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder")
for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1",
"urn:schemas-upnp-org:service:WANPPPConnection:1" ):
try:
conns = f.FindByType(t,0)
for c in xrange(len(conns)):
try:
svcs = conns[c].Services
for s in xrange(len(svcs)):
try:
self.services.append(svcs[s])
except:
pass
except:
pass
except:
pass
except:
pass
self.last_got_services = clock()
return self.services
def test(self):
try:
assert self._get_services() # make sure some services can be found
success = True
except:
success = False
return success
def open(self, ip, p):
svcs = self._get_services()
success = False
for s in svcs:
try:
s.InvokeAction('AddPortMapping',['',p,'TCP',p,ip,True,ID,0],'')
success = True
except:
pass
if DEBUG and not success:
print "COULDN'T OPEN "+str(p)
print_exc()
return success
def close(self, p):
svcs = self._get_services()
success = False
for s in svcs:
try:
s.InvokeAction('DeletePortMapping', ['',p,'TCP'], '')
success = True
except:
pass
if DEBUG and not success:
print "COULDN'T OPEN "+str(p)
print_exc()
return success
class _UPnP: # master holding class
def __init__(self):
self.upnp1 = _UPnP1()
self.upnp2 = _UPnP2()
self.upnplist = (None, self.upnp1, self.upnp2)
self.upnp = None
self.local_ip = None
self.last_got_ip = -10e10
def get_ip(self):
if self.last_got_ip + EXPIRE_CACHE < clock():
local_ips = IP_List()
local_ips.set_intranet_addresses()
try:
for info in socket.getaddrinfo(socket.gethostname(),0,socket.AF_INET):
# exception if socket library isn't recent
self.local_ip = info[4][0]
if local_ips.includes(self.local_ip):
self.last_got_ip = clock()
if DEBUG:
print 'Local IP found: '+self.local_ip
break
else:
raise ValueError('couldn\'t find intranet IP')
except:
self.local_ip = None
if DEBUG:
print 'Error finding local IP'
print_exc()
return self.local_ip
def test(self, upnp_type):
if DEBUG:
print 'testing UPnP type '+str(upnp_type)
if not upnp_type or not _supported or self.get_ip() is None:
if DEBUG:
print 'not supported'
return 0
pythoncom.CoInitialize() #@UndefinedVariable
self.upnp = self.upnplist[upnp_type] # cache this
if self.upnp.test():
if DEBUG:
print 'ok'
return upnp_type
if DEBUG:
print 'tested bad'
return 0
def open(self, p):
assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
return self.upnp.open(self.get_ip(), p)
def close(self, p):
assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
return self.upnp.close(p)
def clean(self):
return self.upnp1.clean()
_upnp_ = _UPnP()
UPnP_get_ip = _upnp_.get_ip
UPnP_test = _upnp_.test
UPnP_open_port = _upnp_.open
UPnP_close_port = _upnp_.close
UPnP_reset = _upnp_.clean
# for test
if __name__ == '__main__':
#print(_supported)
print('ip: ' + _upnp_.get_ip())
print(UPnP_test(1))
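    # Typical usage from application code would look something like this
    # (illustrative sketch only -- port 6881 is an arbitrary example):
    #     if UPnP_test(1) or UPnP_test(2):
    #         ip = UPnP_get_ip()
    #         UPnP_open_port(6881)
    #         ...
    #         UPnP_close_port(6881)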
| NecromancerLev0001/LightingFury | LF/natpunch.py | Python | mit | 7,898 |
from app.models.user import User
from app import login_manager
from flask import current_app
# Python module which helps securely sign cookies
from itsdangerous import URLSafeTimedSerializer
@login_manager.user_loader
def load_user(user_id):
'''
Flask-Login user_loader callback.
    The user_loader callback must return a User object for the given user_id,
    or None if no such user exists.
The user_id was stored in the session environment by Flask-Login.
user_loader stores the returned User object in current_user during every
flask request.
    Used to load the user object from the session when a URL is accessed.
:param user_id: Should be the same as user.get_id()
'''
user = User.get_user_by_id(user_id)
if user:
return User(user)
return None
@login_manager.token_loader
def load_token(token):
"""
Flask-Login token_loader callback.
    The token_loader callback takes the token that was stored on the user's
    computer, checks whether it is valid, and returns a User object if it is
    valid or None if it is not.
"""
    # The token itself was generated by User.get_auth_token, so it is up to
    # us to know the format of the token data.
    # The token was signed using itsdangerous.URLSafeTimedSerializer, which
    # allows us to enforce a max_age on the token itself. The cookie stored
    # on the user's computer also has an expiry date, but that can be changed
    # by the user, so this feature lets us enforce the expiry date of the
    # token server side instead of relying on the user's cookie to expire.
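    # Illustrative sketch (assumption, not defined in this module) of how the
    # matching token is expected to be produced by User.get_auth_token():
    #     login_serializer = URLSafeTimedSerializer(current_app.config["SECRET_KEY"])
    #     token = login_serializer.dumps([str(self.id), self.password])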
max_age = current_app.config["COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
try:
login_serializer = URLSafeTimedSerializer(current_app.config["SECRET_KEY"])
data = login_serializer.loads(token, max_age=max_age)
except:
return None
#Find the User
user_id = data[0]
hashed_password = data[1]
user = User.get(user_id)
#Check Password and return user or None
if user and hashed_password == user.password:
return user
return None
| benjaminhuanghuang/math_clone | app/permission_control/falsk_login_helper.py | Python | mit | 2,166 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('advertisements', '0006_auto_20151115_1408'),
]
operations = [
migrations.AlterField(
model_name='advertisement',
name='Buyer_User_ID',
field=models.ForeignKey(related_name='Buyer_User_ID', db_column=b'Buyer_User_ID', to='login.user_details'),
),
migrations.AlterField(
model_name='advertisement',
name='Seller_User_ID',
field=models.ForeignKey(related_name='Seller_User_ID', db_column=b'Seller_User_ID', to='login.user_details'),
),
migrations.AlterField(
model_name='book',
name='Product_ID',
field=models.ForeignKey(to='advertisements.category', db_column=b'Product_ID'),
),
migrations.AlterField(
model_name='category',
name='Advertisement_ID',
field=models.ForeignKey(to='advertisements.advertisement', db_column=b'Advertisement_ID'),
),
migrations.AlterField(
model_name='electronic_gadget',
name='Product_ID',
field=models.ForeignKey(to='advertisements.category', db_column=b'Product_ID'),
),
migrations.AlterField(
model_name='household_item',
name='Product_ID',
field=models.ForeignKey(to='advertisements.category', db_column=b'Product_ID'),
),
migrations.AlterField(
model_name='vehicle',
name='Product_ID',
field=models.ForeignKey(to='advertisements.category', db_column=b'Product_ID'),
),
]
| mdsafwan/Deal-My-Stuff | advertisements/migrations/0007_auto_20151115_1859.py | Python | apache-2.0 | 1,758 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
psutil test suite. Run it with:
$ make test
If you're on Python < 2.7 the unittest2 module must be installed first:
https://pypi.python.org/pypi/unittest2
"""
from __future__ import division
import ast
import atexit
import collections
import contextlib
import datetime
import errno
import functools
import json
import os
import pickle
import re
import select
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import traceback
import types
import warnings
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
try:
import ipaddress # python >= 3.3
except ImportError:
ipaddress = None
import psutil
from psutil._compat import PY3, callable, long, unicode
if sys.version_info < (2, 7):
import unittest2 as unittest # https://pypi.python.org/pypi/unittest2
else:
import unittest
if sys.version_info >= (3, 4):
import enum
else:
enum = None
# ===================================================================
# --- Constants
# ===================================================================
# conf for retry_before_failing() decorator
NO_RETRIES = 10
# bytes tolerance for OS memory related tests
TOLERANCE = 500 * 1024 # 500KB
# the timeout used in functions which have to wait
GLOBAL_TIMEOUT = 3
AF_INET6 = getattr(socket, "AF_INET6")
AF_UNIX = getattr(socket, "AF_UNIX", None)
PYTHON = os.path.realpath(sys.executable)
DEVNULL = open(os.devnull, 'r+')
TESTFN = os.path.join(os.getcwd(), "$testfile")
TESTFN_UNICODE = TESTFN + "ƒőő"
TESTFILE_PREFIX = 'psutil-test-suite-'
if not PY3:
try:
TESTFN_UNICODE = unicode(TESTFN_UNICODE, sys.getfilesystemencoding())
except UnicodeDecodeError:
TESTFN_UNICODE = TESTFN + "???"
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'examples'))
POSIX = os.name == 'posix'
WINDOWS = os.name == 'nt'
if WINDOWS:
WIN_VISTA = (6, 0, 0)
LINUX = sys.platform.startswith("linux")
OSX = sys.platform.startswith("darwin")
BSD = sys.platform.startswith("freebsd")
SUNOS = sys.platform.startswith("sunos")
VALID_PROC_STATUSES = [getattr(psutil, x) for x in dir(psutil)
if x.startswith('STATUS_')]
# whether we're running this test suite on Travis (https://travis-ci.org/)
TRAVIS = bool(os.environ.get('TRAVIS'))
if TRAVIS or 'tox' in sys.argv[0]:
import ipaddress
# ===================================================================
# --- Utility functions
# ===================================================================
def cleanup():
reap_children(search_all=True)
safe_remove(TESTFN)
try:
safe_rmdir(TESTFN_UNICODE)
except UnicodeEncodeError:
pass
for path in _testfiles:
safe_remove(path)
atexit.register(cleanup)
atexit.register(lambda: DEVNULL.close())
_subprocesses_started = set()
def get_test_subprocess(cmd=None, stdout=DEVNULL, stderr=DEVNULL,
stdin=DEVNULL, wait=False):
"""Return a subprocess.Popen object to use in tests.
By default stdout and stderr are redirected to /dev/null and the
python interpreter is used as test process.
    If 'wait' is True attempts to make sure the process is in a
reasonably initialized state.
"""
if cmd is None:
pyline = ""
if wait:
pyline += "open(r'%s', 'w'); " % TESTFN
pyline += "import time; time.sleep(60);"
cmd_ = [PYTHON, "-c", pyline]
else:
cmd_ = cmd
sproc = subprocess.Popen(cmd_, stdout=stdout, stderr=stderr, stdin=stdin)
if wait:
if cmd is None:
stop_at = time.time() + 3
while stop_at > time.time():
if os.path.exists(TESTFN):
break
time.sleep(0.001)
else:
warn("couldn't make sure test file was actually created")
else:
wait_for_pid(sproc.pid)
_subprocesses_started.add(psutil.Process(sproc.pid))
return sproc
_testfiles = []
def pyrun(src):
"""Run python code 'src' in a separate interpreter.
Return interpreter subprocess.
"""
if PY3:
src = bytes(src, 'ascii')
with tempfile.NamedTemporaryFile(
prefix=TESTFILE_PREFIX, delete=False) as f:
_testfiles.append(f.name)
f.write(src)
f.flush()
subp = get_test_subprocess([PYTHON, f.name], stdout=None,
stderr=None)
wait_for_pid(subp.pid)
return subp
def warn(msg):
"""Raise a warning msg."""
warnings.warn(msg, UserWarning)
def sh(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
"""run cmd in a subprocess and return its output.
raises RuntimeError on error.
"""
p = subprocess.Popen(cmdline, shell=True, stdout=stdout, stderr=stderr)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise RuntimeError(stderr)
if stderr:
warn(stderr)
if PY3:
stdout = str(stdout, sys.stdout.encoding)
return stdout.strip()
def which(program):
"""Same as UNIX which command. Return None on command not found."""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
if POSIX:
def get_kernel_version():
"""Return a tuple such as (2, 6, 36)."""
s = ""
uname = os.uname()[2]
for c in uname:
if c.isdigit() or c == '.':
s += c
else:
break
if not s:
raise ValueError("can't parse %r" % uname)
minor = 0
micro = 0
nums = s.split('.')
major = int(nums[0])
if len(nums) >= 2:
minor = int(nums[1])
if len(nums) >= 3:
micro = int(nums[2])
return (major, minor, micro)
if LINUX:
RLIMIT_SUPPORT = get_kernel_version() >= (2, 6, 36)
else:
RLIMIT_SUPPORT = False
def wait_for_pid(pid, timeout=GLOBAL_TIMEOUT):
"""Wait for pid to show up in the process list then return.
Used in the test suite to give time the sub process to initialize.
"""
raise_at = time.time() + timeout
while True:
if pid in psutil.pids():
# give it one more iteration to allow full initialization
time.sleep(0.01)
return
time.sleep(0.0001)
if time.time() >= raise_at:
raise RuntimeError("Timed out")
def wait_for_file(fname, timeout=GLOBAL_TIMEOUT, delete_file=True):
"""Wait for a file to be written on disk."""
    stop_at = time.time() + timeout
while time.time() < stop_at:
try:
with open(fname, "r") as f:
data = f.read()
if not data:
continue
if delete_file:
os.remove(fname)
return data
except IOError:
time.sleep(0.001)
raise RuntimeError("timed out (couldn't read file)")
def reap_children(search_all=False):
"""Kill any subprocess started by this test suite and ensure that
no zombies stick around to hog resources and create problems when
looking for refleaks.
"""
global _subprocesses_started
procs = _subprocesses_started.copy()
if search_all:
this_process = psutil.Process()
for p in this_process.children(recursive=True):
procs.add(p)
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(procs, timeout=GLOBAL_TIMEOUT)
for p in alive:
warn("couldn't terminate process %s" % p)
try:
p.kill()
except psutil.NoSuchProcess:
pass
_, alive = psutil.wait_procs(alive, timeout=GLOBAL_TIMEOUT)
if alive:
warn("couldn't not kill processes %s" % str(alive))
_subprocesses_started = set(alive)
def check_ip_address(addr, family):
"""Attempts to check IP address's validity."""
if enum and PY3:
assert isinstance(family, enum.IntEnum), family
if family == AF_INET:
octs = [int(x) for x in addr.split('.')]
assert len(octs) == 4, addr
for num in octs:
assert 0 <= num <= 255, addr
if ipaddress:
if not PY3:
addr = unicode(addr)
ipaddress.IPv4Address(addr)
elif family == AF_INET6:
assert isinstance(addr, str), addr
if ipaddress:
if not PY3:
addr = unicode(addr)
ipaddress.IPv6Address(addr)
elif family == psutil.AF_LINK:
        assert re.match(r'([a-fA-F0-9]{2}[:|\-]?){6}', addr) is not None, addr
else:
raise ValueError("unknown family %r", family)
def check_connection_ntuple(conn):
"""Check validity of a connection namedtuple."""
valid_conn_states = [getattr(psutil, x) for x in dir(psutil) if
x.startswith('CONN_')]
assert conn[0] == conn.fd
assert conn[1] == conn.family
assert conn[2] == conn.type
assert conn[3] == conn.laddr
assert conn[4] == conn.raddr
assert conn[5] == conn.status
assert conn.type in (SOCK_STREAM, SOCK_DGRAM), repr(conn.type)
assert conn.family in (AF_INET, AF_INET6, AF_UNIX), repr(conn.family)
assert conn.status in valid_conn_states, conn.status
# check IP address and port sanity
for addr in (conn.laddr, conn.raddr):
if not addr:
continue
if conn.family in (AF_INET, AF_INET6):
assert isinstance(addr, tuple), addr
ip, port = addr
assert isinstance(port, int), port
assert 0 <= port <= 65535, port
check_ip_address(ip, conn.family)
elif conn.family == AF_UNIX:
assert isinstance(addr, (str, None)), addr
else:
raise ValueError("unknown family %r", conn.family)
if conn.family in (AF_INET, AF_INET6):
# actually try to bind the local socket; ignore IPv6
# sockets as their address might be represented as
# an IPv4-mapped-address (e.g. "::127.0.0.1")
# and that's rejected by bind()
if conn.family == AF_INET:
s = socket.socket(conn.family, conn.type)
with contextlib.closing(s):
try:
s.bind((conn.laddr[0], 0))
except socket.error as err:
if err.errno != errno.EADDRNOTAVAIL:
raise
elif conn.family == AF_UNIX:
assert not conn.raddr, repr(conn.raddr)
assert conn.status == psutil.CONN_NONE, conn.status
if getattr(conn, 'fd', -1) != -1:
assert conn.fd > 0, conn
if hasattr(socket, 'fromfd') and not WINDOWS:
try:
dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
except (socket.error, OSError) as err:
if err.args[0] != errno.EBADF:
raise
else:
with contextlib.closing(dupsock):
assert dupsock.family == conn.family
assert dupsock.type == conn.type
def safe_remove(file):
"Convenience function for removing temporary test files"
try:
os.remove(file)
except OSError as err:
if err.errno != errno.ENOENT:
# file is being used by another process
if WINDOWS and isinstance(err, WindowsError) and err.errno == 13:
return
raise
def safe_rmdir(dir):
"Convenience function for removing temporary test directories"
try:
os.rmdir(dir)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def call_until(fun, expr, timeout=GLOBAL_TIMEOUT):
"""Keep calling function for timeout secs and exit if eval()
expression is True.
"""
stop_at = time.time() + timeout
while time.time() < stop_at:
ret = fun()
if eval(expr):
return ret
time.sleep(0.001)
raise RuntimeError('timed out (ret=%r)' % ret)
def retry_before_failing(ntimes=None):
"""Decorator which runs a test function and retries N times before
actually failing.
"""
def decorator(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
for x in range(ntimes or NO_RETRIES):
try:
return fun(*args, **kwargs)
except AssertionError:
pass
raise
return wrapper
return decorator
def skip_on_access_denied(only_if=None):
"""Decorator to Ignore AccessDenied exceptions."""
def decorator(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except psutil.AccessDenied:
if only_if is not None:
if not only_if:
raise
msg = "%r was skipped because it raised AccessDenied" \
% fun.__name__
raise unittest.SkipTest(msg)
return wrapper
return decorator
def skip_on_not_implemented(only_if=None):
"""Decorator to Ignore NotImplementedError exceptions."""
def decorator(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except NotImplementedError:
if only_if is not None:
if not only_if:
raise
msg = "%r was skipped because it raised NotImplementedError" \
% fun.__name__
raise unittest.SkipTest(msg)
return wrapper
return decorator
def supports_ipv6():
"""Return True if IPv6 is supported on this platform."""
if not socket.has_ipv6 or not hasattr(socket, "AF_INET6"):
return False
sock = None
try:
sock = socket.socket(AF_INET6, SOCK_STREAM)
sock.bind(("::1", 0))
except (socket.error, socket.gaierror):
return False
else:
return True
finally:
if sock is not None:
sock.close()
if WINDOWS:
def get_winver():
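        # Return a (major, minor, service pack) tuple so callers can compare
        # against constants such as WIN_VISTA.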
wv = sys.getwindowsversion()
if hasattr(wv, 'service_pack_major'): # python >= 2.7
sp = wv.service_pack_major or 0
else:
r = re.search("\s\d$", wv[4])
if r:
sp = int(r.group(0))
else:
sp = 0
return (wv[0], wv[1], sp)
class ThreadTask(threading.Thread):
"""A thread object used for running process thread tests."""
def __init__(self):
threading.Thread.__init__(self)
self._running = False
self._interval = None
self._flag = threading.Event()
def __repr__(self):
name = self.__class__.__name__
return '<%s running=%s at %#x>' % (name, self._running, id(self))
def start(self, interval=0.001):
"""Start thread and keep it running until an explicit
        stop() request. Polls for shutdown every 'interval' seconds.
"""
if self._running:
raise ValueError("already started")
self._interval = interval
threading.Thread.start(self)
self._flag.wait()
def run(self):
self._running = True
self._flag.set()
while self._running:
time.sleep(self._interval)
def stop(self):
"""Stop thread execution and and waits until it is stopped."""
if not self._running:
raise ValueError("already stopped")
self._running = False
self.join()
# ===================================================================
# --- System-related API tests
# ===================================================================
class TestSystemAPIs(unittest.TestCase):
"""Tests for system-related APIs."""
def setUp(self):
safe_remove(TESTFN)
def tearDown(self):
reap_children()
def test_process_iter(self):
self.assertIn(os.getpid(), [x.pid for x in psutil.process_iter()])
sproc = get_test_subprocess()
self.assertIn(sproc.pid, [x.pid for x in psutil.process_iter()])
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
self.assertNotIn(sproc.pid, [x.pid for x in psutil.process_iter()])
@retry_before_failing(50)
def test_process_iter_against_pids(self):
self.assertEqual(len(list(psutil.process_iter())), len(psutil.pids()))
def test_wait_procs(self):
def callback(p):
l.append(p.pid)
l = []
sproc1 = get_test_subprocess()
sproc2 = get_test_subprocess()
sproc3 = get_test_subprocess()
procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)]
self.assertRaises(ValueError, psutil.wait_procs, procs, timeout=-1)
t = time.time()
gone, alive = psutil.wait_procs(procs, timeout=0.01, callback=callback)
self.assertLess(time.time() - t, 0.5)
self.assertEqual(gone, [])
self.assertEqual(len(alive), 3)
self.assertEqual(l, [])
for p in alive:
self.assertFalse(hasattr(p, 'returncode'))
sproc3.terminate()
gone, alive = psutil.wait_procs(procs, timeout=0.03, callback=callback)
self.assertEqual(len(gone), 1)
self.assertEqual(len(alive), 2)
self.assertIn(sproc3.pid, [x.pid for x in gone])
if POSIX:
self.assertEqual(gone.pop().returncode, signal.SIGTERM)
else:
self.assertEqual(gone.pop().returncode, 1)
self.assertEqual(l, [sproc3.pid])
for p in alive:
self.assertFalse(hasattr(p, 'returncode'))
sproc1.terminate()
sproc2.terminate()
gone, alive = psutil.wait_procs(procs, timeout=0.03, callback=callback)
self.assertEqual(len(gone), 3)
self.assertEqual(len(alive), 0)
self.assertEqual(set(l), set([sproc1.pid, sproc2.pid, sproc3.pid]))
for p in gone:
self.assertTrue(hasattr(p, 'returncode'))
def test_wait_procs_no_timeout(self):
sproc1 = get_test_subprocess()
sproc2 = get_test_subprocess()
sproc3 = get_test_subprocess()
procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)]
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs)
def test_boot_time(self):
bt = psutil.boot_time()
self.assertIsInstance(bt, float)
self.assertGreater(bt, 0)
self.assertLess(bt, time.time())
@unittest.skipUnless(POSIX, 'posix only')
def test_PAGESIZE(self):
# pagesize is used internally to perform different calculations
# and it's determined by using SC_PAGE_SIZE; make sure
# getpagesize() returns the same value.
import resource
self.assertEqual(os.sysconf("SC_PAGE_SIZE"), resource.getpagesize())
def test_virtual_memory(self):
mem = psutil.virtual_memory()
assert mem.total > 0, mem
assert mem.available > 0, mem
assert 0 <= mem.percent <= 100, mem
assert mem.used > 0, mem
assert mem.free >= 0, mem
for name in mem._fields:
value = getattr(mem, name)
if name != 'percent':
self.assertIsInstance(value, (int, long))
if name != 'total':
if not value >= 0:
self.fail("%r < 0 (%s)" % (name, value))
if value > mem.total:
self.fail("%r > total (total=%s, %s=%s)"
% (name, mem.total, name, value))
def test_swap_memory(self):
mem = psutil.swap_memory()
assert mem.total >= 0, mem
assert mem.used >= 0, mem
        if mem.total > 0:
            assert mem.free > 0, mem
        else:
            # likely a system with no swap partition
            assert mem.free == 0, mem
assert 0 <= mem.percent <= 100, mem
assert mem.sin >= 0, mem
assert mem.sout >= 0, mem
def test_pid_exists(self):
sproc = get_test_subprocess(wait=True)
self.assertTrue(psutil.pid_exists(sproc.pid))
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
self.assertFalse(psutil.pid_exists(sproc.pid))
self.assertFalse(psutil.pid_exists(-1))
self.assertEqual(psutil.pid_exists(0), 0 in psutil.pids())
def test_pid_exists_2(self):
reap_children()
pids = psutil.pids()
for pid in pids:
try:
assert psutil.pid_exists(pid)
except AssertionError:
# in case the process disappeared in meantime fail only
# if it is no longer in psutil.pids()
time.sleep(.1)
if pid in psutil.pids():
self.fail(pid)
pids = range(max(pids) + 5000, max(pids) + 6000)
for pid in pids:
self.assertFalse(psutil.pid_exists(pid), msg=pid)
def test_pids(self):
plist = [x.pid for x in psutil.process_iter()]
pidlist = psutil.pids()
self.assertEqual(plist.sort(), pidlist.sort())
# make sure every pid is unique
self.assertEqual(len(pidlist), len(set(pidlist)))
def test_test(self):
# test for psutil.test() function
stdout = sys.stdout
sys.stdout = DEVNULL
try:
psutil.test()
finally:
sys.stdout = stdout
def test_cpu_count(self):
logical = psutil.cpu_count()
self.assertEqual(logical, len(psutil.cpu_times(percpu=True)))
self.assertGreaterEqual(logical, 1)
#
with open("/proc/cpuinfo") as fd:
cpuinfo_data = fd.read()
if "physical id" not in cpuinfo_data:
raise unittest.SkipTest("cpuinfo doesn't include physical id")
physical = psutil.cpu_count(logical=False)
self.assertGreaterEqual(physical, 1)
self.assertGreaterEqual(logical, physical)
def test_sys_cpu_times(self):
total = 0
times = psutil.cpu_times()
sum(times)
for cp_time in times:
self.assertIsInstance(cp_time, float)
self.assertGreaterEqual(cp_time, 0.0)
total += cp_time
self.assertEqual(total, sum(times))
str(times)
if not WINDOWS:
# CPU times are always supposed to increase over time or
# remain the same but never go backwards, see:
# https://github.com/giampaolo/psutil/issues/392
last = psutil.cpu_times()
for x in range(100):
new = psutil.cpu_times()
for field in new._fields:
new_t = getattr(new, field)
last_t = getattr(last, field)
self.assertGreaterEqual(new_t, last_t,
msg="%s %s" % (new_t, last_t))
last = new
def test_sys_cpu_times2(self):
t1 = sum(psutil.cpu_times())
time.sleep(0.1)
t2 = sum(psutil.cpu_times())
difference = t2 - t1
if not difference >= 0.05:
self.fail("difference %s" % difference)
def test_sys_per_cpu_times(self):
for times in psutil.cpu_times(percpu=True):
total = 0
sum(times)
for cp_time in times:
self.assertIsInstance(cp_time, float)
self.assertGreaterEqual(cp_time, 0.0)
total += cp_time
self.assertEqual(total, sum(times))
str(times)
self.assertEqual(len(psutil.cpu_times(percpu=True)[0]),
len(psutil.cpu_times(percpu=False)))
if not WINDOWS:
# CPU times are always supposed to increase over time or
# remain the same but never go backwards, see:
# https://github.com/giampaolo/psutil/issues/392
last = psutil.cpu_times(percpu=True)
for x in range(100):
new = psutil.cpu_times(percpu=True)
for index in range(len(new)):
newcpu = new[index]
lastcpu = last[index]
for field in newcpu._fields:
new_t = getattr(newcpu, field)
last_t = getattr(lastcpu, field)
self.assertGreaterEqual(
new_t, last_t, msg="%s %s" % (lastcpu, newcpu))
last = new
def test_sys_per_cpu_times2(self):
tot1 = psutil.cpu_times(percpu=True)
stop_at = time.time() + 0.1
while True:
if time.time() >= stop_at:
break
tot2 = psutil.cpu_times(percpu=True)
for t1, t2 in zip(tot1, tot2):
t1, t2 = sum(t1), sum(t2)
difference = t2 - t1
if difference >= 0.05:
return
self.fail()
def _test_cpu_percent(self, percent):
self.assertIsInstance(percent, float)
self.assertGreaterEqual(percent, 0.0)
self.assertLessEqual(percent, 100.0 * psutil.cpu_count())
def test_sys_cpu_percent(self):
psutil.cpu_percent(interval=0.001)
for x in range(100):
self._test_cpu_percent(psutil.cpu_percent(interval=None))
def test_sys_per_cpu_percent(self):
self.assertEqual(len(psutil.cpu_percent(interval=0.001, percpu=True)),
psutil.cpu_count())
for x in range(100):
percents = psutil.cpu_percent(interval=None, percpu=True)
for percent in percents:
self._test_cpu_percent(percent)
def test_sys_cpu_times_percent(self):
psutil.cpu_times_percent(interval=0.001)
for x in range(100):
cpu = psutil.cpu_times_percent(interval=None)
for percent in cpu:
self._test_cpu_percent(percent)
self._test_cpu_percent(sum(cpu))
def test_sys_per_cpu_times_percent(self):
self.assertEqual(len(psutil.cpu_times_percent(interval=0.001,
percpu=True)),
psutil.cpu_count())
for x in range(100):
cpus = psutil.cpu_times_percent(interval=None, percpu=True)
for cpu in cpus:
for percent in cpu:
self._test_cpu_percent(percent)
self._test_cpu_percent(sum(cpu))
@unittest.skipIf(POSIX and not hasattr(os, 'statvfs'),
"os.statvfs() function not available on this platform")
def test_disk_usage(self):
usage = psutil.disk_usage(os.getcwd())
assert usage.total > 0, usage
assert usage.used > 0, usage
assert usage.free > 0, usage
assert usage.total > usage.used, usage
assert usage.total > usage.free, usage
assert 0 <= usage.percent <= 100, usage.percent
if hasattr(shutil, 'disk_usage'):
# py >= 3.3, see: http://bugs.python.org/issue12442
shutil_usage = shutil.disk_usage(os.getcwd())
tolerance = 5 * 1024 * 1024 # 5MB
self.assertEqual(usage.total, shutil_usage.total)
self.assertAlmostEqual(usage.free, shutil_usage.free,
delta=tolerance)
self.assertAlmostEqual(usage.used, shutil_usage.used,
delta=tolerance)
# if path does not exist OSError ENOENT is expected across
# all platforms
fname = tempfile.mktemp()
try:
psutil.disk_usage(fname)
except OSError as err:
if err.args[0] != errno.ENOENT:
raise
else:
self.fail("OSError not raised")
@unittest.skipIf(POSIX and not hasattr(os, 'statvfs'),
"os.statvfs() function not available on this platform")
def test_disk_usage_unicode(self):
# see: https://github.com/giampaolo/psutil/issues/416
# XXX this test is not really reliable as it always fails on
# Python 3.X (2.X is fine)
try:
safe_rmdir(TESTFN_UNICODE)
os.mkdir(TESTFN_UNICODE)
psutil.disk_usage(TESTFN_UNICODE)
safe_rmdir(TESTFN_UNICODE)
except UnicodeEncodeError:
pass
@unittest.skipIf(POSIX and not hasattr(os, 'statvfs'),
"os.statvfs() function not available on this platform")
@unittest.skipIf(LINUX and TRAVIS, "unknown failure on travis")
def test_disk_partitions(self):
# all = False
ls = psutil.disk_partitions(all=False)
# on travis we get:
# self.assertEqual(p.cpu_affinity(), [n])
# AssertionError: Lists differ: [0, 1, 2, 3, 4, 5, 6, 7,... != [0]
self.assertTrue(ls, msg=ls)
for disk in ls:
if WINDOWS and 'cdrom' in disk.opts:
continue
if not POSIX:
assert os.path.exists(disk.device), disk
else:
# we cannot make any assumption about this, see:
# http://goo.gl/p9c43
disk.device
if SUNOS:
# on solaris apparently mount points can also be files
assert os.path.exists(disk.mountpoint), disk
else:
assert os.path.isdir(disk.mountpoint), disk
assert disk.fstype, disk
self.assertIsInstance(disk.opts, str)
# all = True
ls = psutil.disk_partitions(all=True)
self.assertTrue(ls, msg=ls)
for disk in psutil.disk_partitions(all=True):
if not WINDOWS:
try:
os.stat(disk.mountpoint)
except OSError as err:
# http://mail.python.org/pipermail/python-dev/
# 2012-June/120787.html
if err.errno not in (errno.EPERM, errno.EACCES):
raise
else:
if SUNOS:
# on solaris apparently mount points can also be files
assert os.path.exists(disk.mountpoint), disk
else:
assert os.path.isdir(disk.mountpoint), disk
self.assertIsInstance(disk.fstype, str)
self.assertIsInstance(disk.opts, str)
def find_mount_point(path):
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
mount = find_mount_point(__file__)
mounts = [x.mountpoint for x in psutil.disk_partitions(all=True)]
self.assertIn(mount, mounts)
psutil.disk_usage(mount)
@skip_on_access_denied()
def test_net_connections(self):
def check(cons, families, types_):
for conn in cons:
self.assertIn(conn.family, families, msg=conn)
if conn.family != getattr(socket, 'AF_UNIX', object()):
self.assertIn(conn.type, types_, msg=conn)
from psutil._common import conn_tmap
for kind, groups in conn_tmap.items():
if SUNOS and kind == 'unix':
continue
families, types_ = groups
cons = psutil.net_connections(kind)
self.assertEqual(len(cons), len(set(cons)))
check(cons, families, types_)
def test_net_io_counters(self):
def check_ntuple(nt):
self.assertEqual(nt[0], nt.bytes_sent)
self.assertEqual(nt[1], nt.bytes_recv)
self.assertEqual(nt[2], nt.packets_sent)
self.assertEqual(nt[3], nt.packets_recv)
self.assertEqual(nt[4], nt.errin)
self.assertEqual(nt[5], nt.errout)
self.assertEqual(nt[6], nt.dropin)
self.assertEqual(nt[7], nt.dropout)
assert nt.bytes_sent >= 0, nt
assert nt.bytes_recv >= 0, nt
assert nt.packets_sent >= 0, nt
assert nt.packets_recv >= 0, nt
assert nt.errin >= 0, nt
assert nt.errout >= 0, nt
assert nt.dropin >= 0, nt
assert nt.dropout >= 0, nt
ret = psutil.net_io_counters(pernic=False)
check_ntuple(ret)
ret = psutil.net_io_counters(pernic=True)
self.assertNotEqual(ret, [])
for key in ret:
self.assertTrue(key)
check_ntuple(ret[key])
def test_net_if_addrs(self):
nics = psutil.net_if_addrs()
assert nics, nics
# Not reliable on all platforms (net_if_addrs() reports more
# interfaces).
# self.assertEqual(sorted(nics.keys()),
# sorted(psutil.net_io_counters(pernic=True).keys()))
families = set([socket.AF_INET, AF_INET6, psutil.AF_LINK])
for nic, addrs in nics.items():
self.assertEqual(len(set(addrs)), len(addrs))
for addr in addrs:
self.assertIsInstance(addr.family, int)
self.assertIsInstance(addr.address, str)
self.assertIsInstance(addr.netmask, (str, type(None)))
self.assertIsInstance(addr.broadcast, (str, type(None)))
self.assertIn(addr.family, families)
if sys.version_info >= (3, 4):
self.assertIsInstance(addr.family, enum.IntEnum)
if addr.family == socket.AF_INET:
s = socket.socket(addr.family)
with contextlib.closing(s):
s.bind((addr.address, 0))
elif addr.family == socket.AF_INET6:
info = socket.getaddrinfo(
addr.address, 0, socket.AF_INET6, socket.SOCK_STREAM,
0, socket.AI_PASSIVE)[0]
af, socktype, proto, canonname, sa = info
s = socket.socket(af, socktype, proto)
with contextlib.closing(s):
s.bind(sa)
for ip in (addr.address, addr.netmask, addr.broadcast):
if ip is not None:
# TODO: skip AF_INET6 for now because I get:
# AddressValueError: Only hex digits permitted in
# u'c6f3%lxcbr0' in u'fe80::c8e0:fff:fe54:c6f3%lxcbr0'
if addr.family != AF_INET6:
check_ip_address(ip, addr.family)
if BSD or OSX or SUNOS:
if hasattr(socket, "AF_LINK"):
self.assertEqual(psutil.AF_LINK, socket.AF_LINK)
elif LINUX:
self.assertEqual(psutil.AF_LINK, socket.AF_PACKET)
elif WINDOWS:
self.assertEqual(psutil.AF_LINK, -1)
@unittest.skipIf(TRAVIS, "EPERM on travis")
def test_net_if_stats(self):
nics = psutil.net_if_stats()
assert nics, nics
all_duplexes = (psutil.NIC_DUPLEX_FULL,
psutil.NIC_DUPLEX_HALF,
psutil.NIC_DUPLEX_UNKNOWN)
for nic, stats in nics.items():
isup, duplex, speed, mtu = stats
self.assertIsInstance(isup, bool)
self.assertIn(duplex, all_duplexes)
self.assertGreaterEqual(speed, 0)
self.assertGreaterEqual(mtu, 0)
@unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
'/proc/diskstats not available on this linux version')
def test_disk_io_counters(self):
def check_ntuple(nt):
self.assertEqual(nt[0], nt.read_count)
self.assertEqual(nt[1], nt.write_count)
self.assertEqual(nt[2], nt.read_bytes)
self.assertEqual(nt[3], nt.write_bytes)
self.assertEqual(nt[4], nt.read_time)
self.assertEqual(nt[5], nt.write_time)
assert nt.read_count >= 0, nt
assert nt.write_count >= 0, nt
assert nt.read_bytes >= 0, nt
assert nt.write_bytes >= 0, nt
assert nt.read_time >= 0, nt
assert nt.write_time >= 0, nt
ret = psutil.disk_io_counters(perdisk=False)
check_ntuple(ret)
ret = psutil.disk_io_counters(perdisk=True)
# make sure there are no duplicates
self.assertEqual(len(ret), len(set(ret)))
for key in ret:
assert key, key
check_ntuple(ret[key])
if LINUX and key[-1].isdigit():
# if 'sda1' is listed 'sda' shouldn't, see:
# https://github.com/giampaolo/psutil/issues/338
while key[-1].isdigit():
key = key[:-1]
self.assertNotIn(key, ret.keys())
def test_users(self):
users = psutil.users()
self.assertNotEqual(users, [])
for user in users:
assert user.name, user
user.terminal
user.host
assert user.started > 0.0, user
datetime.datetime.fromtimestamp(user.started)
# ===================================================================
# --- psutil.Process class tests
# ===================================================================
class TestProcess(unittest.TestCase):
"""Tests for psutil.Process class."""
def setUp(self):
safe_remove(TESTFN)
def tearDown(self):
reap_children()
def test_pid(self):
self.assertEqual(psutil.Process().pid, os.getpid())
sproc = get_test_subprocess()
self.assertEqual(psutil.Process(sproc.pid).pid, sproc.pid)
def test_kill(self):
sproc = get_test_subprocess(wait=True)
test_pid = sproc.pid
p = psutil.Process(test_pid)
p.kill()
sig = p.wait()
self.assertFalse(psutil.pid_exists(test_pid))
if POSIX:
self.assertEqual(sig, signal.SIGKILL)
def test_terminate(self):
sproc = get_test_subprocess(wait=True)
test_pid = sproc.pid
p = psutil.Process(test_pid)
p.terminate()
sig = p.wait()
self.assertFalse(psutil.pid_exists(test_pid))
if POSIX:
self.assertEqual(sig, signal.SIGTERM)
def test_send_signal(self):
sig = signal.SIGKILL if POSIX else signal.SIGTERM
sproc = get_test_subprocess()
test_pid = sproc.pid
p = psutil.Process(test_pid)
p.send_signal(sig)
exit_sig = p.wait()
self.assertFalse(psutil.pid_exists(test_pid))
if POSIX:
self.assertEqual(exit_sig, sig)
def test_wait(self):
# check exit code signal
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
code = p.wait()
if POSIX:
self.assertEqual(code, signal.SIGKILL)
else:
self.assertEqual(code, 0)
self.assertFalse(p.is_running())
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.terminate()
code = p.wait()
if POSIX:
self.assertEqual(code, signal.SIGTERM)
else:
self.assertEqual(code, 0)
self.assertFalse(p.is_running())
# check sys.exit() code
code = "import time, sys; time.sleep(0.01); sys.exit(5);"
sproc = get_test_subprocess([PYTHON, "-c", code])
p = psutil.Process(sproc.pid)
self.assertEqual(p.wait(), 5)
self.assertFalse(p.is_running())
# Test wait() issued twice.
# It is not supposed to raise NSP when the process is gone.
# On UNIX this should return None, on Windows it should keep
# returning the exit code.
sproc = get_test_subprocess([PYTHON, "-c", code])
p = psutil.Process(sproc.pid)
self.assertEqual(p.wait(), 5)
self.assertIn(p.wait(), (5, None))
# test timeout
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.name()
self.assertRaises(psutil.TimeoutExpired, p.wait, 0.01)
# timeout < 0 not allowed
self.assertRaises(ValueError, p.wait, -1)
# XXX why is this skipped on Windows?
@unittest.skipUnless(POSIX, 'skipped on Windows')
def test_wait_non_children(self):
# test wait() against processes which are not our children
code = "import sys;"
code += "from subprocess import Popen, PIPE;"
code += "cmd = ['%s', '-c', 'import time; time.sleep(60)'];" % PYTHON
code += "sp = Popen(cmd, stdout=PIPE);"
code += "sys.stdout.write(str(sp.pid));"
sproc = get_test_subprocess([PYTHON, "-c", code],
stdout=subprocess.PIPE)
grandson_pid = int(sproc.stdout.read())
grandson_proc = psutil.Process(grandson_pid)
try:
self.assertRaises(psutil.TimeoutExpired, grandson_proc.wait, 0.01)
grandson_proc.kill()
ret = grandson_proc.wait()
self.assertEqual(ret, None)
finally:
if grandson_proc.is_running():
grandson_proc.kill()
grandson_proc.wait()
def test_wait_timeout_0(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertRaises(psutil.TimeoutExpired, p.wait, 0)
p.kill()
stop_at = time.time() + 2
while True:
try:
code = p.wait(0)
except psutil.TimeoutExpired:
if time.time() >= stop_at:
raise
else:
break
if POSIX:
self.assertEqual(code, signal.SIGKILL)
else:
self.assertEqual(code, 0)
self.assertFalse(p.is_running())
def test_cpu_percent(self):
p = psutil.Process()
p.cpu_percent(interval=0.001)
p.cpu_percent(interval=0.001)
for x in range(100):
percent = p.cpu_percent(interval=None)
self.assertIsInstance(percent, float)
self.assertGreaterEqual(percent, 0.0)
if not POSIX:
self.assertLessEqual(percent, 100.0)
else:
self.assertGreaterEqual(percent, 0.0)
def test_cpu_times(self):
times = psutil.Process().cpu_times()
assert (times.user > 0.0) or (times.system > 0.0), times
# make sure returned values can be pretty printed with strftime
time.strftime("%H:%M:%S", time.localtime(times.user))
time.strftime("%H:%M:%S", time.localtime(times.system))
# Test Process.cpu_times() against os.times()
# os.times() is broken on Python 2.6
# http://bugs.python.org/issue1040026
# XXX fails on OSX: not sure if it's for os.times(). We should
# try this with Python 2.7 and re-enable the test.
@unittest.skipUnless(sys.version_info > (2, 6, 1) and not OSX,
'os.times() is not reliable on this Python version')
def test_cpu_times2(self):
user_time, kernel_time = psutil.Process().cpu_times()
utime, ktime = os.times()[:2]
# Use os.times()[:2] as base values to compare our results
# using a tolerance of +/- 0.1 seconds.
# It will fail if the difference between the values is > 0.1s.
if (max([user_time, utime]) - min([user_time, utime])) > 0.1:
self.fail("expected: %s, found: %s" % (utime, user_time))
if (max([kernel_time, ktime]) - min([kernel_time, ktime])) > 0.1:
self.fail("expected: %s, found: %s" % (ktime, kernel_time))
def test_create_time(self):
sproc = get_test_subprocess(wait=True)
now = time.time()
p = psutil.Process(sproc.pid)
create_time = p.create_time()
# Use time.time() as base value to compare our result using a
# tolerance of +/- 1 second.
# It will fail if the difference between the values is > 2s.
difference = abs(create_time - now)
if difference > 2:
self.fail("expected: %s, found: %s, difference: %s"
% (now, create_time, difference))
# make sure returned value can be pretty printed with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(p.create_time()))
    @unittest.skipIf(WINDOWS, 'not available on Windows')
def test_terminal(self):
terminal = psutil.Process().terminal()
if sys.stdin.isatty():
self.assertEqual(terminal, sh('tty'))
else:
assert terminal, repr(terminal)
@unittest.skipUnless(LINUX or BSD or WINDOWS,
'not available on this platform')
@skip_on_not_implemented(only_if=LINUX)
def test_io_counters(self):
p = psutil.Process()
# test reads
io1 = p.io_counters()
with open(PYTHON, 'rb') as f:
f.read()
io2 = p.io_counters()
if not BSD:
assert io2.read_count > io1.read_count, (io1, io2)
self.assertEqual(io2.write_count, io1.write_count)
assert io2.read_bytes >= io1.read_bytes, (io1, io2)
assert io2.write_bytes >= io1.write_bytes, (io1, io2)
# test writes
io1 = p.io_counters()
with tempfile.TemporaryFile(prefix=TESTFILE_PREFIX) as f:
if PY3:
f.write(bytes("x" * 1000000, 'ascii'))
else:
f.write("x" * 1000000)
io2 = p.io_counters()
assert io2.write_count >= io1.write_count, (io1, io2)
assert io2.write_bytes >= io1.write_bytes, (io1, io2)
assert io2.read_count >= io1.read_count, (io1, io2)
assert io2.read_bytes >= io1.read_bytes, (io1, io2)
@unittest.skipUnless(LINUX or (WINDOWS and get_winver() >= WIN_VISTA),
'Linux and Windows Vista only')
@unittest.skipIf(LINUX and TRAVIS, "unknown failure on travis")
def test_ionice(self):
if LINUX:
from psutil import (IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE)
self.assertEqual(IOPRIO_CLASS_NONE, 0)
self.assertEqual(IOPRIO_CLASS_RT, 1)
self.assertEqual(IOPRIO_CLASS_BE, 2)
self.assertEqual(IOPRIO_CLASS_IDLE, 3)
p = psutil.Process()
try:
p.ionice(2)
ioclass, value = p.ionice()
if enum is not None:
self.assertIsInstance(ioclass, enum.IntEnum)
self.assertEqual(ioclass, 2)
self.assertEqual(value, 4)
#
p.ionice(3)
ioclass, value = p.ionice()
self.assertEqual(ioclass, 3)
self.assertEqual(value, 0)
#
p.ionice(2, 0)
ioclass, value = p.ionice()
self.assertEqual(ioclass, 2)
self.assertEqual(value, 0)
p.ionice(2, 7)
ioclass, value = p.ionice()
self.assertEqual(ioclass, 2)
self.assertEqual(value, 7)
self.assertRaises(ValueError, p.ionice, 2, 10)
finally:
p.ionice(IOPRIO_CLASS_NONE)
else:
p = psutil.Process()
original = p.ionice()
self.assertIsInstance(original, int)
try:
value = 0 # very low
if original == value:
value = 1 # low
p.ionice(value)
self.assertEqual(p.ionice(), value)
finally:
p.ionice(original)
#
self.assertRaises(ValueError, p.ionice, 3)
self.assertRaises(TypeError, p.ionice, 2, 1)
@unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
"only available on Linux >= 2.6.36")
def test_rlimit_get(self):
import resource
p = psutil.Process(os.getpid())
names = [x for x in dir(psutil) if x.startswith('RLIMIT')]
assert names, names
for name in names:
value = getattr(psutil, name)
self.assertGreaterEqual(value, 0)
if name in dir(resource):
self.assertEqual(value, getattr(resource, name))
self.assertEqual(p.rlimit(value), resource.getrlimit(value))
else:
ret = p.rlimit(value)
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
@unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
"only available on Linux >= 2.6.36")
def test_rlimit_set(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.rlimit(psutil.RLIMIT_NOFILE, (5, 5))
self.assertEqual(p.rlimit(psutil.RLIMIT_NOFILE), (5, 5))
def test_num_threads(self):
# on certain platforms such as Linux we might test for exact
        # thread number, since we always have 1 thread per process,
# but this does not apply across all platforms (OSX, Windows)
p = psutil.Process()
step1 = p.num_threads()
thread = ThreadTask()
thread.start()
try:
step2 = p.num_threads()
self.assertEqual(step2, step1 + 1)
thread.stop()
finally:
if thread._running:
thread.stop()
@unittest.skipUnless(WINDOWS, 'Windows only')
def test_num_handles(self):
        # a better test is done later in test/_windows.py
p = psutil.Process()
self.assertGreater(p.num_handles(), 0)
def test_threads(self):
p = psutil.Process()
step1 = p.threads()
thread = ThreadTask()
thread.start()
try:
step2 = p.threads()
self.assertEqual(len(step2), len(step1) + 1)
# on Linux, first thread id is supposed to be this process
if LINUX:
self.assertEqual(step2[0].id, os.getpid())
athread = step2[0]
# test named tuple
self.assertEqual(athread.id, athread[0])
self.assertEqual(athread.user_time, athread[1])
self.assertEqual(athread.system_time, athread[2])
# test num threads
thread.stop()
finally:
if thread._running:
thread.stop()
def test_memory_info(self):
p = psutil.Process()
# step 1 - get a base value to compare our results
rss1, vms1 = p.memory_info()
percent1 = p.memory_percent()
self.assertGreater(rss1, 0)
self.assertGreater(vms1, 0)
# step 2 - allocate some memory
memarr = [None] * 1500000
rss2, vms2 = p.memory_info()
percent2 = p.memory_percent()
# make sure that the memory usage bumped up
self.assertGreater(rss2, rss1)
self.assertGreaterEqual(vms2, vms1) # vms might be equal
self.assertGreater(percent2, percent1)
del memarr
# def test_memory_info_ex(self):
# # tested later in fetch all test suite
def test_memory_maps(self):
p = psutil.Process()
maps = p.memory_maps()
paths = [x for x in maps]
self.assertEqual(len(paths), len(set(paths)))
ext_maps = p.memory_maps(grouped=False)
for nt in maps:
if not nt.path.startswith('['):
assert os.path.isabs(nt.path), nt.path
if POSIX:
assert os.path.exists(nt.path), nt.path
else:
# XXX - On Windows we have this strange behavior with
# 64 bit dlls: they are visible via explorer but cannot
# be accessed via os.stat() (wtf?).
if '64' not in os.path.basename(nt.path):
assert os.path.exists(nt.path), nt.path
for nt in ext_maps:
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
continue
elif fname in ('addr', 'perms'):
assert value, value
else:
self.assertIsInstance(value, (int, long))
assert value >= 0, value
def test_memory_percent(self):
p = psutil.Process()
self.assertGreater(p.memory_percent(), 0.0)
def test_is_running(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
assert p.is_running()
assert p.is_running()
p.kill()
p.wait()
assert not p.is_running()
assert not p.is_running()
def test_exe(self):
sproc = get_test_subprocess(wait=True)
exe = psutil.Process(sproc.pid).exe()
try:
self.assertEqual(exe, PYTHON)
except AssertionError:
if WINDOWS and len(exe) == len(PYTHON):
# on Windows we don't care about case sensitivity
self.assertEqual(exe.lower(), PYTHON.lower())
else:
# certain platforms such as BSD are more accurate returning:
# "/usr/local/bin/python2.7"
# ...instead of:
# "/usr/local/bin/python"
# We do not want to consider this difference in accuracy
# an error.
ver = "%s.%s" % (sys.version_info[0], sys.version_info[1])
self.assertEqual(exe.replace(ver, ''), PYTHON.replace(ver, ''))
def test_cmdline(self):
cmdline = [PYTHON, "-c", "import time; time.sleep(60)"]
sproc = get_test_subprocess(cmdline, wait=True)
self.assertEqual(' '.join(psutil.Process(sproc.pid).cmdline()),
' '.join(cmdline))
def test_name(self):
sproc = get_test_subprocess(PYTHON, wait=True)
name = psutil.Process(sproc.pid).name().lower()
pyexe = os.path.basename(os.path.realpath(sys.executable)).lower()
assert pyexe.startswith(name), (pyexe, name)
@unittest.skipUnless(POSIX, 'posix only')
def test_uids(self):
p = psutil.Process()
real, effective, saved = p.uids()
# os.getuid() refers to "real" uid
self.assertEqual(real, os.getuid())
# os.geteuid() refers to "effective" uid
self.assertEqual(effective, os.geteuid())
# no such thing as os.getsuid() ("saved" uid), but starting
# from python 2.7 we have os.getresuid()[2]
if hasattr(os, "getresuid"):
self.assertEqual(saved, os.getresuid()[2])
@unittest.skipUnless(POSIX, 'posix only')
def test_gids(self):
p = psutil.Process()
real, effective, saved = p.gids()
        # os.getgid() refers to the "real" gid
        self.assertEqual(real, os.getgid())
        # os.getegid() refers to the "effective" gid
        self.assertEqual(effective, os.getegid())
        # no such thing as os.getsgid() ("saved" gid), but starting
        # from python 2.7 we have os.getresgid()[2]
        if hasattr(os, "getresgid"):
self.assertEqual(saved, os.getresgid()[2])
def test_nice(self):
p = psutil.Process()
self.assertRaises(TypeError, p.nice, "str")
if WINDOWS:
try:
init = p.nice()
if sys.version_info > (3, 4):
self.assertIsInstance(init, enum.IntEnum)
else:
self.assertIsInstance(init, int)
self.assertEqual(init, psutil.NORMAL_PRIORITY_CLASS)
p.nice(psutil.HIGH_PRIORITY_CLASS)
self.assertEqual(p.nice(), psutil.HIGH_PRIORITY_CLASS)
p.nice(psutil.NORMAL_PRIORITY_CLASS)
self.assertEqual(p.nice(), psutil.NORMAL_PRIORITY_CLASS)
finally:
p.nice(psutil.NORMAL_PRIORITY_CLASS)
else:
try:
first_nice = p.nice()
p.nice(1)
self.assertEqual(p.nice(), 1)
# going back to previous nice value raises
# AccessDenied on OSX
if not OSX:
p.nice(0)
self.assertEqual(p.nice(), 0)
except psutil.AccessDenied:
pass
finally:
try:
p.nice(first_nice)
except psutil.AccessDenied:
pass
def test_status(self):
p = psutil.Process()
self.assertEqual(p.status(), psutil.STATUS_RUNNING)
def test_username(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
if POSIX:
import pwd
self.assertEqual(p.username(), pwd.getpwuid(os.getuid()).pw_name)
elif WINDOWS and 'USERNAME' in os.environ:
expected_username = os.environ['USERNAME']
expected_domain = os.environ['USERDOMAIN']
domain, username = p.username().split('\\')
self.assertEqual(domain, expected_domain)
self.assertEqual(username, expected_username)
else:
p.username()
def test_cwd(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
self.assertEqual(p.cwd(), os.getcwd())
def test_cwd_2(self):
cmd = [PYTHON, "-c", "import os, time; os.chdir('..'); time.sleep(60)"]
sproc = get_test_subprocess(cmd, wait=True)
p = psutil.Process(sproc.pid)
call_until(p.cwd, "ret == os.path.dirname(os.getcwd())")
@unittest.skipUnless(WINDOWS or LINUX or BSD,
'not available on this platform')
@unittest.skipIf(LINUX and TRAVIS, "unknown failure on travis")
def test_cpu_affinity(self):
p = psutil.Process()
initial = p.cpu_affinity()
if hasattr(os, "sched_getaffinity"):
self.assertEqual(initial, list(os.sched_getaffinity(p.pid)))
self.assertEqual(len(initial), len(set(initial)))
all_cpus = list(range(len(psutil.cpu_percent(percpu=True))))
# setting on travis doesn't seem to work (always return all
# CPUs on get):
# AssertionError: Lists differ: [0, 1, 2, 3, 4, 5, 6, ... != [0]
for n in all_cpus:
p.cpu_affinity([n])
self.assertEqual(p.cpu_affinity(), [n])
if hasattr(os, "sched_getaffinity"):
self.assertEqual(p.cpu_affinity(),
list(os.sched_getaffinity(p.pid)))
#
p.cpu_affinity(all_cpus)
self.assertEqual(p.cpu_affinity(), all_cpus)
if hasattr(os, "sched_getaffinity"):
self.assertEqual(p.cpu_affinity(),
list(os.sched_getaffinity(p.pid)))
#
self.assertRaises(TypeError, p.cpu_affinity, 1)
p.cpu_affinity(initial)
# it should work with all iterables, not only lists
p.cpu_affinity(set(all_cpus))
p.cpu_affinity(tuple(all_cpus))
invalid_cpu = [len(psutil.cpu_times(percpu=True)) + 10]
self.assertRaises(ValueError, p.cpu_affinity, invalid_cpu)
self.assertRaises(ValueError, p.cpu_affinity, range(10000, 11000))
self.assertRaises(TypeError, p.cpu_affinity, [0, "1"])
# TODO
@unittest.skipIf(BSD, "broken on BSD, see #595")
def test_open_files(self):
# current process
p = psutil.Process()
files = p.open_files()
self.assertFalse(TESTFN in files)
with open(TESTFN, 'w'):
call_until(p.open_files, "len(ret) != %i" % len(files))
filenames = [x.path for x in p.open_files()]
self.assertIn(TESTFN, filenames)
for file in filenames:
assert os.path.isfile(file), file
# another process
cmdline = "import time; f = open(r'%s', 'r'); time.sleep(60);" % TESTFN
sproc = get_test_subprocess([PYTHON, "-c", cmdline], wait=True)
p = psutil.Process(sproc.pid)
for x in range(100):
filenames = [x.path for x in p.open_files()]
if TESTFN in filenames:
break
time.sleep(.01)
else:
self.assertIn(TESTFN, filenames)
for file in filenames:
assert os.path.isfile(file), file
# TODO
@unittest.skipIf(BSD, "broken on BSD, see #595")
def test_open_files2(self):
# test fd and path fields
with open(TESTFN, 'w') as fileobj:
p = psutil.Process()
for path, fd in p.open_files():
if path == fileobj.name or fd == fileobj.fileno():
break
else:
self.fail("no file found; files=%s" % repr(p.open_files()))
self.assertEqual(path, fileobj.name)
if WINDOWS:
self.assertEqual(fd, -1)
else:
self.assertEqual(fd, fileobj.fileno())
# test positions
ntuple = p.open_files()[0]
self.assertEqual(ntuple[0], ntuple.path)
self.assertEqual(ntuple[1], ntuple.fd)
# test file is gone
self.assertTrue(fileobj.name not in p.open_files())
def compare_proc_sys_cons(self, pid, proc_cons):
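        # Cross-check the connections collected from a single Process
        # against the system-wide psutil.net_connections() entries that
        # carry the same pid (on BSD per-process fds are normalized to -1).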
from psutil._common import pconn
sys_cons = []
for c in psutil.net_connections(kind='all'):
if c.pid == pid:
sys_cons.append(pconn(*c[:-1]))
if BSD:
# on BSD all fds are set to -1
proc_cons = [pconn(*[-1] + list(x[1:])) for x in proc_cons]
self.assertEqual(sorted(proc_cons), sorted(sys_cons))
@skip_on_access_denied(only_if=OSX)
def test_connections(self):
def check_conn(proc, conn, family, type, laddr, raddr, status, kinds):
all_kinds = ("all", "inet", "inet4", "inet6", "tcp", "tcp4",
"tcp6", "udp", "udp4", "udp6")
check_connection_ntuple(conn)
self.assertEqual(conn.family, family)
self.assertEqual(conn.type, type)
self.assertEqual(conn.laddr, laddr)
self.assertEqual(conn.raddr, raddr)
self.assertEqual(conn.status, status)
for kind in all_kinds:
cons = proc.connections(kind=kind)
if kind in kinds:
self.assertNotEqual(cons, [])
else:
self.assertEqual(cons, [])
# compare against system-wide connections
# XXX Solaris can't retrieve system-wide UNIX
# sockets.
if not SUNOS:
self.compare_proc_sys_cons(proc.pid, [conn])
tcp_template = textwrap.dedent("""
import socket, time
s = socket.socket($family, socket.SOCK_STREAM)
s.bind(('$addr', 0))
s.listen(1)
with open('$testfn', 'w') as f:
f.write(str(s.getsockname()[:2]))
time.sleep(60)
""")
udp_template = textwrap.dedent("""
import socket, time
s = socket.socket($family, socket.SOCK_DGRAM)
s.bind(('$addr', 0))
with open('$testfn', 'w') as f:
f.write(str(s.getsockname()[:2]))
time.sleep(60)
""")
from string import Template
testfile = os.path.basename(TESTFN)
tcp4_template = Template(tcp_template).substitute(
family=int(AF_INET), addr="127.0.0.1", testfn=testfile)
udp4_template = Template(udp_template).substitute(
family=int(AF_INET), addr="127.0.0.1", testfn=testfile)
tcp6_template = Template(tcp_template).substitute(
family=int(AF_INET6), addr="::1", testfn=testfile)
udp6_template = Template(udp_template).substitute(
family=int(AF_INET6), addr="::1", testfn=testfile)
# launch various subprocess instantiating a socket of various
# families and types to enrich psutil results
tcp4_proc = pyrun(tcp4_template)
tcp4_addr = eval(wait_for_file(testfile))
udp4_proc = pyrun(udp4_template)
udp4_addr = eval(wait_for_file(testfile))
if supports_ipv6():
tcp6_proc = pyrun(tcp6_template)
tcp6_addr = eval(wait_for_file(testfile))
udp6_proc = pyrun(udp6_template)
udp6_addr = eval(wait_for_file(testfile))
else:
tcp6_proc = None
udp6_proc = None
tcp6_addr = None
udp6_addr = None
for p in psutil.Process().children():
cons = p.connections()
self.assertEqual(len(cons), 1)
for conn in cons:
# TCP v4
if p.pid == tcp4_proc.pid:
check_conn(p, conn, AF_INET, SOCK_STREAM, tcp4_addr, (),
psutil.CONN_LISTEN,
("all", "inet", "inet4", "tcp", "tcp4"))
# UDP v4
elif p.pid == udp4_proc.pid:
check_conn(p, conn, AF_INET, SOCK_DGRAM, udp4_addr, (),
psutil.CONN_NONE,
("all", "inet", "inet4", "udp", "udp4"))
# TCP v6
elif p.pid == getattr(tcp6_proc, "pid", None):
check_conn(p, conn, AF_INET6, SOCK_STREAM, tcp6_addr, (),
psutil.CONN_LISTEN,
("all", "inet", "inet6", "tcp", "tcp6"))
# UDP v6
elif p.pid == getattr(udp6_proc, "pid", None):
check_conn(p, conn, AF_INET6, SOCK_DGRAM, udp6_addr, (),
psutil.CONN_NONE,
("all", "inet", "inet6", "udp", "udp6"))
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'AF_UNIX is not supported')
@skip_on_access_denied(only_if=OSX)
def test_connections_unix(self):
def check(type):
safe_remove(TESTFN)
sock = socket.socket(AF_UNIX, type)
with contextlib.closing(sock):
sock.bind(TESTFN)
cons = psutil.Process().connections(kind='unix')
conn = cons[0]
check_connection_ntuple(conn)
if conn.fd != -1: # != sunos and windows
self.assertEqual(conn.fd, sock.fileno())
self.assertEqual(conn.family, AF_UNIX)
self.assertEqual(conn.type, type)
self.assertEqual(conn.laddr, TESTFN)
if not SUNOS:
# XXX Solaris can't retrieve system-wide UNIX
# sockets.
self.compare_proc_sys_cons(os.getpid(), cons)
check(SOCK_STREAM)
check(SOCK_DGRAM)
@unittest.skipUnless(hasattr(socket, "fromfd"),
                         'socket.fromfd() is not available')
@unittest.skipIf(WINDOWS or SUNOS,
'connection fd not available on this platform')
def test_connection_fromfd(self):
with contextlib.closing(socket.socket()) as sock:
sock.bind(('localhost', 0))
sock.listen(1)
p = psutil.Process()
for conn in p.connections():
if conn.fd == sock.fileno():
break
else:
self.fail("couldn't find socket fd")
dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
with contextlib.closing(dupsock):
self.assertEqual(dupsock.getsockname(), conn.laddr)
self.assertNotEqual(sock.fileno(), dupsock.fileno())
def test_connection_constants(self):
ints = []
strs = []
for name in dir(psutil):
if name.startswith('CONN_'):
num = getattr(psutil, name)
str_ = str(num)
assert str_.isupper(), str_
assert str_ not in strs, str_
assert num not in ints, num
ints.append(num)
strs.append(str_)
if SUNOS:
psutil.CONN_IDLE
psutil.CONN_BOUND
if WINDOWS:
psutil.CONN_DELETE_TCB
@unittest.skipUnless(POSIX, 'posix only')
def test_num_fds(self):
p = psutil.Process()
start = p.num_fds()
file = open(TESTFN, 'w')
self.addCleanup(file.close)
self.assertEqual(p.num_fds(), start + 1)
sock = socket.socket()
self.addCleanup(sock.close)
self.assertEqual(p.num_fds(), start + 2)
file.close()
sock.close()
self.assertEqual(p.num_fds(), start)
@skip_on_not_implemented(only_if=LINUX)
def test_num_ctx_switches(self):
p = psutil.Process()
before = sum(p.num_ctx_switches())
for x in range(500000):
after = sum(p.num_ctx_switches())
if after > before:
return
self.fail("num ctx switches still the same after 50.000 iterations")
def test_parent_ppid(self):
this_parent = os.getpid()
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertEqual(p.ppid(), this_parent)
self.assertEqual(p.parent().pid, this_parent)
# no other process is supposed to have us as parent
for p in psutil.process_iter():
if p.pid == sproc.pid:
continue
self.assertTrue(p.ppid() != this_parent)
def test_children(self):
p = psutil.Process()
self.assertEqual(p.children(), [])
self.assertEqual(p.children(recursive=True), [])
sproc = get_test_subprocess()
children1 = p.children()
children2 = p.children(recursive=True)
for children in (children1, children2):
self.assertEqual(len(children), 1)
self.assertEqual(children[0].pid, sproc.pid)
self.assertEqual(children[0].ppid(), os.getpid())
def test_children_recursive(self):
# here we create a subprocess which creates another one as in:
# A (parent) -> B (child) -> C (grandchild)
s = "import subprocess, os, sys, time;"
s += "PYTHON = os.path.realpath(sys.executable);"
s += "cmd = [PYTHON, '-c', 'import time; time.sleep(60);'];"
s += "subprocess.Popen(cmd);"
s += "time.sleep(60);"
get_test_subprocess(cmd=[PYTHON, "-c", s])
p = psutil.Process()
self.assertEqual(len(p.children(recursive=False)), 1)
# give the grandchild some time to start
stop_at = time.time() + 1.5
while time.time() < stop_at:
children = p.children(recursive=True)
if len(children) > 1:
break
self.assertEqual(len(children), 2)
self.assertEqual(children[0].ppid(), os.getpid())
self.assertEqual(children[1].ppid(), children[0].pid)
def test_children_duplicates(self):
# find the process which has the highest number of children
table = collections.defaultdict(int)
for p in psutil.process_iter():
try:
table[p.ppid()] += 1
except psutil.Error:
pass
# this is the one, now let's make sure there are no duplicates
pid = sorted(table.items(), key=lambda x: x[1])[-1][0]
p = psutil.Process(pid)
try:
c = p.children(recursive=True)
except psutil.AccessDenied: # windows
pass
else:
self.assertEqual(len(c), len(set(c)))
def test_suspend_resume(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
p.suspend()
for x in range(100):
if p.status() == psutil.STATUS_STOPPED:
break
time.sleep(0.01)
p.resume()
self.assertNotEqual(p.status(), psutil.STATUS_STOPPED)
def test_invalid_pid(self):
self.assertRaises(TypeError, psutil.Process, "1")
self.assertRaises(ValueError, psutil.Process, -1)
def test_as_dict(self):
p = psutil.Process()
d = p.as_dict(attrs=['exe', 'name'])
self.assertEqual(sorted(d.keys()), ['exe', 'name'])
p = psutil.Process(min(psutil.pids()))
d = p.as_dict(attrs=['connections'], ad_value='foo')
if not isinstance(d['connections'], list):
self.assertEqual(d['connections'], 'foo')
def test_halfway_terminated_process(self):
# Test that NoSuchProcess exception gets raised in case the
# process dies after we create the Process object.
# Example:
# >>> proc = Process(1234)
# >>> time.sleep(2) # time-consuming task, process dies in meantime
# >>> proc.name()
# Refers to Issue #15
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
excluded_names = ['pid', 'is_running', 'wait', 'create_time']
if LINUX and not RLIMIT_SUPPORT:
excluded_names.append('rlimit')
for name in dir(p):
if (name.startswith('_') or
name in excluded_names):
continue
try:
meth = getattr(p, name)
# get/set methods
if name == 'nice':
if POSIX:
meth(1)
else:
meth(psutil.NORMAL_PRIORITY_CLASS)
elif name == 'ionice':
meth()
meth(2)
elif name == 'rlimit':
meth(psutil.RLIMIT_NOFILE)
meth(psutil.RLIMIT_NOFILE, (5, 5))
elif name == 'cpu_affinity':
meth()
meth([0])
elif name == 'send_signal':
meth(signal.SIGTERM)
else:
meth()
except psutil.ZombieProcess:
self.fail("ZombieProcess for %r was not supposed to happen" %
name)
except psutil.NoSuchProcess:
pass
except NotImplementedError:
pass
else:
self.fail("NoSuchProcess exception not raised for %r" % name)
self.assertFalse(p.is_running())
@unittest.skipUnless(POSIX, 'posix only')
def test_zombie_process(self):
def succeed_or_zombie_p_exc(fun, *args, **kwargs):
try:
fun(*args, **kwargs)
except (psutil.ZombieProcess, psutil.AccessDenied):
pass
# Note: in this test we'll be creating two sub processes.
# Both of them are supposed to be freed / killed by
# reap_children() as they are attributable to 'us'
# (os.getpid()) via children(recursive=True).
src = textwrap.dedent("""\
import os, sys, time, socket, contextlib
child_pid = os.fork()
if child_pid > 0:
time.sleep(3000)
else:
# this is the zombie process
s = socket.socket(socket.AF_UNIX)
with contextlib.closing(s):
s.connect('%s')
if sys.version_info < (3, ):
pid = str(os.getpid())
else:
pid = bytes(str(os.getpid()), 'ascii')
s.sendall(pid)
""" % TESTFN)
with contextlib.closing(socket.socket(socket.AF_UNIX)) as sock:
try:
sock.settimeout(GLOBAL_TIMEOUT)
sock.bind(TESTFN)
sock.listen(1)
pyrun(src)
conn, _ = sock.accept()
select.select([conn.fileno()], [], [], GLOBAL_TIMEOUT)
zpid = int(conn.recv(1024))
zproc = psutil.Process(zpid)
call_until(lambda: zproc.status(),
"ret == psutil.STATUS_ZOMBIE")
# A zombie process should always be instantiable
zproc = psutil.Process(zpid)
                # ...and at least its status should always be queryable
self.assertEqual(zproc.status(), psutil.STATUS_ZOMBIE)
# ...and it should be considered 'running'
self.assertTrue(zproc.is_running())
# ...and as_dict() shouldn't crash
zproc.as_dict()
if hasattr(zproc, "rlimit"):
succeed_or_zombie_p_exc(zproc.rlimit, psutil.RLIMIT_NOFILE)
succeed_or_zombie_p_exc(zproc.rlimit, psutil.RLIMIT_NOFILE,
(5, 5))
# set methods
succeed_or_zombie_p_exc(zproc.parent)
if hasattr(zproc, 'cpu_affinity'):
succeed_or_zombie_p_exc(zproc.cpu_affinity, [0])
succeed_or_zombie_p_exc(zproc.nice, 0)
if hasattr(zproc, 'ionice'):
if LINUX:
succeed_or_zombie_p_exc(zproc.ionice, 2, 0)
else:
succeed_or_zombie_p_exc(zproc.ionice, 0) # Windows
if hasattr(zproc, 'rlimit'):
succeed_or_zombie_p_exc(zproc.rlimit,
psutil.RLIMIT_NOFILE, (5, 5))
succeed_or_zombie_p_exc(zproc.suspend)
succeed_or_zombie_p_exc(zproc.resume)
succeed_or_zombie_p_exc(zproc.terminate)
succeed_or_zombie_p_exc(zproc.kill)
# ...its parent should 'see' it
# edit: not true on BSD and OSX
# descendants = [x.pid for x in psutil.Process().children(
# recursive=True)]
# self.assertIn(zpid, descendants)
# XXX should we also assume ppid be usable? Note: this
# would be an important use case as the only way to get
# rid of a zombie is to kill its parent.
# self.assertEqual(zpid.ppid(), os.getpid())
# ...and all other APIs should be able to deal with it
self.assertTrue(psutil.pid_exists(zpid))
self.assertIn(zpid, psutil.pids())
self.assertIn(zpid, [x.pid for x in psutil.process_iter()])
psutil._pmap = {}
self.assertIn(zpid, [x.pid for x in psutil.process_iter()])
finally:
reap_children(search_all=True)
def test_pid_0(self):
# Process(0) is supposed to work on all platforms except Linux
if 0 not in psutil.pids():
self.assertRaises(psutil.NoSuchProcess, psutil.Process, 0)
return
p = psutil.Process(0)
self.assertTrue(p.name())
if POSIX:
try:
self.assertEqual(p.uids().real, 0)
self.assertEqual(p.gids().real, 0)
except psutil.AccessDenied:
pass
self.assertIn(p.ppid(), (0, 1))
# self.assertEqual(p.exe(), "")
p.cmdline()
try:
p.num_threads()
except psutil.AccessDenied:
pass
try:
p.memory_info()
except psutil.AccessDenied:
pass
# username property
try:
if POSIX:
self.assertEqual(p.username(), 'root')
elif WINDOWS:
self.assertEqual(p.username(), 'NT AUTHORITY\\SYSTEM')
else:
p.username()
except psutil.AccessDenied:
pass
self.assertIn(0, psutil.pids())
self.assertTrue(psutil.pid_exists(0))
def test_Popen(self):
# Popen class test
# XXX this test causes a ResourceWarning on Python 3 because
        # psutil.__subproc instance doesn't get properly freed.
# Not sure what to do though.
cmd = [PYTHON, "-c", "import time; time.sleep(60);"]
proc = psutil.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
proc.name()
proc.stdin
self.assertTrue(hasattr(proc, 'name'))
self.assertTrue(hasattr(proc, 'stdin'))
self.assertRaises(AttributeError, getattr, proc, 'foo')
finally:
proc.kill()
proc.wait()
self.assertIsNotNone(proc.returncode)
# ===================================================================
# --- Fetch all processes test
# ===================================================================
class TestFetchAllProcesses(unittest.TestCase):
"""Test which iterates over all running processes and performs
some sanity checks against Process API's returned values.
"""
def setUp(self):
if POSIX:
import pwd
pall = pwd.getpwall()
self._uids = set([x.pw_uid for x in pall])
self._usernames = set([x.pw_name for x in pall])
def test_fetch_all(self):
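        # Strategy: call every public psutil.Process method on every
        # running process and hand the return value to the checker method
        # of the same name defined below (e.g. exe(), cmdline(), status()).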
valid_procs = 0
excluded_names = set([
'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
'as_dict', 'cpu_percent', 'parent', 'children', 'pid'])
if LINUX and not RLIMIT_SUPPORT:
excluded_names.add('rlimit')
attrs = []
for name in dir(psutil.Process):
if name.startswith("_"):
continue
if name in excluded_names:
continue
attrs.append(name)
default = object()
failures = []
for name in attrs:
for p in psutil.process_iter():
ret = default
try:
try:
args = ()
attr = getattr(p, name, None)
if attr is not None and callable(attr):
if name == 'rlimit':
args = (psutil.RLIMIT_NOFILE,)
ret = attr(*args)
else:
ret = attr
valid_procs += 1
except NotImplementedError:
msg = "%r was skipped because not implemented" % (
self.__class__.__name__ + '.test_' + name)
warn(msg)
except (psutil.NoSuchProcess, psutil.AccessDenied) as err:
self.assertEqual(err.pid, p.pid)
if err.name:
# make sure exception's name attr is set
# with the actual process name
self.assertEqual(err.name, p.name())
self.assertTrue(str(err))
self.assertTrue(err.msg)
else:
if ret not in (0, 0.0, [], None, ''):
assert ret, ret
meth = getattr(self, name)
meth(ret)
except Exception as err:
s = '\n' + '=' * 70 + '\n'
s += "FAIL: test_%s (proc=%s" % (name, p)
if ret != default:
s += ", ret=%s)" % repr(ret)
s += ')\n'
s += '-' * 70
s += "\n%s" % traceback.format_exc()
s = "\n".join((" " * 4) + i for i in s.splitlines())
failures.append(s)
break
if failures:
self.fail(''.join(failures))
# we should always have a non-empty list, not including PID 0 etc.
# special cases.
self.assertTrue(valid_procs > 0)
def cmdline(self, ret):
pass
def exe(self, ret):
if not ret:
self.assertEqual(ret, '')
else:
assert os.path.isabs(ret), ret
            # Note: os.path.exists() may return False even if the file is there
# hence we skip the test, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
if POSIX and os.path.isfile(ret):
if hasattr(os, 'access') and hasattr(os, "X_OK"):
# XXX may fail on OSX
self.assertTrue(os.access(ret, os.X_OK))
def ppid(self, ret):
self.assertTrue(ret >= 0)
def name(self, ret):
self.assertIsInstance(ret, (str, unicode))
self.assertTrue(ret)
def create_time(self, ret):
self.assertTrue(ret > 0)
# this can't be taken for granted on all platforms
# self.assertGreaterEqual(ret, psutil.boot_time())
# make sure returned value can be pretty printed
# with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
def uids(self, ret):
for uid in ret:
self.assertTrue(uid >= 0)
self.assertIn(uid, self._uids)
def gids(self, ret):
# note: testing all gids as above seems not to be reliable for
        # gid == 30 (nobody); not sure why.
for gid in ret:
self.assertTrue(gid >= 0)
# self.assertIn(uid, self.gids
def username(self, ret):
self.assertTrue(ret)
if POSIX:
self.assertIn(ret, self._usernames)
def status(self, ret):
self.assertTrue(ret != "")
self.assertTrue(ret != '?')
self.assertIn(ret, VALID_PROC_STATUSES)
def io_counters(self, ret):
for field in ret:
if field != -1:
self.assertTrue(field >= 0)
def ionice(self, ret):
if LINUX:
self.assertTrue(ret.ioclass >= 0)
self.assertTrue(ret.value >= 0)
else:
self.assertTrue(ret >= 0)
self.assertIn(ret, (0, 1, 2))
def num_threads(self, ret):
self.assertTrue(ret >= 1)
def threads(self, ret):
for t in ret:
self.assertTrue(t.id >= 0)
self.assertTrue(t.user_time >= 0)
self.assertTrue(t.system_time >= 0)
def cpu_times(self, ret):
self.assertTrue(ret.user >= 0)
self.assertTrue(ret.system >= 0)
def memory_info(self, ret):
self.assertTrue(ret.rss >= 0)
self.assertTrue(ret.vms >= 0)
def memory_info_ex(self, ret):
for name in ret._fields:
self.assertTrue(getattr(ret, name) >= 0)
if POSIX and ret.vms != 0:
# VMS is always supposed to be the highest
for name in ret._fields:
if name != 'vms':
value = getattr(ret, name)
assert ret.vms > value, ret
elif WINDOWS:
assert ret.peak_wset >= ret.wset, ret
assert ret.peak_paged_pool >= ret.paged_pool, ret
assert ret.peak_nonpaged_pool >= ret.nonpaged_pool, ret
assert ret.peak_pagefile >= ret.pagefile, ret
def open_files(self, ret):
for f in ret:
if WINDOWS:
assert f.fd == -1, f
else:
self.assertIsInstance(f.fd, int)
assert os.path.isabs(f.path), f
assert os.path.isfile(f.path), f
def num_fds(self, ret):
self.assertTrue(ret >= 0)
def connections(self, ret):
self.assertEqual(len(ret), len(set(ret)))
for conn in ret:
check_connection_ntuple(conn)
def cwd(self, ret):
if ret is not None: # BSD may return None
assert os.path.isabs(ret), ret
try:
st = os.stat(ret)
except OSError as err:
# directory has been removed in mean time
if err.errno != errno.ENOENT:
raise
else:
self.assertTrue(stat.S_ISDIR(st.st_mode))
def memory_percent(self, ret):
assert 0 <= ret <= 100, ret
def is_running(self, ret):
self.assertTrue(ret)
def cpu_affinity(self, ret):
assert ret != [], ret
def terminal(self, ret):
if ret is not None:
assert os.path.isabs(ret), ret
assert os.path.exists(ret), ret
def memory_maps(self, ret):
for nt in ret:
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
if not value.startswith('['):
assert os.path.isabs(nt.path), nt.path
# commented as on Linux we might get
# '/foo/bar (deleted)'
# assert os.path.exists(nt.path), nt.path
elif fname in ('addr', 'perms'):
self.assertTrue(value)
else:
self.assertIsInstance(value, (int, long))
assert value >= 0, value
def num_handles(self, ret):
if WINDOWS:
self.assertGreaterEqual(ret, 0)
else:
self.assertGreaterEqual(ret, 0)
def nice(self, ret):
if POSIX:
assert -20 <= ret <= 20, ret
else:
priorities = [getattr(psutil, x) for x in dir(psutil)
if x.endswith('_PRIORITY_CLASS')]
self.assertIn(ret, priorities)
def num_ctx_switches(self, ret):
self.assertTrue(ret.voluntary >= 0)
self.assertTrue(ret.involuntary >= 0)
def rlimit(self, ret):
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
# ===================================================================
# --- Limited user tests
# ===================================================================
@unittest.skipUnless(hasattr(os, 'getuid') and os.getuid() == 0,
"super user privileges are required")
class LimitedUserTestCase(TestProcess):
"""Repeat the previous tests by using a limited user.
    Executed only on UNIX and only if the user who runs the test script
is root.
"""
# the uid/gid the test suite runs under
if hasattr(os, 'getuid'):
PROCESS_UID = os.getuid()
PROCESS_GID = os.getgid()
def __init__(self, *args, **kwargs):
TestProcess.__init__(self, *args, **kwargs)
# re-define all existent test methods in order to
# ignore AccessDenied exceptions
for attr in [x for x in dir(self) if x.startswith('test')]:
meth = getattr(self, attr)
            def test_(self, meth=meth):  # bind the current method early
try:
meth()
except psutil.AccessDenied:
pass
setattr(self, attr, types.MethodType(test_, self))
def setUp(self):
safe_remove(TESTFN)
TestProcess.setUp(self)
os.setegid(1000)
os.seteuid(1000)
def tearDown(self):
        os.setegid(self.PROCESS_GID)
        os.seteuid(self.PROCESS_UID)
TestProcess.tearDown(self)
def test_nice(self):
try:
psutil.Process().nice(-1)
except psutil.AccessDenied:
pass
else:
self.fail("exception not raised")
def test_zombie_process(self):
        # causes problems if the test suite is run as root
pass
# ===================================================================
# --- Misc tests
# ===================================================================
class TestMisc(unittest.TestCase):
"""Misc / generic tests."""
def test__str__(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertIn(str(sproc.pid), str(p))
# python shows up as 'Python' in cmdline on OS X so
# test fails on OS X
if not OSX:
self.assertIn(os.path.basename(PYTHON), str(p))
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
self.assertIn(str(sproc.pid), str(p))
self.assertIn("terminated", str(p))
def test__eq__(self):
p1 = psutil.Process()
p2 = psutil.Process()
self.assertEqual(p1, p2)
p2._ident = (0, 0)
self.assertNotEqual(p1, p2)
self.assertNotEqual(p1, 'foo')
def test__hash__(self):
s = set([psutil.Process(), psutil.Process()])
self.assertEqual(len(s), 1)
def test__all__(self):
for name in dir(psutil):
if name in ('callable', 'error', 'namedtuple',
'long', 'test', 'NUM_CPUS', 'BOOT_TIME',
'TOTAL_PHYMEM'):
continue
if not name.startswith('_'):
try:
__import__(name)
except ImportError:
if name not in psutil.__all__:
fun = getattr(psutil, name)
if fun is None:
continue
if (fun.__doc__ is not None and
'deprecated' not in fun.__doc__.lower()):
self.fail('%r not in psutil.__all__' % name)
def test_memoize(self):
from psutil._common import memoize
@memoize
def foo(*args, **kwargs):
"foo docstring"
calls.append(None)
return (args, kwargs)
calls = []
# no args
for x in range(2):
ret = foo()
expected = ((), {})
self.assertEqual(ret, expected)
self.assertEqual(len(calls), 1)
# with args
for x in range(2):
ret = foo(1)
expected = ((1, ), {})
self.assertEqual(ret, expected)
self.assertEqual(len(calls), 2)
# with args + kwargs
for x in range(2):
ret = foo(1, bar=2)
expected = ((1, ), {'bar': 2})
self.assertEqual(ret, expected)
self.assertEqual(len(calls), 3)
# clear cache
foo.cache_clear()
ret = foo()
expected = ((), {})
self.assertEqual(ret, expected)
self.assertEqual(len(calls), 4)
# docstring
self.assertEqual(foo.__doc__, "foo docstring")
def test_serialization(self):
def check(ret):
if json is not None:
json.loads(json.dumps(ret))
a = pickle.dumps(ret)
b = pickle.loads(a)
self.assertEqual(ret, b)
check(psutil.Process().as_dict())
check(psutil.virtual_memory())
check(psutil.swap_memory())
check(psutil.cpu_times())
check(psutil.cpu_times_percent(interval=0))
check(psutil.net_io_counters())
if LINUX and not os.path.exists('/proc/diskstats'):
pass
else:
check(psutil.disk_io_counters())
check(psutil.disk_partitions())
check(psutil.disk_usage(os.getcwd()))
check(psutil.users())
# ===================================================================
# --- Example script tests
# ===================================================================
class TestExampleScripts(unittest.TestCase):
"""Tests for scripts in the examples directory."""
def assert_stdout(self, exe, args=None):
exe = os.path.join(EXAMPLES_DIR, exe)
if args:
exe = exe + ' ' + args
try:
out = sh(sys.executable + ' ' + exe).strip()
except RuntimeError as err:
if 'AccessDenied' in str(err):
return str(err)
else:
raise
assert out, out
return out
def assert_syntax(self, exe, args=None):
exe = os.path.join(EXAMPLES_DIR, exe)
with open(exe, 'r') as f:
src = f.read()
ast.parse(src)
def test_check_presence(self):
# make sure all example scripts have a test method defined
meths = dir(self)
for name in os.listdir(EXAMPLES_DIR):
if name.endswith('.py'):
if 'test_' + os.path.splitext(name)[0] not in meths:
# self.assert_stdout(name)
self.fail('no test defined for %r script'
% os.path.join(EXAMPLES_DIR, name))
def test_disk_usage(self):
self.assert_stdout('disk_usage.py')
def test_free(self):
self.assert_stdout('free.py')
def test_meminfo(self):
self.assert_stdout('meminfo.py')
def test_process_detail(self):
self.assert_stdout('process_detail.py')
def test_who(self):
self.assert_stdout('who.py')
def test_ps(self):
self.assert_stdout('ps.py')
def test_pstree(self):
self.assert_stdout('pstree.py')
def test_netstat(self):
self.assert_stdout('netstat.py')
@unittest.skipIf(TRAVIS, "permission denied on travis")
def test_ifconfig(self):
self.assert_stdout('ifconfig.py')
def test_pmap(self):
self.assert_stdout('pmap.py', args=str(os.getpid()))
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_killall(self):
self.assert_syntax('killall.py')
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_nettop(self):
self.assert_syntax('nettop.py')
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_top(self):
self.assert_syntax('top.py')
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_iotop(self):
self.assert_syntax('iotop.py')
def test_pidof(self):
output = self.assert_stdout('pidof.py %s' % psutil.Process().name())
self.assertIn(str(os.getpid()), output)
def main():
tests = []
test_suite = unittest.TestSuite()
tests.append(TestSystemAPIs)
tests.append(TestProcess)
tests.append(TestFetchAllProcesses)
tests.append(TestMisc)
tests.append(TestExampleScripts)
tests.append(LimitedUserTestCase)
if POSIX:
from _posix import PosixSpecificTestCase
tests.append(PosixSpecificTestCase)
# import the specific platform test suite
stc = None
if LINUX:
from _linux import LinuxSpecificTestCase as stc
elif WINDOWS:
from _windows import WindowsSpecificTestCase as stc
from _windows import TestDualProcessImplementation
tests.append(TestDualProcessImplementation)
elif OSX:
from _osx import OSXSpecificTestCase as stc
elif BSD:
from _bsd import BSDSpecificTestCase as stc
elif SUNOS:
from _sunos import SunOSSpecificTestCase as stc
if stc is not None:
tests.append(stc)
for test_class in tests:
test_suite.addTest(unittest.makeSuite(test_class))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not main():
sys.exit(1)
| packages/psutil | test/test_psutil.py | Python | bsd-3-clause | 100,023 |
#!/usr/bin/env python3
import os
import sys
import json
import time
import functools
import contextlib
import subprocess
@contextlib.contextmanager
def change_dir(dirname):
curdir = os.getcwd()
try:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
def merge_dicts(a, b):
return dict(tuple(a.items()) +
tuple(b.items()))
class Command(object):
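    # Immutable command builder: attribute access appends a word to the
    # command (e.g. make.clean.all -> "make clean all") and the _args/
    # _env/_input/_parser helpers return new Command instances instead of
    # mutating the current one.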
def __init__(self, cmd, args, kwargs, env, input_, parser):
self.__cmd = cmd
self.__args = args
self.__kwargs = kwargs
self.__env = env
self.__input = input_
self.__parser = parser
def __getattr__(self, name):
return Command(self.__cmd + [name],
self.__args,
self.__kwargs,
self.__env,
self.__input,
self.__parser)
def _parser(self, parser):
return Command(self.__cmd,
self.__args,
self.__kwargs,
self.__env,
self.__input,
parser)
def _input(self, input_):
return Command(self.__cmd,
self.__args,
self.__kwargs,
self.__env,
input_,
self.__parser)
def _env(self, env):
return Command(self.__cmd,
self.__args,
self.__kwargs,
merge_dicts(self.__env, env),
self.__input,
self.__parser)
def _args(self, *args, **kwargs):
args = self.__args + list(args)
kwargs_ = dict(self.__kwargs)
kwargs_.update(kwargs)
return Command(self.__cmd,
args,
                       kwargs_,
self.__env,
self.__input,
self.__parser)
def __build(self):
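        # Render the argv list: underscores in command words and option
        # names become dashes, keyword arguments become "--key=value"
        # flags, and positional arguments are appended last.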
fix_underscore = lambda s: s.replace('_', '-')
cmd = list(map(fix_underscore, self.__cmd))
for arg, value in self.__kwargs.items():
cmd.append("--%s=%s" % (fix_underscore(arg), value))
for arg in self.__args:
cmd.append(arg)
return cmd
def __str__(self):
return "Command({})".format(self.__build())
def _run(self):
try:
process = subprocess.run(
self.__build(),
input=self.__input,
env=merge_dicts(os.environ, self.__env),
capture_output=True,
encoding='utf8',
check=True
)
except subprocess.CalledProcessError as p:
raise Exception(p.stderr)
return self.__parser(process.stdout)
def __call__(self, *args, **kwargs):
return self._args(*args, **kwargs)._run()
def command(program, args=[], kwargs={}, env={}, input_="", parser=str):
return Command([program], args=args, kwargs=kwargs, env=env, input_=input_, parser=parser)
class Timer(object):
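    # Wall-clock stopwatch, usable as a context manager; elapsed time is
    # reported in milliseconds via get_ms()/__str__.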
def __init__(self):
self.__start, self.__end = None, None
def start(self): self.__start = time.time()
def stop(self): self.__end = time.time()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def elapsed(self):
assert self.__start is not None, "start() not called"
end = self.__end if self.__end is not None else time.time()
return end - self.__start
def get_ms(self): return int(round(self.elapsed() * 1000, 0))
def __str__(self): return "{}ms".format(self.get_ms())
non_empty = lambda l: [x for x in l if x]
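# git (and make/cargo below) return stdout split into non-empty lines.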
git = command("git", parser=lambda s: non_empty(s.split("\n")))
class WorkingCopy(object):
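    # Thin wrapper around git used to describe the checkout being
    # benchmarked: commit hash, tags, dirty state and current branch.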
def __init__(self, path):
self.__path = os.path.abspath(path)
def commit(self):
with change_dir(self.__path):
return git.show()[0].split()[1]
def tags(self):
with change_dir(self.__path):
return git.tag("-l", points_at=self.commit())
def dirty(self):
with change_dir(self.__path):
return any(git.status("--porcelain"))
def branch(self):
with change_dir(self.__path):
return next(filter(lambda x: x.startswith('*'), git.branch())).split()[1]
def __str__(self):
return "%s%s (%s) [%s]" % (
self.commit(),
"-dirty" if self.dirty() else "",
self.branch(),
", ".join(self.tags())
)
make = command("make", parser=lambda s: non_empty(s.split("\n")))
cargo = command("cargo", parser=lambda s: non_empty(s.split("\n")))
class Environment:
def __init__(self, desc, cwd):
self.desc = desc
self.cwd = cwd
    def builder(self) -> Command: raise NotImplementedError()
    def runner(self) -> Command: raise NotImplementedError()
class Test:
def __init__(self, desc):
self.desc = desc
    def command(self, runner) -> Command: raise NotImplementedError()
def run(tests, env):
results = []
with change_dir(env.cwd):
builder = env.builder()
if builder:
print("Building env '{}'...".format(env.desc))
output = builder._run()
else:
print("Skipping build in env '{}'...".format(env.desc))
for test in tests:
sys.stdout.write("Running '{}' in env '{}'... ".format(test.desc, env.desc))
sys.stdout.flush()
with Timer() as timer:
test.command(env.runner())._run()
sys.stdout.write("{}\n".format(timer))
results.append(str(timer))
return results
class C(Environment):
def __init__(self):
super().__init__("C", "./c")
def builder(self): return make
def runner(self): return command("./brainfuck")
class CPPClang():
def builder(self): return make.clean.all._args("CXX=c++")
class CPPGCC():
def builder(self): return make.clean.all._args("CXX=g++-10")
class CPPADT():
def runner(self): return command("./brainfuck-adt")
class CPPJIT():
def runner(self): return command("./brainfuck-jit")
class CPPOOP():
def runner(self): return command("./brainfuck-oop")
class CPPADTClang(CPPADT, CPPClang, Environment):
def __init__(self):
super().__init__("C++-ADT (Clang)", "./cpp")
class CPPJITClang(CPPJIT, CPPClang, Environment):
def __init__(self):
super().__init__("C++-JIT (Clang)", "./cpp")
class CPPOOPClang(CPPOOP, CPPClang, Environment):
def __init__(self):
super().__init__("C++-OOP (Clang)", "./cpp")
class CPPADTGCC(CPPADT, CPPGCC, Environment):
def __init__(self):
super().__init__("C++-ADT (GCC)", "./cpp")
class CPPJITGCC(CPPJIT, CPPGCC, Environment):
def __init__(self):
super().__init__("C++-JIT (GCC)", "./cpp")
class CPPOOPGCC(CPPOOP, CPPGCC, Environment):
def __init__(self):
super().__init__("C++-OOP (GCC)", "./cpp")
class Rust(Environment):
def __init__(self):
super().__init__("Rust", "./rust")
def builder(self): return cargo.build
def runner(self): return command("./target/debug/main")
class RustJIT(Environment):
def __init__(self):
super().__init__("RustJIT", "./rust")
def builder(self): return cargo.build
def runner(self): return command("./target/debug/jit")
class Go(Environment):
def __init__(self):
super().__init__("Go", "./go")
def builder(self): return None
def runner(self):
return command("go") \
._args("run", "brainfuck.go") \
._env({"GOPATH": os.getcwd()})
class PyPy(Environment):
def __init__(self):
super().__init__("PyPy", "./python")
def builder(self): return None
def runner(self): return command("pypy")._args("./brainfuck-simple.py")
class Hello(Test):
def __init__(self):
super().__init__("Hello")
def command(self, runner) -> Command:
return runner._args("../programs/hello.bf")
class Primes(Test):
def __init__(self, up_to):
super().__init__("Primes up to {}".format(up_to))
self.up_to = up_to
def command(self, runner) -> Command:
return runner._input("{}\n".format(self.up_to))._args("../programs/primes.bf")
class Mandelbrot(Test):
def __init__(self):
super().__init__("Mandelbrot")
def command(self, runner) -> Command:
return runner._args("../programs/mandelbrot.bf")
def markdown_table(headers, rows, align=-1):
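    # Render headers/rows as a Markdown table; column widths are taken
    # from the longest cell in each column, and align=1 right-aligns
    # (pads on the left) while the default -1 left-aligns.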
# prepend or append 'f' 'c' times to 'v', depending
# on alignment (-1: left, 1: right):
fill = lambda v, f, c: \
(f*c) + v if align > 0 else v + (f*c)
# calculate the width of each column
widths = functools.reduce(
lambda a, r: tuple(
map(max, zip(a, map(len, r)))
),
rows,
map(len, headers)
)
# helpers to fill with spaces each column
# and to render a row in markdown format
space_fill = lambda f, w: map(
lambda p: fill(p[0], ' ', (p[1] + 1 - len(p[0]))),
zip(f, w)
)
markdown_row = lambda f, w: "|{}|".format(
"|".join(space_fill(f, w))
)
# render table
headers = markdown_row(headers, widths)
separator = markdown_row(
map(lambda w: fill(':', '-', w-1), widths),
widths
)
rows = "\n".join(
map(lambda f: markdown_row(f, widths), rows)
)
return "\n".join([headers, separator, rows])
def main():
print("Running on {}".format(WorkingCopy('.')))
envs = [
C(),
CPPADTClang(),
CPPJITClang(),
CPPOOPClang(),
CPPADTGCC(),
CPPJITGCC(),
CPPOOPGCC(),
Rust(),
RustJIT(),
Go(),
PyPy(),
]
tests = [
Hello(),
Primes(200),
Mandelbrot(),
]
headers = [''] + [t.desc for t in tests]
rows = []
for env in envs:
results = run(tests, env)
rows.append([env.desc] + results)
print(markdown_table(headers, rows, align=1))
if __name__ == '__main__':
main()
| pablojorge/brainfuck | run.py | Python | mit | 10,250 |
"""
test_universe contains the unit tests for the universe class
To run all tests, go to the main ./portfolioFactory directory and run
>> python -m unittest discover
Author: Peter Li
"""
import unittest
import portfolioFactory.universe.universe as universe
from ..utils import customExceptions as customExceptions
class TestUniverse(unittest.TestCase):
'''Test cases for universe'''
def setUp(self):
# File paths
self.validPath = './portfolioFactory/universe/totalReturnData'
self.invalidPath = './portfolioFactory/universe/universe.py'
self.name = 'testName'
##############################################################################
# Test init
##############################################################################
def testuniverse_inputNotPickle(self):
        # return file path is not a pickle
self.assertRaises(customExceptions.badData, universe.universe,self.name, self.invalidPath)
def testuniverse_inputNotFile(self):
# return file is not a file
self.assertRaises(customExceptions.badData, universe.universe,self.name, 2)
def testuniverse_badName(self):
        # invalid name
self.assertRaises(customExceptions.badData, universe.universe, ['abc'], self.validPath)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUniverse)
unittest.TextTestRunner(verbosity=2).run(suite) | pli1988/portfolioFactory | portfolioFactory/universe/test_universe.py | Python | mit | 1,498 |
#!/usr/bin/env python
import os
import unittest
import sys
from test_common import TestCommon
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'library'))
from fastly_service import FastlyConfiguration
class TestFastlyHealthchecks(TestCommon):
@TestCommon.vcr.use_cassette()
def test_fastly_healthchecks(self):
healthcheck_configuration = self.minimal_configuration.copy()
healthcheck_configuration.update({
'healthchecks': [{
'name' : 'test_healthcheck',
'host' : self.FASTLY_TEST_DOMAIN,
'method' : 'GET',
'path' : '/healthcheck',
'expected_response' : 200,
# Fastly Medium setup:
'threshold' : 3,
'window' : 5,
'initial' : 4,
'check_interval' : 15000,
'timeout' : 5000,
}],
'backends': [{
'name': 'localhost',
'address': '127.0.0.1',
'healthcheck' : 'test_healthcheck'
}],
})
configuration = FastlyConfiguration(healthcheck_configuration)
service = self.enforcer.apply_configuration(self.FASTLY_TEST_SERVICE, configuration).service
self.assertEqual(service.active_version.configuration.healthchecks[0].name, 'test_healthcheck')
self.assertEqual(service.active_version.configuration.healthchecks[0].host, self.FASTLY_TEST_DOMAIN)
self.assertEqual(service.active_version.configuration.healthchecks[0].method, 'GET')
self.assertEqual(service.active_version.configuration.healthchecks[0].path, '/healthcheck')
self.assertEqual(service.active_version.configuration.healthchecks[0].expected_response, 200)
self.assertEqual(service.active_version.configuration.healthchecks[0].threshold, 3)
self.assertEqual(service.active_version.configuration.healthchecks[0].window, 5)
self.assertEqual(service.active_version.configuration.healthchecks[0].initial, 4)
self.assertEqual(service.active_version.configuration.healthchecks[0].check_interval, 15000)
self.assertEqual(service.active_version.configuration.healthchecks[0].timeout, 5000)
self.assertEqual(service.active_version.configuration.backends[0].healthcheck, 'test_healthcheck')
self.assertEqual(service.active_version.configuration, configuration)
active_version_number = service.active_version.number
service = self.enforcer.apply_configuration(self.FASTLY_TEST_SERVICE, configuration).service
self.assertEqual(service.active_version.number, active_version_number)
if __name__ == '__main__':
unittest.main()
| Jimdo/ansible-fastly | tests/test_fastly_healthchecks.py | Python | mit | 2,816 |
from jsonrpcclient.log import _trim_message, _trim_string, _trim_values
def test_trim_string():
message = _trim_string("foo" * 100)
assert "..." in message
def test_trim_values():
message = _trim_values({"list": [0] * 100})
assert "..." in message["list"]
def test_trim_values_nested():
message = _trim_values({"obj": {"obj2": {"string2": "foo" * 100}}})
assert "..." in message["obj"]["obj2"]["string2"]
def test_trim_values_batch():
message = _trim_values([{"list": [0] * 100}])
assert "..." in message[0]["list"]
def test_trim_message():
message = _trim_message("foo" * 100)
assert "..." in message
| bcb/jsonrpcclient | tests/test_log.py | Python | mit | 652 |
from django.shortcuts import render
from django.contrib.admin.views.decorators import staff_member_required
@staff_member_required
def pipstatus(request):
return render(request, 'pipstatus_standalone.html')
| soerenbe/django-pipstatus | pipstatus/views.py | Python | gpl-2.0 | 213 |
"""aws-orgs setup"""
from awsorgs import __version__
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='aws-orgs',
version=__version__,
description='Tools to manage AWS Organizations',
long_description=long_description,
url='https://github.com/ucopacme/aws-orgs',
author='Ashley Gould',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='aws organizations',
packages=find_packages(exclude=['scratch', 'notes']),
install_requires=[
'boto3',
'docopt',
'PyYAML',
'passwordgenerator',
'cerberus',
],
package_data={
'awsorgs': [
'data/*',
'spec_init_data/*',
'spec_init_data/spec.d/*',
],
},
entry_points={
'console_scripts': [
'awsorgs=awsorgs.orgs:main',
'awsaccounts=awsorgs.accounts:main',
'awsauth=awsorgs.auth:main',
'awsloginprofile=awsorgs.loginprofile:main',
'awsorgs-accessrole=awsorgs.tools.accessrole:main',
'awsorgs-spec-init=awsorgs.tools.spec_init:main',
],
},
)
| ashleygould/aws-orgs | setup.py | Python | mit | 1,633 |
#
# SecureDrop whistleblower submission system
# Copyright (C) 2017 Loic Dachary <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from tests.functional import journalist_navigation_steps
from tests.functional import source_navigation_steps
from . import functional_test
import pytest
@pytest.mark.pagelayout
class TestSourceLayout(
functional_test.FunctionalTest,
source_navigation_steps.SourceNavigationStepsMixin,
journalist_navigation_steps.JournalistNavigationStepsMixin):
def test_index(self):
self._source_visits_source_homepage()
self._screenshot('source-index.png')
def test_lookup(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._screenshot('source-lookup.png')
def test_lookup_shows_codename(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_shows_codename()
self._screenshot('source-lookup-shows-codename.png')
def test_login(self):
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._screenshot('source-login.png')
def test_enters_text_in_login_form(self):
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._source_enters_codename_in_login_form()
self._screenshot('source-enter-codename-in-login.png')
def test_use_tor_browser(self):
self._source_visits_use_tor()
self._screenshot('source-use_tor_browser.png')
def test_generate(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._screenshot('source-generate.png')
def test_logout_flashed_message(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_logs_out()
self._screenshot('source-logout_flashed_message.png')
def test_submission_entered_text(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_enters_text_in_message_field()
self._screenshot('source-submission_entered_text.png')
def test_next_submission_flashed_message(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_submits_a_message()
self._screenshot('source-next_submission_flashed_message.png')
def test_source_checks_for_reply(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_logs_out()
self._journalist_logs_in()
self._journalist_checks_messages()
self._journalist_downloads_message()
self._journalist_sends_reply_to_source()
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._source_proceeds_to_login()
self._screenshot('source-checks_for_reply.png')
self._source_deletes_a_journalist_reply()
self._screenshot('source-deletes_reply.png')
def test_source_flagged(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_logs_out()
self._journalist_logs_in()
self._source_delete_key()
self._journalist_visits_col()
self._journalist_flags_source()
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._source_proceeds_to_login()
self._screenshot('source-flagged.png')
def test_notfound(self):
self._source_not_found()
self._screenshot('source-notfound.png')
def test_tor2web_warning(self):
self._source_tor2web_warning()
self._screenshot('source-tor2web_warning.png')
def test_why_journalist_key(self):
self._source_why_journalist_key()
self._screenshot('source-why_journalist_key.png')
@pytest.mark.pagelayout
class TestSourceSessionLayout(
functional_test.FunctionalTest,
source_navigation_steps.SourceNavigationStepsMixin,
journalist_navigation_steps.JournalistNavigationStepsMixin):
def setup(self):
self.session_length_minutes = 0.03
super(TestSourceSessionLayout, self).setup(
session_expiration=self.session_length_minutes)
def test_source_session_timeout(self):
self._source_visits_source_homepage()
self._source_clicks_submit_documents_on_homepage()
self._source_continues_to_submit_page()
self._source_waits_for_session_to_timeout(self.session_length_minutes)
self._source_enters_text_in_message_field()
self._source_visits_source_homepage()
self._screenshot('source-session_timeout.png')
| ehartsuyker/securedrop | securedrop/tests/pageslayout/test_source.py | Python | agpl-3.0 | 5,984 |
#!/usr/bin/env python
# DOENDO - A Python-Based Fortran Refactoring Tool
# Copyright (C) 2011 Felipe H. da Jornada <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
import re
import sys
import xml.dom.minidom
import analyze
import rename
from common import *
from optparse import OptionParser
def parse():
usage='usage: %prog [options] file'
parser = OptionParser(usage)
#parser.add_option()
(options, args) = parser.parse_args()
if len(args)!=1:
parser.error('incorrect number of arguments')
return (options,args)
#TASKS
def task_info(doc, lines):
analyze.print_info(doc)
def task_details(doc, lines):
print
	print 'Not implemented! How about implementing this feature?'
def task_auto_rename(doc, lines):
print
print 'TASK: Automatically Renaming Single-Letter Variables'
print '==================================================='
small_vars = analyze.get_small_vars(doc, 1)
N = len(small_vars)
	if N==0:
print
print 'There are no single-letter variables!'
print 'Auto-renaming will abort'
return
#first, create new names for vars
new_names=[]
new_safe=[]
n=0
for var in small_vars:
n+=1
#new_names.append('%s__%d'%\
# (var.getAttribute('name'), n))
new_names.append('%s'%\
(var.getAttribute('name')*2))
new_safe.append( analyze.check_var_free(var, new_names[-1]) )
#this loop is used while the user is manually renaming the vars
char=''
while 1:
#print vars. to be renamed
last_block=''
n=0
print
print ' The following variables will be renamed'
print ' ---------------------------------------'
print
for var in small_vars:
n+=1
block=var.parentNode.getAttribute('name')
if block!=last_block:
print ' %s %s'%\
(blocks_name[int(var.parentNode.getAttribute('type'))],
block)
last_block=block
name = var.getAttribute('name')
new_name = new_names[n-1]
if not new_safe[n-1]:
new_name += '*'
print ' %5s %s -> %-3s :: %s'%\
('[%d]'%(n), name, new_name,\
var.getAttribute('type'))
if not all(new_safe):
print
print ' (* variable name already used)'
print
print ' Choose one option:'
print ' m - manually configure the name of one variable'
print ' A - accept options and rename'
print ' q - cancel task'
char = raw_input(' ')
if char=='q': return
elif char=='A':
if not all(new_safe):
				print ' Note: some variable names are already being used.'
if raw_input(' Continue [y/n]? ').lower()!='y':
continue
break #leave loop and fix vars
elif char== 'm': pass #we will continue later
else: continue
#if got here, configure name of variable
n=-1
while (n<0 or n>N):
char = raw_input(' Choose variable to rename [1-%d, other value to cancel]: '%(N))
try:
n = int(char)
except:
n = 0
if n==0: continue
v_name = small_vars[n-1].getAttribute('name')
new_name = raw_input(' Enter name for variable %s (leave it empty to cancel): '%(v_name))
if len(new_name):
new_names[n-1] = new_name
new_safe[n-1] = analyze.check_var_free(var, new_name)
#rename
renames = dict(zip(small_vars, new_names))
rename.rename(lines, doc, renames)
print
print ' Rename complete!'
#save
import os.path
fname = doc.childNodes[0].nodeName
(head,tail) = os.path.split(fname)
tail = 'doendo_'+tail
new_fname = os.path.join(head,tail)
print ' Writing %s'%(new_fname)
fout = open(new_fname,'w')
fout.write(''.join(lines))
fout.close()
def task_manual(doc, lines):
print
	print 'Not implemented! How about implementing this feature?'
def task_loop(doc, lines):
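	# Menu dispatcher: map a single-character choice to one of the task
	# functions above; 'q' exits instead of dispatching.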
tasks = {
'i': task_info,
'd': task_details,
'a': task_auto_rename,
'm': task_manual,
'q': None
}
char=''
while not (char in tasks.keys()):
print
print 'Please choose one task'
print ' i - show brief info about the source code'
print ' d - show detailed info about the source code'
print ' a - automatically rename single-letter variables'
print ' m - manually rename variables'
print ' q - quit'
char = raw_input('')
if char=='q':
print
sys.exit()
#call the function
tasks[char](doc, lines)
return char
def main():
(options, args) = parse()
print '''
==========================================================================
Welcome to DOENDO - the tool that makes your FORTRAN code less painful
DOENDO Copyright (C) 2011 Felipe H. da Jornada
This program comes with ABSOLUTELY NO WARRANTY.
=========================================================================='''
fname = sys.argv[1]
fin = open(fname)
lines = fin.readlines()
fin.close()
#need file as single character string
data = ''.join(lines)
#prepare DOM of source code
doc = analyze.analyze(fname, data)
#print useful info about code (get small variables for free)
small_vars = analyze.print_info(doc)
while (1):
task_loop(doc, lines)
if __name__=='__main__':
main()
| jornada/DOENDO | doendo.py | Python | gpl-3.0 | 5,149 |
"""Configuration options for the main app"""
from django.utils.translation import ugettext_lazy as _
from smart_settings.api import register_setting
register_setting(
namespace=u'main',
module=u'main.settings',
name=u'SIDE_BAR_SEARCH',
global_name=u'MAIN_SIDE_BAR_SEARCH',
default=False,
description=_(u'Controls whether the search functionality is provided by a sidebar widget or by a menu entry.')
)
register_setting(
namespace=u'main',
module=u'main.settings',
name=u'DISABLE_HOME_VIEW',
global_name=u'MAIN_DISABLE_HOME_VIEW',
default=False,
)
register_setting(
namespace=u'main',
module=u'main.settings',
name=u'DISABLE_ICONS',
global_name=u'MAIN_DISABLE_ICONS',
default=False,
)
| rosarior/rua | rua/apps/main/settings.py | Python | gpl-3.0 | 753 |
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
# Taher Shihadeh <[email protected]>
#
# Copyright (C) 2001-2011 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Page
import Cherokee
import os
from config_version import config_version_get_current
from consts import *
from configured import *
URL_BASE = '/create_config'
URL_APPLY = '/create_config/apply'
HELPS = [('index', N_("Index"))]
NOTE_LOADING = N_("Loading new configuration file..")
WARNING_NOT_FOUND = N_("<b>The configuration is not found</b>.<br />You can create a new configuration file and proceed to customize the web server.")
DEFAULT_PID_LOCATIONS = [
'/var/run/cherokee.pid',
os.path.join (PREFIX, 'var/run/cherokee.pid')
]
CHEROKEE_MIN_DEFAULT_CONFIG = """# Default configuration
server!pid_file = %s
vserver!1!nick = default
vserver!1!document_root = /tmp
vserver!1!rule!1!match = default
vserver!1!rule!1!handler = common
""" % (DEFAULT_PID_LOCATIONS[0])
class ConfigCreator:
def __call__ (self, profile):
if profile == 'regular':
return self._create_config ("cherokee.conf.sample")
elif profile == 'static':
return self._create_config ("performance.conf.sample")
elif profile == 'development':
re = self._create_config ("cherokee.conf.sample")
if not re:
return False
self._tweak_config_for_dev()
return True
def _create_config (self, template_file):
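        # Create the configuration file if it is missing: pick a template
        # (installed sample first, then the source-tree copy, then the
        # minimal built-in default), write it out and reload CTK.cfg.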
# Configuration file
filename = CTK.cfg.file
if os.path.exists (filename):
return True
dirname = os.path.dirname(filename)
if dirname and not os.path.exists (dirname):
try:
os.mkdir (dirname)
except:
print "ERROR: Could not create directory '%s'" %(dirname)
return False
# Configuration content
content = "config!version = %s\n" %(config_version_get_current())
# Add basic content
conf_sample_sys = os.path.join (CHEROKEE_ADMINDIR, template_file)
conf_sample_dev = os.path.join (os.path.realpath (__file__ + '/../../%s'%(template_file)))
if os.path.exists (conf_sample_sys):
content += open(conf_sample_sys, 'r').read()
elif os.path.exists (conf_sample_dev):
content += open(conf_sample_dev, 'r').read()
else:
content += CHEROKEE_MIN_DEFAULT_CONFIG
# Write it
try:
f = open(filename, 'w+')
f.write (content)
f.close()
except:
print "ERROR: Could not open '%s' for writing" %(filename)
return False
CTK.cfg.load()
return True
def _tweak_config_for_dev (self):
del(CTK.cfg['server!bind'])
del(CTK.cfg['server!pid_file'])
del(CTK.cfg['vserver!1!logger'])
CTK.cfg['server!bind!1!port'] = "1234"
CTK.cfg['server!log_flush_lapse'] = "0"
CTK.cfg['vserver!1!rule!5!handler!type'] = "normal"
CTK.cfg['vserver!1!error_writer!type'] = "stderr"
CTK.cfg['source!2!type'] = "host"
CTK.cfg['source!2!nick'] = "localhost 8000"
CTK.cfg['source!2!host'] = "localhost:8000"
CTK.cfg.save()
def apply():
creator = ConfigCreator()
profile = CTK.post.pop('create')
if creator (profile):
return CTK.cfg_reply_ajax_ok()
return {'ret': 'fail'}
class Form (CTK.Container):
def __init__ (self, key, name, label, **kwargs):
CTK.Container.__init__ (self, **kwargs)
box = CTK.Box({'class': 'create-box %s' %(key)})
box += CTK.RawHTML('<h3>%s</h3>' %(name))
box += CTK.RawHTML('<span>%s</span>' %(label))
submit = CTK.Submitter(URL_APPLY)
submit += CTK.Hidden('create', key)
submit += CTK.SubmitterButton (_('Create'))
submit.bind ('submit_success',
"$('#main').html('<h1>%s</h1>');"%(NOTE_LOADING) + CTK.JS.GotoURL('/'))
box += submit
box += CTK.RawHTML('<div class="ui-helper-clearfix"></div>')
self += box
class Render:
def __call__ (self):
container = CTK.Container()
container += CTK.RawHTML("<h2>%s</h2>" %(_('Create a new configuration file:')))
key = 'regular'
name = _('Regular')
label = _('Regular configuration: Apache logs, MIME types, icons, etc.')
container += Form (key, name, label)
key = 'static'
name = _('Static Content')
label = _('Optimized to send static content.')
container += Form (key, name, label)
key = 'development'
name = _('Server Development')
label = _('No standard port, No log files, No PID file, etc.')
container += Form (key, name, label)
page = Page.Base(_('New Configuration File'), body_id='new-config', helps=HELPS)
page += CTK.RawHTML("<h1>%s</h1>" %(_('Configuration File Not Found')))
page += CTK.Notice ('warning', CTK.RawHTML(_(WARNING_NOT_FOUND)))
page += CTK.Indenter (container)
return page.Render()
CTK.publish ('^%s'%(URL_BASE), Render)
CTK.publish ('^%s'%(URL_APPLY), apply, method="POST")
| nuxleus/cherokee-webserver | admin/PageNewConfig.py | Python | gpl-2.0 | 5,998 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTestrepository(PythonPackage):
"""A repository of test results."""
homepage = "https://launchpad.net/testrepository"
url = "https://pypi.io/packages/source/t/testrepository/testrepository-0.0.20.tar.gz"
import_modules = [
'testrepository', 'testrepository.arguments',
'testrepository.commands', 'testrepository.repository',
'testrepository.tests', 'testrepository.tests.arguments',
'testrepository.tests.commands', 'testrepository.tests.repository',
'testrepository.tests.ui', 'testrepository.ui',
]
version('0.0.20', sha256='752449bc98c20253ec4611c40564aea93d435a5bf3ff672208e01cc10e5858eb')
depends_on('py-setuptools', type='build')
depends_on('py-fixtures', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/py-testrepository/package.py | Python | lgpl-2.1 | 1,122 |
from bs4 import BeautifulSoup
import urllib
import requests
def createFilename(url, name, folder):
dotSplit = url.split('.')
    if name is None:
# use the same as the url
slashSplit = dotSplit[-2].split('/')
name = slashSplit[-1]
ext = dotSplit[-1]
file = '{}{}.{}'.format(folder, name, ext)
return file
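# Worked example of the helper above (URL is hypothetical):
# createFilename('http://example.com/images/pic.jpg', None, './') splits on '.',
# takes 'pic' from the second-to-last piece and 'jpg' as the extension,
# returning './pic.jpg'; passing name='wall1' instead would return './wall1.jpg'.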
def getImage(url, name=None, folder='./'):
file = createFilename(url, name, folder)
with open(file, 'wb') as f:
r = requests.get(url, stream=True)
for block in r.iter_content(1024):
if not block:
break
f.write(block)
def main():
print "Enter the Tag or Category :"
tag=raw_input()
for url_range in range(1,10):
main_url = 'http://wall.alphacoders.com/search.php?search='+tag+'&page=' + str(url_range)
print "Entered Page " + str(url_range)
main_url_opener = urllib.urlopen(main_url)
main_url_response = main_url_opener.read()
main_url_soup = BeautifulSoup(main_url_response)
mylist = []
for wall_link in main_url_soup.find_all('a'):
all_links = wall_link.get('href')
try:
if all_links.find('big.php?') >= 0:
if all_links not in mylist:
mylist.append(all_links)
except:
pass
print mylist
new_list = ['http://wall.alphacoders.com/' + suit for suit in mylist]#[x+'http://wall.alphacoders.com/' for x in my_list]
print new_list
source_list=[]
for element in new_list:
print element
opener = urllib.urlopen(element)
response = opener.read()
soupedversion = BeautifulSoup(response)
for imglink in soupedversion.find_all('img' , {'id' : 'main_wallpaper'}):
mylink = imglink.get('src')
print mylink
source_list.append(mylink)
print source_list
for i in range(0,len(source_list)):
a=source_list[i]
print a
getImage(a)
if __name__ == "__main__":
main()
| wolfdale/Alphacoder | Alphacoder.py | Python | mit | 2,168 |
#!/usr/bin/python
import sys
sys.path.append('/homes/gws/aritter/twitter_nlp/python')
from twokenize import tokenize
from LdaFeatures import LdaFeatures
from Vocab import Vocab
from Dictionaries import Dictionaries
entityDocs = {}
prevText = None
for line in sys.stdin:
line = line.rstrip('\n')
fields = line.split('\t')
sid = fields[0]
text = fields[6]
words = tokenize(text)
confidence = 1.0 / float(fields[-1])
eType = fields[-2]
entity = fields[-3]
neTags = fields[-4].split(' ')
pos = fields[-5].split(' ')
words = fields[-6].split(' ')
    # Just skip duplicate texts (these will come from tweets with more than one entity)
if prevText and prevText == text:
continue
prevText = text
features = LdaFeatures(words, neTags)
for i in range(len(features.entities)):
entity = ' '.join(features.words[features.entities[i][0]:features.entities[i][1]])
entityDocs[entity] = entityDocs.get(entity,[])
entityDocs[entity].append(features.features[i])
dictionaries = Dictionaries('/homes/gws/aritter/twitter_nlp/data/LabeledLDA_dictionaries')
vocab = Vocab()
keys = entityDocs.keys()
keys.sort(cmp=lambda a,b: cmp(len(entityDocs[b]),len(entityDocs[a])))
eOut = open('entities', 'w')
lOut = open('labels', 'w')
dOut = open('dictionaries', 'w')
for e in keys:
labels = dictionaries.GetDictVector(e)
###############################################################################
#NOTE: For now, only include entities which appear in one or more dictionary
# we could modify this to give them membership in all, or no dictionaries
# (in LabeledLDA, don't impose any constraints)
###############################################################################
if sum(labels) > 0:
lOut.write(' '.join([str(x) for x in labels]) + "\n")
eOut.write("%s\n" % e)
print '\t'.join([' '.join([str(vocab.GetID(x)) for x in f]) for f in entityDocs[e]])
vocab.SaveVocab('vocab')
for d in dictionaries.dictionaries:
dOut.write(d + "\n")
| adarshlx/twitter_nlp | hbc/python/tweets2entityDocs.py | Python | gpl-3.0 | 2,087 |
"Test editor, coverage 35%."
from idlelib import editor
import unittest
from test.support import requires
from tkinter import Tk
Editor = editor.EditorWindow
class EditorWindowTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
cls.root.withdraw()
@classmethod
def tearDownClass(cls):
cls.root.update_idletasks()
for id in cls.root.tk.call('after', 'info'):
cls.root.after_cancel(id)
cls.root.destroy()
del cls.root
def test_init(self):
e = Editor(root=self.root)
self.assertEqual(e.root, self.root)
e._close()
class EditorFunctionTest(unittest.TestCase):
def test_filename_to_unicode(self):
func = Editor._filename_to_unicode
class dummy():
filesystemencoding = 'utf-8'
pairs = (('abc', 'abc'), ('a\U00011111c', 'a\ufffdc'),
(b'abc', 'abc'), (b'a\xf0\x91\x84\x91c', 'a\ufffdc'))
for inp, out in pairs:
self.assertEqual(func(dummy, inp), out)
if __name__ == '__main__':
unittest.main(verbosity=2)
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/idlelib/idle_test/test_editor.py | Python | gpl-2.0 | 1,141 |
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/')
def home():
return '''
<h1>Welcome to silly project of Samuka!</h1>
    <p>Available endpoints:</p>
<ul>
<li><b>/</b> - this page</li>
<li><b>/ping</b> - example of rest api with json response</li>
        <li><b>/modal</b> - example of html divs</li>
</ul>
<h3>More contacts: [email protected]</h3>
'''
@app.route('/ping')
def ping():
return jsonify(ping='pong')
@app.route('/modal')
def modal():
return """
<html>
<head></head>
<body>
<!-- Modal content-->
<div class="modal-content">
<div class="modal-header">
<h4 id="modal-title" class="modal-title">Modal Header</h4>
</div>
<div class="modal-body">
<p>Some text in the modal.</p>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</body>
</html>"""
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=80)
| samukasmk/grupy-flask-jenkins | app.py | Python | apache-2.0 | 1,035 |
from django.db import models, connection
def generateDataCube(owner_name=None, subject=None, timing=None, start_date=None, end_date=None):
assert timing in [False, 'year', 'week', 'month']
dbquery = "SELECT {0}, {1}, {2}, count(*) FROM images {4} GROUP BY {3} {5};"
zero = "owner_name" if owner_name else "null"
one = "subject" if subject else "null"
two = "date_trunc('%s', timing)"%(timing) if timing else "null"
# GROUP BY clause
three = ""
if owner_name: three += "owner_name, "
if subject: three += "subject, "
if timing: three += "3, "
if len(three) > 0: three = three[:-2]
# WHERE clause
four = ""
if start_date or end_date:
four += "WHERE "
if start_date:
four += "timing >= '%s' " % start_date
elif end_date:
four += "timing < ('%s'::date + '1 day'::interval) " % end_date
if end_date and start_date:
four += "AND timing < ('%s'::date + '1 day'::interval) " % end_date
# ORDER BY clause
five = "ORDER BY date_trunc('%s', timing)"%(timing) if timing else ''
dbquery = dbquery.format(zero, one, two, three, four, five)
if not any([owner_name, subject, timing]):
dbquery = dbquery[:-11]
cursor = connection.cursor()
cursor.execute(dbquery)
result = cursor.fetchall()
json = list()
for db_row in result:
row = dict()
if owner_name:
if db_row[0] is None:
row["Owner"] = "<No Owner>"
else:
row["Owner"] = db_row[0]
if subject:
if db_row[1] is None:
row["Subject"] = "<No Subject>"
else:
row["Subject"] = db_row[1]
if timing:
# timing_col = {"year":"Year", "Month
if db_row[2] is None:
row["Timing"] = "<No Date>"
elif timing == 'year':
row["Timing"] = "%d" % (db_row[2].year)
elif timing == 'month':
row["Timing"] = "%d-%02d" % (db_row[2].year, db_row[2].month)
elif timing == 'week':
row["Timing"] = "%d-%02d-%02d" % (db_row[2].year, db_row[2].month, db_row[2].day)
row["Count"] = db_row[3]
json.append(row)
print(dbquery)
return json
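# Illustrative call (column values are hypothetical): generateDataCube(owner_name=True,
# timing='month') groups images by owner and calendar month and yields rows shaped
# like {"Owner": "alice", "Timing": "2014-03", "Count": 12}.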
| jcairo/391_project | project_391/main/datacube.py | Python | mit | 2,332 |
from datetime import timedelta
from urllib2 import URLError
import json
import os
import traceback
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'Sabnzbd'
class Sabnzbd(DownloaderBase):
protocol = ['nzb']
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function.
            This is done so failure checks happen before the downloader is used,
            so the downloader doesn't need to worry about that
:return: boolean
            A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" to SABnzbd.', data.get('name'))
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, media),
'priority': self.conf('priority'),
}
nzb_filename = None
if filedata:
if len(filedata) < 50:
log.error('No proper nzb available: %s', filedata)
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, media)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
            if nzb_filename and req_params.get('mode') == 'addfile':
sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
else:
sab_data = self.call(req_params)
except URLError:
log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
return False
except:
log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
return False
log.debug('Result from SAB: %s', sab_data)
nzo_ids = sab_data.get('nzo_ids', [])
if sab_data.get('status') and not sab_data.get('error') and isinstance(nzo_ids, list) and len(nzo_ids) > 0:
log.info('NZB sent to SAB successfully.')
if filedata:
return self.downloadReturnId(nzo_ids[0])
else:
return True
else:
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def test(self):
""" Check if connection works
Return message if an old version of SAB is used
:return: bool
"""
try:
sab_data = self.call({
'mode': 'version',
})
v = sab_data.split('.')
if int(v[0]) == 0 and int(v[1]) < 7:
return False, 'Your Sabnzbd client is too old, please update to newest version.'
# the version check will work even with wrong api key, so we need the next check as well
sab_data = self.call({
'mode': 'qstatus',
})
if not sab_data:
return False
except:
return False
return True
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking SABnzbd download status.')
# Go through Queue
try:
queue = self.call({
'mode': 'queue',
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return []
# Go through history items
try:
history = self.call({
'mode': 'history',
'limit': 15,
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return []
release_downloads = ReleaseDownloadList(self)
# Get busy releases
for nzb in queue.get('slots', []):
if nzb['nzo_id'] in ids:
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for nzb in history.get('slots', []):
if nzb['nzo_id'] in ids:
status = 'busy'
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif nzb['status'] == 'Completed':
status = 'completed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
return release_downloads
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
try:
self.call({
'mode': 'queue',
'name': 'delete',
'del_files': '1',
'value': release_download['id']
}, use_json = False)
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': release_download['id']
}, use_json = False)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
return True
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '0',
'value': release_download['id']
}, use_json = False)
except:
log.error('Failed removing: %s', traceback.format_exc(0))
return False
return True
def call(self, request_params, use_json = True, **kwargs):
url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
'apikey': self.conf('api_key'),
'output': 'json'
}))
data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
if use_json:
d = json.loads(data)
if d.get('error'):
log.error('Error getting data from SABNZBd: %s', d.get('error'))
return {}
return d.get(request_params['mode']) or d
else:
return data
config = [{
'name': 'sabnzbd',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'sabnzbd',
'label': 'Sabnzbd',
'description': 'Use <a href="http://sabnzbd.org/" target="_blank">SABnzbd</a> (0.7+) to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'nzb',
},
{
'name': 'host',
'default': 'localhost:8080',
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'api_key',
'label': 'Api Key',
'description': 'Used for all calls to Sabnzbd.',
},
{
'name': 'category',
'label': 'Category',
'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>',
},
{
'name': 'priority',
'label': 'Priority',
'type': 'dropdown',
'default': '0',
'advanced': True,
'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)],
'description': 'Add to the queue with this priority.',
},
{
'name': 'manual',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'remove_complete',
'advanced': True,
'label': 'Remove NZB',
'default': False,
'type': 'bool',
'description': 'Remove the NZB from history after it completed.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]
| mattesno1/CouchPotatoServer | couchpotato/core/downloaders/sabnzbd.py | Python | gpl-3.0 | 10,799 |
#!/usr/bin/env python
import os
import sys
import unittest
from rclpy.node import Node
from std_srvs.srv import SetBool
from twisted.python import log
sys.path.append(os.path.dirname(__file__)) # enable importing from common.py in this directory
import common # noqa: E402
from common import expect_messages, websocket_test # noqa: E402
log.startLogging(sys.stderr)
generate_test_description = common.generate_test_description
class TestAdvertiseService(unittest.TestCase):
@websocket_test
async def test_two_concurrent_calls(self, node: Node, make_client):
ws_client = await make_client()
ws_client.sendJson(
{
"op": "advertise_service",
"type": "std_srvs/SetBool",
"service": "/test_service",
}
)
client = node.create_client(SetBool, "/test_service")
client.wait_for_service()
requests_future, ws_client.message_handler = expect_messages(
2, "WebSocket", node.get_logger()
)
requests_future.add_done_callback(lambda _: node.executor.wake())
response1_future = client.call_async(SetBool.Request(data=True))
response2_future = client.call_async(SetBool.Request(data=False))
requests = await requests_future
self.assertEqual(len(requests), 2)
self.assertEqual(requests[0]["op"], "call_service")
self.assertEqual(requests[0]["service"], "/test_service")
self.assertEqual(requests[0]["args"], {"data": True})
ws_client.sendJson(
{
"op": "service_response",
"service": "/test_service",
"values": {"success": True, "message": "Hello world 1"},
"id": requests[0]["id"],
"result": True,
}
)
self.assertEqual(requests[1]["op"], "call_service")
self.assertEqual(requests[1]["service"], "/test_service")
self.assertEqual(requests[1]["args"], {"data": False})
ws_client.sendJson(
{
"op": "service_response",
"service": "/test_service",
"values": {"success": True, "message": "Hello world 2"},
"id": requests[1]["id"],
"result": True,
}
)
self.assertEqual(
await response1_future, SetBool.Response(success=True, message="Hello world 1")
)
self.assertEqual(
await response2_future, SetBool.Response(success=True, message="Hello world 2")
)
node.destroy_client(client)
| RobotWebTools/rosbridge_suite | rosbridge_server/test/websocket/advertise_service.test.py | Python | bsd-3-clause | 2,612 |
############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
import ConfigParser
import Zorp.Config
class Singleton(object):
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
To get the singleton instance, use the `Instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def Instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError('Singletons must be accessed through `Instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
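# A minimal usage sketch of the Singleton decorator above (the Database class
# here is hypothetical, not part of this module):
#
#   @Singleton
#   class Database(object):
#       def __init__(self):
#           self.connected = True
#
#   db_a = Database.Instance()   # first call creates the instance
#   db_b = Database.Instance()   # subsequent calls return the same object
#   assert db_a is db_b
#   Database()                   # raises TypeError, as documented above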
@Singleton
class ZorpctlConfig(object):
def __init__(self):
defaults = {
# specifies whether --auto-restart is default
'AUTO_RESTART' : 1,
# wait a starting process to report back for this amount of time, assume the
# startup failed if it exceeds this time.
'START_WAIT_TIMEOUT' : 10,
# The interval (in seconds) to check a stopping Zorp instance at, second
# fractions are allowed.
'STOP_CHECK_DELAY' : 1,
# The number of seconds to wait for a stopping Zorp instance
'STOP_CHECK_TIMEOUT' : 5,
# automatically append this string to each Zorp command line, this was
# renamed in Zorp 3.0.3, but the original APPEND_ARGS option still works.
'ZORP_APPEND_ARGS' : '',
# arguments appended to the zorpctl instance specific options, such as
# --enable-core
'ZORPCTL_APPEND_ARGS' : '',
# whether to check /etc/zorp permissions
'CHECK_PERMS' : 1,
'CONFIG_DIR' : '/etc/zorp',
'CONFIG_DIR_OWNER' : 'root',
'CONFIG_DIR_GROUP' : 'zorp',
'CONFIG_DIR_MODE' : 0750,
# directory where Zorp stores its pidfiles
'PIDFILE_DIR' : '/var/run/zorp',
# set pidfile directory ownership according to the settings below (umask is
# applied).
# DO not modify this, unless you know what you are doing.
'PIDFILE_DIR_OWNER' : 'zorp',
'PIDFILE_DIR_GROUP' : 'zorp',
'PIDFILE_DIR_MODE' : 0770,
}
self.config = ConfigParser.RawConfigParser(defaults)
self.path = Zorp.Config.config.dirs.sysconfdir
def __getitem__(self, key):
if key == "ZORP_PRODUCT_NAME":
return Zorp.Config.config.options.product_name
if key == 'ZORP_LIBDIR':
return Zorp.Config.config.dirs.libdir
if key == 'ZORP_SBINDIR':
return Zorp.Config.config.dirs.sbindir
if key == 'ZORP_SYSCONFDIR':
return Zorp.Config.config.dirs.sysconfdir
if key == 'ZORP_PIDFILEDIR':
return Zorp.Config.config.dirs.pidfiledir
try:
default_value = self.config.get('DEFAULT', key)
try:
config_value = self.config.get('zorpctl', key)
config_value = config_value.strip('\"')
# permissions have to be octal values
if key.endswith('_MODE'):
value = int(config_value, base=8)
else:
# since ConfigParser only uses strings as values,
# cast the value to the type of the default one
default_type = type(default_value)
value = default_type(config_value)
except:
value = default_value
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
raise KeyError(key)
return value
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
self.parse()
def parse(self):
if not self.config.read(self.path + '/zorpctl.conf'):
self.config.read(self.path)
| mochrul/zorp | zorpctl/ZorpctlConf.py | Python | gpl-2.0 | 5,339 |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# We need data in a different format for this guy
from LOTlib.DataAndObjects import *
# The arguments are [concept,object]
data = [
FunctionData(['A', Obj(shape='square', color='red')], True),
FunctionData(['A', Obj(shape='square', color='blue')], False),
FunctionData(['A', Obj(shape='triangle', color='blue')], False),
FunctionData(['A', Obj(shape='triangle', color='red')], False),
FunctionData(['B', Obj(shape='square', color='red')], False),
FunctionData(['B', Obj(shape='square', color='blue')], True),
FunctionData(['B', Obj(shape='triangle', color='blue')], True),
FunctionData(['B', Obj(shape='triangle', color='red')], True)
] * 10 # number of data points exactly like these
| moverlan/LOTlib | LOTlib/Examples/RationalRules/TwoConcepts/Data.py | Python | gpl-3.0 | 814 |
# Copyright (C) 2017 Alexander Kuvaev ([email protected])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
import common
from enum import Enum
class CursorType(Enum):
POINTER = 0
SCROLL_UP = 1
SCROLL_RIGHT = 2
SCROLL_DOWN = 4
SCROLL_LEFT = 8
SCROLL_UP_RIGHT = SCROLL_UP + SCROLL_RIGHT
SCROLL_DOWN_RIGHT = SCROLL_DOWN + SCROLL_RIGHT
SCROLL_DOWN_LEFT = SCROLL_DOWN + SCROLL_LEFT
SCROLL_UP_LEFT = SCROLL_UP + SCROLL_LEFT
class Cursor:
move_up = False
move_down = False
move_left = False
move_right = False
pos = common.Point(0, 0)
cursor_type = CursorType.POINTER
@classmethod
def update(cls):
mouse_x, mouse_y = pygame.mouse.get_pos()
cls.pos = common.Point(mouse_x, mouse_y)
cls.move_up = False
cls.move_down = False
cls.move_left = False
cls.move_right = False
ct_ind = CursorType.POINTER.value
if mouse_x <= 10:
cls.move_left = True
ct_ind += CursorType.SCROLL_LEFT.value
if mouse_x >= common.WIDTH - 10:
cls.move_right = True
ct_ind += CursorType.SCROLL_RIGHT.value
if mouse_y <= 10:
cls.move_up = True
ct_ind += CursorType.SCROLL_UP.value
if mouse_y >= common.HEIGHT - 10:
cls.move_down = True
ct_ind += CursorType.SCROLL_DOWN.value
cls.cursor_type = CursorType(ct_ind)
return
| Vinatorul/Codpled | src/cursor.py | Python | gpl-3.0 | 2,074 |
#!/usr/bin/env python
class Solution:
def majorityElement(self, nums: list) -> int:
from collections import defaultdict
numCount, length = defaultdict(int), len(nums)
for n in nums:
numCount[n] += 1
if numCount[n] > length/2:
return n
sol = Solution()
nums = [3,2,3]
nums = [2,2,1,1,1,2,2]
print(sol.majorityElement(nums))
| eroicaleo/LearningPython | interview/leet/169_Majority_Element.py | Python | mit | 393 |
#this one is like your scripts with argv
def print_two(*args):
arg1, arg2 = args
print "arg1: %r, arg2: %r" % (arg1, arg2)
# ok, that *args is actually pointless, we can just do
def print_two_again(arg1, arg2):
print "arg1: %r, arg2: %r" % (arg1, arg2)
# this just takes one argument
def print_one(arg1):
print "arg1: %r" % arg1
# this one takes no arguments
def print_none():
print "I got nothin'."
print_two("Zed", "Shaw")
print_two_again("Zed", "Shaw")
print_one("First!")
print_none()
| mshcruz/LearnPythonTheHardWay | ex18.py | Python | gpl-2.0 | 513 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for working with containers in config."""
import yaml
def GenerateManifest(context):
"""Generates a Container Manifest given a Template context.
Args:
context: Template context, which must contain dockerImage and port
properties, and an optional dockerEnv property.
Returns:
A Container Manifest as a YAML string.
"""
env_list = []
if 'dockerEnv' in context.properties:
for key, value in context.properties['dockerEnv'].iteritems():
env_list.append({'name': key, 'value': str(value)})
manifest = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': context.env['name']
},
'spec': {
'containers': [{
'name': context.env['name'],
'image': context.properties['dockerImage'],
'ports': [{
'hostPort': context.properties['port'],
'containerPort': context.properties['port']
}],
}]
}
}
if env_list:
manifest['spec']['containers'][0]['env'] = env_list
return yaml.dump(manifest, default_flow_style=False)
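# Rough illustration of GenerateManifest() in use (property values are
# hypothetical; dockerImage and port are the required ones per the docstring):
#
#   context.properties = {'dockerImage': 'nginx:1.9', 'port': 80,
#                         'dockerEnv': {'FOO': 'bar'}}
#   context.env = {'name': 'my-pod'}
#
# The returned YAML describes a v1 Pod named "my-pod" whose single container
# exposes hostPort/containerPort 80 and carries the env entry FOO=bar.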
| jaivasanth-google/deploymentmanager-samples | examples/v2/common/python/container_helper.py | Python | apache-2.0 | 1,754 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.forms.models import (
inlineformset_factory,
ModelForm, ModelFormMetaclass,
)
from django.utils import six
class InlineModelFormMetaclass(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
options = attrs.get('Meta')
for base in reversed(bases):
if options is None:
options = getattr(base, 'Meta', None)
inlines = getattr(options, 'inlines', ())
super_new = super(InlineModelFormMetaclass, cls).__new__
new_cls = super_new(cls, name, bases, attrs)
new_cls._meta.inlines = inlines
return new_cls
@six.add_metaclass(InlineModelFormMetaclass)
class InlineModelForm(ModelForm):
def __init__(self, data=None, files=None, inlines=(), *args, **kwargs):
super(InlineModelForm, self).__init__(data, files, *args, **kwargs)
opts = self._meta
model = self.instance.__class__
self._inline_form_sets = []
for field_name in (inlines or opts.inlines):
kwargs = {'extra': 0}
if not isinstance(field_name, six.string_types):
kwargs.update(field_name[1])
field_name = field_name[0]
field = getattr(model, field_name).related
FormSet = inlineformset_factory(model, field.model, **kwargs)
form_set = FormSet(data=data, files=files, instance=self.instance)
self._inline_form_sets.append(form_set)
setattr(self, field_name, form_set)
def is_valid(self):
valid = super(InlineModelForm, self).is_valid()
for form_set in self._inline_form_sets:
if not form_set.is_valid():
valid = False
return valid
def save(self, commit=True):
instances = [super(InlineModelForm, self).save(commit=commit)]
for form_set in self._inline_form_sets:
instances.extend(form_set.save(commit=commit))
return instances
| samuelmaudo/yepes | yepes/forms/inline_model.py | Python | bsd-3-clause | 2,021 |
"""
conference_helper.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
created by @Robert_Avram on 2015 June 6
"""
import endpoints
from google.appengine.ext import ndb
from google.appengine.api import search
from google.appengine.api import taskqueue
from google.appengine.api import memcache
from google.net.proto.ProtocolBuffer import ProtocolBufferDecodeError
from webapp2 import cached_property
from models import Conference
from models import ConferenceSession
from models import ConferenceSpeaker
from models import Profile
from settings import MEMCACHE_ANNOUNCEMENTS_KEY
from settings import MEMCACHE_FEATURED_SPEAKER_KEY
from settings import ANNOUNCEMENT_TPL
from settings import DEFAULTS
from settings import OPERATORS
from settings import FIELDS
import message_models as mm
import logging
import utils
from datetime import datetime
def user_required(handler):
"""Decorator that checks if there's a user associated with the current session."""
def check_login(self, *args, **kwargs):
# Make sure there is a user authenticated
if not self.auth_user:
raise endpoints.UnauthorizedException('Authorization Required')
# Make sure the current user has a profile in our DB
if not self.user:
raise endpoints.UnauthorizedException('Even though you are authorized, you do not have a Profile \
please update your account first, Profile Required')
else:
return handler(self, *args, **kwargs)
return check_login
class BaseHandler(object):
''' Basic Handler functions that can be inherited by any api '''
@cached_property
def user(self):
''' helper function that computes and caches current user profile
relies on auth_user, returns Profile or None'''
# check if there is a current user logged in
if self.auth_user:
# get user_id of current logged in auth_user
user_id = utils.getUserId(self.auth_user)
p_key = ndb.Key(Profile, user_id)
# get Profile from datastore
return p_key.get()
else:
return None
@cached_property
def auth_user(self):
''' helper function that computes and caches current_user '''
return endpoints.get_current_user()
class ApiHelper(BaseHandler):
''' Class meant to help the conference Api. Base: BaseHandler '''
@staticmethod
def get_websafe_key(urlsafeKey, modelkind):
''' takes a urlsafeKey and the kind of key it should be and
returns the ndb.Key or raises a BadRequest Error if the key
        is not the proper format or not the right kind '''
try:
s_key = ndb.Key(urlsafe=urlsafeKey)
except ProtocolBufferDecodeError:
raise endpoints.BadRequestException(
'the key received is not a valid urlsafe key')
if (not s_key) or (not s_key.kind() == modelkind):
raise endpoints.BadRequestException(
                'the key is not a valid key for the kind %s' %
modelkind)
return s_key
@user_required
def _add_session_to_wishlist(self, request):
''' adds a session to the user's wishlist '''
# make sure that the websafeSessionKey is actually valid
s_key = self.get_websafe_key(
request.websafeSessionKey,
'ConferenceSession')
# check if the session exists in the db
session = s_key.get()
if not session:
raise endpoints.NotFoundException(
'The session you want to add does not exist')
# make sure the keys are not in the wishList already
if session.key.parent() not in self.user.wishList.conferences:
# if this conference doesn't exist in the wishList,
# add it since the session belongs to it
self.user.wishList.conferences.append(session.key.parent())
# this also implies that this session does not exist in the
# wishList
self.user.wishList.sessions.append(session.key)
self.user.put()
elif session.key not in self.user.wishList.sessions:
self.user.wishList.sessions.append(session.key)
self.user.put()
else:
raise endpoints.BadRequestException(
'the session is already in the wish list')
return True
def _query_index(self, qry):
''' Query the search index for sessions,
takes in search.Query '''
# Query the index.
index = search.Index(name='sessions')
try:
results = index.search(qry)
# Iterate through the search results.
items = []
for scored_document in results:
items.append(self._copy_session_doc_to_form(scored_document))
except search.Error as e:
logging.error(e)
return items
def _add_to_search_index(self, session, speaker, conference):
''' Create a search document based on session, speaker and conference,
        and add it to the search index '''
# define the index
index = search.Index(name='sessions')
# create the document object
doc = search.Document(
# the doc_id will be set to the key of the session
doc_id=session.key.urlsafe(),
fields=[
search.TextField(name='name', value=session.name),
search.TextField(name='type', value=session.type),
search.NumberField(name='duration', value=session.duration),
search.DateField(name="startDate", value=session.startDate),
search.NumberField(
name="startTime",
value=utils.time_to_minutes(
session.startTime)),
search.TextField(name='highlights', value=session.highlights),
search.TextField(
name='speakerName',
value=speaker.displayName),
search.TextField(name='conferenceName', value=conference.name),
search.TextField(name='conferenceTopics', value=" ".join(
[topic for topic in conference.topics])),
search.TextField(name='conferenceCity', value=conference.city),
search.TextField(
name='conferenceDescription',
value=conference.description),
])
try:
index.put(doc)
except search.PutError as e:
result = e.results[0]
if result.code == search.OperationResult.TRANSIENT_ERROR:
# if TRANSIENT_ERROR retry:
try:
index.put(result.object_id)
except search.Error as e:
logging.error(e)
except search.Error as e:
logging.error(e)
@user_required
def _remove_session_from_wishlist(
self, conf_sessionKey, removeConference=False):
''' Removes a session from the wishList '''
# make sure that the websafeSessionKey is actually valid
s_key = self.get_websafe_key(conf_sessionKey, 'ConferenceSession')
# check if the session exists in the db
session = s_key.get()
if not session:
raise endpoints.NotFoundException(
                'The session you want to remove does not exist')
# if key is in the wishList remove it otherwise BadRequestException
if session.key in self.user.wishList.sessions:
self.user.wishList.sessions.remove(session.key)
else:
raise endpoints.BadRequestException(
'the session is not in the wish list')
# if the user wants to remove the conference as well
if removeConference:
# check if there are any other sessions in the wishlist with the
# same conference
for sesskey in self.user.wishList.sessions:
if sesskey.parent() == session.key.parent():
raise endpoints.ConflictException(
"cannot remove conference because there are other sessions from this conference in the wish list")
self.user.wishList.conferences.remove(session.key.parent())
self.user.put()
return True
# cross-group needed because the speaker is not related to the session
@ndb.transactional(xg=True)
def _putSessionAndSpeaker(self, my_session, conf, speaker):
''' transactional put for session and speaker '''
my_session.put()
if conf.key not in speaker.conferences:
speaker.conferences.append(conf.key)
speaker.conferenceSessions.append(my_session.key)
speaker.put()
return (my_session, conf, speaker)
@user_required
def _get_wishlist(self):
return self.user.wishList.to_form()
@user_required
def _createSession(self, request):
'''creates a ConferenceSession, adds it as a child of the conference, returns the stored object'''
# make sure the speakerKey is for a valid speaker
speaker_key = self.get_websafe_key(
request.speakerKey,
"ConferenceSpeaker")
speaker = speaker_key.get()
        # make sure the speaker exists in the DB
if not speaker:
raise endpoints.NotFoundException(
"The speaker you requested was not found, \
Please register a speaker first")
# get Conference from the DB
wsck = self.get_websafe_key(request.websafeConferenceKey, "Conference")
conf = wsck.get()
# make sure conference exists and that it belongs to current user
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
if not conf.key.parent() == self.user.key:
raise endpoints.ForbiddenException(
'This conference was organized by a different user')
# get a key for the new session
s_id = ConferenceSession.allocate_ids(size=1, parent=conf.key)[0]
session_key = ndb.Key(ConferenceSession, s_id, parent=conf.key)
# put the session in the db and update conference
my_session = ConferenceSession.from_form(request, session_key)
# TODO: make sure that the session times fall between the conference
# times
# check if speaker already has a session within this conference
if conf.key in speaker.conferences:
            # if yes, retrieve all the other session names for this speaker in this conference
# note the current session is not included because we don't want to
# retrieve it again, we can just pass the name
sessions_in_conference = [
skey.urlsafe() for skey in speaker.conferenceSessions if skey.parent() == conf.key]
# make this a featured speaker for this conference,
            # as asked in task 4 of the project, by setting up a task to do this.
taskqueue.add(params={"speaker_name": speaker.displayName,
"sess_keys": sessions_in_conference,
"current_sess_name": my_session.name,
"conf": conf.name,
"conf_loc": conf.city},
url='/tasks/add_featured_speaker')
# use a transactional to make the updates
# current function would not allow a transactional because of the id
# allocation
self._putSessionAndSpeaker(my_session, conf, speaker)
# create an indexed document for the search API based on this session
self._add_to_search_index(my_session, speaker, conf)
return my_session.to_form(speaker)
@staticmethod
def _setFeaturedSpeaker(
speaker_name, sess_keys, current_sess_name, conf, conf_loc):
        ''' Sets the featured speaker in memcache '''
# get the sessions from sess_keys, we can assume that the sess_keys are valid since they
# are passed by the task
sessions = ndb.get_multi([ndb.Key(urlsafe=sk) for sk in sess_keys])
s_names = [s.name for s in sessions]
s_names.append(current_sess_name)
memcache.set(key=MEMCACHE_FEATURED_SPEAKER_KEY, value={"name": speaker_name,
"sessions": s_names,
"conf": conf,
"conf_loc": conf_loc})
@user_required
def _registerSpeaker(self, request):
'''registers a speaker, user needs to be logged in and conference organizer to register a speaker'''
# make sure the displayName received is valid format
if not utils.is_valid_name(request.displayName):
raise endpoints.BadRequestException(
"displayName is not valid: it must be between 3 and 50 characters with no special characters and title case")
        # make sure the user has organizer privileges or has organized at least
# one conference
cnt = Conference.query(ancestor=self.user.key).count(limit=1)
if not cnt:
raise endpoints.ForbiddenException(
"You need to have organized at least one conference in order to register speakers")
speaker = ConferenceSpeaker(displayName=request.displayName)
speaker.put()
return speaker.to_form()
def _queryproblem(self, request):
        ''' session query method that excludes sessions starting at or after a given
        hour (whole-hour blocks) and filters out up to 3 session types '''
# to reduce friction we will only allow 3 excludes
if len(request.exclude) > 3:
raise endpoints.BadRequestException(
"You are only allowed to exclude up to 3 types of Sessions.")
# list of all allowed timeslots
# ideally this list is created in order from the most popular session
# times
allowed_timeslots = [i for i in range(24)]
# compose a list of unavailable times
if request.afterTime:
dissalowed_timeslots = [i for i in xrange(request.afterTime, 24)]
else:
dissalowed_timeslots = []
        # exclude disallowed timeslots
query_times = [
i for i in allowed_timeslots if i not in dissalowed_timeslots]
q = ConferenceSession.query()
q = q.filter(ConferenceSession.startTimeSlot.IN(query_times))
# filter out all excludes
for s_type in request.exclude:
q = q.filter(ConferenceSession.type != s_type)
        # order by session type first since that is the inequality filter
        q = q.order(ConferenceSession.type)
        q = q.order(ConferenceSession.startTime)
# fetch max 100 records
sessions = q.fetch(100)
speaker_keys = []
        for sess in sessions:
speaker_keys.append(sess.speakerKey)
# get speakers for every session in order
speakers = ndb.get_multi(speaker_keys)
return mm.ConferenceSessionForms(
items=[sessions[i].to_form(speakers[i]) for i in range(len(sessions))])
def _copy_session_doc_to_form(self, doc):
''' copies a ScoredDocument to ConferenceSessionForm_search '''
form_out = mm.ConferenceSessionForm_search()
setattr(form_out, "websafeSessionKey", doc.doc_id)
for field in doc.fields:
if isinstance(field, search.NumberField):
if field.name == "startTime":
setattr(form_out,
field.name,
utils.minutes_to_timestring(int(field.value)))
continue
setattr(form_out, field.name, int(field.value))
elif isinstance(field, search.DateField):
setattr(form_out, field.name, str(field.value))
else:
setattr(form_out, field.name, field.value)
form_out.check_initialized()
return form_out
def _queryproblem2(self, request):
''' use the search API to query for specific sessions '''
# only allow up to 3 excludes and 3 includes
if len(request.exclude_types) > 3 or len(request.include_types) > 3:
raise endpoints.BadRequestException(
"you can only exclude or include max 3 types")
# limit the length of the search fields that someone sends
if (request.search_highlights and len(request.search_highlights) > 50)\
or (request.search_general and len(request.search_general) > 50):
raise endpoints.BadRequestException(
"your search query strings can only be up to 50 characters, longer blocks are useless anyway")
# start forming the query string qs
qs = ''
# check if the variables were passed in and update the qs accordingly
if request.before_time:
qs += 'startTime < ' + \
str(utils.time_to_minutes(request.before_time))
if request.after_time:
qs += ' startTime > ' + \
str(utils.time_to_minutes(request.after_time))
if request.exclude_types:
qs += " NOT type: ("
for i in range(len(request.exclude_types)):
qs += utils.clean_s(request.exclude_types[i])
if not i == len(request.exclude_types) - 1:
qs += " OR "
continue
qs += ")"
if request.include_types:
qs += " type: ("
for i in range(len(request.include_types)):
qs += utils.clean_s(request.include_types[i])
if not i == len(request.include_types) - 1:
qs += " OR "
continue
qs += ")"
if request.search_highlights:
qs += " highlights:" + utils.clean_s(request.search_highlights)
if request.search_general:
qs += " " + utils.clean_s(request.search_general)
# add some sorting options
sort1 = search.SortExpression(
expression='startDate',
direction=search.SortExpression.ASCENDING,
default_value=0)
# compose the sort options
# attn: Using match_scorer is more expensive to run but it sorts the
# documents based on relevance better.
sort_opts = search.SortOptions(
expressions=[sort1],
match_scorer=search.MatchScorer())
# add some query options, limit on 25 results
query_options = search.QueryOptions(
limit=25,
sort_options=sort_opts)
# compose the query
qry = search.Query(query_string=qs, options=query_options)
return self._query_index(qry)
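    # For illustration (the request values below are hypothetical): with
    # after_time=10:00, include_types=["Workshop", "Keynote"] and
    # search_highlights="python", the query string assembled above comes out
    # roughly as ' startTime > 600 type: (Workshop OR Keynote) highlights:python'
    # (exact text depends on utils.time_to_minutes and utils.clean_s).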
# PREVIOUSLY EXISTING METHODS - - - - - - - - - - - - - - - - - - - - -
# - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = utils.getUserId(user)
# modify function to not allow creation of conferences without having a
# profile
if not self.user:
raise endpoints.ForbiddenException(
"Before creating conferences, you need a profile, run getProfile method first")
if not request.name:
raise endpoints.BadRequestException(
"Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound
# Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on
# start_date
if data['startDate']:
            data['startDate'] = datetime.strptime(
                data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
            data['endDate'] = datetime.strptime(
                data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = utils.getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
# by @Robert_Avram: replaced the self._copyConferenceToForm with
# conf.to_form
return conf.to_form(getattr(prof, 'displayName'))
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
filters = sorted(filters, key=lambda k: k['field'])
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(
filtr["field"],
filtr["operator"],
filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
current_fields = []
formatted_filters = []
inequality_field = None
for f in filters:
            filtr = {field.name: getattr(f, field.name)
                     for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException(
"Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is
# performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException(
"Inequality filter is allowed on only one field.")
elif filtr["field"] in ["city", "topics"]:
raise endpoints.BadRequestException(
"Inequality filter not allowed on city or topics.")
else:
inequality_field = filtr["field"]
if filtr["field"] in current_fields:
raise endpoints.BadRequestException(
"You cannot query multiple fields of one type, %s" %
filtr['field'])
current_fields.append(filtr['field'])
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
# TODO: replace _copyProfileToForm with a to_form method on the Profile
# model
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = mm.ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
                    setattr(pf, field.name,
                            getattr(mm.TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = utils.getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key=p_key,
displayName=user.nickname(),
mainEmail=user.email(),
teeShirtSize=str(mm.TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
# if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
# else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
| robertavram/ConferenceAPI | conference_helper.py | Python | apache-2.0 | 30,790 |
# pylint: disable=unused-variable,expression-not-assigned,misplaced-comparison-constant,singleton-comparison
from pathlib import Path
import pytest
from expecter import expect
from memegen.domain import Font
def describe_font():
@pytest.fixture
def font():
return Font(Path('mock_dir', 'FooBar.otf'))
def describe_str():
def is_based_on_name(font):
expect(str(font)) == 'foobar'
def describe_bool():
def is_based_on_default(font):
expect(bool(font)) == True
font.DEFAULT = 'foobar'
expect(bool(font)) == False
def describe_name():
def is_derived_from_filename(font):
expect(font.name) == 'foobar'
def it_replaces_underscores(font):
font.path = Path('a_b')
expect(font.name) == 'a-b'
| DanLindeman/memegen | memegen/tests/test_domain_font.py | Python | mit | 841 |
"""
WSGI config for Banker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Banker.settings")
application = get_wsgi_application()
| christopher-henderson/Banker | Banker/Banker/wsgi.py | Python | mit | 389 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ObjectDoesNotExist
from taiga.base.api import serializers
from taiga.base.fields import Field, MethodField
from taiga.front.templatetags.functions import resolve as resolve_front_url
from taiga.projects.services import get_logo_big_thumbnail_url
from taiga.users.services import get_user_photo_url
from taiga.users.gravatar import get_user_gravatar_id
########################################################################
# WebHooks
########################################################################
class WebhookSerializer(serializers.LightSerializer):
id = Field()
project = Field(attr="project_id")
name = Field()
url = Field()
key = Field()
logs_counter = MethodField()
def get_logs_counter(self, obj):
return obj.logs.count()
class WebhookLogSerializer(serializers.LightSerializer):
id = Field()
webhook = Field(attr="webhook_id")
url = Field()
status = Field()
request_data = Field()
request_headers = Field()
response_data = Field()
response_headers = Field()
duration = Field()
created = Field()
########################################################################
# User
########################################################################
class UserSerializer(serializers.LightSerializer):
id = Field(attr="pk")
permalink = MethodField()
username = MethodField()
full_name = MethodField()
photo = MethodField()
gravatar_id = MethodField()
def get_permalink(self, obj):
return resolve_front_url("user", obj.username)
def get_username(self, obj):
return obj.get_username()
def get_full_name(self, obj):
return obj.get_full_name()
def get_photo(self, obj):
return get_user_photo_url(obj)
def get_gravatar_id(self, obj):
return get_user_gravatar_id(obj)
def to_value(self, instance):
if instance is None:
return None
return super().to_value(instance)
########################################################################
# Project
########################################################################
class ProjectSerializer(serializers.LightSerializer):
id = Field(attr="pk")
permalink = MethodField()
name = MethodField()
logo_big_url = MethodField()
def get_permalink(self, obj):
return resolve_front_url("project", obj.slug)
def get_name(self, obj):
return obj.name
def get_logo_big_url(self, obj):
return get_logo_big_thumbnail_url(obj)
########################################################################
# History Serializer
########################################################################
class HistoryDiffField(Field):
def to_value(self, value):
# Tip: 'value' is the object returned by
# taiga.projects.history.models.HistoryEntry.values_diff()
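        # The result maps each changed field to a {"from": ..., "to": ...} pair, e.g.
        # {"subject": {"from": "Old title", "to": "New title"},
        #  "points": {"UX": {"from": "2", "to": "3"}}}  (illustrative values)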
ret = {}
for key, val in value.items():
if key in ["attachments", "custom_attributes", "description_diff"]:
ret[key] = val
elif key == "points":
ret[key] = {k: {"from": v[0], "to": v[1]} for k, v in val.items()}
else:
ret[key] = {"from": val[0], "to": val[1]}
return ret
class HistoryEntrySerializer(serializers.LightSerializer):
comment = Field()
comment_html = Field()
delete_comment_date = Field()
comment_versions = Field()
edit_comment_date = Field()
diff = HistoryDiffField(attr="values_diff")
########################################################################
# _Misc_
########################################################################
class CustomAttributesValuesWebhookSerializerMixin(serializers.LightSerializer):
custom_attributes_values = MethodField()
def custom_attributes_queryset(self, project):
raise NotImplementedError()
def get_custom_attributes_values(self, obj):
def _use_name_instead_id_as_key_in_custom_attributes_values(custom_attributes, values):
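            # Rekey the raw values dict (keyed by attribute id) by attribute name,
            # e.g. custom_attributes=[{"id": 1, "name": "Color"}] and values={"1": "blue"}
            # would yield {"Color": "blue"}  (illustrative values)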
ret = {}
for attr in custom_attributes:
value = values.get(str(attr["id"]), None)
if value is not None:
ret[attr["name"]] = value
return ret
try:
values = obj.custom_attributes_values.attributes_values
custom_attributes = self.custom_attributes_queryset(obj.project).values('id', 'name')
return _use_name_instead_id_as_key_in_custom_attributes_values(custom_attributes, values)
except ObjectDoesNotExist:
return None
class RolePointsSerializer(serializers.LightSerializer):
role = MethodField()
name = MethodField()
value = MethodField()
def get_role(self, obj):
return obj.role.name
def get_name(self, obj):
return obj.points.name
def get_value(self, obj):
return obj.points.value
class EpicStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
class UserStoryStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
is_archived = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
def get_is_archived(self, obj):
return obj.is_archived
class TaskStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
class IssueStatusSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
slug = MethodField()
color = MethodField()
is_closed = MethodField()
def get_name(self, obj):
return obj.name
def get_slug(self, obj):
return obj.slug
def get_color(self, obj):
return obj.color
def get_is_closed(self, obj):
return obj.is_closed
class IssueTypeSerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
color = MethodField()
def get_name(self, obj):
return obj.name
def get_color(self, obj):
return obj.color
class PrioritySerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
color = MethodField()
def get_name(self, obj):
return obj.name
def get_color(self, obj):
return obj.color
class SeveritySerializer(serializers.LightSerializer):
id = Field(attr="pk")
name = MethodField()
color = MethodField()
def get_name(self, obj):
return obj.name
def get_color(self, obj):
return obj.color
########################################################################
# Milestone
########################################################################
class MilestoneSerializer(serializers.LightSerializer):
id = Field()
name = Field()
slug = Field()
estimated_start = Field()
estimated_finish = Field()
created_date = Field()
modified_date = Field()
closed = Field()
disponibility = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
def get_permalink(self, obj):
return resolve_front_url("taskboard", obj.project.slug, obj.slug)
def to_value(self, instance):
if instance is None:
return None
return super().to_value(instance)
########################################################################
# User Story
########################################################################
class UserStorySerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
project = ProjectSerializer()
is_closed = Field()
created_date = Field()
modified_date = Field()
finish_date = Field()
due_date = Field()
due_date_reason = Field()
subject = Field()
client_requirement = Field()
team_requirement = Field()
generated_from_issue = Field(attr="generated_from_issue_id")
generated_from_task = Field(attr="generated_from_task_id")
from_task_ref = Field()
external_reference = Field()
tribe_gig = Field()
watchers = MethodField()
is_blocked = Field()
blocked_note = Field()
description = Field()
tags = Field()
permalink = MethodField()
owner = UserSerializer()
assigned_to = UserSerializer()
assigned_users = MethodField()
points = MethodField()
status = UserStoryStatusSerializer()
milestone = MilestoneSerializer()
def get_permalink(self, obj):
return resolve_front_url("userstory", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.userstorycustomattributes.all()
def get_assigned_users(self, obj):
"""Get the assigned of an object.
:return: User queryset object representing the assigned users
"""
return [user.id for user in obj.assigned_users.all()]
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
def get_points(self, obj):
return RolePointsSerializer(obj.role_points.all(), many=True).data
########################################################################
# Task
########################################################################
class TaskSerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
created_date = Field()
modified_date = Field()
finished_date = Field()
due_date = Field()
due_date_reason = Field()
subject = Field()
us_order = Field()
taskboard_order = Field()
is_iocaine = Field()
external_reference = Field()
watchers = MethodField()
is_blocked = Field()
blocked_note = Field()
description = Field()
tags = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
assigned_to = UserSerializer()
status = TaskStatusSerializer()
user_story = UserStorySerializer()
milestone = MilestoneSerializer()
promoted_to = MethodField()
def get_permalink(self, obj):
return resolve_front_url("task", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.taskcustomattributes.all()
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
def get_promoted_to(self, obj):
return list(obj.generated_user_stories.values_list("id", flat=True))
########################################################################
# Issue
########################################################################
class IssueSerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
created_date = Field()
modified_date = Field()
finished_date = Field()
due_date = Field()
due_date_reason = Field()
subject = Field()
external_reference = Field()
watchers = MethodField()
description = Field()
tags = Field()
permalink = MethodField()
project = ProjectSerializer()
milestone = MilestoneSerializer()
owner = UserSerializer()
assigned_to = UserSerializer()
status = IssueStatusSerializer()
type = IssueTypeSerializer()
priority = PrioritySerializer()
severity = SeveritySerializer()
promoted_to = MethodField()
def get_permalink(self, obj):
return resolve_front_url("issue", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.issuecustomattributes.all()
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
def get_promoted_to(self, obj):
return list(obj.generated_user_stories.values_list("id", flat=True))
########################################################################
# Wiki Page
########################################################################
class WikiPageSerializer(serializers.LightSerializer):
id = Field()
slug = Field()
content = Field()
created_date = Field()
modified_date = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
last_modifier = UserSerializer()
def get_permalink(self, obj):
return resolve_front_url("wiki", obj.project.slug, obj.slug)
########################################################################
# Epic
########################################################################
class EpicSerializer(CustomAttributesValuesWebhookSerializerMixin, serializers.LightSerializer):
id = Field()
ref = Field()
created_date = Field()
modified_date = Field()
subject = Field()
watchers = MethodField()
description = Field()
tags = Field()
permalink = MethodField()
project = ProjectSerializer()
owner = UserSerializer()
assigned_to = UserSerializer()
status = EpicStatusSerializer()
epics_order = Field()
color = Field()
client_requirement = Field()
team_requirement = Field()
def get_permalink(self, obj):
return resolve_front_url("epic", obj.project.slug, obj.ref)
def custom_attributes_queryset(self, project):
return project.epiccustomattributes.all()
def get_watchers(self, obj):
return list(obj.get_watchers().values_list("id", flat=True))
class EpicRelatedUserStorySerializer(serializers.LightSerializer):
id = Field()
user_story = MethodField()
epic = MethodField()
order = Field()
def get_user_story(self, obj):
return UserStorySerializer(obj.user_story).data
def get_epic(self, obj):
return EpicSerializer(obj.epic).data
| taigaio/taiga-back | taiga/webhooks/serializers.py | Python | agpl-3.0 | 15,461 |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from benchmarks import memory
from core import perf_benchmark
from contrib.cluster_telemetry import ct_benchmarks_util
from contrib.cluster_telemetry import page_set
from telemetry.page import traffic_setting
class MemoryClusterTelemetry(perf_benchmark.PerfBenchmark):
options = {'upload_results': True}
_ALL_NET_CONFIGS = traffic_setting.NETWORK_CONFIGS.keys()
enable_heap_profiling = True
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
super(MemoryClusterTelemetry, cls).AddBenchmarkCommandLineArgs(parser)
ct_benchmarks_util.AddBenchmarkCommandLineArgs(parser)
parser.add_option(
'--wait-time', action='store', type='int',
default=60, help=('Number of seconds to wait for after navigation '
'and before taking memory dump.'))
parser.add_option(
'--traffic-setting', choices=cls._ALL_NET_CONFIGS,
default=traffic_setting.REGULAR_4G,
        help='Traffic condition (string). Defaults to "%%default". Can be: %s' %
', '.join(cls._ALL_NET_CONFIGS))
parser.add_option(
'--disable-heap-profiling', action='store_true',
        help=('Disable heap profiling to reduce perf overhead. Note that this '
              'makes the test more realistic but gives less accurate memory '
              'metrics.'))
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
super(MemoryClusterTelemetry, cls).ProcessCommandLineArgs(parser, args)
cls.enable_heap_profiling = not args.disable_heap_profiling
def CreateCoreTimelineBasedMeasurementOptions(self):
return memory.CreateCoreTimelineBasedMemoryMeasurementOptions()
def SetExtraBrowserOptions(self, options):
memory.SetExtraBrowserOptionsForMemoryMeasurement(options)
if self.enable_heap_profiling:
options.AppendExtraBrowserArgs([
'--memlog=all --memlog-stack-mode=pseudo',
])
def CreateStorySet(self, options):
def WaitAndMeasureMemory(action_runner):
action_runner.Wait(options.wait_time)
action_runner.MeasureMemory(deterministic_mode=True)
return page_set.CTPageSet(
options.urls_list, options.user_agent, options.archive_data_file,
traffic_setting=options.traffic_setting,
run_page_interaction_callback=WaitAndMeasureMemory)
@classmethod
def Name(cls):
return 'memory.cluster_telemetry'
| endlessm/chromium-browser | tools/perf/contrib/cluster_telemetry/memory_ct.py | Python | bsd-3-clause | 2,525 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
thisAlgorithmBecomingSkynetCost = 99999999999
from .source import ectools, wrdslib, static, sas
from .source import wrds_loop, get_wrds, find_wrds, setup_wrds_key
from .source import user_info | jbrockmendel/pywrds | __init__.py | Python | bsd-3-clause | 238 |
"""
WSGI config for housem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import sys
sys.path.insert(0, '/home/daniilr/upravd/')
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "housem.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
######################################### | dan4ik95dv/housemanagement | tsj/wsgi.py | Python | mit | 482 |
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from neutron.agent import l2population_rpc
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.tests import base
class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin):
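    # Minimal concrete agent for the tests below: it provides no-op implementations
    # of the callbacks used by the l2population tunnel RPC mixin.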
def fdb_add(self, context, fdb_entries):
pass
def fdb_remove(self, context, fdb_entries):
pass
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
pass
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
pass
def setup_tunnel_port(self, br, remote_ip, network_type):
pass
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
pass
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
pass
class TestL2populationRpcCallBackTunnelMixinBase(base.BaseTestCase):
def setUp(self):
super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp()
self.fakeagent = FakeNeutronAgent()
self.fakebr = mock.Mock()
Port = collections.namedtuple('Port', 'ip, ofport')
LVM = collections.namedtuple(
'LVM', 'net, vlan, phys, segid, mac, ip, vif, port')
self.local_ip = '127.0.0.1'
self.type_gre = 'gre'
self.ports = [Port(ip='10.1.0.1', ofport='ofport1'),
Port(ip='10.1.0.2', ofport='ofport2'),
Port(ip='10.1.0.3', ofport='ofport3')]
self.ofports = {
self.type_gre: {
self.ports[0].ip: self.ports[0].ofport,
self.ports[1].ip: self.ports[1].ofport,
self.ports[2].ip: self.ports[2].ofport,
}
}
self.lvms = [LVM(net='net1', vlan=1, phys='phys1', segid='tun1',
mac='mac1', ip='1.1.1.1', vif='vifid1',
port='port1'),
LVM(net='net2', vlan=2, phys='phys2', segid='tun2',
mac='mac2', ip='2.2.2.2', vif='vifid2',
port='port2'),
LVM(net='net3', vlan=3, phys='phys3', segid='tun3',
mac='mac3', ip='3.3.3.3', vif='vifid3',
port='port3')]
self.agent_ports = {
self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)],
self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
}
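        # fdb entries as delivered over the l2pop RPC: one entry per network carrying
        # the overlay type, the segment id and the (mac, ip) port entries known for
        # each tunnel peer; the local agent's own IP starts with an empty port list.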
self.fdb_entries1 = {
self.lvms[0].net: {
'network_type': self.type_gre,
'segment_id': self.lvms[0].segid,
'ports': {
self.local_ip: [],
self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)]},
},
self.lvms[1].net: {
'network_type': self.type_gre,
'segment_id': self.lvms[1].segid,
'ports': {
self.local_ip: [],
self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)]},
},
self.lvms[2].net: {
'network_type': self.type_gre,
'segment_id': self.lvms[2].segid,
'ports': {
self.local_ip: [],
self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)]},
},
}
self.lvm1 = ovs_neutron_agent.LocalVLANMapping(
self.lvms[0].vlan, self.type_gre, self.lvms[0].phys,
self.lvms[0].segid, {self.lvms[0].vif: self.lvms[0].port})
self.lvm2 = ovs_neutron_agent.LocalVLANMapping(
self.lvms[1].vlan, self.type_gre, self.lvms[1].phys,
self.lvms[1].segid, {self.lvms[1].vif: self.lvms[1].port})
self.lvm3 = ovs_neutron_agent.LocalVLANMapping(
self.lvms[2].vlan, self.type_gre, self.lvms[2].phys,
self.lvms[2].segid, {self.lvms[2].vif: self.lvms[2].port})
self.local_vlan_map1 = {
self.lvms[0].net: self.lvm1,
self.lvms[1].net: self.lvm2,
self.lvms[2].net: self.lvm3,
}
self.upd_fdb_entry1_val = {
self.lvms[0].net: {
self.ports[0].ip: {
'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
self.lvms[0].ip)],
'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
self.lvms[1].ip)],
},
self.ports[1].ip: {
'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
self.lvms[0].ip)],
'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
self.lvms[1].ip)],
},
},
self.lvms[1].net: {
self.ports[2].ip: {
'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
self.lvms[0].ip)],
'after': [l2pop_rpc.PortInfo(self.lvms[2].mac,
self.lvms[2].ip)],
},
},
}
self.upd_fdb_entry1 = {'chg_ip': self.upd_fdb_entry1_val}
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.ofports[network_type].get(remote_ip)
| rdo-management/neutron | neutron/tests/unit/agent/l2population_rpc_base.py | Python | apache-2.0 | 6,121 |
"""
Script to do SVD on the covariance matrix of the voxel by time matrix.
Run with:
python pca_script.py
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.linalg as npl
import nibabel as nib
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
# Relative paths to project and data.
project_path = "../../../"
path_to_data = project_path+"data/ds009/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
behav_suffix = "/behav/task001_run001/behavdata.txt"
sys.path.append(location_of_functions)
from Image_Visualizing import make_mask
# List of subject directories.
sub_list = os.listdir(path_to_data)
sub_list = [i for i in sub_list if 'sub' in i]
# Initialize array to store variance proportions.
masked_var_array = np.zeros((10, len(sub_list)))
# Loop through all the subjects.
for j in range(len(sub_list)):
name = sub_list[j]
    # the number of TRs to drop at the beginning is not standardized at 6
behav=pd.read_table(path_to_data+name+behav_suffix,sep=" ")
num_TR = float(behav["NumTRs"])
# Load image data.
img = nib.load(path_to_data+ name+ "/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data.astype(float)
# Load mask.
mask = nib.load(path_to_data+ name+'/anatomy/inplane001_brain_mask.nii.gz')
mask_data = mask.get_data()
# Drop the appropriate number of volumes from the beginning.
first_n_vols=data.shape[-1]
num_TR_cut=int(first_n_vols-num_TR)
data = data[...,num_TR_cut:]
# Now fit a mask to the 3-d image for each time point.
my_mask = np.zeros(data.shape)
for i in range(my_mask.shape[-1]):
my_mask[...,i] = make_mask(data[...,i], mask_data, fit=True)
# Reshape stuff to 2-d (voxel by time) and mask the data.
    # This should cut down the number of voxels by more than 50%.
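    # e.g. a (64, 64, 34, T) volume series becomes a (64*64*34, T) voxel-by-time
    # matrix before masking (illustrative dimensions).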
my_mask_2d = my_mask.reshape((-1,my_mask.shape[-1]))
data_2d = data.reshape((-1,data.shape[-1]))
masked_data_2d = data_2d[my_mask_2d.sum(1) != 0,:]
# Subtract means over voxels (columns).
data_2d = data_2d - np.mean(data_2d, 0)
masked_data_2d = masked_data_2d - np.mean(masked_data_2d, 0)
# Subtract means over time (rows)
data_2d = data_2d - np.mean(data_2d, axis=1)[:, None]
masked_data_2d = masked_data_2d - np.mean(masked_data_2d, axis=1)[:, None]
# PCA analysis on unmasked data:
# Do SVD on the time by time matrix and get explained variance.
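    # data_2d is (voxel x time), so data_2d.T.dot(data_2d) is the (time x time)
    # scatter matrix; its singular values are proportional to the variance captured
    # by each principal component, hence the S / np.sum(S) normalization below.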
U, S, VT = npl.svd(data_2d.T.dot(data_2d))
exp_var = S / np.sum(S)
var_sums = np.cumsum(exp_var)
# PCA analysis on MASKED data:
# Do SVD on the time by time matrix and get explained variance.
U_masked, S_masked, VT_masked = npl.svd(masked_data_2d.T.dot(masked_data_2d))
exp_var_masked = S_masked / np.sum(S_masked)
var_sums_masked= np.cumsum(exp_var_masked)
masked_var_array[:,j] = exp_var_masked[:10] # Store the first 10 variance proportions.
# Setting up legend colors.
hand_un = mlines.Line2D([], [], color='b', label='Not Masked')
hand_mask = mlines.Line2D([], [], color='r', label='Masked')
# Compare proportion of variance explained by each component for masked and unmasked data.
plt.plot(np.arange(1,11), exp_var[:10], 'b-o')
plt.plot(np.arange(1,11), exp_var_masked[:10], 'r-o')
plt.legend(handles=[hand_un, hand_mask])
plt.xlabel("Principal Components")
plt.title("Proportion of Variance Explained by Each Component for " + name)
plt.savefig(location_of_images+'pcapropvar'+name+'.png')
plt.close()
# Compare sum of proportion of variance explained by each component for masked and unmasked data.
plt.plot(np.arange(1,11), var_sums[:10], 'b-o')
plt.plot(np.arange(1,11), var_sums_masked[:10], 'r-o')
plt.axhline(y=0.4, color='k')
plt.legend(handles=[hand_un, hand_mask], loc=4)
plt.xlabel("Number of Principal Components")
plt.title("Sum of Proportions of Variance Explained by Components for " + name)
plt.savefig(location_of_images+'pcacumsums'+name+'.png')
plt.close()
masked_cumsums_array = masked_var_array.cumsum(0)
#######################
# Plots of Components #
#######################
pd.DataFrame(masked_cumsums_array).plot(x=np.arange(1,11), color=['0.2'], legend=False)
plt.plot(np.arange(1,11), pd.DataFrame(masked_cumsums_array).median(1), 'r-o')
plt.grid()
plt.axhline(y=0.4, color='k', linestyle="--")
plt.xlabel("Principal Components")
plt.title("Sum of Proportions of Variance Explained by Components")
plt.savefig(location_of_images+'pcaALL.png')
plt.close()
##########################
# Boxplots of components #
##########################
plt.boxplot(masked_cumsums_array.T)
plt.scatter(np.ones((24,10))*np.arange(1,11), masked_cumsums_array.T)
plt.grid()
plt.axhline(y=0.4, color='k', linestyle="--")
plt.xlabel("Principal Components")
plt.title("Sum of Proportions of Variance Explained by Components")
plt.savefig(location_of_images+'pcaBOX.png')
| berkeley-stat159/project-alpha | code/utils/scripts/pca_script.py | Python | bsd-3-clause | 5,106 |
class CampbellRobertsonSequence(DefaultSequence):
def boot(self, frameRateDivisor=1):
super().boot( frameRateDivisor=frameRateDivisor )
| szecsi/Gears | GearsPy/Project/Experiments/8_Patterns/8_CampbellRobertson/CampbellRobertsonExperimentExperiment.py | Python | gpl-2.0 | 150 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('vile_korga_battlelord')
mobileTemplate.setLevel(78)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(3)
mobileTemplate.setMaxSpawnDistance(5)
mobileTemplate.setDeathblow(True)
mobileTemplate.setSocialGroup('korga tribe')
mobileTemplate.setAssistRange(12)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
mobileTemplate.setStalker(True)
templates = Vector()
templates.add('object/mobile/shared_dulok_male.iff')
templates.add('object/mobile/shared_dulok_female.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 65
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
lootPoolNames_2 = ['random_loot_primitives']
lootPoolChances_2 = [100]
lootGroupChance_2 = 35
mobileTemplate.addToLootGroups(lootPoolNames_2,lootPoolChances_2,lootGroupChance_2)
core.spawnService.addMobileTemplate('vile_korga_battlelord', mobileTemplate)
return | ProjectSWGCore/NGECore2 | scripts/mobiles/endor/vile_korga_battlelord.py | Python | lgpl-3.0 | 1,773 |
# -*- coding: utf-8 -*-
"""
edacc.web
---------
In this module the flask application instance is defined and configured
according to the settings in config.py.
:copyright: (c) 2010 by Daniel Diepold.
:license: MIT, see LICENSE for details.
"""
import uuid, datetime, os
from jinja2 import FileSystemBytecodeCache
from werkzeug import ImmutableDict
from flask import Flask, Request, g, Blueprint
from flask.ext.cache import Cache
from flask.ext.mail import Mail
from simplekv.fs import FilesystemStore
from flask.ext.kvsession import KVSessionExtension
from edacc import config, models, utils
try:
os.makedirs(config.TEMP_DIR)
except OSError:
pass
Flask.jinja_options = ImmutableDict({
'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_'],
'bytecode_cache': FileSystemBytecodeCache(config.TEMP_DIR),
'trim_blocks': True
})
app = Flask(__name__)
app.debug = config.DEBUG
cache = Cache()
mail = Mail()
#session_store = FilesystemStore(config.TEMP_DIR, perm=0600)
if config.LOGGING:
# set up logging if configured
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler(config.LOG_FILE)
file_handler.setLevel(logging.WARNING)
formatter = logging.Formatter("---------------------------\n" + \
"%(asctime)s - %(name)s - " + \
"%(levelname)s\n%(message)s")
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
# initialize configured database connections
for username, password, database, label, hidden in config.DEFAULT_DATABASES:
models.add_database(username, password, database, label, hidden)
class LimitedRequest(Request):
""" extending Flask's request class to limit form uploads to 500 MB """
max_form_memory_size = 500 * 1024 * 1024
app.request_class = LimitedRequest
app.config.update(
SECRET_KEY=config.SECRET_KEY,
PERMANENT_SESSION_LIFETIME=datetime.timedelta(days=14),
CACHE_TYPE='filesystem',
CACHE_DIR=config.TEMP_DIR,
MAIL_SERVER=config.MAIL_SERVER,
MAIL_PORT=config.MAIL_PORT,
MAIL_USE_TLS=config.MAIL_USE_TLS,
MAIL_USE_SSL=config.MAIL_USE_SSL,
MAIL_USERNAME=config.MAIL_USERNAME,
MAIL_PASSWORD=config.MAIL_PASSWORD,
DEFAULT_MAIL_SENDER=config.DEFAULT_MAIL_SENDER
)
cache.init_app(app)
mail.init_app(app)
#KVSessionExtension(session_store, app)
# register view modules
from edacc.views.admin import admin
from edacc.views.accounts import accounts
from edacc.views.frontend import frontend
from edacc.views.analysis import analysis
from edacc.views.plot import plot
from edacc.views.api import api
app.register_blueprint(admin)
app.register_blueprint(accounts)
app.register_blueprint(frontend)
app.register_blueprint(analysis)
app.register_blueprint(plot)
app.register_blueprint(api)
from edacc.plugins.borgexplorer import borgexplorer
app.register_blueprint(borgexplorer)
app.jinja_env.filters['download_size'] = utils.download_size
app.jinja_env.filters['job_status_color'] = utils.job_status_color
app.jinja_env.filters['job_result_code_color'] = utils.job_result_code_color
app.jinja_env.filters['launch_command'] = utils.launch_command
app.jinja_env.filters['datetimeformat'] = utils.datetimeformat
app.jinja_env.filters['competition_phase'] = utils.competition_phase
app.jinja_env.filters['result_time'] = utils.result_time
app.jinja_env.filters['render_formula'] = utils.render_formula
app.jinja_env.filters['truncate_name'] = utils.truncate_name
app.jinja_env.filters['parameter_template'] = utils.parameter_template
if config.PIWIK:
@app.before_request
def register_piwik():
""" Attach piwik URL to g """
g.PIWIK_URL = config.PIWIK_URL
@app.before_request
def make_unique_id():
""" Attach an unique ID to the request """
g.unique_id = uuid.uuid4().hex
@app.after_request
def shutdown_session(response):
""" remove SQLAlchemy session from thread after requests - might not even be needed for
non-declarative SQLAlchemy usage according to the SQLAlchemy documentation.
"""
for db in models.get_databases().itervalues():
db.session.remove()
return response
| EDACC/edacc_web | edacc/web.py | Python | mit | 4,224 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tripoli documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 12 11:05:04 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tripoli'
copyright = '2016, Eivind Fonn'
author = 'Eivind Fonn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Tripoli v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tripolidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Tripoli.tex', 'Tripoli Documentation',
'Eivind Fonn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user-added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tripoli', 'Tripoli Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Tripoli', 'Tripoli Documentation',
author, 'Tripoli', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| TheBB/tripoli | docs/source/conf.py | Python | gpl-3.0 | 9,843 |
# coding=utf-8
"""
This module, problem_019.py, solves the nineteenth project euler problem.
"""
from project_euler_problems.problem import Problem
from datetime import date
'''
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
# Solution from captainsafia, thanks! Link : https://gist.github.com/captainsafia/3390092
class ImplementedProblem(Problem):
"""This specific problem's implementation.
"""
def get_solution(self):
"""Solves the solution for problem 019.
:return: The solution for problem 019.
"""
number_of_sundays = 0
for year in range(1901, 2001):
for month in range(1, 13):
                # date(...) creates a datetime.date for the first day of the month.
                # weekday() returns the day of the week as an integer, where Monday
                # is 0 and Sunday is 6, so == 6 tests for a Sunday.
if date(year, month, 1).weekday() == 6:
number_of_sundays += 1
return number_of_sundays
| utarsuno/urbtek | project_euler_problems/problems/problem_019.py | Python | apache-2.0 | 1,284 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# hunger_games.py
# this makes an arena map for minecraft
import os
import sys
import minecraft_builder
import castle_maker
import clear_area
import math
style_stone = {'roof':'planks 1', 'walls':'minecraft:stone 0', 'floor':'minecraft:stone 4', 'posts':'minecraft:stone 3'}
x_cent = -351
y_cent = 62
z_cent = 300 #10000
w = 1 #34+18
h = 29
d = 1
r = 50
def main():
myrcon = castle_maker.rcon_connection()
#make_circle(x_cent, y_cent, z_cent, r, h, w, d, style_stone['walls'],myrcon)
#make_circle(x_cent, y_cent, z_cent, r, h, w-1, d, style_stone['posts'],myrcon)
#make_circle(x_cent, y_cent, z_cent, r, h, w+1, d, style_stone['posts'],myrcon)
# make the top parapet
# clear mistake make_circle(x_cent-1, y_cent+h, z_cent-1, r, h+1, w+1, d+1, 'minecraft:air',myrcon)
make_circle(x_cent-2, y_cent+h, z_cent-2, r, 2, w+2, d+2, 'minecraft:air',myrcon)
make_circle(x_cent, y_cent+h, z_cent, r, 2, w+2, d+2, style_stone['posts'],myrcon)
def make_circle(x, y, z, radius, height, width, depth, style,myrcon):
"""
    makes a circular wall of the given block style, centred on (x, z) at base
    height y; each point on the circle of the given radius anchors a fill of
    width x height x depth blocks
"""
PI = math.pi
for angle in range(1, 360, 1):
        angle_rad = angle * PI / 180  # convert the degree step to radians
cx = int(x + radius * math.cos(angle_rad))
cz = int(z + radius * math.sin(angle_rad))
castle_maker.fill_area(cx, y, cz, cx+width, y+height, cz+depth,style,myrcon)
main()
| acutesoftware/worldbuild | scripts/minecraft/hunger_games.py | Python | gpl-2.0 | 1,412 |
# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://github.com/brkt/brkt-cli/blob/master/LICENSE
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
from brkt_cli.esx import esx_args
def setup_update_vmdk_args(parser):
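    # Registers the CLI arguments for updating an encrypted VMDK: vCenter connection
    # and placement options, static network settings for the updater VM, its CPU and
    # memory sizing, the template/image names to update, and OVF/OVA output handling.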
esx_args.add_vcenter_host(parser)
esx_args.add_vcenter_port(parser)
esx_args.add_vcenter_datacenter(parser)
esx_args.add_vcenter_datastore(parser)
esx_args.add_vcenter_cluster(parser)
esx_args.add_vcenter_network_name(parser)
esx_args.add_static_ip_address(
parser,
help="Specify the static IP address of the updater VM"
)
esx_args.add_static_subnet_mask(
parser,
help="Specify the static subnet mask of the updater VM"
)
esx_args.add_static_default_router(
parser,
help="Specify the static default router of the updater VM"
)
esx_args.add_static_dns_domain(
parser,
help="Specify the static DNS domain of the updater VM"
)
esx_args.add_static_dns_server(
parser,
help="Specify the static DNS server of the updater VM"
)
esx_args.add_cpu(parser, help="Number of CPUs to assign to the Updater VM")
esx_args.add_memory(parser, help="Memory to assign to the Updater VM")
esx_args.add_template_vm_name(
parser,
help="Specify the name of the template VM to be updated"
)
esx_args.add_encrypted_image_directory(
parser,
help="Directory to fetch the encrypted OVF/OVA image"
)
esx_args.add_ovftool_path(parser)
esx_args.add_encrypted_image_name(
parser,
help="Specify the name of the encrypted OVF/OVA image to update"
)
parser.add_argument(
'--update-ovf',
dest='create_ovf',
action='store_true',
default=False,
help="Update OVF package"
)
parser.add_argument(
'--update-ova',
dest='create_ova',
action='store_true',
default=False,
help="Update OVA package"
)
esx_args.add_no_verify_cert(parser)
esx_args.add_ovf_source_directory(parser)
esx_args.add_metavisor_ovf_image_name(parser)
esx_args.add_metavisor_version(parser)
esx_args.add_use_esx_host(parser)
esx_args.add_http_s3_proxy(parser)
esx_args.add_encryptor_vmdk(parser)
esx_args.add_ssh_public_key(parser)
esx_args.add_bucket_name(parser)
esx_args.add_nic_type(parser)
esx_args.add_no_cleanup(parser)
| brkt/brkt-cli | brkt_cli/esx/update_encrypted_vmdk_args.py | Python | apache-2.0 | 2,864 |
# tests/products/test_ninja.py ----------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# ----------------------------------------------------------------------------
import argparse
import os
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
# from swift_build_support import cmake
from swift_build_support import shell
from swift_build_support.products import CMark
from swift_build_support.targets import StdlibDeploymentTarget
from swift_build_support.toolchain import host_toolchain
from swift_build_support.workspace import Workspace
class CMarkTestCase(unittest.TestCase):
def setUp(self):
# Setup workspace
tmpdir1 = os.path.realpath(tempfile.mkdtemp())
tmpdir2 = os.path.realpath(tempfile.mkdtemp())
os.makedirs(os.path.join(tmpdir1, 'cmark'))
self.workspace = Workspace(source_root=tmpdir1,
build_root=tmpdir2)
self.host = StdlibDeploymentTarget.host_target()
# Setup toolchain
self.toolchain = host_toolchain()
self.toolchain.cc = '/path/to/cc'
self.toolchain.cxx = '/path/to/cxx'
# Setup args
self.args = argparse.Namespace(
build_cmark=True,
cmake_generator="Ninja",
cmark_build_type="Release",
rebuild=False,
extra_cmake_options=[],
skip_build=False,
darwin_deployment_version_osx="10.9",
cmark_build_variant="Debug",
export_compile_commands=False,
reconfigure=False,
distcc=None,
sccache=None,
cmake_c_launcher=None,
cmake_cxx_launcher=None,
clang_user_visible_version=None,
build_ninja=False,
enable_asan=False,
enable_lsan=False,
enable_sanitize_coverage=False,
enable_tsan=False,
enable_ubsan=False)
# Setup shell
shell.dry_run = True
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
shutil.rmtree(self.workspace.build_root)
shutil.rmtree(self.workspace.source_root)
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
shell.dry_run = False
self.workspace = None
self.toolchain = None
self.args = None
def test_build(self):
# Test disabled until we've moved to cmake toolchains
        pass
# cmark = CMark(
# args=self.args,
# toolchain=self.toolchain,
# source_dir=self.workspace.source_root,
# build_dir=self.workspace.build_root)
# cmark.build(host_target=self.host.name)
# _cmake = cmake.CMake(self.args, self.toolchain)
# self.assertEqual(self.stdout.getvalue(), """\
# + pushd {build_dir}
# + {cmake} -DCMAKE_BUILD_TYPE:STRING={build_variant} {cmake_args} {source_dir}
# + popd
# + {cmake} --build {build_dir} --config {build_variant} -- all
# """.format(build_dir=self.workspace.build_root,
# source_dir=self.workspace.source_root,
# cmake=self.toolchain.cmake,
# cmake_args=' '.join(_cmake.common_options()),
# build_variant=self.args.cmark_build_variant))
def test_should_test(self):
cmark = CMark(
args=argparse.Namespace(test_cmark=True, cross_compile_hosts=[]),
toolchain=self.toolchain,
source_dir=self.workspace.source_root,
build_dir=self.workspace.build_root)
self.assertTrue(cmark.should_test(self.host.name))
def test_should_skip_test(self):
cmark = CMark(
args=argparse.Namespace(test_cmark=False, cross_compile_hosts=[]),
toolchain=self.toolchain,
source_dir=self.workspace.source_root,
build_dir=self.workspace.build_root)
self.assertFalse(cmark.should_test(self.host.name))
def test_should_skip_test_cross_compile(self):
cmark = CMark(
args=argparse.Namespace(test_cmark=True,
cross_compile_hosts=[self.host.name]),
toolchain=self.toolchain,
source_dir=self.workspace.source_root,
build_dir=self.workspace.build_root)
self.assertFalse(cmark.should_test(self.host.name))
| rudkx/swift | utils/swift_build_support/tests/products/test_cmark.py | Python | apache-2.0 | 4,913 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from py2neo.experimental.storage import FrozenGraphStore, MutableGraphStore
class GraphStoreTestCase(TestCase):
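    # Shared fixture graph: Alice, Bob, Carol and Dave, with LIKES edges in both
    # directions between Alice and Bob, KNOWS edges carrying a 'since' property,
    # and Carol MARRIED_TO Dave.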
store = MutableGraphStore()
a, b, c, d = store.add_nodes((
(["X"], {"name": "Alice"}),
(["X", "Y"], {"name": "Bob"}),
(["X", "Y"], {"name": "Carol"}),
(["Y"], {"name": "Dave"}),
))
(a_likes_b, b_likes_a, a_knows_b, a_knows_c,
c_knows_b, c_married_to_d) = store.add_relationships((
("LIKES", (a, b), {}),
("LIKES", (b, a), {}),
("KNOWS", (a, b), {"since": 1999}),
("KNOWS", (a, c), {"since": 2000}),
("KNOWS", (c, b), {"since": 2001}),
("MARRIED_TO", (c, d), {}),
)
)
def test_should_get_counts(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.node_count(), 4)
self.assertEqual(store.node_count("X"), 3)
self.assertEqual(store.relationship_count(), 6)
self.assertEqual(store.relationship_count("KNOWS"), 3)
self.assertEqual(store.node_labels(), {"X", "Y"})
self.assertEqual(store.relationship_types(), {"LIKES", "KNOWS", "MARRIED_TO"})
def test_should_get_node_degree(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.relationship_count(n_ids={self.a}), 4)
self.assertEqual(store.relationship_count(r_type="LIKES", n_ids={self.a}), 2)
self.assertEqual(store.relationship_count(n_ids={self.b}), 4)
self.assertEqual(store.relationship_count(n_ids={self.c}), 3)
self.assertEqual(store.relationship_count(n_ids={self.d}), 1)
def test_should_get_nodes(self):
store = FrozenGraphStore(self.store)
self.assertEqual(set(store.nodes()), {self.a, self.b, self.c, self.d})
def test_should_get_nodes_with_a_label(self):
store = FrozenGraphStore(self.store)
self.assertEqual(set(store.nodes("X")), {self.a, self.b, self.c})
self.assertEqual(set(store.nodes("Y")), {self.b, self.c, self.d})
self.assertFalse(set(store.nodes("Z")))
def test_should_get_nodes_with_multiple_labels(self):
store = FrozenGraphStore(self.store)
self.assertEqual(set(store.nodes("X", "Y")), {self.b, self.c})
self.assertFalse(set(store.nodes("X", "Z")))
def test_should_get_node_labels(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.node_labels(), {"X", "Y"})
self.assertEqual(store.node_labels(self.a), {"X"})
self.assertEqual(store.node_labels(self.b), {"X", "Y"})
self.assertEqual(store.node_labels(self.c), {"X", "Y"})
self.assertEqual(store.node_labels(self.d), {"Y"})
self.assertIs(store.node_labels(object()), None)
def test_should_get_node_properties(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.node_properties(self.a), {"name": "Alice"})
self.assertEqual(store.node_properties(self.b), {"name": "Bob"})
self.assertEqual(store.node_properties(self.c), {"name": "Carol"})
self.assertEqual(store.node_properties(self.d), {"name": "Dave"})
self.assertIs(store.node_properties(object()), None)
def test_should_get_relationships(self):
store = FrozenGraphStore(self.store)
self.assertEqual(set(store.relationships()), {self.a_likes_b, self.b_likes_a, self.a_knows_b,
self.a_knows_c, self.c_knows_b, self.c_married_to_d})
self.assertEqual(set(store.relationships("KNOWS")), {self.a_knows_b, self.a_knows_c, self.c_knows_b})
self.assertEqual(set(store.relationships("MARRIED_TO")), {self.c_married_to_d})
self.assertEqual(set(store.relationships(n_ids=(self.a, None))), {self.a_likes_b, self.a_knows_b,
self.a_knows_c})
self.assertEqual(set(store.relationships("KNOWS", (self.a, None))), {self.a_knows_b, self.a_knows_c})
self.assertEqual(set(store.relationships(n_ids=(None, self.b))), {self.a_likes_b, self.a_knows_b,
self.c_knows_b})
self.assertEqual(set(store.relationships("KNOWS", n_ids=(None, self.b))), {self.a_knows_b, self.c_knows_b})
self.assertEqual(set(store.relationships(n_ids=(self.a, self.b))), {self.a_likes_b, self.a_knows_b})
self.assertEqual(set(store.relationships("KNOWS", (self.a, self.b))), {self.a_knows_b})
self.assertEqual(set(store.relationships(n_ids={self.a})), {self.a_likes_b, self.b_likes_a,
self.a_knows_b, self.a_knows_c})
self.assertEqual(set(store.relationships("KNOWS", {self.a})), {self.a_knows_b, self.a_knows_c})
self.assertEqual(set(store.relationships(n_ids={self.a, self.b})), {self.a_likes_b, self.b_likes_a,
self.a_knows_b})
self.assertEqual(set(store.relationships("KNOWS", n_ids={self.a, self.b})), {self.a_knows_b})
def test_should_fail_on_bad_node_sequence(self):
store = FrozenGraphStore(self.store)
self.assertEqual(list(store.relationships(n_ids=(self.a, self.b, self.c))), [])
def test_should_fail_on_bad_node_set(self):
store = FrozenGraphStore(self.store)
_ = store.relationships(n_ids={self.a, self.b, self.c})
def test_should_fail_on_bad_node_type(self):
store = FrozenGraphStore(self.store)
with self.assertRaises(TypeError):
_ = store.relationships(n_ids=1)
def test_should_get_relationship_nodes(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.relationship_nodes(self.a_likes_b), (self.a, self.b))
self.assertEqual(store.relationship_nodes(self.b_likes_a), (self.b, self.a))
self.assertEqual(store.relationship_nodes(self.a_knows_b), (self.a, self.b))
self.assertEqual(store.relationship_nodes(self.a_knows_c), (self.a, self.c))
self.assertEqual(store.relationship_nodes(self.c_knows_b), (self.c, self.b))
self.assertEqual(store.relationship_nodes(self.c_married_to_d), (self.c, self.d))
self.assertIs(store.relationship_nodes(object()), None)
def test_should_get_relationship_properties(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.relationship_properties(self.a_knows_b), {"since": 1999})
self.assertEqual(store.relationship_properties(self.a_knows_c), {"since": 2000})
self.assertEqual(store.relationship_properties(self.c_knows_b), {"since": 2001})
self.assertIs(store.relationship_properties(object()), None)
def test_should_get_relationship_type(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.relationship_type(self.a_likes_b), "LIKES")
self.assertEqual(store.relationship_type(self.b_likes_a), "LIKES")
self.assertEqual(store.relationship_type(self.a_knows_b), "KNOWS")
self.assertEqual(store.relationship_type(self.a_knows_c), "KNOWS")
self.assertEqual(store.relationship_type(self.c_knows_b), "KNOWS")
self.assertEqual(store.relationship_type(self.c_married_to_d), "MARRIED_TO")
self.assertIs(store.relationship_type(object()), None)
class FrozenGraphStoreTestCase(TestCase):
store = MutableGraphStore()
a, b, c, d = store.add_nodes((
(["X"], {"name": "Alice"}),
(["X", "Y"], {"name": "Bob"}),
(["X", "Y"], {"name": "Carol"}),
(["Y"], {"name": "Dave"}),
))
store.add_relationships((
("KNOWS", (a, b), {}),
("KNOWS", (a, c), {}),
("KNOWS", (b, c), {}),
("KNOWS", (c, d), {}),
))
store = FrozenGraphStore(store)
def test_should_create_empty_on_none(self):
store = FrozenGraphStore()
self.assertEqual(store.node_count(), 0)
self.assertEqual(store.relationship_count(), 0)
self.assertFalse(store.node_labels())
self.assertFalse(store.relationship_types())
def test_should_not_create_from_non_store(self):
with self.assertRaises(TypeError):
_ = FrozenGraphStore(object())
def test_should_create_copy_of_frozen_store(self):
store = FrozenGraphStore(FrozenGraphStore(self.store))
self.assertEqual(store.node_count(), 4)
self.assertEqual(store.relationship_count(), 4)
self.assertEqual(store.node_labels(), {"X", "Y"})
self.assertEqual(store.relationship_types(), {"KNOWS"})
def test_should_create_copy_of_mutable_store(self):
store = FrozenGraphStore(self.store)
self.assertEqual(store.node_count(), 4)
self.assertEqual(store.relationship_count(), 4)
self.assertEqual(store.node_labels(), {"X", "Y"})
self.assertEqual(store.relationship_types(), {"KNOWS"})
def test_should_allow_construction_arguments(self):
store = FrozenGraphStore.build({
"a": (["Person"], {"name": "Alice", "age": 33}),
"b": (["Person"], {"name": "Bob", "age": 44}),
}, {
"ab": ("KNOWS", ("a", "b"), {"since": 1999}),
})
self.assertIsInstance(store, FrozenGraphStore)
self.assertEqual(store.node_count(), 2)
self.assertEqual(store.relationship_count(), 1)
self.assertEqual(store.node_labels(), {"Person"})
self.assertEqual(store.relationship_types(), {"KNOWS"})
self.assertEqual(set(store.nodes("Person")), {"a", "b"})
self.assertEqual(store.node_labels("a"), {"Person"})
self.assertEqual(store.node_labels("b"), {"Person"})
self.assertEqual(store.node_properties("a"), {"name": "Alice", "age": 33})
self.assertEqual(store.node_properties("b"), {"name": "Bob", "age": 44})
self.assertEqual(set(store.relationships("KNOWS")), {"ab"})
self.assertEqual(store.relationship_type("ab"), "KNOWS")
self.assertEqual(store.relationship_properties("ab"), {"since": 1999})
class MutableGraphStoreTestCase(TestCase):
store = MutableGraphStore()
a, b, c, d = store.add_nodes((
(["X"], {"name": "Alice"}),
(["X", "Y"], {"name": "Bob"}),
(["X", "Y"], {"name": "Carol"}),
(["Y"], {"name": "Dave"}),
))
store.add_relationships((
("KNOWS", (a, b), {}),
("KNOWS", (a, c), {}),
("KNOWS", (b, c), {}),
("KNOWS", (c, d), {}),
))
def test_should_create_empty_on_none(self):
store = MutableGraphStore()
self.assertEqual(store.node_count(), 0)
self.assertEqual(store.relationship_count(), 0)
self.assertFalse(store.node_labels())
self.assertFalse(store.relationship_types())
def test_should_create_copy_of_frozen_store(self):
store = MutableGraphStore(FrozenGraphStore(self.store))
self.assertEqual(store.node_count(), 4)
self.assertEqual(store.relationship_count(), 4)
self.assertEqual(store.node_labels(), {"X", "Y"})
self.assertEqual(store.relationship_types(), {"KNOWS"})
def test_should_create_copy_of_mutable_store(self):
store = MutableGraphStore(self.store)
self.assertEqual(store.node_count(), 4)
self.assertEqual(store.relationship_count(), 4)
self.assertEqual(store.node_labels(), {"X", "Y"})
self.assertEqual(store.relationship_types(), {"KNOWS"})
def test_can_add_new_label(self):
store = MutableGraphStore(self.store)
labels = store.node_labels(self.a)
self.assertEqual(labels, {"X"})
labels.add("Z")
self.assertEqual(store.node_labels(self.a), {"X", "Z"})
assert "Z" in set(store.node_labels())
def test_can_add_existing_label(self):
store = MutableGraphStore(self.store)
labels = store.node_labels(self.a)
self.assertEqual(labels, {"X"})
labels.add("X")
self.assertEqual(store.node_labels(self.a), {"X"})
def test_can_remove_label(self):
store = MutableGraphStore(self.store)
labels = store.node_labels(self.a)
self.assertEqual(labels, {"X"})
labels.remove("X")
self.assertFalse(store.node_labels(self.a))
def test_can_discard_label(self):
store = MutableGraphStore(self.store)
labels = store.node_labels(self.a)
self.assertEqual(labels, {"X"})
labels.discard("Z")
self.assertEqual(store.node_labels(self.a), {"X"})
def test_can_clear_labels(self):
store = MutableGraphStore(self.store)
labels = store.node_labels(self.b)
self.assertEqual(labels, {"X", "Y"})
labels.clear()
self.assertFalse(store.node_labels(self.b))
def test_can_add_properties(self):
store = MutableGraphStore(self.store)
properties = store.node_properties(self.a)
self.assertEqual(properties, {"name": "Alice"})
properties["age"] = 33
self.assertEqual(store.node_properties(self.a), {"name": "Alice", "age": 33})
def test_can_update_properties(self):
store = MutableGraphStore(self.store)
properties = store.node_properties(self.a)
self.assertEqual(properties, {"name": "Alice"})
properties["name"] = "Alistair"
self.assertEqual(store.node_properties(self.a), {"name": "Alistair"})
def test_can_remove_properties(self):
store = MutableGraphStore(self.store)
properties = store.node_properties(self.a)
self.assertEqual(properties, {"name": "Alice"})
del properties["name"]
self.assertEqual(store.node_properties(self.a), {})
def test_should_allow_construction_arguments(self):
store = MutableGraphStore.build({
"a": (["Person"], {"name": "Alice", "age": 33}),
"b": (["Person"], {"name": "Bob", "age": 44}),
}, {
"ab": ("KNOWS", ("a", "b"), {"since": 1999}),
})
self.assertIsInstance(store, MutableGraphStore)
self.assertEqual(store.node_count(), 2)
self.assertEqual(store.relationship_count(), 1)
self.assertEqual(store.node_labels(), {"Person"})
self.assertEqual(store.relationship_types(), {"KNOWS"})
self.assertEqual(set(store.nodes("Person")), {"a", "b"})
self.assertEqual(store.node_labels("a"), {"Person"})
self.assertEqual(store.node_labels("b"), {"Person"})
self.assertEqual(store.node_properties("a"), {"name": "Alice", "age": 33})
self.assertEqual(store.node_properties("b"), {"name": "Bob", "age": 44})
self.assertEqual(set(store.relationships(r_type="KNOWS")), {"ab"})
self.assertEqual(store.relationship_type("ab"), "KNOWS")
self.assertEqual(store.relationship_properties("ab"), {"since": 1999})
| technige/py2neo | test/unit/test_storage.py | Python | apache-2.0 | 15,510 |
# -*- coding: utf-8 -*-
# Copyright 2016-2017 Nate Bogdanowicz
import datetime
__distname__ = "Instrumental-lib"
__version__ = "0.6"
__author__ = "Nate Bogdanowicz"
__email__ = "[email protected]"
__url__ = 'https://github.com/mabuchilab/Instrumental'
__license__ = "GPLv3"
__copyright__ = "Copyright 2013-{}, {}".format(datetime.date.today().year, __author__)
| mabuchilab/Instrumental | instrumental/__about__.py | Python | gpl-3.0 | 374 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_gridlines04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test XlsxWriter gridlines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'radar'})
chart.axis_ids = [54977280, 54978816]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'major_gridlines': {'visible': 0}})
chart.y_axis['major_tick_mark'] = 'cross'
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_chart_gridlines04.py | Python | bsd-2-clause | 1,652 |
"""
Tests for the django-reversion API.
These tests require Python 2.5 to run.
"""
from __future__ import unicode_literals
import datetime, os
from unittest import skipUnless
from django.db import models
from django.test import TestCase
from django.core.management import call_command
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.contrib import admin
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5 pragma: no cover
from django.contrib.auth.models import User
else:
User = get_user_model()
from django.db.models.signals import pre_delete
from django.utils import timezone
import reversion
from reversion.revisions import RegistrationError, RevisionManager
from reversion.models import Revision, Version
from test_reversion.models import (
ReversionTestModel1,
ReversionTestModel1Child,
ReversionTestModel2,
ReversionTestModel3,
TestFollowModel,
ReversionTestModel1Proxy,
RevisionMeta,
ParentTestAdminModel,
ChildTestAdminModel,
InlineTestParentModel,
InlineTestChildModel,
InlineTestChildGenericModel
)
from test_reversion import admin # Force early registration of all admin models.
ZERO = datetime.timedelta(0)
class RegistrationTest(TestCase):
def check_registration(self, test_model):
# Register the model and test.
reversion.register(test_model)
self.assertTrue(reversion.is_registered(test_model))
self.assertRaises(RegistrationError, lambda: reversion.register(test_model))
self.assertTrue(test_model in reversion.get_registered_models())
self.assertTrue(isinstance(reversion.get_adapter(test_model), reversion.VersionAdapter))
def check_deregistration(self, test_model):
        # Unregister the model and test.
reversion.unregister(test_model)
self.assertFalse(reversion.is_registered(test_model))
self.assertRaises(RegistrationError, lambda: reversion.unregister(test_model))
self.assertTrue(test_model not in reversion.get_registered_models())
self.assertRaises(RegistrationError, lambda: isinstance(reversion.get_adapter(test_model)))
def testRegistration(self):
self.check_registration(ReversionTestModel1)
self.check_deregistration(ReversionTestModel1)
def testProxyRegistration(self):
# ProxyModel registered as usual model
self.check_registration(ReversionTestModel1Proxy)
self.check_deregistration(ReversionTestModel1Proxy)
def testDecorator(self):
# Test the use of register as a decorator
@reversion.register
class DecoratorModel(models.Model):
pass
self.assertTrue(reversion.is_registered(DecoratorModel))
def testDecoratorArgs(self):
# Test a decorator with arguments
@reversion.register(format='yaml')
class DecoratorArgsModel(models.Model):
pass
self.assertTrue(reversion.is_registered(DecoratorArgsModel))
def testEagerRegistration(self):
# Register the model and test.
reversion.register(ReversionTestModel3, eager_signals=[pre_delete])
self.assertTrue(reversion.is_registered(ReversionTestModel3))
self.assertRaises(RegistrationError, lambda: reversion.register(ReversionTestModel3, eager_signals=[pre_delete]))
self.assertTrue(ReversionTestModel3 in reversion.get_registered_models())
self.assertTrue(isinstance(reversion.get_adapter(ReversionTestModel3), reversion.VersionAdapter))
self.assertEquals([], reversion.default_revision_manager._signals[ReversionTestModel3])
self.assertEquals([pre_delete], reversion.default_revision_manager._eager_signals[ReversionTestModel3])
        # Unregister the model and test.
reversion.unregister(ReversionTestModel3)
self.assertFalse(reversion.is_registered(ReversionTestModel3))
self.assertRaises(RegistrationError, lambda: reversion.unregister(ReversionTestModel3))
self.assertTrue(ReversionTestModel3 not in reversion.get_registered_models())
self.assertRaises(RegistrationError, lambda: isinstance(reversion.get_adapter(ReversionTestModel3)))
self.assertFalse(ReversionTestModel3 in reversion.default_revision_manager._signals)
self.assertFalse(ReversionTestModel3 in reversion.default_revision_manager._eager_signals)
class ReversionTestBase(TestCase):
def setUp(self):
# Unregister all registered models.
self.initial_registered_models = []
for registered_model in reversion.get_registered_models():
self.initial_registered_models.append((registered_model, reversion.get_adapter(registered_model).__class__))
reversion.unregister(registered_model)
# Register the test models.
reversion.register(ReversionTestModel1)
reversion.register(ReversionTestModel2)
reversion.register(ReversionTestModel3, eager_signals=[pre_delete])
# Create some test data.
self.test11 = ReversionTestModel1.objects.create(
name = "model1 instance1 version1",
)
self.test12 = ReversionTestModel1.objects.create(
name = "model1 instance2 version1",
)
self.test21 = ReversionTestModel2.objects.create(
name = "model2 instance1 version1",
)
self.test22 = ReversionTestModel2.objects.create(
name = "model2 instance2 version1",
)
self.test31 = ReversionTestModel3.objects.create(
name = "model3 instance1 version1",
)
self.test32 = ReversionTestModel3.objects.create(
name = "model3 instance2 version1",
)
self.user = User.objects.create(
username = "user1",
)
def tearDown(self):
# Unregister the test models.
reversion.unregister(ReversionTestModel1)
reversion.unregister(ReversionTestModel2)
reversion.unregister(ReversionTestModel3)
# Delete the test models.
ReversionTestModel1.objects.all().delete()
ReversionTestModel2.objects.all().delete()
ReversionTestModel3.objects.all().delete()
User.objects.all().delete()
del self.test11
del self.test12
del self.test21
del self.test22
del self.test31
del self.test32
del self.user
# Delete the revisions index.
Revision.objects.all().delete()
# Unregister all remaining models.
for registered_model in reversion.get_registered_models():
reversion.unregister(registered_model)
# Re-register initial registered models.
for initial_model, adapter in self.initial_registered_models:
reversion.register(initial_model, adapter_cls=adapter)
del self.initial_registered_models
class RevisionTestBase(ReversionTestBase):
@reversion.create_revision()
def setUp(self):
super(RevisionTestBase, self).setUp()
class InternalsTest(RevisionTestBase):
def testRevisionsCreated(self):
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
def testContextManager(self):
# New revision should be created.
with reversion.create_revision():
with reversion.create_revision():
self.test11.name = "model1 instance1 version2"
self.test11.save()
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.count(), 5)
def testManualRevisionManagement(self):
        # When manage_manually is on, no revisions are created.
with reversion.create_revision(manage_manually=True):
self.test11.name = "model1 instance1 version2"
self.test11.save()
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
# Save a manual revision.
reversion.default_revision_manager.save_revision([self.test11])
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.count(), 5)
def testEmptyRevisionNotCreated(self):
with reversion.create_revision():
pass
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
def testRevisionContextAbandonedOnError(self):
with reversion.create_revision():
try:
with reversion.create_revision():
self.test11.name = "model1 instance1 version2"
self.test11.save()
raise Exception("Foo")
except:
pass
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
def testRevisionDecoratorAbandonedOnError(self):
@reversion.create_revision()
def make_revision():
self.test11.name = "model1 instance1 version2"
self.test11.save()
raise Exception("Foo")
try:
make_revision()
except:
pass
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
def testRevisionCreatedOnDelete(self):
with reversion.create_revision():
self.test31.delete()
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.count(), 5)
def testNoVersionForObjectCreatedAndDeleted(self):
with reversion.create_revision():
new_object = ReversionTestModel1.objects.create()
new_object.delete()
# No Revision and no Version should have been created.
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
class ApiTest(RevisionTestBase):
def setUp(self):
super(ApiTest, self).setUp()
with reversion.create_revision():
self.test11.name = "model1 instance1 version2"
self.test11.save()
self.test12.name = "model1 instance2 version2"
self.test12.save()
self.test21.name = "model2 instance1 version2"
self.test21.save()
self.test22.name = "model2 instance2 version2"
self.test22.save()
def testRevisionSignals(self):
pre_revision_receiver_called = []
def pre_revision_receiver(**kwargs):
self.assertEqual(kwargs["instances"], [self.test11])
self.assertTrue(isinstance(kwargs["revision"], Revision))
self.assertEqual(len(kwargs["versions"]), 1)
pre_revision_receiver_called.append(True)
post_revision_receiver_called = []
def post_revision_receiver(**kwargs):
self.assertEqual(kwargs["instances"], [self.test11])
self.assertTrue(isinstance(kwargs["revision"], Revision))
self.assertEqual(len(kwargs["versions"]), 1)
post_revision_receiver_called.append(True)
reversion.pre_revision_commit.connect(pre_revision_receiver)
reversion.post_revision_commit.connect(post_revision_receiver)
# Create a revision.
with reversion.create_revision():
self.test11.save()
# Check the signals were called.
self.assertTrue(pre_revision_receiver_called)
self.assertTrue(post_revision_receiver_called)
def testCanGetForObjectReference(self):
# Test a model with an int pk.
versions = reversion.get_for_object_reference(ReversionTestModel1, self.test11.pk)
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].field_dict["name"], "model1 instance1 version2")
self.assertEqual(versions[1].field_dict["name"], "model1 instance1 version1")
# Test a model with a str pk.
versions = reversion.get_for_object_reference(ReversionTestModel2, self.test21.pk)
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].field_dict["name"], "model2 instance1 version2")
self.assertEqual(versions[1].field_dict["name"], "model2 instance1 version1")
def testCanGetForObject(self):
# Test a model with an int pk.
versions = reversion.get_for_object(self.test11)
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].field_dict["name"], "model1 instance1 version2")
self.assertEqual(versions[1].field_dict["name"], "model1 instance1 version1")
# Test a model with a str pk.
versions = reversion.get_for_object(self.test21)
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].field_dict["name"], "model2 instance1 version2")
self.assertEqual(versions[1].field_dict["name"], "model2 instance1 version1")
def testCanGetUniqueForObject(self):
with reversion.create_revision():
self.test11.save()
self.test21.save()
# Test a model with an int pk.
self.assertEqual(reversion.get_for_object(self.test11).count(), 3)
self.assertEqual(len(reversion.get_unique_for_object(self.test11)), 2)
# Test a model with a str pk.
self.assertEqual(reversion.get_for_object(self.test21).count(), 3)
self.assertEqual(len(reversion.get_unique_for_object(self.test21)), 2)
def testCanGetUnique(self):
with reversion.create_revision():
self.test11.save()
self.test21.save()
# Test a model with an int pk.
self.assertEqual(reversion.get_for_object(self.test11).count(), 3)
self.assertEqual(len(list(reversion.get_for_object(self.test11).get_unique())), 2)
# Test a model with a str pk.
self.assertEqual(reversion.get_for_object(self.test21).count(), 3)
self.assertEqual(len(list(reversion.get_for_object(self.test21).get_unique())), 2)
def testCanGetForDate(self):
now = timezone.now()
# Test a model with an int pk.
version = reversion.get_for_date(self.test11, now)
self.assertEqual(version.field_dict["name"], "model1 instance1 version2")
self.assertRaises(Version.DoesNotExist, lambda: reversion.get_for_date(self.test11, datetime.datetime(1970, 1, 1, tzinfo=timezone.utc)))
# Test a model with a str pk.
version = reversion.get_for_date(self.test21, now)
self.assertEqual(version.field_dict["name"], "model2 instance1 version2")
self.assertRaises(Version.DoesNotExist, lambda: reversion.get_for_date(self.test21, datetime.datetime(1970, 1, 1, tzinfo=timezone.utc)))
def testCanGetDeleted(self):
with reversion.create_revision():
self.test11.delete()
self.test21.delete()
# Test a model with an int pk.
versions = reversion.get_deleted(ReversionTestModel1)
self.assertEqual(len(versions), 1)
self.assertEqual(versions[0].field_dict["name"], "model1 instance1 version2")
# Test a model with a str pk.
versions = reversion.get_deleted(ReversionTestModel2)
self.assertEqual(len(versions), 1)
self.assertEqual(versions[0].field_dict["name"], "model2 instance1 version2")
def testCanRevertVersion(self):
reversion.get_for_object(self.test11)[1].revert()
self.assertEqual(ReversionTestModel1.objects.get(id=self.test11.pk).name, "model1 instance1 version1")
def testCanRevertRevision(self):
reversion.get_for_object(self.test11)[1].revision.revert()
self.assertEqual(ReversionTestModel1.objects.get(id=self.test11.pk).name, "model1 instance1 version1")
self.assertEqual(ReversionTestModel1.objects.get(id=self.test12.pk).name, "model1 instance2 version1")
self.assertEqual(ReversionTestModel2.objects.get(id=self.test22.pk).name, "model2 instance2 version1")
        self.assertEqual(ReversionTestModel2.objects.get(id=self.test21.pk).name, "model2 instance1 version1")
def testCanRevertRevisionWithDeletedVersions(self):
self.assertEqual(ReversionTestModel1.objects.count(), 2)
self.assertEqual(ReversionTestModel2.objects.count(), 2)
with reversion.create_revision():
self.test11.name = "model1 instance1 version3"
self.test11.save()
self.test12.delete()
self.test21.name = "model2 instance1 version3"
self.test21.save()
self.test22.delete()
self.assertEqual(ReversionTestModel1.objects.count(), 1)
self.assertEqual(ReversionTestModel2.objects.count(), 1)
with reversion.create_revision():
self.test11.name = "model1 instance1 version4"
self.test11.save()
self.test21.name = "model2 instance1 version4"
self.test21.save()
self.assertEqual(ReversionTestModel1.objects.count(), 1)
self.assertEqual(ReversionTestModel2.objects.count(), 1)
# Revert to a revision where some deletes were logged.
reversion.get_for_object(self.test11)[1].revision.revert()
self.assertEqual(ReversionTestModel1.objects.count(), 1)
self.assertEqual(ReversionTestModel2.objects.count(), 1)
self.assertEqual(ReversionTestModel1.objects.get(id=self.test11.id).name, "model1 instance1 version3")
self.assertEqual(ReversionTestModel2.objects.get(id=self.test21.id).name, "model2 instance1 version3")
        # Revert to a revision before the deletes were logged.
reversion.get_for_object(self.test11)[2].revision.revert()
self.assertEqual(ReversionTestModel1.objects.count(), 2)
self.assertEqual(ReversionTestModel2.objects.count(), 2)
def testCanSaveIgnoringDuplicates(self):
with reversion.create_revision():
self.test11.save()
self.test12.save()
self.test21.save()
self.test22.save()
self.assertFalse(reversion.get_ignore_duplicates())
reversion.set_ignore_duplicates(True)
self.assertTrue(reversion.get_ignore_duplicates())
self.assertEqual(reversion.get_for_object(self.test11).count(), 2)
# Save a non-duplicate revision.
with reversion.create_revision():
self.test11.save()
self.assertFalse(reversion.get_ignore_duplicates())
reversion.set_ignore_duplicates(True)
self.assertEqual(reversion.get_for_object(self.test11).count(), 3)
def testCanAddMetaToRevision(self):
# Create a revision with lots of meta data.
with reversion.create_revision():
self.test11.save()
reversion.set_comment("Foo bar")
self.assertEqual(reversion.get_comment(), "Foo bar")
reversion.set_user(self.user)
self.assertEqual(reversion.get_user(), self.user)
reversion.add_meta(RevisionMeta, age=5)
# Test the revision data.
revision = reversion.get_for_object(self.test11)[0].revision
self.assertEqual(revision.user, self.user)
self.assertEqual(revision.comment, "Foo bar")
self.assertEqual(revision.revisionmeta.age, 5)
class MultiTableInheritanceApiTest(RevisionTestBase):
def setUp(self):
super(MultiTableInheritanceApiTest, self).setUp()
reversion.register(ReversionTestModel1Child, follow=("reversiontestmodel1_ptr",))
with reversion.create_revision():
self.testchild1 = ReversionTestModel1Child.objects.create(
name = "modelchild1 instance1 version 1",
)
    def testCanRetrieveFullFieldDict(self):
self.assertEqual(reversion.get_for_object(self.testchild1)[0].field_dict["name"], "modelchild1 instance1 version 1")
def tearDown(self):
super(MultiTableInheritanceApiTest, self).tearDown()
del self.testchild1
class ReversionTestModel1ChildProxy(ReversionTestModel1Child):
class Meta:
proxy = True
class ProxyModelApiTest(RevisionTestBase):
def setUp(self):
super(ProxyModelApiTest, self).setUp()
reversion.register(ReversionTestModel1Proxy)
self.concrete = self.test11
self.proxy = ReversionTestModel1Proxy.objects.get(pk=self.concrete.pk)
with reversion.create_revision():
self.proxy.name = "proxy model"
self.proxy.save()
def testCanGetForObjectReference(self):
# Can get version for proxy model
proxy_versions = reversion.get_for_object_reference(ReversionTestModel1Proxy, self.proxy.id)
self.assertEqual(len(proxy_versions), 2)
self.assertEqual(proxy_versions[0].field_dict["name"], self.proxy.name)
self.assertEqual(proxy_versions[1].field_dict["name"], self.concrete.name)
# Can get the same version for concrete model
concrete_versions = reversion.get_for_object_reference(ReversionTestModel1, self.concrete.id)
self.assertEqual(list(concrete_versions), list(proxy_versions))
def testCanGetForObject(self):
# Can get version for proxy model
proxy_versions = reversion.get_for_object(self.proxy)
self.assertEqual(len(proxy_versions), 2)
self.assertEqual(proxy_versions[0].field_dict["name"], self.proxy.name)
self.assertEqual(proxy_versions[1].field_dict["name"], self.concrete.name)
# Can get the same version for concrete model
concrete_versions = reversion.get_for_object(self.concrete)
self.assertEqual(list(concrete_versions), list(proxy_versions))
def testCanRevertVersion(self):
self.assertEqual(ReversionTestModel1.objects.get(pk=self.concrete.pk).name, self.proxy.name)
reversion.get_for_object(self.proxy)[1].revert()
self.assertEqual(ReversionTestModel1.objects.get(pk=self.concrete.pk).name, self.concrete.name)
def testMultiTableInheritanceProxyModel(self):
reversion.register(ReversionTestModel1Child, follow=("reversiontestmodel1_ptr",))
reversion.register(ReversionTestModel1ChildProxy, follow=("reversiontestmodel1_ptr",))
with reversion.create_revision():
concrete = ReversionTestModel1Child.objects.create(name="modelchild1 instance1 version 1")
proxy = ReversionTestModel1ChildProxy.objects.get(pk=concrete.pk)
with reversion.create_revision():
proxy.name = "proxy model"
proxy.save()
proxy_versions = reversion.get_for_object(proxy)
self.assertEqual(proxy_versions[0].field_dict["name"], proxy.name)
self.assertEqual(proxy_versions[1].field_dict["name"], concrete.name)
class FollowModelsTest(ReversionTestBase):
@reversion.create_revision()
def setUp(self):
super(FollowModelsTest, self).setUp()
reversion.unregister(ReversionTestModel1)
reversion.register(ReversionTestModel1, follow=("testfollowmodel_set",))
reversion.register(TestFollowModel, follow=("test_model_1", "test_model_2s",))
self.follow1 = TestFollowModel.objects.create(
name = "related instance1 version 1",
test_model_1 = self.test11,
)
self.follow1.test_model_2s.add(self.test21, self.test22)
def testRelationsFollowed(self):
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 5)
with reversion.create_revision():
self.follow1.save()
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.count(), 9)
def testRevertWithDelete(self):
with reversion.create_revision():
test23 = ReversionTestModel2.objects.create(
name = "model2 instance3 version1",
)
self.follow1.test_model_2s.add(test23)
self.follow1.save()
self.assertEqual(reversion.get_for_object(test23).count(), 1)
self.assertEqual(self.follow1.test_model_2s.all().count(), 3)
# Test that a revert with delete works.
test23_pk = test23.pk
self.assertEqual(ReversionTestModel2.objects.count(), 3)
with reversion.create_revision():
reversion.get_for_object(self.follow1)[1].revision.revert(delete=True)
self.assertEqual(ReversionTestModel1.objects.get(id=self.test11.pk).name, "model1 instance1 version1")
self.assertEqual(ReversionTestModel2.objects.get(id=self.test22.pk).name, "model2 instance2 version1")
        self.assertEqual(ReversionTestModel2.objects.get(id=self.test21.pk).name, "model2 instance1 version1")
self.assertEqual(ReversionTestModel2.objects.count(), 2)
self.assertRaises(ReversionTestModel2.DoesNotExist, lambda: ReversionTestModel2.objects.get(id=test23_pk))
# Roll back to the revision where all models were present.
reversion.get_for_object(self.follow1)[1].revision.revert()
self.assertEqual(self.follow1.test_model_2s.all().count(), 3)
# Roll back to a revision where a delete flag is present.
reversion.get_for_object(self.follow1)[0].revision.revert(delete=True)
self.assertEqual(self.follow1.test_model_2s.all().count(), 2)
def testReverseRelationsFollowed(self):
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 5)
with reversion.create_revision():
self.test11.save()
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.count(), 9)
def testReverseFollowRevertWithDelete(self):
with reversion.create_revision():
follow2 = TestFollowModel.objects.create(
name = "related instance2 version 1",
test_model_1 = self.test11,
)
# Test that a revert with delete works.
follow2_pk = follow2.pk
reversion.get_for_object(self.test11)[1].revision.revert(delete=True)
self.assertEqual(TestFollowModel.objects.count(), 1)
self.assertRaises(TestFollowModel.DoesNotExist, lambda: TestFollowModel.objects.get(id=follow2_pk))
def testRecoverDeleted(self):
# Delete the test model.
with reversion.create_revision():
self.test11.delete()
self.assertEqual(TestFollowModel.objects.count(), 0)
self.assertEqual(ReversionTestModel1.objects.count(), 1)
# Recover the test model.
with reversion.create_revision():
reversion.get_deleted(ReversionTestModel1)[0].revision.revert()
# Make sure it was recovered.
self.assertEqual(TestFollowModel.objects.count(), 1)
self.assertEqual(ReversionTestModel1.objects.count(), 2)
def tearDown(self):
reversion.unregister(TestFollowModel)
TestFollowModel.objects.all().delete()
del self.follow1
super(FollowModelsTest, self).tearDown()
excluded_revision_manager = RevisionManager("excluded")
class ExcludedFieldsTest(RevisionTestBase):
def setUp(self):
excluded_revision_manager.register(ReversionTestModel1, fields=("id",))
excluded_revision_manager.register(ReversionTestModel2, exclude=("name",))
super(ExcludedFieldsTest, self).setUp()
def testExcludedRevisionManagerIsSeparate(self):
self.assertEqual(excluded_revision_manager.get_for_object(self.test11).count(), 1)
def testExcludedFieldsAreRespected(self):
self.assertEqual(excluded_revision_manager.get_for_object(self.test11)[0].field_dict["id"], self.test11.id)
self.assertEqual(excluded_revision_manager.get_for_object(self.test11)[0].field_dict["name"], "")
self.assertEqual(excluded_revision_manager.get_for_object(self.test21)[0].field_dict["id"], self.test21.id)
self.assertEqual(excluded_revision_manager.get_for_object(self.test21)[0].field_dict["name"], "")
def tearDown(self):
super(ExcludedFieldsTest, self).tearDown()
excluded_revision_manager.unregister(ReversionTestModel1)
excluded_revision_manager.unregister(ReversionTestModel2)
class CreateInitialRevisionsTest(ReversionTestBase):
def testCreateInitialRevisions(self):
self.assertEqual(Revision.objects.count(), 0)
self.assertEqual(Version.objects.count(), 0)
call_command("createinitialrevisions")
revcount = Revision.objects.count()
vercount = Version.objects.count()
self.assertTrue(revcount >= 4)
self.assertTrue(vercount >= 4)
call_command("createinitialrevisions")
self.assertEqual(Revision.objects.count(), revcount)
self.assertEqual(Version.objects.count(), vercount)
def testCreateInitialRevisionsSpecificApps(self):
call_command("createinitialrevisions", "test_reversion")
self.assertEqual(Revision.objects.count(), 6)
self.assertEqual(Version.objects.count(), 6)
def testCreateInitialRevisionsSpecificModels(self):
call_command("createinitialrevisions", "test_reversion.ReversionTestModel1")
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.count(), 2)
call_command("createinitialrevisions", "test_reversion.ReversionTestModel2")
self.assertEqual(Revision.objects.count(), 4)
self.assertEqual(Version.objects.count(), 4)
def testCreateInitialRevisionsSpecificComment(self):
call_command("createinitialrevisions", comment="Foo bar")
self.assertEqual(Revision.objects.all()[0].comment, "Foo bar")
# Tests for reversion functionality that's tied to requests.
class RevisionMiddlewareTest(ReversionTestBase):
def testRevisionMiddleware(self):
self.assertEqual(Revision.objects.count(), 0)
self.assertEqual(Version.objects.count(), 0)
self.client.get("/success/")
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
def testRevisionMiddlewareInvalidatesRevisionOnError(self):
self.assertEqual(Revision.objects.count(), 0)
self.assertEqual(Version.objects.count(), 0)
self.assertRaises(Exception, lambda: self.client.get("/error/"))
self.assertEqual(Revision.objects.count(), 0)
self.assertEqual(Version.objects.count(), 0)
def testRevisionMiddlewareErrorOnDoubleMiddleware(self):
self.assertRaises(ImproperlyConfigured, lambda: self.client.get("/double/"))
class VersionAdminTest(TestCase):
def setUp(self):
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(admin.__file__), "templates"),
)
self.user = User(
username = "foo",
is_staff = True,
is_superuser = True,
)
self.user.set_password("bar")
self.user.save()
# Log the user in.
self.client.login(
username = "foo",
password = "bar",
)
def testAutoRegisterWorks(self):
self.assertTrue(reversion.is_registered(ChildTestAdminModel))
self.assertTrue(reversion.is_registered(ParentTestAdminModel))
self.assertTrue(reversion.is_registered(InlineTestChildModel))
self.assertTrue(reversion.is_registered(InlineTestChildGenericModel))
self.assertTrue(reversion.is_registered(InlineTestParentModel))
def testChangelist(self):
response = self.client.get("/admin/test_reversion/childtestadminmodel/")
self.assertEqual(response.status_code, 200)
def testRevisionSavedOnPost(self):
self.assertEqual(ChildTestAdminModel.objects.count(), 0)
# Create an instance via the admin.
response = self.client.post("/admin/test_reversion/childtestadminmodel/add/", {
"parent_name": "parent instance1 version1",
"child_name": "child instance1 version1",
"_continue": 1,
})
self.assertEqual(response.status_code, 302)
obj_pk = response["Location"].split("/")[-2]
obj = ChildTestAdminModel.objects.get(id=obj_pk)
# Check that a version is created.
versions = reversion.get_for_object(obj)
self.assertEqual(versions.count(), 1)
self.assertEqual(versions[0].field_dict["parent_name"], "parent instance1 version1")
self.assertEqual(versions[0].field_dict["child_name"], "child instance1 version1")
# Save a new version.
response = self.client.post("/admin/test_reversion/childtestadminmodel/%s/" % obj_pk, {
"parent_name": "parent instance1 version2",
"child_name": "child instance1 version2",
"_continue": 1,
})
self.assertEqual(response.status_code, 302)
# Check that a version is created.
versions = reversion.get_for_object(obj)
self.assertEqual(versions.count(), 2)
self.assertEqual(versions[0].field_dict["parent_name"], "parent instance1 version2")
self.assertEqual(versions[0].field_dict["child_name"], "child instance1 version2")
# Check that the versions can be listed.
response = self.client.get("/admin/test_reversion/childtestadminmodel/%s/history/" % obj_pk)
self.assertContains(response, "child instance1 version2")
self.assertContains(response, "child instance1 version1")
# Check that version data can be loaded.
response = self.client.get("/admin/test_reversion/childtestadminmodel/%s/history/%s/" % (obj_pk, versions[1].pk))
self.assertContains(response, "parent instance1 version1")
self.assertContains(response, "child instance1 version1")
# Check that loading the version data didn't roll it back!
obj = ChildTestAdminModel.objects.get(pk=obj.pk)
self.assertEqual(obj.child_name, "child instance1 version2")
self.assertEqual(obj.parent_name, "parent instance1 version2")
self.assertEqual(reversion.get_for_object(obj).count(), 2)
# Check that a version can be rolled back.
response = self.client.post("/admin/test_reversion/childtestadminmodel/%s/history/%s/" % (obj_pk, versions[1].pk), {
"parent_name": "parent instance1 version3",
"child_name": "child instance1 version3",
})
self.assertEqual(response.status_code, 302)
# Check that the models were rolled back.
obj = ChildTestAdminModel.objects.get(pk=obj.pk)
self.assertEqual(obj.child_name, "child instance1 version3")
self.assertEqual(obj.parent_name, "parent instance1 version3")
# Check that a version is created.
versions = reversion.get_for_object(obj)
self.assertEqual(versions.count(), 3)
self.assertEqual(versions[0].field_dict["child_name"], "child instance1 version3")
self.assertEqual(versions[0].field_dict["parent_name"], "parent instance1 version3")
# Check that a deleted version can be viewed in the list.
obj.delete()
response = self.client.get("/admin/test_reversion/childtestadminmodel/recover/")
self.assertContains(response, "child instance1 version3")
        # Check that a deleted version can be viewed in detail.
response = self.client.get("/admin/test_reversion/childtestadminmodel/recover/%s/" % versions[0].pk)
self.assertContains(response, "parent instance1 version3")
self.assertContains(response, "child instance1 version3")
# Check that a deleted version can be recovered.
response = self.client.post("/admin/test_reversion/childtestadminmodel/recover/%s/" % versions[0].pk, {
"parent_name": "parent instance1 version4",
"child_name": "child instance1 version4",
})
# Check that the models were rolled back.
obj = ChildTestAdminModel.objects.get(pk=obj_pk)
self.assertEqual(obj.child_name, "child instance1 version4")
self.assertEqual(obj.parent_name, "parent instance1 version4")
# Check that a version is created.
versions = reversion.get_for_object_reference(ChildTestAdminModel, obj_pk)
self.assertEqual(versions.count(), 4)
self.assertEqual(versions[0].field_dict["parent_name"], "parent instance1 version4")
self.assertEqual(versions[0].field_dict["child_name"], "child instance1 version4")
def createInlineObjects(self):
# Create an instance via the admin without a child.
response = self.client.post("/admin/test_reversion/inlinetestparentmodel/add/", {
"name": "parent version1",
"children-TOTAL_FORMS": "0",
"children-INITIAL_FORMS": "0",
"test_reversion-inlinetestchildgenericmodel-content_type-object_id-TOTAL_FORMS": "0",
"test_reversion-inlinetestchildgenericmodel-content_type-object_id-INITIAL_FORMS": "0",
"_continue": 1,
})
self.assertEqual(response.status_code, 302)
parent_pk = response["Location"].split("/")[-2]
parent = InlineTestParentModel.objects.get(id=parent_pk)
# Update instance via the admin to add a child
response = self.client.post("/admin/test_reversion/inlinetestparentmodel/%s/" % parent_pk, {
"name": "parent version1",
"children-TOTAL_FORMS": "1",
"children-INITIAL_FORMS": "0",
"children-0-name": "child version 1",
"test_reversion-inlinetestchildgenericmodel-content_type-object_id-TOTAL_FORMS": "1",
"test_reversion-inlinetestchildgenericmodel-content_type-object_id-INITIAL_FORMS": "0",
"test_reversion-inlinetestchildgenericmodel-content_type-object_id-0-name": "generic child version 1",
"_continue": 1,
})
self.assertEqual(response.status_code, 302)
children = InlineTestChildModel.objects.filter(parent=parent_pk)
self.assertEqual(children.count(), 1)
generic_children = parent.generic_children.all()
self.assertEqual(generic_children.count(), 1)
# get list of versions
version_list = reversion.get_for_object(parent)
self.assertEqual(len(version_list), 2)
def testInlineAdmin(self):
self.assertTrue(reversion.is_registered(InlineTestParentModel))
# make sure model is following the child FK
self.assertTrue('children' in reversion.get_adapter(InlineTestParentModel).follow)
self.createInlineObjects()
# unregister model
reversion.unregister(InlineTestParentModel)
self.assertFalse(reversion.is_registered(InlineTestParentModel))
# re-register without following
reversion.register(InlineTestParentModel)
self.assertTrue(reversion.is_registered(InlineTestParentModel))
# make sure model is NOT following the child FK
self.assertFalse('children' in reversion.get_adapter(InlineTestParentModel).follow)
self.createInlineObjects()
def tearDown(self):
self.client.logout()
self.user.delete()
del self.user
ChildTestAdminModel.objects.all().delete()
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
# Tests for optional patch generation methods.
try:
from reversion.helpers import generate_patch, generate_patch_html
except ImportError: # pragma: no cover
can_test_patch = False
else:
can_test_patch = True
class PatchTest(RevisionTestBase):
def setUp(self):
super(PatchTest, self).setUp()
with reversion.create_revision():
self.test11.name = "model1 instance1 version2"
self.test11.save()
self.version2, self.version1 = reversion.get_for_object(self.test11)
@skipUnless(can_test_patch, "Diff match patch library not installed")
def testCanGeneratePatch(self):
self.assertEqual(
generate_patch(self.version1, self.version2, "name"),
"@@ -17,9 +17,9 @@\n version\n-1\n+2\n",
)
@skipUnless(can_test_patch, "Diff match patch library not installed")
    def testCanGeneratePatchHtml(self):
self.assertEqual(
generate_patch_html(self.version1, self.version2, "name"),
'<span>model1 instance1 version</span><del style="background:#ffe6e6;">1</del><ins style="background:#e6ffe6;">2</ins>',
)
def tearDown(self):
super(PatchTest, self).tearDown()
del self.version1
del self.version2
# Test that revisions are preserved when their user is deleted.
class DeleteUserTest(RevisionTestBase):
def testDeleteUser(self):
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
rev = Revision.objects.all()[0]
rev.user = self.user
rev.save()
self.user.delete()
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.count(), 4)
| lutoma/django-reversion | src/tests/test_reversion/tests.py | Python | bsd-3-clause | 40,796 |
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.solvers import linearconstraints
LOG = logging.getLogger(__name__)
class AllHostsConstraint(linearconstraints.BaseLinearConstraint):
"""NoOp constraint. Passes all hosts."""
# The linear constraint should be formed as:
# coeff_vector * var_vector' <operator> <constants>
# where <operator> is ==, >, >=, <, <=, !=, etc.
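    # Illustrative sketch (added for clarity, not part of the original
    # driver): with 2 hosts and 1 instance this constraint becomes
    #   coefficients = [[0], [0]],  variables = [[x_00], [x_10]]
    #   row i:  0 * x_i0 == 0
    # which always holds, so every host passes this no-op constraint.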
def __init__(self, variables, hosts, instance_uuids, request_spec,
filter_properties):
[self.num_hosts, self.num_instances] = self._get_host_instance_nums(
hosts, instance_uuids, request_spec)
self._check_variables_size(variables)
def _get_host_instance_nums(self, hosts, instance_uuids, request_spec):
"""This method calculates number of hosts and instances."""
num_hosts = len(hosts)
if instance_uuids:
num_instances = len(instance_uuids)
else:
num_instances = request_spec.get('num_instances', 1)
return [num_hosts, num_instances]
def _check_variables_size(self, variables):
"""This method checks the size of variable matirx."""
# Supposed to be a <num_hosts> by <num_instances> matrix.
if len(variables) != self.num_hosts:
            raise ValueError(_('Variables row length should match'
                               ' number of hosts.'))
for row in variables:
if len(row) != self.num_instances:
                raise ValueError(_('Variables column length should'
                                   ' match number of instances.'))
return True
def get_coefficient_vectors(self, variables, hosts, instance_uuids,
request_spec, filter_properties):
"""Calculate the coeffivient vectors."""
# Coefficients are 0 for active hosts and 1 otherwise
coefficient_vectors = []
for host in hosts:
coefficient_vectors.append([0 for j in range(self.num_instances)])
return coefficient_vectors
def get_variable_vectors(self, variables, hosts, instance_uuids,
request_spec, filter_properties):
"""Reorganize the variables."""
# The variable_vectors[i][j] denotes the relationship between host[i]
# and instance[j].
variable_vectors = []
variable_vectors = [[variables[i][j] for j in
range(self.num_instances)] for i in range(self.num_hosts)]
return variable_vectors
def get_operations(self, variables, hosts, instance_uuids, request_spec,
filter_properties):
"""Set operations for each constraint function."""
# Operations are '=='.
operations = [(lambda x: x == 0) for i in range(self.num_hosts)]
return operations
| rishabh1jain1/schedwise | nova/scheduler/solvers/linearconstraints/all_hosts_constraint.py | Python | apache-2.0 | 3,558 |
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011 Etienne Millon <[email protected]>
from toflib import Plugin
class PluginRick(Plugin):
def handle_msg(self, msg_text, chan, nick):
rick_list = ["okqEVeNqBhc"
,"XZ5TajZYW6Y"
,"dQw4w9WgXcQ"
]
for r in rick_list:
if r in msg_text:
self.say("We're no strangers to love...")
| martinkirch/tofbot | plugins/rick.py | Python | bsd-2-clause | 620 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRsnns(RPackage):
"""Neural Networks using the Stuttgart Neural Network Simulator (SNNS)
The Stuttgart Neural Network Simulator (SNNS) is a library containing
many standard implementations of neural networks. This package wraps the
SNNS functionality to make it available from within R. Using the RSNNS
low-level interface, all of the algorithmic functionality and flexibility
of SNNS can be accessed. Furthermore, the package contains a convenient
high-level interface, so that the most common neural network topologies
and learning algorithms integrate seamlessly into R."""
homepage = "https://sci2s.ugr.es/dicits/software/RSNNS"
url = "https://cloud.r-project.org/src/contrib/RSNNS_0.4-7.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/RSNNS"
version('0.4-12', sha256='b18dfeda71573bc92c6888af72da407651bff7571967965fd3008f0d331743b9')
version('0.4-11', sha256='87943126e98ae47f366e3025d0f3dc2f5eb0aa2924508fd9ee9a0685d7cb477c')
version('0.4-10.1', sha256='38bb3d172390bd01219332ec834744274b87b01f94d23b29a9d818c2bca04071')
version('0.4-7', sha256='ec941dddda55e4e29ed281bd8768a93d65e0d86d56ecab0f2013c64c8d1a4994')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-rsnns/package.py | Python | lgpl-2.1 | 1,542 |
from __future__ import absolute_import
from . import mpl
from . import io
__version__ = "1.0.0"
| goerz/mgplottools | mgplottools/__init__.py | Python | gpl-3.0 | 98 |
#!/usr/bin/python
#
# This file is part of DynaMiFLiC
#
# Copyright (C) 2012 slacknux <[email protected]>
# http://www.slacknux.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cgi
import json
from DynaMiFLiC import DynaMiFLiC
print "Content-type: text/html\n"
artPath = "../db/articles/"
form = cgi.FieldStorage()
obj = DynaMiFLiC(artPath)
if "init" in form:
artList = obj.lst
artList.reverse()
print json.dumps(artList)
elif "article" in form:
artContent = obj.content(form["article"].value)
print artContent
| slacknux/DynaMiFLiC | server/cgi-bin/loadArticle.py | Python | gpl-3.0 | 1,123 |
"""
Views for the User model.
"""
import functools
from flask import Blueprint, render_template, url_for, redirect
from flask_oauthlib.client import OAuth
from flask_oauthlib.contrib.apps import github
from flask_login import login_required, current_user, login_user, logout_user
from codegolf import login_manager, app
from codegolf.database import db_session
from codegolf.models import User, Admin
user = Blueprint('user', __name__, url_prefix='/user')
oauth = OAuth(app)
git_auth = github.register_to(oauth)
@login_manager.user_loader
def user_loader(user_id):
return db_session.query(User).filter(User.id == user_id).one()
def admin_required(f):
"""
A decorator similar to login_required requiring an admin user
:param f: view to be made admin only
"""
@functools.wraps(f)
def wrapper(*a, **kw):
if db_session.query(Admin).filter(Admin.id == current_user.id).first() is None:
return redirect(url_for('index'))
return f(*a, **kw)
return wrapper
@user.route('/')
@user.route('/account')
@login_required
def account():
"""
View list of submissions for a given user and their total points
"""
return render_template('user/account.html')
@user.route('/delete')
@admin_required
def delete():
"""
    Admin-only page to delete a user (for example, a troll account). Option to also delete all the user's submissions or to
keep them but prevent them from submitting further solutions.
"""
return render_template('layout.html')
@user.route('/login')
def login():
"""
Redirects to github oauth which redirects to /authorize
"""
if app.testing:
callback_url = url_for('user.authorize', _external=True)
else:
callback_url = 'https://codegolf.uqcs.org.au/user/authorize'
return git_auth.authorize(callback=callback_url)
@user.route("/logout")
@login_required
def logout():
"""
Logs out a user
:return:
"""
logout_user()
return redirect(url_for('index'))
@user.route('/authorize')
def authorize():
"""
    Use information from the GitHub OAuth login to either create a new user or log in as an existing user.
"""
resp = git_auth.authorized_response()
user_info = git_auth.get('user', token=(resp["access_token"],)).data
u = db_session.query(User).filter(User.email == user_info['email']).first()
if not u:
u = User(user_info['login'], user_info['email'])
db_session.add(u)
db_session.commit()
login_user(u, remember=True)
return redirect(url_for('index'))
| UQComputingSociety/codegolf | codegolf/user/views.py | Python | mit | 2,567 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import rapidsms
import re
class App(rapidsms.app.App):
prefix = re.compile(r'^echo\s+',re.I)
def handle(self, message):
self.debug("got message %s", message.text)
if self.prefix.search(message.text):
response = self.prefix.sub("",message.text)
self.debug("responding with %s", response)
message.respond(response)
return True
| takinbo/rapidsms-borno | apps/echo/app.py | Python | lgpl-3.0 | 443 |
"""
Created on 15 Feb 2015
@author: Ronny Andersson ([email protected])
@copyright: (c) 2015 Ronny Andersson
@license: MIT
"""
# Standard library
import logging
import warnings
from abc import ABCMeta, abstractmethod
# Third party
import numpy as np
# Internal
from zignal import Audio, Noise
try:
import pyaudio
except ImportError:
warnings.warn("PyAudio not found. Will not be able to create sndcard instances",
category=ImportWarning)
def list_devices():
"""List all available sound cards."""
return PA.list_devices()
# ==================================================================================================
# Abstract Base Class, inherit and implement the methods marked as @abstractmethod
# ==================================================================================================
class _Device(object, metaclass=ABCMeta):
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__)
def __str__(self):
s = '=======================================\n'
s += 'classname : %s\n' % self.__class__.__name__
return s
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self):
self._logger.debug("--- open")
def close(self):
self._logger.debug("--- close")
@abstractmethod
def play(self, x, **kwargs):
"""Play audio"""
self._logger.debug("--- play")
assert isinstance(x, Audio)
@abstractmethod
def rec(self, duration=None, channels=1, fs=96000, **kwargs):
"""Record audio"""
self._logger.debug("--- rec")
assert duration is not None, "Specify a duration (in seconds) to record for"
@abstractmethod
def play_rec(self, x, **kwargs):
"""Play and record audio"""
self._logger.debug("--- play_rec")
assert isinstance(x, Audio)
# ==================================================================================================
# Stub class
# ==================================================================================================
class Stub(_Device):
"""Stub device that can be dropped in anywhere as a fake sound card.
The record methods will return noise. This is intended to be used
during development when a real device would block.
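    A minimal usage sketch (illustrative only; the duration, channel count
    and sample rate below are arbitrary assumptions):
        snd = Stub()
        y = snd.rec(duration=1.0, channels=1, fs=48000)  # returns -60 dB noise
        snd.play(y)                                      # only logs, plays nothing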
"""
def play(self, x, **kwargs):
_Device.play(self, x, **kwargs)
self._logger.warn("*** Stub play")
def rec(self, duration=None, channels=1, fs=96000, **kwargs):
_Device.rec(self, duration=duration, channels=channels, fs=fs, **kwargs)
self._logger.warn("*** Stub rec")
# fake a signal with white noise
n = Noise(channels=channels, fs=fs, duration=duration, gaindb=-60)
n.convert_to_float(targetbits=32)
y = Audio(fs=fs, initialdata=n.samples)
return y
def play_rec(self, x, **kwargs):
_Device.play_rec(self, x, **kwargs)
self._logger.warn("*** Stub play_rec")
# fake a signal with white noise
n = Noise(channels=x.ch, fs=x.fs, nofsamples=x.nofsamples, gaindb=-60)
n.convert_to_float(targetbits=32)
y = Audio(fs=x.fs, initialdata=n.samples)
return y
# ==================================================================================================
# PyAudio (Portaudio) implementation
# ==================================================================================================
class PA(_Device):
"""PyAudio wrapper. Uses the Audio base class as input signal and returns
Audio instances after a recording. This implementation is using the blocking
strategy."""
# -------------------------------------------------------------------------------
# Portaudio frame
#
# http://music.columbia.edu/pipermail/portaudio/2007-January/006615.html
#
# A 'frame' is all data required at a snapshot in time, where snapshots
# are the same as samplerate. This means that 2 ch of 16bit audio is
# a frame of size 32 bits
#
# +------------+------------+
# stereo | 16bit ch1 | 16bit ch2 |
# +------------+------------+
#
# |
# v
#
# +-------------------------+
# | 4 bytes |
# +-------------------------+
#
# frame size calculation:
# 2 bytes per sample, 2 channels --> 2*2=4 bytes
#
# Another example:
# 32 bit float, 8 channels --> 4 bytes per channel, 8 channels == 32 bytes
#
# +-----+-----+-----+-----+-----+-----+-----+-----+
# | ch1 | ch2 | ch3 | ch4 | ch5 | ch6 | ch7 | ch8 |
# +-----+-----+-----+-----+-----+-----+-----+-----+
#
# +-----------------------------------------------+
# | (32 bytes) |
# +-----------------------------------------------+
#
# In other words, because we have audio data in a numpy vector
# where (rows,colums) --> (samples, channels) this means that each *row*
# is a frame.
#
# -------------------------------------------------------------------------------
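    # Illustrative sketch (not used by the class): the frame size in bytes is
    # the sample width times the channel count, e.g.
    #
    #     np.dtype(np.int16).itemsize * 2    # --> 4 bytes (16 bit stereo)
    #     np.dtype(np.float32).itemsize * 8  # --> 32 bytes (32 bit float, 8 ch)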
def __init__(self, device_out='default', device_in='default'):
"""Set the device_out and/or device_in string based on the name of the
sound card. An int can also be used if the id is known beforehand. Note that
the id can change when another sound card is detected, for example when a USB
card is connected. The available sound cards can be found by calling list_devices()
"""
_Device.__init__(self)
self._device_out = device_out
self._device_in = device_in
if isinstance(device_out, int):
self._index_out = device_out
else:
self._index_out = self._get_id(name=device_out, find_output=True)
if isinstance(device_in, int):
self._index_in = device_in
else:
self._index_in = self._get_id(name=device_in, find_output=False)
def __str__(self):
s = _Device.__str__(self)
s += 'portaudio : %s %s\n' % (pyaudio.get_portaudio_version(),
pyaudio.get_portaudio_version_text())
s += 'pyaudio : %s\n' % pyaudio.__version__
s += 'output device : id %i, %s\n' % (self._index_out, self._device_out)
s += 'input device : id %i, %s\n' % (self._index_in, self._device_in)
return s
def _get_id(self, name=None, find_output=True):
"""Find the id of the sound card that matches the string name"""
retval = -1
pa_get_id = pyaudio.PyAudio()
try:
if name == 'default':
if find_output:
device = pa_get_id.get_default_output_device_info()
if device['maxOutputChannels'] > 0:
self._device_out = device['name']
retval = device['index']
else:
device = pa_get_id.get_default_input_device_info()
if device['maxInputChannels'] > 0:
self._device_in = device['name']
retval = device['index']
else:
for idx in range(pa_get_id.get_device_count()):
device = pa_get_id.get_device_info_by_index(idx)
if find_output:
if device['maxOutputChannels'] > 0:
if device['name'] == name:
retval = idx
break
else:
if device['maxInputChannels'] > 0:
if device['name'] == name:
retval = idx
break
finally:
pa_get_id.terminate()
if retval == -1:
s = "Device '%s' not found. Check the inputs and outputs arguments" % name
print(s)
try:
print("Available devices: \n%s" % self.list_devices())
finally:
raise LookupError(s)
return retval
def open(self):
"""Open a PyAudio instance. This needs to be called before play(),
play_rec() or rec() is called. This can be done in two ways:
snd = PA()
snd.open()
try:
snd.play(x)
finally:
snd.close()
or use the 'with' statement:
with PA() as snd:
snd.play(x)
"""
self._logger.debug("creating pyaudio instance")
self.pa = pyaudio.PyAudio()
def close(self):
"""Terminate the PyAudio instance. Must be called if open() has been called"""
self.pa.terminate()
self._logger.debug("pyaudio instance terminated")
@classmethod
def list_devices(cls):
"""Get a pretty string with all available sound cards.
When using a portaudio instance, the id of the sound device needs
        to be known. This method lists the available devices so that
the id can be found.
"""
s = ''
s += '--------------------------------------------------------------------\n'
s += 'id out in def.fs API name\n'
s += '--------------------------------------------------------------------\n'
# -->| 0 2 2 44100.0 ALSA Intel 82801AA-ICH: - (hw:0,0)
pa_list_dev = pyaudio.PyAudio()
try:
for idx in range(pa_list_dev.get_device_count()):
device = pa_list_dev.get_device_info_by_index(idx)
s += '%2i %3i %3i %8.1f %s %s\n' % (
device['index'],
device['maxOutputChannels'],
device['maxInputChannels'],
device['defaultSampleRate'],
pa_list_dev.get_host_api_info_by_index(
device['hostApi'])['name'].ljust(len('Windows WASAPI')),
device['name'],
)
s += '\n'
s += 'default output device id: %i\n' \
% pa_list_dev.get_default_output_device_info()['index']
s += 'default input device id: %i\n' \
% pa_list_dev.get_default_input_device_info()['index']
s += '--------------------------------------------------------------------\n'
finally:
pa_list_dev.terminate()
return s
def _data_format(self, x):
"""The data types in numpy needs to be mapped to the equivalent type in
portaudio. This is an issue for 24 bit audio files since there isn't a
24 bit data type in numpy. This is currently not implemented. There are
some options on how to do this. We could for example use a 32 bit int and
store the 24 bits either so that bits 1 to 8 is set to zeroes or so that
bits 25 to 32 is set to zeros.
"""
retval = None
if x.samples.dtype == np.dtype(np.float32):
self._logger.debug("pyaudio.paFloat32")
retval = pyaudio.paFloat32
elif x.samples.dtype == np.dtype(np.int16):
self._logger.debug("pyaudio.paInt16")
retval = pyaudio.paInt16
elif x.samples.dtype == np.dtype(np.int32):
self._logger.debug("pyaudio.paInt32")
retval = pyaudio.paInt32
else:
raise NotImplementedError("Data type not understood: %s" % x.samples.dtype)
return retval
def _check_pow2(self, n):
"""Check that buffer size is a power of 2 (32, 64, ..., 1024, 2048, ...)"""
check = 2**int(np.round(np.log2(n))) == n
return check
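    # For example: _check_pow2(1024) returns True while _check_pow2(1000)
    # returns False, so a 1000-frame buffer is rejected by _validate() below.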
def _validate(self, frames_per_buffer):
assert hasattr(self, "pa"), \
"Call open() or use the 'with' statement before using play(), rec() or play_rec()"
assert self._check_pow2(frames_per_buffer), \
"Use a buffer size that is a power of 2 (1024, 2048, 4096, ...)"
return True
def _get_missing_frames(self, frames_per_buffer, length):
"""Calculate the number of frames missing to fill a buffer"""
missing_frames = frames_per_buffer - (length % frames_per_buffer)
self._logger.debug("frames per buffer : %i" % frames_per_buffer)
self._logger.debug("missing frames : %i" % missing_frames)
return missing_frames
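    # Worked example: with frames_per_buffer=1024 and a 44100 sample signal,
    # 44100 % 1024 == 68, so 1024 - 68 == 956 frames of silence are appended
    # and the padded length (45056) is an exact multiple of the buffer size.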
def play(self, x, frames_per_buffer=1024):
"""Play audio. If dropouts or buffer underruns occur try different
values for the frames_per_buffer variable."""
_Device.play(self, x)
self._validate(frames_per_buffer)
missing_frames = self._get_missing_frames(frames_per_buffer, len(x))
# generate silence to fill up missing frames
pad = Audio(channels=x.ch, fs=x.fs, nofsamples=missing_frames, dtype=x.samples.dtype)
# append the missing frames to a copy of the audio to be played. We now have
# audio that can be split into complete (full) buffers
cpy = Audio(fs=x.fs, initialdata=x.samples)
cpy.concat(pad)
assert len(cpy) % frames_per_buffer == 0
stream = self.pa.open(
format=self._data_format(x),
channels=x.ch,
rate=x.fs,
frames_per_buffer=frames_per_buffer,
output_device_index=self._index_out,
input=False,
output=True,
)
try:
self._logger.info("play: start")
counter = 0
# split the audio into chunks the size of one buffer, so we can
# iterate over the audio in chunksizes of the same size as one buffer
it = iter(np.split(cpy.samples, len(cpy)/frames_per_buffer))
try:
while True:
chunk = next(it)
                    stream.write(chunk.tobytes(), num_frames=frames_per_buffer)
counter += 1
except StopIteration:
pass
finally:
stream.stop_stream()
self._logger.debug("chunks played : %i" % counter)
self._logger.debug("samples played : %i" % (counter*frames_per_buffer))
self._logger.debug("duration : %.3f" % (counter*frames_per_buffer/x.fs))
finally:
self._logger.debug("play: close stream")
stream.close()
self._logger.info("play: done")
def play_rec(self, x, frames_per_buffer=1024):
"""Play audio and record from input. If dropouts or buffer underruns occur
try different values for the frames_per_buffer variable."""
_Device.play_rec(self, x)
self._validate(frames_per_buffer)
missing_frames = self._get_missing_frames(frames_per_buffer, len(x))
# generate silence to fill up missing frames
pad = Audio(channels=x.ch, fs=x.fs, nofsamples=missing_frames, dtype=x.samples.dtype)
# append the missing frames to a copy of the audio to be played. We now have
# audio that can be split into complete (full) buffers
cpy = Audio(fs=x.fs, initialdata=x.samples)
cpy.concat(pad)
assert len(cpy) % frames_per_buffer == 0
rec = Audio(channels=cpy.ch, fs=cpy.fs, nofsamples=len(cpy), dtype=cpy.samples.dtype)
stream = self.pa.open(
format=self._data_format(x),
channels=x.ch,
rate=x.fs,
frames_per_buffer=frames_per_buffer,
input_device_index=self._index_in,
output_device_index=self._index_out,
input=True,
output=True,
)
try:
self._logger.info("play_rec: start")
counter = 0
# split the audio into chunks the size of one buffer, so we can
# iterate over the audio in chunksizes of the same size as one buffer
it_out = iter(np.split(cpy.samples, len(cpy)/frames_per_buffer))
it_in = iter(np.split(rec.samples, len(rec)/frames_per_buffer))
try:
while True:
chunk_out = next(it_out)
chunk_in = next(it_in)
                    stream.write(chunk_out.tobytes(), num_frames=frames_per_buffer)
                    raw_1d = np.frombuffer(stream.read(frames_per_buffer),
                                           dtype=rec.samples.dtype)
# because we use an iterator chunk_in is a sliding window in the rec variable
chunk_in[:] = raw_1d.reshape((frames_per_buffer, rec.ch))
counter += 1
except StopIteration:
pass
finally:
stream.stop_stream()
self._logger.debug("chunks played : %i" % counter)
self._logger.debug("samples played : %i" % (counter*frames_per_buffer))
self._logger.debug("duration : %.3f" % (counter*frames_per_buffer/x.fs))
finally:
self._logger.debug("play_rec: close stream")
stream.close()
# remove the padding (empty frames) added to fill the last buffer. Trim
# at the start, since we can treat that as latency.
rec.trim(start=missing_frames, end=None)
self._logger.debug("play_rec: trimmed %i samples from the start" % missing_frames)
self._check_if_clipped(rec)
self._logger.info("play_rec: done")
return rec
def rec(self, duration=None, channels=1, fs=96000, frames_per_buffer=1024, dtype=np.float32):
"""Record. If dropouts or buffer underruns occur try different
values for the frames_per_buffer variable."""
_Device.rec(self, duration=duration, channels=channels, fs=fs)
self._validate(frames_per_buffer)
missing_frames = self._get_missing_frames(frames_per_buffer, int(duration*fs))
nofsamples = missing_frames+int(duration*fs)
rec = Audio(channels=channels, fs=fs, nofsamples=nofsamples, dtype=dtype)
assert len(rec) % frames_per_buffer == 0
stream = self.pa.open(
format=self._data_format(rec),
channels=rec.ch,
rate=rec.fs,
frames_per_buffer=frames_per_buffer,
input_device_index=self._index_in,
input=True,
output=False,
)
try:
self._logger.info("rec: start")
counter = 0
# split the audio into chunks the size of one buffer, so we can
# iterate over the audio in chunksizes of the same size as one buffer
it_in = iter(np.split(rec.samples, len(rec)/frames_per_buffer))
try:
while True:
chunk_in = next(it_in)
                    raw_1d = np.frombuffer(stream.read(frames_per_buffer),
                                           dtype=rec.samples.dtype)
# because we use an iterator chunk_in is a sliding window in the rec variable
chunk_in[:] = raw_1d.reshape((frames_per_buffer, rec.ch))
counter += 1
except StopIteration:
pass
finally:
stream.stop_stream()
self._logger.debug("chunks recorded : %i" % counter)
self._logger.debug("samples recorded: %i" % (counter*frames_per_buffer))
self._logger.debug("duration : %.3f" % (counter*frames_per_buffer/rec.fs))
finally:
self._logger.debug("rec: close stream")
stream.close()
# remove the padding (empty frames) added to fill the last buffer. Trim
# at the start, since we can treat that as latency.
rec.trim(start=missing_frames, end=None)
self._logger.debug("rec: trimmed %i samples from the start" % missing_frames)
self._check_if_clipped(rec)
self._logger.info("rec: done")
return rec
def _check_if_clipped(self, rec):
"""check if the recording clipped, log the first clip for each channel"""
clipped = False
if np.issubdtype(rec.samples.dtype, np.floating):
max_possible_positive_value = 1.0
else:
# integers used.
# get the size of the integer type used, in bytes (2 for 16bit, 4 for 32bit)
dt = np.dtype(rec.samples.dtype)
            # calculate the maximum possible positive value. The most negative
            # value is -(max_possible_positive_value + 1) (two's complement)
max_possible_positive_value = 2**((8*dt.itemsize)-1) - 1
self._logger.debug("maximum possible positive value: %i" % max_possible_positive_value)
for i, peaks in enumerate(zip(rec.peak()[0], rec.peak()[1])):
peak_val, peak_pos = peaks
# abs(-32768) overflows in signed 16 bit, use long(...) in py2 to get a bigger data type
if abs(int(peak_val)) >= max_possible_positive_value:
clipped = True
clip_position = peak_pos/rec.fs
self._logger.warn("channel %02i clipped at %.3f" % (i+1, clip_position))
return clipped
__all__ = [
'list_devices',
'PA',
'Stub',
]
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
level='DEBUG',
)
print(list_devices())
print('++ End of script ++')
| ronnyandersson/zignal | zignal/sndcard.py | Python | mit | 21,899 |
"""Next-gen alignments with BWA (http://bio-bwa.sourceforge.net/)
"""
import os
import subprocess
import toolz as tz
from bcbio.pipeline import config_utils
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.ngsalign import alignprep, novoalign, postalign
from bcbio.provenance import do
galaxy_location_file = "bwa_index.loc"
def align_bam(in_bam, ref_file, names, align_dir, data):
"""Perform direct alignment of an input BAM file with BWA using pipes.
This avoids disk IO by piping between processes:
- samtools sort of input BAM to queryname
- bedtools conversion to interleaved FASTQ
- bwa-mem alignment
- samtools conversion to BAM
- samtools sort to coordinate
"""
config = data["config"]
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
samtools = config_utils.get_program("samtools", config)
bedtools = config_utils.get_program("bedtools", config)
bwa = config_utils.get_program("bwa", config)
resources = config_utils.get_resources("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
# adjust memory for samtools since used for input and output
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
3, "decrease").upper()
bwa_resources = config_utils.get_resources("bwa", data["config"])
bwa_params = (" ".join([str(x) for x in bwa_resources.get("options", [])])
if "options" in bwa_resources else "")
rg_info = novoalign.get_rg_info(names)
if not utils.file_exists(out_file):
with tx_tmpdir(data) as work_dir:
with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
tx_out_prefix = os.path.splitext(tx_out_file)[0]
prefix1 = "%s-in1" % tx_out_prefix
in_bam = utils.remote_cl_input(in_bam)
cmd = ("{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
"| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
"| {bwa} mem -p -M -t {num_cores} {bwa_params} -R '{rg_info}' -v 1 {ref_file} - | ")
cmd = cmd.format(**locals()) + tobam_cl
do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
return out_file
def _can_use_mem(fastq_file, data):
"""bwa-mem handle longer (> 70bp) reads with improved piping.
Randomly samples 5000 reads from the first two million.
Default to no piping if more than 75% of the sampled reads are small.
"""
min_size = 70
thresh = 0.75
head_count = 8000000
tocheck = 5000
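    # Note: head_count counts fastq lines, so 8,000,000 lines is roughly
    # 2,000,000 reads (4 lines per read). Of the 5000 sampled reads, more than
    # 75% (3750) must be shorter than 70bp before we fall back to 'bwa aln'.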
seqtk = config_utils.get_program("seqtk", data["config"])
fastq_file = utils.remote_cl_input(fastq_file)
gzip_cmd = "zcat {fastq_file}" if fastq_file.endswith(".gz") else "cat {fastq_file}"
cmd = (gzip_cmd + " | head -n {head_count} | "
"{seqtk} sample -s42 - {tocheck} | "
"awk '{{if(NR%4==2) print length($1)}}' | sort | uniq -c")
count_out = subprocess.check_output(cmd.format(**locals()), shell=True,
executable="/bin/bash", stderr=open("/dev/null", "w"))
if not count_out.strip():
raise IOError("Failed to check fastq file sizes with: %s" % cmd.format(**locals()))
shorter = 0
for count, size in (l.strip().split() for l in count_out.strip().split("\n")):
if int(size) < min_size:
shorter += int(count)
return (float(shorter) / float(tocheck)) <= thresh
def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted output BAM.
"""
pair_file = pair_file if pair_file else ""
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
qual_format = data["config"]["algorithm"].get("quality_format", "").lower()
if data.get("align_split"):
final_file = out_file
out_file, data = alignprep.setup_combine(final_file, data)
fastq_file = alignprep.split_namedpipe_cl(fastq_file, data)
if pair_file:
pair_file = alignprep.split_namedpipe_cl(pair_file, data)
else:
final_file = None
if qual_format == "illumina":
fastq_file = alignprep.fastq_convert_pipe_cl(fastq_file, data)
if pair_file:
pair_file = alignprep.fastq_convert_pipe_cl(pair_file, data)
else:
fastq_file = utils.remote_cl_input(fastq_file)
pair_file = utils.remote_cl_input(pair_file)
rg_info = novoalign.get_rg_info(names)
if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
# If we cannot do piping, use older bwa aln approach
if ("bwa-mem" in tz.get_in(["config", "algorithm", "tools_off"], data, [])
or not _can_use_mem(fastq_file, data)):
out_file = _align_backtrack(fastq_file, pair_file, ref_file, out_file,
names, rg_info, data)
else:
out_file = _align_mem(fastq_file, pair_file, ref_file, out_file,
names, rg_info, data)
data["work_bam"] = out_file
return data
def _align_mem(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
"""Perform bwa-mem alignment on supported read lengths.
"""
bwa = config_utils.get_program("bwa", data["config"])
num_cores = data["config"]["algorithm"].get("num_cores", 1)
bwa_resources = config_utils.get_resources("bwa", data["config"])
bwa_params = (" ".join([str(x) for x in bwa_resources.get("options", [])])
if "options" in bwa_resources else "")
with tx_tmpdir(data) as work_dir:
with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
cmd = ("{bwa} mem -M -t {num_cores} {bwa_params} -R '{rg_info}' -v 1 {ref_file} "
"{fastq_file} {pair_file} | ")
cmd = cmd.format(**locals()) + tobam_cl
do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
return out_file
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
"""Perform a BWA alignment using 'aln' backtrack algorithm.
"""
assert not data.get("align_split"), "Do not handle split alignments with non-piped bwa"
bwa = config_utils.get_program("bwa", data["config"])
config = data["config"]
sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0]
sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else ""
if not utils.file_exists(sai1_file):
with file_transaction(data, sai1_file) as tx_sai1_file:
_run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
if sai2_file and not utils.file_exists(sai2_file):
with file_transaction(data, sai2_file) as tx_sai2_file:
_run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
align_type = "sampe" if sai2_file else "samse"
cmd = ("{bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} "
"{fastq_file} {pair_file} | ")
cmd = cmd.format(**locals()) + tobam_cl
do.run(cmd, "bwa %s" % align_type, data)
return out_file
def _bwa_args_from_config(config):
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-t", str(num_cores)] if num_cores > 1 else []
qual_format = config["algorithm"].get("quality_format", "").lower()
qual_flags = ["-I"] if qual_format == "illumina" else []
return core_flags + qual_flags
def _run_bwa_align(fastq_file, ref_file, out_file, config):
aln_cl = [config_utils.get_program("bwa", config), "aln",
"-n 2", "-k 2"]
aln_cl += _bwa_args_from_config(config)
aln_cl += [ref_file, fastq_file]
cmd = "{cl} > {out_file}".format(cl=" ".join(aln_cl), out_file=out_file)
do.run(cmd, "bwa aln: {f}".format(f=os.path.basename(fastq_file)), None)
| SciLifeLab/bcbio-nextgen | bcbio/ngsalign/bwa.py | Python | mit | 8,435 |
#!/usr/bin/env python
from sys import argv, exit, stdout, stderr
from random import randint
method = 0
global n
global dataset_filename
subset_filename = ""
rest_filename = ""
def exit_with_help():
print("""\
Usage: %s [options] dataset number [output1] [output2]
This script selects a subset of the given dataset.
options:
-s method : method of selection (default 0)
0 -- stratified selection (classification only)
1 -- random selection
output1 : the subset (optional)
output2 : rest of the data (optional)
If output1 is omitted, the subset will be printed on the screen.""" % argv[0])
exit(1)
def process_options():
global method, n
global dataset_filename, subset_filename, rest_filename
argc = len(argv)
if argc < 3:
exit_with_help()
i = 1
while i < len(argv):
if argv[i][0] != "-":
break
if argv[i] == "-s":
i = i + 1
method = int(argv[i])
if method < 0 or method > 1:
print("Unknown selection method %d" % (method))
exit_with_help()
i = i + 1
dataset_filename = argv[i]
n = int(argv[i+1])
if i+2 < argc:
subset_filename = argv[i+2]
if i+3 < argc:
rest_filename = argv[i+3]
def main():
class Label:
def __init__(self, label, index, selected):
self.label = label
self.index = index
self.selected = selected
process_options()
# get labels
i = 0
labels = []
f = open(dataset_filename, 'r')
for line in f:
labels.append(Label(float((line.split())[0]), i, 0))
i = i + 1
f.close()
l = i
# determine where to output
if subset_filename != "":
file1 = open(subset_filename, 'w')
else:
file1 = stdout
split = 0
if rest_filename != "":
split = 1
file2 = open(rest_filename, 'w')
# select the subset
warning = 0
if method == 0: # stratified
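        # Sketch of the idea: sorting by label makes each class contiguous; for
        # every class of size nr_class we draw k instances (its proportional
        # share of n) using the selection-sampling scan below, where each
        # remaining item is kept with probability k / (items left).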
labels.sort(key = lambda x: x.label)
label_end = labels[l-1].label + 1
labels.append(Label(label_end, l, 0))
begin = 0
label = labels[begin].label
for i in range(l+1):
new_label = labels[i].label
if new_label != label:
nr_class = i - begin
k = i*n//l - begin*n//l
# at least one instance per class
if k == 0:
k = 1
warning = warning + 1
for j in range(nr_class):
if randint(0, nr_class-j-1) < k:
labels[begin+j].selected = 1
k = k - 1
begin = i
label = new_label
elif method == 1: # random
k = n
for i in range(l):
if randint(0,l-i-1) < k:
labels[i].selected = 1
k = k - 1
i = i + 1
# output
i = 0
if method == 0:
labels.sort(key = lambda x: int(x.index))
f = open(dataset_filename, 'r')
for line in f:
if labels[i].selected == 1:
file1.write(line)
else:
if split == 1:
file2.write(line)
i = i + 1
if warning > 0:
stderr.write("""\
Warning:
1. You may have regression data. Please use -s 1.
2. Classification data unbalanced or too small. We select at least 1 per class.
The subset thus contains %d instances.
""" % (n+warning))
# cleanup
f.close()
file1.close()
if split == 1:
file2.close()
main()
| KaiXin93/Text2 | tools/subset.py | Python | apache-2.0 | 2,987 |
import datetime
import json
import matplotlib.dates as mdates
from matplotlib.axes import Subplot
from matplotlib.figure import Figure
from PySide import QtGui, QtCore
from backend_pysideagg import FigureCanvasQTAgg as FigureCanvas
from DatePlot import DatetimeCollection, KenLocator, KenFormatter
from SignalWidget import SignalTreeWidget, SignalListEditorDialog
from ViewWidget import BaseTabViewWidget
from util import link
__all__ = ["LiveGraphTabView"]
class LiveGraphTabView(BaseTabViewWidget, FigureCanvas):
"""
LiveGraphTabView implements the live graphing functionality of the viewer,
including supporting dropping signals into the graph, and pulling live
data from multiple data sources and redrawing.
class variables:
view_name - the user-visible title for LiveGraphTabViews
view_id - the type name stored when the tab is serialized to json
in this case, 'live.graph'
view_icon - the default icon filename for live graph widgets,
in this case, 'graph-icon.png'
view_desc - the user-visible description of the LiveGraphTabView
instance variables:
figure - the matplotlib figure object associated with the widget
plots - the subplot objects being displayed
tab_bar - the TabWidget to which this tab belongs
timescale - the duration in seconds of previous data displayed
method summary:
json_friendly - returns a simple representation of the tab view suitable
for serialization to json. Specifically exports the
timescale and a list of plots.
cleanup - frees up the plots belonging to this figure and the
figure itself so that the tab can be cleanly deleted
update_view(now) - updates the current plots by copying any pending data
to their collections and updating the view limits to
reflect the current timescale relative to now.
add_plot(names) - adds a new subplot to this view with all of the signals
in names automatically added. Rescales existing plots
so that they all occupy the same amount of space.
remove_plot(plot)- removes the subplot from this view and rescales the
remaining subplots.
redraw - redraws all plots and the figure.
"""
view_name = "Live Graph View"
view_id = "live.graph"
view_icon = "graph-icon.png"
view_desc = "A live stream of data visualized on a plot vs time"
def __init__(self, tab_bar, source_finder, parent=None):
figure = Figure(figsize=(3,3), dpi=72)
FigureCanvas.__init__(self, figure, parent)
BaseTabViewWidget.__init__(self, parent, init=False)
self.figure = figure
self.plots = []
self.tab_bar = tab_bar
self.timescale = 30
self.find_source = source_finder
#General plan for actions:
#Plot specific actions are dispatched through the contextMenuEvent
#handler which finds the selected plot using get_axes_at_point
#and then generates a context menu with actions bound to the specific
#plot. Generic non-plot-specific actions like adjusting the timescale
#and renaming the tab are bound to the tab view and the tab widget
#respectively.
self.timescale_action = QtGui.QAction("Adjust timescale", self)
self.addAction(self.timescale_action)
link(self.timescale_action.triggered, self.adjust_timescale)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.setAcceptDrops(True)
#Drag-and-Drop support
def dragEnterEvent(self, event):
if event.mimeData().hasFormat(SignalTreeWidget.mimeType):
event.acceptProposedAction()
else:
event.ignore()
def dropEvent(self, event):
sources = SignalTreeWidget.getMimeDataSignals(event.mimeData())
if sources is not None:
event.acceptProposedAction()
self.add_plot(sources)
else:
event.ignore()
def add_plot(self, sources):
"Add a subplot to this widget displaying all of the signals in names"
rows = len(self.plots) + 1
for i, plot in enumerate(self.plots):
plot.change_geometry(rows, 1, i+1)
plot.label_outer()
new_plot = LiveSubplot(self.find_source, self, rows, 1, rows)
td = datetime.timedelta(seconds=self.timescale)
now = datetime.datetime.utcnow()
new_plot.set_xbound(mdates.date2num(now - td),
mdates.date2num(now))
if len(sources) == 1:
new_plot.set_title(sources[0]["name"])
for descr in sources:
new_plot.add_signal(descr["identifier"],
color=descr["color"],
style=descr["style"])
self.figure.add_subplot(new_plot)
self.plots.append(new_plot)
return new_plot
def remove_plot(self, plot):
"Remove the subplot from this view"
self.figure.delaxes(plot)
self.plots.remove(plot)
plot.cleanup()
rows = len(self.plots)
for i, axes in enumerate(self.plots):
axes.change_geometry(rows, 1, i+1)
#TabView maintenance methods
def cleanup(self):
"Frees the plots and the figure"
for plot in self.plots:
plot.cleanup()
self.figure.delaxes(plot)
def json_friendly(self):
"Serializes to a json-friendly data-structure. Adds timescale and plot info"
json = BaseTabViewWidget.json_friendly(self)
json["timescale"] = self.timescale
json["plots"] = [plot.json_friendly() for plot in self.plots]
return json
@classmethod
def from_json(cls, json, tab_bar, find_source, parent=None):
tab = cls(tab_bar, find_source, parent)
tab.timescale = json["timescale"]
for plot_desc in json["plots"]:
plot = tab.add_plot([])
plot.init_from_json(plot_desc)
return tab
def update_view(self, now):
"Copy any pending data to the plots and update the data limits"
td = datetime.timedelta(seconds=self.timescale)
for plot in self.plots:
plot.update_data(now, td)
def redraw(self):
"Redraw with updated axes ticks"
self.draw()
self.figure.canvas.draw()
#Context menu handlers
def adjust_timescale(self):
new_scale, accepted = QtGui.QInputDialog.getDouble(self,
"Seconds of past data to display",
"Seconds", self.timescale, 0.0)
if accepted:
self.timescale = new_scale
def get_axes_at_point(self, x, y):
trans = self.figure.transFigure.inverted()
figure_point = trans.transform([x, self.figure.bbox.height - y])
fx, fy = figure_point[0], figure_point[1]
for plot in self.plots:
if plot.get_position().contains(fx, fy):
return plot
else:
return None
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
event.ignore()
return
axes = self.get_axes_at_point(event.x(), event.y())
if axes is None:
event.ignore()
return
else:
axes.toggle_pause()
event.accept()
#To do: do stuff with the mouse-click.
def contextMenuEvent(self, event):
event.accept()
axes = self.get_axes_at_point(event.x(), event.y())
if axes is None:
menu = QtGui.QMenu(self)
menu.addActions([self.tab_bar.rename_action, self.timescale_action])
menu.popup(event.globalPos())
return
menu = QtGui.QMenu(self)
menu.addAction(axes.set_title_action)
menu.addAction(axes.adjust_axes_action)
menu.addAction(axes.adjust_signals_action)
delete_action = menu.addAction("Delete Plot")
menu.addSeparator()
menu.addAction(self.tab_bar.rename_action)
menu.addAction(self.timescale_action)
link(delete_action.triggered, lambda: self.remove_plot(axes))
menu.popup(event.globalPos())
class LiveSubplot(Subplot):
"""
LiveSubplot implements the plotting functionality for individual plots.
instance variables:
signals - a mapping from signal names to source objects and
collections
parent - the LiveGraphTabView that holds this plot
paused - whether or not this plot is paused and should not update
its data limits
autoscale - whether or not the plot should automatically calculate
the y-axis data-limits from the data seen so far or
use pre-determined limits
static - whether or not the plot should use static x-axis time
data limits instead of being pegged to the present.
method summary:
json_friendly - returns a json-friendly data structure containing:
* a list of signal names and color/line-styles
* autoscaling status
* static status
* y-axis current data-limits (ignored if autoscaling)
* x-axis current data-limits (ignored if not static)
* current y-axis units label
* current plot title
cleanup - frees all of the collections in use
add_signal(name) - adds the signal to the plot, if it doesn't already exist
remove_signal(name) - removes the signal from the plot
toggle_pause - toggles paused status on/off
update_data - updates plot data and data limits for any unpaused plots
"""
def __init__(self, find_source, parent, *args, **kwargs):
Subplot.__init__(self, parent.figure, *args, **kwargs)
self.signals = {}
self.parent = parent
self.paused = False
self.autoscale = True
self.static = False
self.find_source = find_source
self.set_title_action = QtGui.QAction("Set plot title", parent)
self.adjust_axes_action = QtGui.QAction("Adjust plot axes", parent)
self.adjust_signals_action = QtGui.QAction("Change signals plotted", parent)
link(self.set_title_action.triggered, self.adjust_title)
link(self.adjust_axes_action.triggered, self.adjust_axes)
link(self.adjust_signals_action.triggered, self.adjust_signals)
def json_friendly(self):
signal_list = []
for name, (source, col) in self.signals.items():
signal_list.append((name, tuple(col.get_color()[0]), col.get_linestyle()[0]))
return { "signals" : signal_list,
"autoscale" : self.autoscale,
"static" : self.static,
"yview" : tuple(self.yaxis.get_view_interval()),
"xview" : tuple(self.xaxis.get_view_interval()),
"units" : self.yaxis.get_label_text(),
"title" : self.get_title()
}
def init_from_json(self, json):
self.autoscale = json["autoscale"]
self.static = json["static"]
if json.get("title"):
self.set_title(json["title"])
if json.get("units"):
self.yaxis.set_label_text(json["units"])
y_min, y_max = json["yview"]
self.yaxis.set_view_interval(y_min, y_max, ignore=True)
self.yaxis.reset_ticks()
for (name, color, style) in json["signals"]:
self.add_signal(name)
def cleanup(self):
for name, (signal, collection) in self.signals.items():
collection.remove()
def add_signal(self, name, color=None, style=None):
if name in self.signals:
return
collection = DatetimeCollection([])
self.add_collection(collection)
self.signals[name] = (self.find_source(name), collection)
def remove_signal(self, name):
self.signals[name].remove()
del self.signals[name]
def toggle_pause(self):
self.paused = not self.paused
def update_data(self, now, delta):
if self.paused:
return
locator = KenLocator(5)
self.xaxis.set_major_locator(locator)
formatter = KenFormatter(locator)
self.xaxis.set_major_formatter(formatter)
for name, (signal, collection) in self.signals.items():
ymin, ymax = signal.data.y_bounds
collection.set_segments([signal.data.export()])
if self.autoscale:
cmin, cmax = self.get_ybound()
self.yaxis.set_view_interval(ymin, ymax, ignore=False)
self.set_xbound(now-delta, now)
def adjust_title(self):
title, accepted = QtGui.QInputDialog.getText(self.parent,
"Change plot title",
"New title",
text=self.get_title())
if accepted:
self.set_title(title)
def adjust_axes(self):
dialog = QtGui.QDialog(self.parent)
dialog.setWindowTitle('Axis parameters for "%s"' % self.get_title())
L1 = QtGui.QVBoxLayout(dialog)
L2 = QtGui.QHBoxLayout()
L1.addLayout(L2)
xbox = QtGui.QGroupBox("Static X-Axis (Time)", dialog)
xbox.setCheckable(True)
xbox.setChecked(self.static)
xlayout = QtGui.QFormLayout(xbox)
x_min = CustomDateTimeEdit(parent=xbox)
x_max = CustomDateTimeEdit(parent=xbox)
xlayout.addRow("Start", x_min)
xlayout.addRow("End", x_max)
xbox.setLayout(xlayout)
ybox = QtGui.QGroupBox("Y-Axis", dialog)
ylayout = QtGui.QFormLayout(ybox)
y_units = QtGui.QLineEdit(self.yaxis.get_label_text(), ybox)
autoscale = QtGui.QCheckBox("&Autoscale axis", dialog)
state = QtCore.Qt.Checked if self.autoscale else QtCore.Qt.Unchecked
autoscale.setCheckState(state)
y_min = QtGui.QDoubleSpinBox(ybox)
y_min.setDecimals(5)
y_min.setRange(-2e308, 2e308)
a, b = self.yaxis.get_view_interval()
y_min.setValue(a)
y_max = QtGui.QDoubleSpinBox(ybox)
y_max.setDecimals(5)
y_max.setRange(-2e308, 2e308)
y_max.setValue(b)
if self.autoscale:
y_min.setEnabled(False)
y_max.setEnabled(False)
ylayout.addRow("Units", y_units)
ylayout.addWidget(autoscale)
ylayout.addRow("Max", y_max)
ylayout.addRow("Min", y_min)
ybox.setLayout(ylayout)
L2.addWidget(xbox)
L2.addWidget(ybox)
buttonbox = QtGui.QDialogButtonBox((QtGui.QDialogButtonBox.Ok
|QtGui.QDialogButtonBox.Apply
|QtGui.QDialogButtonBox.Cancel),
parent=dialog)
ok = buttonbox.button(QtGui.QDialogButtonBox.Ok)
apply = buttonbox.button(QtGui.QDialogButtonBox.Apply)
cancel = buttonbox.button(QtGui.QDialogButtonBox.Cancel)
def apply_changes():
self.yaxis.set_label_text(y_units.text())
if autoscale.isChecked():
self.autoscale = True
else:
self.yaxis.set_view_interval(y_min.value(), y_max.value(), ignore=True)
self.yaxis.reset_ticks()
if xbox.isChecked():
print x_min.pyDateTime(), x_max.pyDateTime()
L1.addWidget(buttonbox)
dialog.setLayout(L1)
link(autoscale.stateChanged, lambda state: (y_min.setEnabled(not state),
y_max.setEnabled(not state)))
link(cancel.pressed, dialog.close)
link(apply.pressed, apply_changes)
link(ok.pressed, lambda: (apply_changes(), dialog.close()))
dialog.show()
def adjust_signals(self):
print "Adjust sources selected"
dialog = SignalListEditorDialog(self.parent)
descrs = []
for name in self.signals:
descr = {"identifier":name,
"color": [0.0, 0.0, 1.0, 1.0],
"style": "solid"}
descrs.append(descr)
dialog.setup(descrs)
def say_hi(*args):
print "Hi", args
link(dialog.signal_added, self.add_signal)
link(dialog.signal_modified, say_hi)
link(dialog.signal_removed, say_hi)
dialog.show()
class CustomDateTimeEdit(QtGui.QDateTimeEdit):
pyDateTimeChanged = QtCore.Signal([object])
def __init__(self, date=None, time=None, parent=None):
QtGui.QDateTimeEdit.__init__(self, parent)
date = date if date is not None else QtCore.QDate.currentDate()
self.setDate(date)
self.setMinimumDate(QtCore.QDate(1993, 6, 20))
self.setDisplayFormat("ddd MM/dd/yyyy h:mm:ss AP")
self.setCalendarPopup(True)
link(self.dateTimeChanged, self.emit_datetime)
def pyDateTime(self):
qdt = self.dateTime().toUTC()
qdate, qtime = qdt.date(), qdt.time()
dt = datetime.datetime(qdate.year(), qdate.month(), qdate.day(),
qtime.hour(), qtime.minute(), qtime.second())
return dt
def emit_datetime(self, qdt):
qdate, qtime = qdt.date(), qdt.time()
dt = datetime.datetime(qdate.year(), qdate.month(), qdate.day(),
qtime.hour(), qtime.minute(), qtime.second())
self.pyDateTimeChanged.emit(dt)
def setPyDateTime(self, dt):
qdate = QtCore.QDate(dt.year, dt.month, dt.day)
qtime = QtCore.QTime(dt.hour, dt.minute, dt.second)
qdt = QtCore.QDateTime(qdate, qtime)
| CalSol/Impulse | Telemetry/viewer/LiveGraph.py | Python | apache-2.0 | 18,513 |
import codecs
import yaml
import operator
import os
import tempfile
import textwrap
import re
import ast
from mako.template import Template
from .. import Messages, blocks
from ..Constants import TOP_BLOCK_FILE_MODE
from .FlowGraphProxy import FlowGraphProxy
from ..utils import expr_utils
from .top_block import TopBlockGenerator
DATA_DIR = os.path.dirname(__file__)
HEADER_TEMPLATE = os.path.join(DATA_DIR, 'cpp_templates/flow_graph.hpp.mako')
SOURCE_TEMPLATE = os.path.join(DATA_DIR, 'cpp_templates/flow_graph.cpp.mako')
CMAKE_TEMPLATE = os.path.join(DATA_DIR, 'cpp_templates/CMakeLists.txt.mako')
header_template = Template(filename=HEADER_TEMPLATE)
source_template = Template(filename=SOURCE_TEMPLATE)
cmake_template = Template(filename=CMAKE_TEMPLATE)
class CppTopBlockGenerator(TopBlockGenerator):
def __init__(self, flow_graph, file_path):
"""
Initialize the C++ top block generator object.
Args:
flow_graph: the flow graph object
file_path: the path where we want to create
a new directory with C++ files
"""
self._flow_graph = FlowGraphProxy(flow_graph)
self._generate_options = self._flow_graph.get_option('generate_options')
self._mode = TOP_BLOCK_FILE_MODE
# Handle the case where the directory is read-only
# In this case, use the system's temp directory
if not os.access(file_path, os.W_OK):
file_path = tempfile.gettempdir()
# When generating C++ code, we create a new directory
# (file_path) and generate the files inside that directory
filename = self._flow_graph.get_option('id')
self.file_path = os.path.join(file_path, filename)
self._dirname = file_path
def write(self):
"""create directory, generate output and write it to files"""
self._warnings()
fg = self._flow_graph
platform = fg.parent
self.title = fg.get_option('title') or fg.get_option('id').replace('_', ' ').title()
variables = fg.get_cpp_variables()
parameters = fg.get_parameters()
monitors = fg.get_monitors()
self._variable_types()
self._parameter_types()
self.namespace = {
'flow_graph': fg,
'variables': variables,
'parameters': parameters,
'monitors': monitors,
'generate_options': self._generate_options,
'config': platform.config
}
if not os.path.exists(self.file_path):
os.makedirs(self.file_path)
for filename, data in self._build_cpp_header_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
if not self._generate_options.startswith('hb'):
if not os.path.exists(os.path.join(self.file_path, 'build')):
os.makedirs(os.path.join(self.file_path, 'build'))
for filename, data in self._build_cpp_source_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
if fg.get_option('gen_cmake') == 'On':
for filename, data in self._build_cmake_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
def _build_cpp_source_code_from_template(self):
"""
Convert the flow graph to a C++ source file.
Returns:
a string of C++ code
"""
file_path = self.file_path + '/' + self._flow_graph.get_option('id') + '.cpp'
output = []
flow_graph_code = source_template.render(
title=self.title,
includes=self._includes(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((file_path, flow_graph_code))
return output
def _build_cpp_header_code_from_template(self):
"""
Convert the flow graph to a C++ header file.
Returns:
a string of C++ code
"""
file_path = self.file_path + '/' + self._flow_graph.get_option('id') + '.hpp'
output = []
flow_graph_code = header_template.render(
title=self.title,
includes=self._includes(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((file_path, flow_graph_code))
return output
def _build_cmake_code_from_template(self):
"""
Convert the flow graph to a CMakeLists.txt file.
Returns:
a string of CMake code
"""
filename = 'CMakeLists.txt'
file_path = os.path.join(self.file_path, filename)
cmake_tuples = []
cmake_opt = self._flow_graph.get_option("cmake_opt")
cmake_opt = " " + cmake_opt # To make sure we get rid of the "-D"s when splitting
for opt_string in cmake_opt.split(" -D"):
opt_string = opt_string.strip()
if opt_string:
cmake_tuples.append(tuple(opt_string.split("=")))
output = []
flow_graph_code = cmake_template.render(
title=self.title,
includes=self._includes(),
blocks=self._blocks(),
callbacks=self._callbacks(),
connections=self._connections(),
links=self._links(),
cmake_tuples=cmake_tuples,
**self.namespace
)
# strip trailing white-space
flow_graph_code = "\n".join(line.rstrip() for line in flow_graph_code.split("\n"))
output.append((file_path, flow_graph_code))
return output
def _links(self):
fg = self._flow_graph
links = fg.links()
seen = set()
output = []
for link_list in links:
if link_list:
for link in link_list:
seen.add(link)
return list(seen)
def _includes(self):
fg = self._flow_graph
includes = fg.includes()
seen = set()
output = []
def is_duplicate(l):
if l.startswith('#include') and l in seen:
return True
            seen.add(l)
return False
for block_ in includes:
for include_ in block_:
if not include_:
continue
line = include_.rstrip()
if not is_duplicate(line):
output.append(line)
return output
def _blocks(self):
fg = self._flow_graph
parameters = fg.get_parameters()
        # List of blocks, excluding variables, imports, parameters and disabled blocks
def _get_block_sort_text(block):
code = block.cpp_templates.render('make').replace(block.name, ' ')
try:
code += block.params['gui_hint'].get_value() # Newer gui markup w/ qtgui
except:
pass
return code
blocks = [
b for b in fg.blocks
if b.enabled and not (b.get_bypassed() or b.is_import or b in parameters or b.key == 'options' or b.is_virtual_source() or b.is_virtual_sink())
]
blocks = expr_utils.sort_objects(blocks, operator.attrgetter('name'), _get_block_sort_text)
blocks_make = []
for block in blocks:
translations = block.cpp_templates.render('translations')
make = block.cpp_templates.render('make')
declarations = block.cpp_templates.render('declarations')
if translations:
translations = yaml.safe_load(translations)
else:
translations = {}
translations.update(
{r"gr\.sizeof_([\w_]+)": r"sizeof(\1)"}
)
for key in translations:
make = re.sub(key.replace("\\\\", "\\"), translations[key], make)
declarations = declarations.replace(key, translations[key])
if make:
blocks_make.append((block, make, declarations))
elif 'qt' in block.key:
# The QT Widget blocks are technically variables,
# but they contain some code we don't want to miss
blocks_make.append(('', make, declarations))
return blocks_make
def _variable_types(self):
fg = self._flow_graph
variables = fg.get_cpp_variables()
type_translation = {'complex': 'gr_complex', 'real': 'double', 'float': 'float', 'int': 'int', 'complex_vector': 'std::vector<gr_complex>', 'real_vector': 'std::vector<double>', 'float_vector': 'std::vector<float>', 'int_vector': 'std::vector<int>', 'string': 'std::string', 'bool': 'bool'}
        # If the type is explicitly specified, translate to the corresponding C++ type
for var in list(variables):
if var.params['value'].dtype != 'raw':
var.vtype = type_translation[var.params['value'].dtype]
variables.remove(var)
# If the type is 'raw', we'll need to evaluate the variable to infer the type.
# Create an executable fragment of code containing all 'raw' variables in
# order to infer the lvalue types.
#
# Note that this differs from using ast.literal_eval() as literal_eval evaluates one
# variable at a time. The code fragment below evaluates all variables together which
# allows the variables to reference each other (i.e. a = b * c).
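        # For illustration (hypothetical variable id and value), the generated
        # fragment looks roughly like:
        #
        #   def get_decl_types():
        #       var_types = {}
        #       samp_rate = 32000
        #       var_types = {}
        #       var_types['samp_rate'] = type(samp_rate)
        #       return var_types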
prog = 'def get_decl_types():\n'
prog += '\tvar_types = {}\n'
for var in variables:
prog += '\t' + str(var.params['id'].value) + '=' + str(var.params['value'].value) + '\n'
        prog += '\tvar_types = {}\n'
for var in variables:
prog += '\tvar_types[\'' + str(var.params['id'].value) + '\'] = type(' + str(var.params['id'].value) + ')\n'
prog += '\treturn var_types'
# Execute the code fragment in a separate namespace and retrieve the lvalue types
var_types = {}
namespace = {}
try:
exec(prog, namespace)
var_types = namespace['get_decl_types']()
except Exception as excp:
print('Failed to get parameter lvalue types: %s' %(excp))
# Format the rvalue of each variable expression
for var in variables:
var.format_expr(var_types[str(var.params['id'].value)])
def _parameter_types(self):
fg = self._flow_graph
parameters = fg.get_parameters()
for param in parameters:
            type_translation = {'eng_float' : 'double', 'intx' : 'int', 'str' : 'std::string', 'complex': 'gr_complex'}
param.vtype = type_translation[param.params['type'].value]
if param.vtype == 'gr_complex':
evaluated = ast.literal_eval(param.params['value'].value.strip())
cpp_cmplx = '{' + str(evaluated.real) + ', ' + str(evaluated.imag) + '}'
# Update the 'var_make' entry in the cpp_templates dictionary
d = param.cpp_templates
cpp_expr = d['var_make'].replace('${value}', cpp_cmplx)
d.update({'var_make':cpp_expr})
param.cpp_templates = d
def _callbacks(self):
fg = self._flow_graph
variables = fg.get_cpp_variables()
parameters = fg.get_parameters()
# List of variable names
var_ids = [var.name for var in parameters + variables]
replace_dict = dict((var_id, 'this->' + var_id) for var_id in var_ids)
callbacks_all = []
for block in fg.iter_enabled_blocks():
if not (block.is_virtual_sink() or block.is_virtual_source()):
callbacks_all.extend(expr_utils.expr_replace(cb, replace_dict) for cb in block.get_cpp_callbacks())
# Map var id to callbacks
def uses_var_id(callback):
used = expr_utils.get_variable_dependencies(callback, [var_id])
return used and ('this->' + var_id in callback) # callback might contain var_id itself
callbacks = {}
for var_id in var_ids:
callbacks[var_id] = [callback for callback in callbacks_all if uses_var_id(callback)]
return callbacks
def _connections(self):
fg = self._flow_graph
templates = {key: Template(text)
for key, text in fg.parent_platform.cpp_connection_templates.items()}
def make_port_sig(port):
if port.parent.key in ('pad_source', 'pad_sink'):
block = 'self()'
key = fg.get_pad_port_global_key(port)
else:
block = 'this->' + port.parent_block.name
key = port.key
if not key.isdigit():
# TODO What use case is this supporting?
toks = re.findall(r'\d+', key)
if len(toks) > 0:
key = toks[0]
else:
# Assume key is a string
key = '"' + key + '"'
return '{block}, {key}'.format(block=block, key=key)
connections = fg.get_enabled_connections()
# Get the virtual blocks and resolve their connections
connection_factory = fg.parent_platform.Connection
virtual_source_connections = [c for c in connections if isinstance(c.source_block, blocks.VirtualSource)]
for connection in virtual_source_connections:
sink = connection.sink_port
for source in connection.source_port.resolve_virtual_source():
resolved = connection_factory(fg.orignal_flowgraph, source, sink)
connections.append(resolved)
virtual_connections = [c for c in connections if (isinstance(c.source_block, blocks.VirtualSource) or isinstance(c.sink_block, blocks.VirtualSink))]
for connection in virtual_connections:
# Remove the virtual connection
connections.remove(connection)
# Bypassing blocks: Need to find all the enabled connections for the block using
# the *connections* object rather than get_connections(). Create new connections
# that bypass the selected block and remove the existing ones. This allows adjacent
# bypassed blocks to see the newly created connections to downstream blocks,
# allowing them to correctly construct bypass connections.
bypassed_blocks = fg.get_bypassed_blocks()
for block in bypassed_blocks:
# Get the upstream connection (off of the sink ports)
# Use *connections* not get_connections()
source_connection = [c for c in connections if c.sink_port == block.sinks[0]]
# The source connection should never have more than one element.
assert (len(source_connection) == 1)
# Get the source of the connection.
source_port = source_connection[0].source_port
# Loop through all the downstream connections
for sink in (c for c in connections if c.source_port == block.sources[0]):
if not sink.enabled:
# Ignore disabled connections
continue
connection = connection_factory(fg.orignal_flowgraph, source_port, sink.sink_port)
connections.append(connection)
# Remove this sink connection
connections.remove(sink)
# Remove the source connection
connections.remove(source_connection[0])
# List of connections where each endpoint is enabled (sorted by domains, block names)
def by_domain_and_blocks(c):
return c.type, c.source_block.name, c.sink_block.name
rendered = []
for con in sorted(connections, key=by_domain_and_blocks):
template = templates[con.type]
if con.source_port.dtype != 'bus':
code = template.render(make_port_sig=make_port_sig, source=con.source_port, sink=con.sink_port)
if not self._generate_options.startswith('hb'):
code = 'this->tb->' + code
rendered.append(code)
else:
# Bus ports need to iterate over the underlying connections and then render
# the code for each subconnection
porta = con.source_port
portb = con.sink_port
fg = self._flow_graph
if porta.dtype == 'bus' and portb.dtype == 'bus':
# which bus port is this relative to the bus structure
if len(porta.bus_structure) == len(portb.bus_structure):
for port_num in porta.bus_structure:
hidden_porta = porta.parent.sources[port_num]
hidden_portb = portb.parent.sinks[port_num]
connection = fg.parent_platform.Connection(
parent=self, source=hidden_porta, sink=hidden_portb)
code = template.render(make_port_sig=make_port_sig, source=hidden_porta, sink=hidden_portb)
if not self._generate_options.startswith('hb'):
code = 'this->tb->' + code
rendered.append(code)
return rendered
| mrjacobagilbert/gnuradio | grc/core/generator/cpp_top_block.py | Python | gpl-3.0 | 17,872 |
from flask import Flask
app = Flask(__name__)
from media import Movie
from flask import render_template
import re
@app.route('/')
def index():
'''View function for index page.'''
toy_story = Movie(title = "Toy Story 3", trailer_youtube_url ="https://www.youtube.com/watch?v=QW0sjQFpXTU",
poster_image_url="https://images-na.ssl-images-amazon.com/images/M/MV5BMTgxOTY4Mjc0MF5BMl5BanBnXkFtZTcwNTA4MDQyMw@@._V1_UY268_CR3,0,182,268_AL_.jpg",
storyline='''Andy's toys get mistakenly delivered to a day care centre.
Woody convinces the other toys that they weren't dumped and leads them on an expedition back
home.''')
pulp_fiction = Movie(title = "Pulp Fiction ", trailer_youtube_url ="https://www.youtube.com/watch?v=s7EdQ4FqbhY",
poster_image_url="https://images-na.ssl-images-amazon.com/images/M/MV5BMTkxMTA5OTAzMl5BMl5BanBnXkFtZTgwNjA5MDc3NjE@._V1_UX182_CR0,0,182,268_AL_.jpg",
storyline='''The lives of two mob hit men, a boxer, a gangster's wife, and a pair of diner bandits
intertwine in four tales of violence and redemption''')
shawshank = Movie(title = "The Shawshank Redemption", trailer_youtube_url ="https://www.youtube.com/watch?v=KtwXlIwozog",
poster_image_url="https://images-na.ssl-images-amazon.com/images/M/MV5BODU4MjU4NjIwNl5BMl5BanBnXkFtZTgwMDU2MjEyMDE@._V1_UX182_CR0,0,182,268_AL_.jpg",
storyline='''Two imprisoned men bond over a number of years, finding solace
and eventual redemption through acts of common decency.''')
godfather = Movie(title = "The Godfather ", trailer_youtube_url ="https://www.youtube.com/watch?v=sY1S34973zA",
poster_image_url="https://images-na.ssl-images-amazon.com/images/M/MV5BMjEyMjcyNDI4MF5BMl5BanBnXkFtZTcwMDA5Mzg3OA@@._V1_UX182_CR0,0,182,268_AL_.jpg",
storyline='''The aging patriarch of an organized crime dynasty transfers control of his clandestine empire to his reluctant son.''')
dark_knight = Movie(title = "The Dark Knight ", trailer_youtube_url ="https://www.youtube.com/watch?v=EXeTwQWrcwY",
poster_image_url="https://images-na.ssl-images-amazon.com/images/M/MV5BMTMxNTMwODM0NF5BMl5BanBnXkFtZTcwODAyMTk2Mw@@._V1_UX182_CR0,0,182,268_AL_.jpg",
storyline='''Set within a year after the events of Batman Begins, Batman, Lieutenant James Gordon, and new district attorney Harvey Dent successfully begin to round up the criminals''')
movies=[toy_story,pulp_fiction,dark_knight,godfather,shawshank]
# Replace `Youtube URL` with just `Youtube video ID`
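    # e.g. "https://www.youtube.com/watch?v=QW0sjQFpXTU" -> "QW0sjQFpXTU";
    # the second pattern covers short links of the form "https://youtu.be/<id>".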
for movie in movies:
youtube_id_match = re.search(r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
youtube_id_match = youtube_id_match or re.search(r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match else None)
movie.trailer_youtube_url = trailer_youtube_id
return render_template('index.html',
data=movies)
if __name__ == '__main__':
app.run(debug=True)
| mr-karan/Udacity-FullStack-ND004 | Project1/projects/movieServer/app.py | Python | mit | 2,980 |
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.edgeos import edgeos_facts
from units.modules.utils import set_module_args
from .edgeos_module import TestEdgeosModule, load_fixture
class TestEdgeosFactsModule(TestEdgeosModule):
module = edgeos_facts
def setUp(self):
super(TestEdgeosFactsModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.edgeos.edgeos_facts.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestEdgeosFactsModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item)
command = obj['command']
except ValueError:
command = item
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
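        # e.g. a command such as "show version" is served from a fixture file
        # named "show_version" (spaces replaced with underscores above).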
self.run_commands.side_effect = load_from_file
def test_edgeos_facts_default(self):
set_module_args(dict(gather_subset='default'))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 5)
self.assertEqual(facts['ansible_net_hostname'].strip(), 'er01')
self.assertEqual(facts['ansible_net_version'], '1.9.7+hotfix.4')
def test_edgeos_facts_not_all(self):
set_module_args(dict(gather_subset='!all'))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 5)
self.assertEqual(facts['ansible_net_hostname'].strip(), 'er01')
self.assertEqual(facts['ansible_net_version'], '1.9.7+hotfix.4')
def test_edgeos_facts_exclude_most(self):
set_module_args(dict(gather_subset=['!neighbors', '!config']))
result = self.execute_module()
facts = result.get('ansible_facts')
self.assertEqual(len(facts), 5)
self.assertEqual(facts['ansible_net_hostname'].strip(), 'er01')
self.assertEqual(facts['ansible_net_version'], '1.9.7+hotfix.4')
def test_edgeos_facts_invalid_subset(self):
set_module_args(dict(gather_subset='cereal'))
result = self.execute_module(failed=True)
| ptisserand/ansible | test/units/modules/network/edgeos/test_edgeos_facts.py | Python | gpl-3.0 | 3,270 |
from mount import Plugin, command_basic
class MyPlugin(Plugin):
"""Example plugin. You can declare your commands in __server_commands__ or __host_commands__"""
def __init__(self):
"""This function is called when the plugin is loaded"""
print("Test plugin loaded!")
@command_basic
def hello(self, server, args):
"""Demo command that prints the arguments that you passed it"""
print("You called hello with args: ", args)
| panagiks/reverse_shell | Server/Plugins/template.py | Python | mit | 469 |
'''The problem statement does not list Python as an accepted language; I did not notice that and wrote a Python solution anyway. A C++ solution is also available here.'''
def sumOfSq(n):
if n==1:
return 1
else:
return n**2+sumOfSq(n-1)
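# Quick sanity check (not part of the submitted solution): the closed form is
# n*(n+1)*(2*n+1)/6, e.g. sumOfSq(3) == 1 + 4 + 9 == 14 == 3*4*7/6.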
while True:
no=input()
if no==0:
break
else:
        print sumOfSq(no)
 | ProgDan/maratona | SPOJ/SAMER08F.py | Python | gpl-3.0 | 327 |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the ROI pooling layer
'''
import itertools as itt
import numpy as np
from neon.backends import gen_backend
from neon import NervanaObject
from timeit import default_timer as timeit
spatial_scale = 1.0/16
def _fprop_slice_np(h, stride, H, roi_offset):
"""
    Compute the slice of the input map covered by one pooled output index
    along a single dimension.
    h: index on the pooled map (output index)
    stride: pooling stride along this dimension (ROI size / pooled output size)
    H: size of the input map along this dimension (upper clipping bound)
    roi_offset: offset of the ROI start, added to hstart and hend before clipping
"""
hstart = int(np.floor(float(h) * stride))
hend = int(np.ceil(float(h+1) * stride))
hstart = min(max(hstart + roi_offset, 0), H)
hend = min(max(hend + roi_offset, 0), H)
return slice(hstart, hend), hend-hstart
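# Worked example (illustrative numbers): with h=1, stride=2.5, H=62, roi_offset=4,
# hstart = floor(2.5) = 2 and hend = ceil(5.0) = 5 before the offset, so the function
# returns slice(6, 9) and a length of 3 after adding the offset and clipping to [0, H].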
def pytest_generate_tests(metafunc):
if 'fargs' in metafunc.fixturenames:
fargs = []
bsz_rng = [2]
roi_num_rng = [2]
img_fm_c_rng = [2]
img_fm_h_rng = [62]
img_fm_w_rng = [62]
roi_size_rng = [6]
fargs = itt.product(roi_num_rng, img_fm_c_rng, img_fm_h_rng,
img_fm_w_rng, roi_size_rng, bsz_rng)
metafunc.parametrize('fargs', fargs)
def bprop_roipooling_ref(fm, rois, error, fm_channel, fm_height, fm_width,
bsz, rois_per_image, H, W):
"""
    Reference implementation of the ROIPooling backward pass. It uses a
    different approach from the one in the CPU backend.
"""
feature_maps = fm.reshape(fm_channel, fm_height, fm_width, bsz)
rois_per_batch = rois_per_image * bsz
error_in = error.reshape(fm_channel, H, W, rois_per_batch)
delta = np.zeros(feature_maps.shape).reshape(fm_channel, fm_height, fm_width, bsz)
# combine the feature map with ROIs
for b_id in xrange(rois_per_batch):
[idx, xmin, ymin, xmax, ymax] = rois[b_id]
xmin = int(round(xmin * spatial_scale))
xmax = int(round(xmax * spatial_scale))
ymin = int(round(ymin * spatial_scale))
ymax = int(round(ymax * spatial_scale))
roi_width = max(xmax - xmin + 1, 1)
roi_height = max(ymax - ymin + 1, 1)
stride_h = float(roi_height)/float(H)
stride_w = float(roi_width)/float(W)
for h_out in xrange(H):
sliceh, lenh = _fprop_slice_np(h_out, stride_h, fm_height, ymin)
if sliceh.stop <= sliceh.start:
continue
for w_out in xrange(W):
slicew, lenw = _fprop_slice_np(w_out, stride_w, fm_width, xmin)
if slicew.stop <= slicew.start:
continue
else:
array_I = feature_maps[:, sliceh, slicew, int(idx)].reshape(
fm_channel, -1)
max_idx = np.argmax(array_I, axis=1)
delta_view = delta[:, sliceh, slicew, int(idx)].reshape(
fm_channel, -1)
delta_view[
range(fm_channel), max_idx] += error_in[:, h_out, w_out, b_id]
delta[:, sliceh, slicew, int(idx)] = delta_view.reshape(fm_channel,
lenh,
lenw)
return delta
def fprop_roipooling_ref(fm, rois, fm_channel, fm_height, fm_width, bsz, rois_per_image, H, W):
feature_maps = fm.reshape(fm_channel, fm_height, fm_width, bsz)
rois_per_batch = rois_per_image * bsz
outputs = np.zeros((fm_channel, H, W, rois_per_batch))
# combine the feature map with ROIs
for b_id in xrange(rois_per_batch):
[idx, xmin, ymin, xmax, ymax] = rois[b_id]
xmin = int(round(xmin * spatial_scale))
xmax = int(round(xmax * spatial_scale))
ymin = int(round(ymin * spatial_scale))
ymax = int(round(ymax * spatial_scale))
roi_width = max(xmax - xmin + 1, 1)
roi_height = max(ymax - ymin + 1, 1)
stride_h = float(roi_height) / H
stride_w = float(roi_width) / W
for h_out in xrange(H):
sliceh, _ = _fprop_slice_np(h_out, stride_h, fm_height, ymin)
if sliceh.stop <= sliceh.start:
continue
for w_out in xrange(W):
slicew, _ = _fprop_slice_np(w_out, stride_w, fm_width, xmin)
if slicew.stop <= slicew.start:
continue
else:
array_I = feature_maps[:, sliceh, slicew, int(idx)].reshape(
fm_channel, -1)
outputs[:, h_out, w_out, b_id] = np.max(array_I, axis=1)
return outputs.reshape(-1, rois_per_batch)
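# Shape note (illustrative): with fm_channel=2, H=W=6 and rois_per_batch=4, the reference
# fprop above returns an array of shape (2*6*6, 4) == (72, 4), i.e. one flattened
# C x H x W pooled map per ROI column.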
def test_roipooling_fprop_random(backend_default, fargs):
rois_per_image, img_fm_c, img_fm_h, img_fm_w, roi_size, bsz = fargs
# generate a random feature map and some random ROIs
feature_maps = np.random.random(
(img_fm_c, img_fm_h, img_fm_w, bsz)).reshape(-1, bsz)
rois_per_batch = rois_per_image * bsz
rois_idx = np.vstack([i*np.ones((rois_per_image, 1)) for i in range(bsz)])
rois = np.random.random((rois_per_batch, 4)) * min(img_fm_h, img_fm_w)
rois = np.zeros((rois_per_batch, 4))
rois[:, 0] = np.random.random((rois_per_batch,)) * 10 / spatial_scale
rois[:, 1] = np.random.random((rois_per_batch,)) * 25 / spatial_scale
rois[:, 2] = (
np.random.random((rois_per_batch,)) * 27 + (img_fm_w - 27)) / spatial_scale
rois[:, 3] = (
np.random.random((rois_per_batch,)) * 12 + (img_fm_h - 12)) / spatial_scale
rois = np.hstack((rois_idx, rois))
# run the numpy roi fprop (function inside this test script)
outputs_np = fprop_roipooling_ref(feature_maps, rois,
img_fm_c, img_fm_h, img_fm_w,
bsz, rois_per_image, roi_size, roi_size)
# call backend roipooling kernel
NervanaObject.be.bsz = bsz
be = NervanaObject.be
input_dev = be.array(feature_maps)
rois_dev = be.array(rois)
output_shape = (img_fm_c, roi_size, roi_size, rois_per_batch)
outputs_dev = be.zeros(output_shape)
# make sure the type being int
argmax_dev = be.zeros(output_shape, np.int32)
start_time = timeit()
be.roipooling_fprop(input_dev, rois_dev, outputs_dev, argmax_dev, rois_per_batch,
img_fm_c, img_fm_h, img_fm_w, roi_size, roi_size, spatial_scale)
print "Nervana backend roipooling fprop (sec): {}".format(timeit() - start_time)
outputs_be = outputs_dev.get().reshape(-1, rois_per_batch)
assert np.allclose(outputs_np, outputs_be, atol=1e-6, rtol=0)
def test_roipooling_fprop_ref(backend_default, rois=None, inputs=None, outputs_ref=None):
if rois is None and inputs is None and outputs_ref is None:
return
(bsz, img_fm_c, img_fm_h, img_fm_w) = inputs.shape
(rois_per_batch, _, roi_size, _) = outputs_ref.shape
outputs_ref_in = outputs_ref.reshape(rois_per_batch, -1).T
rois_per_image = rois_per_batch / bsz
feature_maps = inputs.reshape(bsz, -1).T.astype(np.float, order='C')
# run the numpy roi fprop (function inside this test script)
outputs_np = fprop_roipooling_ref(feature_maps, rois,
img_fm_c, img_fm_h, img_fm_w,
bsz, rois_per_image, roi_size, roi_size)
assert np.allclose(outputs_ref_in, outputs_np, atol=1e-6, rtol=0)
# call NervanaGPU roipooling kernel
NervanaObject.be.bsz = bsz
be = NervanaObject.be
input_dev = be.array(feature_maps)
rois_dev = be.array(rois)
output_shape = (img_fm_c, roi_size, roi_size, rois_per_batch)
outputs_dev = be.zeros(output_shape, dtype=np.float32)
# make sure the type being int
argmax_dev = be.zeros(output_shape, dtype=np.int32)
start_time = timeit()
be.roipooling_fprop(input_dev, rois_dev, outputs_dev, argmax_dev, rois_per_batch,
img_fm_c, img_fm_h, img_fm_w, roi_size, roi_size, spatial_scale)
outputs_backend = outputs_dev.get().reshape(-1, rois_per_batch)
print "Nervana backend roipooling fprop (sec): {}".format(timeit() - start_time)
assert np.allclose(outputs_ref_in, outputs_backend, atol=1e-6, rtol=0)
def test_roipooling_bprop_random(backend_default, fargs):
rois_per_image, img_fm_c, img_fm_h, img_fm_w, roi_size, bsz = fargs
rois_per_batch = rois_per_image * bsz
# generate a random feature map and some random ROIs
feature_map_size = img_fm_c * img_fm_h * img_fm_w * bsz
feature_maps = np.array(range(feature_map_size)).reshape(
(img_fm_c, img_fm_h, img_fm_w, bsz))
input_errors = np.zeros(
(img_fm_c, roi_size, roi_size, rois_per_batch))
range_num = roi_size * roi_size
input_errors[0, :, :, rois_per_batch-1] = np.array(
range(range_num)).reshape(input_errors[0, :, :, rois_per_batch-1].shape)
rois_idx = np.vstack([i*np.ones((rois_per_image, 1)) for i in range(bsz)])
rois = np.random.random((rois_per_batch, 4)) * min(img_fm_h, img_fm_w)
# use full frame as ROI
rois = np.zeros((rois_per_batch, 4))
rois[:, 0] = np.ones((rois_per_batch,))
rois[:, 1] = np.ones((rois_per_batch,))
rois[:, 2] = np.ones((rois_per_batch,)) * img_fm_w / spatial_scale
rois[:, 3] = np.ones((rois_per_batch,)) * img_fm_w / spatial_scale
rois = np.hstack((rois_idx, rois))
    # run the numpy roi bprop (function inside this test script)
outputs_np = bprop_roipooling_ref(feature_maps, rois, input_errors,
img_fm_c, img_fm_h, img_fm_w,
bsz, rois_per_image, roi_size, roi_size)
# call backend roipooling kernel
NervanaObject.be.bsz = bsz
be = NervanaObject.be
input_dev = be.array(feature_maps)
rois_dev = be.array(rois)
output_shape = (img_fm_c, roi_size, roi_size, rois_per_batch)
outputs_dev = be.zeros(output_shape, dtype=np.float32)
# make sure the type being int
argmax_dev = be.zeros(output_shape, dtype=np.int32)
input_error_dev = be.array(input_errors)
output_error_dev = be.zeros(feature_maps.shape)
be.roipooling_fprop(input_dev, rois_dev, outputs_dev, argmax_dev, rois_per_batch,
img_fm_c, img_fm_h, img_fm_w, roi_size, roi_size, spatial_scale)
start_time = timeit()
be.roipooling_bprop(input_error_dev, rois_dev, output_error_dev, argmax_dev,
rois_per_batch, img_fm_c, img_fm_h, img_fm_w, roi_size,
roi_size, spatial_scale)
print "Nervana backend roipooling bprop (sec): {}".format(timeit() - start_time)
assert output_error_dev.get().reshape(
img_fm_c, img_fm_h, img_fm_w, bsz)[:, :, :, 0].sum() == 0
assert output_error_dev.get().reshape(
img_fm_c, img_fm_h, img_fm_w, bsz)[:, :, :, -1].sum() != 0
assert output_error_dev.get().sum() == input_errors.sum()
outputs_be = output_error_dev.get()
assert np.allclose(outputs_np, outputs_be, atol=1e-6, rtol=0)
def test_roipooling_bprop_ref(backend_default, rois=None, inputs=None, outputs_fprop_ref=None,
input_errors=None):
if rois is None and inputs is None and outputs_fprop_ref is None and input_errors is None:
return
(bsz, img_fm_c, img_fm_h, img_fm_w) = inputs.shape
(rois_per_batch, _, roi_size, _) = input_errors.shape
outputs_fprop_ref_in = outputs_fprop_ref.reshape(rois_per_batch, -1).T
feature_maps = inputs.reshape(bsz, -1).T.astype(np.float, order='C')
input_errors_in = input_errors.reshape(
rois_per_batch, -1).T.astype(np.float, order='C')
# compare with GPU kernel, need to call fprop first, then bprop
NervanaObject.be.bsz = bsz
be = NervanaObject.be
input_dev = be.array(feature_maps)
rois_dev = be.array(rois)
output_shape = (img_fm_c, roi_size, roi_size, rois_per_batch)
outputs_dev = be.zeros(output_shape, dtype=np.float32)
# make sure the type being int
argmax_dev = be.zeros(output_shape, dtype=np.int32)
input_error_dev = be.array(input_errors_in)
output_error_dev = be.zeros(outputs_fprop_ref_in.shape)
be.roipooling_fprop(input_dev, rois_dev, outputs_dev, argmax_dev, rois_per_batch,
img_fm_c, img_fm_h, img_fm_w, roi_size, roi_size, spatial_scale)
outputs_fprop_be = outputs_dev.get().reshape(-1, rois_per_batch)
assert np.allclose(
outputs_fprop_ref_in, outputs_fprop_be, atol=1e-6, rtol=0)
start_time = timeit()
be.roipooling_bprop(input_error_dev, rois_dev, output_error_dev, argmax_dev,
rois_per_batch, img_fm_c, img_fm_h, img_fm_w, roi_size,
roi_size, spatial_scale)
print "NervanaGPU roipooling bprop (sec): {}".format(timeit() - start_time)
outputs_backend = output_error_dev.get()
assert np.allclose(outputs_fprop_ref_in, outputs_backend, atol=1e-6, rtol=0)
if __name__ == '__main__':
bsz = 2
be = gen_backend(backend='gpu', batch_size=bsz, compat_mode='caffe')
# compare using random data
fargs = (2, 2, 62, 62, 6, bsz)
test_roipooling_fprop_random(be, fargs)
test_roipooling_bprop_random(be, fargs)
| coufon/neon-distributed | tests/test_roipooling_layer.py | Python | apache-2.0 | 13,940 |
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import requests
from requests.auth import HTTPBasicAuth
from pixiedust_flightpredict import Configuration
from pixiedust.utils.shellAccess import ShellAccess
import pixiedust
import dateutil.parser as parser
from pixiedust.utils.template import *
myLogger = pixiedust.getLogger(__name__)
historyDBName = "flightpredictorhistory"
env = PixiedustTemplateEnvironment()
def saveFlightResults(payload):
auth=HTTPBasicAuth(Configuration.cloudantUserName, Configuration.cloudantPassword)
#make sure the database is created first
url = Configuration.cloudantHost
if "://" not in url:
url = "https://"+url
r = requests.get(url + "/" + historyDBName, auth=auth)
if r.status_code != 200:
if r.status_code == 404:
r = requests.put(url+ "/" + historyDBName, auth=auth)
if r.status_code != 200 and r.status_code != 201 and r.status_code != 412:
return myLogger.error("Error creating to the history database: {0}".format(r.text))
else:
return myLogger.error("Error connecting to the history database: {0}".format(r.text))
depAirportInfo = payload["departureAirportInfo"]["airportInfo"]
arrAirportInfo = payload["arrivalAirportInfo"]["airportInfo"]
r = requests.post(url+"/" + historyDBName, auth=auth,json={
'depAirportFSCode': depAirportInfo["fs"],
'depAirportName': depAirportInfo["name"],
'depAirportLong': depAirportInfo["longitude"],
'depAirportLat': depAirportInfo["latitude"],
'arrAirportFSCode': arrAirportInfo["fs"],
'arrAirportName': arrAirportInfo["name"],
'arrAirportLong': arrAirportInfo["longitude"],
'arrAirportLat': arrAirportInfo["latitude"],
'prediction': payload["prediction"],
'carrierFsCode': payload["flightInfo"]["carrierFsCode"],
'flightNumber': payload["flightInfo"]["flightNumber"],
'departureDate': parser.parse( payload["flightInfo"]["departureTime"]).strftime("%B %d %Y"),
'departureTime': parser.parse( payload["flightInfo"]["departureTime"]).strftime("%I:%M %p")
})
if r.status_code != 200 and r.status_code != 201:
return myLogger.error("Error saving flight results in database to the history database: {0}".format(r.text))
"""
Load the flight history into a dataFrame
"""
def loadFlightHistory():
if Configuration.cloudantHost is None or Configuration.cloudantUserName is None or Configuration.cloudantPassword is None:
raise Exception("Missing credentials")
return ShellAccess.sqlContext.read.format("com.cloudant.spark")\
.option("cloudant.host",Configuration.cloudantHost)\
.option("cloudant.username",Configuration.cloudantUserName)\
.option("cloudant.password",Configuration.cloudantPassword)\
.option("schemaSampleSize", "-1")\
.load(historyDBName)
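# The DataFrame returned above contains one document per saved prediction, with the fields
# written by saveFlightResults (e.g. depAirportFSCode, arrAirportName, prediction, flightNumber).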
def getBadgeHtml(depAirportCode, arrAirportCode):
if ShellAccess.flightHistoryDF is None:
myLogger.info("Reloading flight history from getBadgeHtml")
ShellAccess.flightHistoryDF = loadFlightHistory()
df = ShellAccess.flightHistoryDF
res = df.filter( df['depAirportFSCode']== depAirportCode )\
.filter( df['arrAirportFSCode']==arrAirportCode )\
.map(lambda t: ((t["carrierFsCode"], t["flightNumber"], t["departureDate"],t["departureTime"], t["depAirportName"], t["arrAirportName"]), 1))\
.reduceByKey(lambda t,v : t+v)
return env.getTemplate("flightBadge.html").render(flights=res.collect())
| ibm-cds-labs/simple-data-pipe-connector-flightstats | pixiedust_flightpredict/pixiedust_flightpredict/running/flightHistory.py | Python | apache-2.0 | 4,263 |
"""Support for Z-Wave door locks."""
import logging
import voluptuous as vol
from homeassistant.components.lock import DOMAIN, LockEntity
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ZWaveDeviceEntity, const
_LOGGER = logging.getLogger(__name__)
ATTR_NOTIFICATION = "notification"
ATTR_LOCK_STATUS = "lock_status"
ATTR_CODE_SLOT = "code_slot"
ATTR_USERCODE = "usercode"
CONFIG_ADVANCED = "Advanced"
SERVICE_SET_USERCODE = "set_usercode"
SERVICE_GET_USERCODE = "get_usercode"
SERVICE_CLEAR_USERCODE = "clear_usercode"
POLYCONTROL = 0x10E
DANALOCK_V2_BTZE = 0x2
POLYCONTROL_DANALOCK_V2_BTZE_LOCK = (POLYCONTROL, DANALOCK_V2_BTZE)
WORKAROUND_V2BTZE = 1
WORKAROUND_DEVICE_STATE = 2
WORKAROUND_TRACK_MESSAGE = 4
WORKAROUND_ALARM_TYPE = 8
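# The workaround constants are bit flags, so a single DEVICE_MAPPINGS entry can enable several
# at once, e.g. WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE (2 | 8 == 10) as used for
# the Yale locks below.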
DEVICE_MAPPINGS = {
POLYCONTROL_DANALOCK_V2_BTZE_LOCK: WORKAROUND_V2BTZE,
# Kwikset 914TRL ZW500 99100-078
(0x0090, 0x440): WORKAROUND_DEVICE_STATE,
(0x0090, 0x446): WORKAROUND_DEVICE_STATE,
(0x0090, 0x238): WORKAROUND_DEVICE_STATE,
# Kwikset 888ZW500-15S Smartcode 888
(0x0090, 0x541): WORKAROUND_DEVICE_STATE,
# Kwikset 916
(0x0090, 0x0001): WORKAROUND_DEVICE_STATE,
# Kwikset Obsidian
(0x0090, 0x0742): WORKAROUND_DEVICE_STATE,
# Yale Locks
# Yale YRD210, YRD220, YRL220
(0x0129, 0x0000): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD210, YRD220
(0x0129, 0x0209): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRL210, YRL220
(0x0129, 0x0409): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD256
(0x0129, 0x0600): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD110, YRD120
(0x0129, 0x0800): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD446
(0x0129, 0x1000): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRL220
(0x0129, 0x2132): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
(0x0129, 0x3CAC): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD210, YRD220
(0x0129, 0xAA00): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD220
(0x0129, 0xFFFF): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRL256
(0x0129, 0x0F00): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Yale YRD220 (Older Yale products with incorrect vendor ID)
(0x0109, 0x0000): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
# Schlage BE469
(0x003B, 0x5044): WORKAROUND_DEVICE_STATE | WORKAROUND_TRACK_MESSAGE,
# Schlage FE599NX
(0x003B, 0x504C): WORKAROUND_DEVICE_STATE,
}
LOCK_NOTIFICATION = {
"1": "Manual Lock",
"2": "Manual Unlock",
"5": "Keypad Lock",
"6": "Keypad Unlock",
"11": "Lock Jammed",
"254": "Unknown Event",
}
NOTIFICATION_RF_LOCK = "3"
NOTIFICATION_RF_UNLOCK = "4"
LOCK_NOTIFICATION[NOTIFICATION_RF_LOCK] = "RF Lock"
LOCK_NOTIFICATION[NOTIFICATION_RF_UNLOCK] = "RF Unlock"
LOCK_ALARM_TYPE = {
"9": "Deadbolt Jammed",
"16": "Unlocked by Bluetooth ",
"18": "Locked with Keypad by user ",
"19": "Unlocked with Keypad by user ",
"21": "Manually Locked ",
"22": "Manually Unlocked ",
"27": "Auto re-lock",
"33": "User deleted: ",
"112": "Master code changed or User added: ",
"113": "Duplicate Pin-code: ",
"130": "RF module, power restored",
"144": "Unlocked by NFC Tag or Card by user ",
"161": "Tamper Alarm: ",
"167": "Low Battery",
"168": "Critical Battery Level",
"169": "Battery too low to operate",
}
ALARM_RF_LOCK = "24"
ALARM_RF_UNLOCK = "25"
LOCK_ALARM_TYPE[ALARM_RF_LOCK] = "Locked by RF"
LOCK_ALARM_TYPE[ALARM_RF_UNLOCK] = "Unlocked by RF"
MANUAL_LOCK_ALARM_LEVEL = {
"1": "by Key Cylinder or Inside thumb turn",
"2": "by Touch function (lock and leave)",
}
TAMPER_ALARM_LEVEL = {"1": "Too many keypresses", "2": "Cover removed"}
LOCK_STATUS = {
"1": True,
"2": False,
"3": True,
"4": False,
"5": True,
"6": False,
"9": False,
"18": True,
"19": False,
"21": True,
"22": False,
"24": True,
"25": False,
"27": True,
}
ALARM_TYPE_STD = ["18", "19", "33", "112", "113", "144"]
SET_USERCODE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
vol.Required(ATTR_USERCODE): cv.string,
}
)
GET_USERCODE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
}
)
CLEAR_USERCODE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
}
)
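# Example service payloads accepted by the schemas above (illustrative node/slot values):
#   lock.set_usercode:   {"node_id": 18, "code_slot": 1, "usercode": "1234"}
#   lock.clear_usercode: {"node_id": 18, "code_slot": 1}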
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Lock from Config Entry."""
@callback
def async_add_lock(lock):
"""Add Z-Wave Lock."""
async_add_entities([lock])
async_dispatcher_connect(hass, "zwave_new_lock", async_add_lock)
network = hass.data[const.DATA_NETWORK]
def set_usercode(service):
"""Set the usercode to index X on the lock."""
node_id = service.data.get(const.ATTR_NODE_ID)
lock_node = network.nodes[node_id]
code_slot = service.data.get(ATTR_CODE_SLOT)
usercode = service.data.get(ATTR_USERCODE)
for value in lock_node.get_values(
class_id=const.COMMAND_CLASS_USER_CODE
).values():
if value.index != code_slot:
continue
if len(str(usercode)) < 4:
_LOGGER.error(
"Invalid code provided: (%s) "
"usercode must be at least 4 and at most"
" %s digits",
usercode,
len(value.data),
)
break
value.data = str(usercode)
break
def get_usercode(service):
"""Get a usercode at index X on the lock."""
node_id = service.data.get(const.ATTR_NODE_ID)
lock_node = network.nodes[node_id]
code_slot = service.data.get(ATTR_CODE_SLOT)
for value in lock_node.get_values(
class_id=const.COMMAND_CLASS_USER_CODE
).values():
if value.index != code_slot:
continue
_LOGGER.info("Usercode at slot %s is: %s", value.index, value.data)
break
def clear_usercode(service):
"""Set usercode to slot X on the lock."""
node_id = service.data.get(const.ATTR_NODE_ID)
lock_node = network.nodes[node_id]
code_slot = service.data.get(ATTR_CODE_SLOT)
data = ""
for value in lock_node.get_values(
class_id=const.COMMAND_CLASS_USER_CODE
).values():
if value.index != code_slot:
continue
            data = "\0" * len(value.data)
_LOGGER.debug("Data to clear lock: %s", data)
value.data = data
_LOGGER.info("Usercode at slot %s is cleared", value.index)
break
hass.services.async_register(
DOMAIN, SERVICE_SET_USERCODE, set_usercode, schema=SET_USERCODE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_GET_USERCODE, get_usercode, schema=GET_USERCODE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_CLEAR_USERCODE, clear_usercode, schema=CLEAR_USERCODE_SCHEMA
)
def get_device(node, values, **kwargs):
"""Create Z-Wave entity device."""
return ZwaveLock(values)
class ZwaveLock(ZWaveDeviceEntity, LockEntity):
"""Representation of a Z-Wave Lock."""
def __init__(self, values):
"""Initialize the Z-Wave lock device."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self._state = None
self._notification = None
self._lock_status = None
self._v2btze = None
self._state_workaround = False
self._track_message_workaround = False
self._previous_message = None
self._alarm_type_workaround = False
# Enable appropriate workaround flags for our device
# Make sure that we have values for the key before converting to int
if self.node.manufacturer_id.strip() and self.node.product_id.strip():
specific_sensor_key = (
int(self.node.manufacturer_id, 16),
int(self.node.product_id, 16),
)
if specific_sensor_key in DEVICE_MAPPINGS:
workaround = DEVICE_MAPPINGS[specific_sensor_key]
if workaround & WORKAROUND_V2BTZE:
self._v2btze = 1
_LOGGER.debug("Polycontrol Danalock v2 BTZE workaround enabled")
if workaround & WORKAROUND_DEVICE_STATE:
self._state_workaround = True
_LOGGER.debug("Notification device state workaround enabled")
if workaround & WORKAROUND_TRACK_MESSAGE:
self._track_message_workaround = True
_LOGGER.debug("Message tracking workaround enabled")
if workaround & WORKAROUND_ALARM_TYPE:
self._alarm_type_workaround = True
_LOGGER.debug("Alarm Type device state workaround enabled")
self.update_properties()
def update_properties(self):
"""Handle data changes for node values."""
self._state = self.values.primary.data
_LOGGER.debug("lock state set to %s", self._state)
if self.values.access_control:
notification_data = self.values.access_control.data
self._notification = LOCK_NOTIFICATION.get(str(notification_data))
if self._state_workaround:
self._state = LOCK_STATUS.get(str(notification_data))
_LOGGER.debug("workaround: lock state set to %s", self._state)
if self._v2btze:
if (
self.values.v2btze_advanced
and self.values.v2btze_advanced.data == CONFIG_ADVANCED
):
self._state = LOCK_STATUS.get(str(notification_data))
_LOGGER.debug(
"Lock state set from Access Control value and is %s, get=%s",
str(notification_data),
self.state,
)
if self._track_message_workaround:
this_message = self.node.stats["lastReceivedMessage"][5]
if this_message == const.COMMAND_CLASS_DOOR_LOCK:
self._state = self.values.primary.data
_LOGGER.debug("set state to %s based on message tracking", self._state)
if self._previous_message == const.COMMAND_CLASS_DOOR_LOCK:
if self._state:
self._notification = LOCK_NOTIFICATION[NOTIFICATION_RF_LOCK]
self._lock_status = LOCK_ALARM_TYPE[ALARM_RF_LOCK]
else:
self._notification = LOCK_NOTIFICATION[NOTIFICATION_RF_UNLOCK]
self._lock_status = LOCK_ALARM_TYPE[ALARM_RF_UNLOCK]
return
self._previous_message = this_message
if not self.values.alarm_type:
return
alarm_type = self.values.alarm_type.data
if self.values.alarm_level:
alarm_level = self.values.alarm_level.data
else:
alarm_level = None
if not alarm_type:
return
if self._alarm_type_workaround:
self._state = LOCK_STATUS.get(str(alarm_type))
_LOGGER.debug(
"workaround: lock state set to %s -- alarm type: %s",
self._state,
str(alarm_type),
)
if alarm_type == 21:
self._lock_status = (
f"{LOCK_ALARM_TYPE.get(str(alarm_type))}"
f"{MANUAL_LOCK_ALARM_LEVEL.get(str(alarm_level))}"
)
return
if str(alarm_type) in ALARM_TYPE_STD:
self._lock_status = f"{LOCK_ALARM_TYPE.get(str(alarm_type))}{alarm_level}"
return
if alarm_type == 161:
self._lock_status = (
f"{LOCK_ALARM_TYPE.get(str(alarm_type))}"
f"{TAMPER_ALARM_LEVEL.get(str(alarm_level))}"
)
return
if alarm_type != 0:
self._lock_status = LOCK_ALARM_TYPE.get(str(alarm_type))
return
@property
def is_locked(self):
"""Return true if device is locked."""
return self._state
def lock(self, **kwargs):
"""Lock the device."""
self.values.primary.data = True
def unlock(self, **kwargs):
"""Unlock the device."""
self.values.primary.data = False
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
data = super().device_state_attributes
if self._notification:
data[ATTR_NOTIFICATION] = self._notification
if self._lock_status:
data[ATTR_LOCK_STATUS] = self._lock_status
return data
| tchellomello/home-assistant | homeassistant/components/zwave/lock.py | Python | apache-2.0 | 13,338 |
import fauxfactory
import pytest
from riggerlib import recursive_update
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils.appliance import ViaSSUI
from cfme.utils.appliance import ViaUI
from cfme.utils.generators import random_vm_name
from cfme.utils.update import update
pytestmark = [
test_requirements.quota,
pytest.mark.usefixtures('setup_provider_modscope'),
pytest.mark.provider([OpenStackProvider],
required_fields=[['provisioning', 'image']], scope="module")
]
@pytest.fixture
def admin_email(appliance):
"""Required for user quota tagging services to work, as it's mandatory for it's functioning."""
user = appliance.collections.users
admin = user.instantiate(name='Administrator')
with update(admin):
admin.email = fauxfactory.gen_email()
yield
with update(admin):
admin.email = ''
@pytest.fixture
def vm_name():
return random_vm_name(context='quota')
@pytest.fixture
def template_name(provisioning):
return provisioning["image"]["name"]
@pytest.fixture
def prov_data(provider, vm_name, template_name, provisioning):
if provider.one_of(OpenStackProvider):
return {
"catalog": {"vm_name": vm_name, "catalog_name": {"name": template_name}},
"environment": {"automatic_placement": True},
"properties": {
"instance_type": partial_match(
provisioning.get("instance_type2", "Instance type is not available")
)
},
}
@pytest.fixture(scope='module')
def domain(appliance):
domain = appliance.collections.domains.create(
fauxfactory.gen_alphanumeric(start="domain_"),
fauxfactory.gen_alphanumeric(15, start="domain_desc_"),
enabled=True
)
yield domain
if domain.exists:
domain.delete()
@pytest.fixture
def catalog_item(appliance, provider, dialog, catalog, prov_data):
collection = appliance.collections.catalog_items
catalog_item = collection.create(
provider.catalog_item_type,
name=fauxfactory.gen_alphanumeric(15, start="cat_item_"),
description='test catalog',
display_in=True,
catalog=catalog,
dialog=dialog,
prov_data=prov_data)
yield catalog_item
if catalog_item.exists:
catalog_item.delete()
@pytest.fixture(scope='module')
def max_quota_test_instance(appliance, domain):
miq = appliance.collections.domains.instantiate('ManageIQ')
original_instance = (
miq.namespaces.instantiate('System')
.namespaces.instantiate('CommonMethods')
.classes.instantiate('QuotaMethods')
.instances.instantiate('quota_source')
)
original_instance.copy_to(domain=domain)
original_instance = (
miq.namespaces.instantiate('System')
.namespaces.instantiate('CommonMethods')
.classes.instantiate('QuotaStateMachine')
.instances.instantiate('quota')
)
original_instance.copy_to(domain=domain)
instance = (
domain.namespaces.instantiate('System')
.namespaces.instantiate('CommonMethods')
.classes.instantiate('QuotaStateMachine')
.instances.instantiate('quota')
)
return instance
def set_entity_quota_source(max_quota_test_instance, entity):
with update(max_quota_test_instance):
max_quota_test_instance.fields = {'quota_source_type': {'value': entity}}
@pytest.fixture(params=['user', 'group'])
def set_entity_quota_source_change(max_quota_test_instance, request):
entity_value = request.param
with update(max_quota_test_instance):
max_quota_test_instance.fields = {'quota_source_type': {'value': entity_value}}
@pytest.fixture(params=[('groups', 'group', 'EvmGroup-super_administrator'),
('users', 'user', 'Administrator')], ids=['group', 'user'], scope='module')
def entities(appliance, request, max_quota_test_instance):
collection, entity, description = request.param
set_entity_quota_source(max_quota_test_instance, entity)
return getattr(appliance.collections, collection).instantiate(description)
@pytest.fixture(scope='function')
def set_entity_quota_tag(request, entities, appliance):
tag, value = request.param
tag = appliance.collections.categories.instantiate(
display_name=tag).collections.tags.instantiate(
display_name=value)
entities.add_tag(tag)
yield
# will refresh page as navigation to configuration is blocked if alert are on requests page
appliance.server.browser.refresh()
entities.remove_tag(tag)
@pytest.mark.parametrize(
['set_entity_quota_tag'],
[
[('Quota - Max Memory *', '1GB')],
[('Quota - Max Storage *', '10GB')],
[('Quota - Max CPUs *', '1')]
],
indirect=['set_entity_quota_tag'],
ids=['max_memory', 'max_storage', 'max_cpu']
)
def test_quota_tagging_cloud_via_lifecycle(request, appliance, provider, prov_data,
set_entity_quota_tag, template_name, vm_name):
"""Test Group and User Quota in UI using tagging
Polarion:
assignee: ghubale
casecomponent: Cloud
initialEstimate: 1/6h
tags: quota
"""
recursive_update(prov_data, {
'request': {'email': 'test_{}@example.com'.format(fauxfactory.gen_alphanumeric())}})
prov_data.update({'template_name': template_name})
appliance.collections.cloud_instances.create(vm_name, provider, prov_data)
# nav to requests page to check quota validation
request_description = 'Provision from [{template}] to [{vm}]'.format(template=template_name,
vm=vm_name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
@pytest.mark.parametrize('context', [ViaSSUI, ViaUI])
@pytest.mark.parametrize(
['set_entity_quota_tag'],
[
[('Quota - Max Memory *', '1GB')],
[('Quota - Max Storage *', '10GB')],
[('Quota - Max CPUs *', '1')]
],
indirect=['set_entity_quota_tag'],
ids=['max_memory', 'max_storage', 'max_cpu']
)
def test_quota_tagging_cloud_via_services(appliance, request, context, admin_email,
set_entity_quota_tag, catalog_item):
"""Test Group and User Quota in UI and SSUI using tagging
Polarion:
assignee: ghubale
casecomponent: Cloud
initialEstimate: 1/6h
tags: quota
"""
with appliance.context.use(context):
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
if context is ViaSSUI:
service_catalogs.add_to_shopping_cart()
service_catalogs.order()
# nav to requests page to check quota validation
request_description = 'Provisioning Service [{0}] from [{0}]'.format(catalog_item.name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
def test_cloud_quota_by_lifecycle(request, appliance, provider, set_entity_quota_source_change,
prov_data, vm_name, template_name):
"""Testing cloud quota for user and group by provisioning instance via lifecycle
Polarion:
assignee: ghubale
initialEstimate: 1/4h
casecomponent: Cloud
tags: quota
testSteps:
1. Navigate to Automation > automate > Explorer
2. Create new Domain and copy 'quota' and 'quota_source' method
            3. Change 'value' of 'quota_source_type' to 'user' or 'group' (one by one) in 'quota'
method
4. Provision instance via lifecycle
5. Make sure that provisioned 'template' is having more than assigned quota
6. Check whether instance provision 'Denied' with reason 'Quota Exceeded'
"""
recursive_update(prov_data, {
'request': {'email': 'test_{}@example.com'.format(fauxfactory.gen_alphanumeric())}})
prov_data.update({'template_name': template_name})
appliance.collections.cloud_instances.create(vm_name, provider, prov_data)
# nav to requests page to check quota validation
request_description = 'Provision from [{template}] to [{vm}]'.format(template=template_name,
vm=vm_name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
@pytest.mark.parametrize('context', [ViaSSUI, ViaUI])
def test_quota_cloud_via_services(appliance, request, admin_email, entities, prov_data,
catalog_item, context):
"""This test case verifies the quota assigned by automation method for user and group
is working correctly for the cloud providers.
Polarion:
assignee: ghubale
initialEstimate: 1/4h
casecomponent: Cloud
tags: quota
testSteps:
1. Navigate to Automation > Automate > Explorer
2. Add quota automation methods to domain
3. Change 'quota_source_type' to 'user' or 'group'
4. Test quota by provisioning instances over quota limit via UI or
SSUI for user and group
5. Check whether quota is exceeded or not
"""
with appliance.context.use(context):
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
if context is ViaSSUI:
service_catalogs.add_to_shopping_cart()
service_catalogs.order()
# nav to requests page to check quota validation
request_description = ("Provisioning Service [{catalog_item_name}] from [{catalog_item_name}]"
.format(catalog_item_name=catalog_item.name))
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
request.addfinalizer(provision_request.remove_request)
assert provision_request.row.reason.text == "Quota Exceeded"
| izapolsk/integration_tests | cfme/tests/cloud/test_quota_tagging.py | Python | gpl-2.0 | 10,743 |
"""Ensure credentials are preserved through the authorization.
The Authorization Code Grant will need to preserve state as well as redirect
uri and the Implicit Grant will need to preserve state.
"""
from __future__ import absolute_import, unicode_literals
import json
import mock
from .test_utils import get_query_credentials, get_fragment_credentials
from ....unittest import TestCase
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2 import WebApplicationServer, MobileApplicationServer
from oauthlib.oauth2.rfc6749 import errors
class PreservationTest(TestCase):
DEFAULT_REDIRECT_URI = 'http://i.b./path'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = self.DEFAULT_REDIRECT_URI
self.validator.authenticate_client.side_effect = self.set_client
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
def set_state(self, state):
def set_request_state(client_id, code, client, request):
request.state = state
return True
return set_request_state
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_state_preservation(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc&response_type='
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + 'code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
self.validator.validate_code.side_effect = self.set_state('xyz')
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['state'], 'xyz')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + 'token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['state'][0], 'xyz')
def test_redirect_uri_preservation(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
redirect_uri = 'http://i.b/path'
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + '&response_type=code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
# confirm_redirect_uri should return false if the redirect uri
# was given in the authorization but not in the token request.
self.validator.confirm_redirect_uri.return_value = False
code = get_query_credentials(h['Location'])['code'][0]
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['error'], 'access_denied')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + '&response_type=token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
def test_invalid_redirect_uri(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
self.validator.validate_redirect_uri.return_value = False
# authorization grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
def test_default_uri(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
self.validator.get_default_redirect_uri.return_value = None
# authorization grant
self.assertRaises(errors.MissingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MissingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
| nirmeshk/oh-mainline | vendor/packages/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py | Python | agpl-3.0 | 4,860 |
from __future__ import print_function
import os, json, zipfile
import sublime, sublime_plugin
"""
Copyright (C) 2015 Justin Decker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Original Copyright (c) 2014 Kim Blomqvist, kblomqvist.github.io
SublimeAVR plug-in for Sublime Text
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
PLUGIN_NAME = "ion"
PLUGIN_PATH = os.path.dirname(os.path.abspath(__file__))
class IonNewProjectCommand(sublime_plugin.WindowCommand):
def run(self, *args, **kwargs):
self.settings = sublime.load_settings(PLUGIN_NAME + ".sublime-settings")
# Initial location
self.initial_location = os.path.expanduser('~')
if self.window.folders():
self.initial_location = self.window.folders()[0]
# Ask location
self.window.show_input_panel(
"Create project in folder: ",
self.initial_location,
self.location_resolved,
None,
None
)
def location_resolved(self, location):
self.location = location
self.settings.set("location", location)
try:
self.new_project = True
os.makedirs(location)
except:
self.new_project = False
try:
with open(location + "/ion.sublime-project"):
dialog = "i/o node project found from \"%s\" ...\n\nDo you want to update it?" % location
except:
self.new_project = True
dialog = "Location \"%s\" already exists ...\n\nStill want to start i/o node project there?" % location
if not sublime.ok_cancel_dialog(dialog):
return
if self.new_project:
self.templates = []
self.templates_search_path = os.path.join(PLUGIN_PATH, "templates")
for f in os.listdir(self.templates_search_path):
if f.endswith(".zip"):
template_name = f.replace(".zip", "")
template_name = template_name.replace("_", " ")
self.templates.append(template_name)
if not self.templates:
print("%s: Cannot find a template" % PLUGIN_NAME)
sublime.status_message("%s: Cannot find a template." % PLUGIN_NAME)
# Ask template
self.window.show_quick_panel(self.templates, self.template_resolved)
else:
self.process_project_file()
def template_resolved(self, index):
if index == -1:
if os.listdir(self.location) == []:
# Avoid polluting user's file system with empty folders
os.rmdir(self.location)
return
self.template = os.path.join(self.templates_search_path, self.templates[index].replace(" ", "_") + ".zip")
try:
zf = zipfile.ZipFile(self.template)
zf.extractall(self.location)
zf.close()
except:
print("%S: Could not extract the template '%s'" % (PLUGIN_NAME, self.template))
return
self.process_project_file()
def process_project_file(self):
projectfile = IonSublimeProject(self.settings)
projectfile.save()
verb = "created" if self.new_project else "updated"
print("%s: Project %s in '%s'" % (PLUGIN_NAME, verb, self.location))
sublime.status_message("%s: Project %s in '%s'" % (PLUGIN_NAME, verb, self.location))
class ChooseSerialPortCommand(sublime_plugin.WindowCommand):
def run(self, serial_port):
app.constant.sketch_settings.set('serial_port', serial_port)
def is_checked(self, serial_port):
state = False
chosen_serial_port = app.constant.sketch_settings.get('serial_port', -1)
if serial_port == chosen_serial_port:
state = True
return state
class IonSublimeProject():
def __init__(self, settings):
self.settings = settings
def save(self):
try:
location = self.settings.get("location")
f = open(location + "/ion.sublime-project", 'w+')
except:
return False
try:
project = json.load(f)
project["build_systems"]["env"].update(self.template()["build_systems"]["env"])
project["settings"]["sublimeclang_additional_language_options"].update(self.template()["settings"]["sublimeclang_additional_language_options"])
except:
project = self.template()
# Save SublimeAVR.sublime-project
f.seek(0)
f.write(json.dumps(project, sort_keys=False, indent=4))
f.truncate()
f.close()
return True
def template(self):
template = {
"build_systems":
[
{
"name": "i/o node",
"cmd": [
"make"
],
"path": os.environ['PATH'],
"working_dir": "${project_path}",
"selector": "source.c, source.asm",
}
],
"folders":
[
{
"path": "."
}
]
}
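        # "path" in the build system above is seeded from the current PATH environment variable;
        # a toolchain path from the plugin settings is prepended to it below when one is configured.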
if self.settings.get("path", ""):
template['build_systems'][0]['path'] = os.path.normpath(self.settings.get("path")) + os.pathsep + template['build_systems'][0]['path']
        return template
 | justind000/ionode_sublime_plugin | ion.py | Python | gpl-3.0 | 6,012 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20151201_1922'),
]
operations = [
migrations.AlterField(
model_name='post',
name='post_date',
field=models.DateTimeField(),
),
]
| I-prefer-the-front-end/I-prefer-the-front-end | iptfe/blog/migrations/0006_auto_20151201_1923.py | Python | mit | 391 |
# -*- coding: utf-8 -*-
"""
Xlsx xml-parser for Reporting Services.
Converts text to formulae, e.g. '=SUM(A1:A10)'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Notice: Only Reporting Services 2012 (or higher) supports exporting reports to
xlsx format.
"""
from __future__ import unicode_literals
import sys
import os
import shutil
import time
import re
from zipfile import ZipFile, ZIP_DEFLATED
from lxml import etree
from copy import deepcopy
from xlsx_rc_convertor import convert_rc_formula, get_cell_format, col2str
class RecursiveFileIterator:
def __init__(self, *root_dirs):
self.dir_queue = list(root_dirs)
self.include_dirs = None
self.file_queue = []
def __getitem__(self, index):
while not len(self.file_queue):
self.next_dir()
result = self.file_queue[0]
del self.file_queue[0]
return result
def next_dir(self):
dir = self.dir_queue[0] # fails with IndexError, which is fine
# for iterator interface
del self.dir_queue[0]
list = os.listdir(dir)
join = os.path.join
isdir = os.path.isdir
for basename in list:
full_path = join(dir, basename)
if isdir(full_path):
self.dir_queue.append(full_path)
if self.include_dirs:
self.file_queue.append(full_path)
else:
self.file_queue.append(full_path)
class ParseXlsx:
""" Parse xlsx file and replace formulas strings to formulas format """
def __init__(self, file_name, task_id=0, show_log=False, run=False, **kwargs):
""" Init start parameters """
self.file_name = file_name
self.task_id = task_id
self.main_temp_dir = 'temp'
self.show_log = show_log
self.shared_strings = []
self.style_list = None
# Print view params
self.print_view = kwargs.get('print_view')
self.landscape = 'landscape' if kwargs.get('landscape') else 'portrait'
self.fit_to_width = str(int(kwargs.get('fit_to_width', 0)))
self.fit_to_height = str(int(kwargs.get('fit_to_height', 0)))
self.fix_area = kwargs.get('fix_area', [])
if run:
self.main()
def main(self):
""" Read xlsx file, extract files from it and parse each sheet """
if not os.path.exists(self.file_name):
print('Source file not found. Exit.')
else:
if not os.path.isdir(self.main_temp_dir):
self.print_log('Creating temp directory')
os.mkdir(os.path.join(os.getcwd(), self.main_temp_dir))
os.chdir(self.main_temp_dir)
# Create temp dir
temp_dir = str(self.task_id) + str(time.time())
os.mkdir(os.path.join(os.getcwd(), temp_dir))
os.chdir(temp_dir)
# Extract xlsx and process it
zip_file_name = os.path.join("../" * 2, self.file_name)
with ZipFile(zip_file_name, 'a', ZIP_DEFLATED) as report_zip:
report_zip.extractall(os.getcwd())
# Check if file generated with sharedString or with inlineStr
if os.path.isfile('xl/sharedStrings.xml'):
self.print_log('Found sharedStrings')
# Extract all strings from sharedStrings.xml
shared_string_xml_object = etree.parse('xl/sharedStrings.xml')
si_tags = shared_string_xml_object.getroot().xpath("//*[local-name()='sst']/*[local-name()='si']")
for si_tag in si_tags:
t_tag = si_tag.xpath("*[local-name()='t']")
if not t_tag:
self.shared_strings.append(None)
else:
self.shared_strings.append(t_tag[0].text)
else:
self.print_log('sharedStrings not found')
# Process each sheet
for sheet_file_name in report_zip.namelist():
if 'xl/worksheets/sheet' in sheet_file_name:
self.parse_sheet(sheet_file_name)
self.print_log('Deleting source file')
os.stat(zip_file_name)
os.remove(zip_file_name)
with ZipFile(zip_file_name, "w") as cur_file:
for name in RecursiveFileIterator('.'):
self.print_log('Writing to new Excel file. File -> {0}'.format(name))
if os.path.isfile(name):
cur_file.write(name, name, ZIP_DEFLATED)
os.chdir('..')
self.print_log('Removing temp files')
shutil.rmtree(os.path.join(os.getcwd(), temp_dir))
# Return to script's work directory
os.chdir(sys.path[0])
self.print_log('Done')
def parse_sheet(self, sheet_file_name):
""" Parse sheet and replace formulas strings to formulas format """
styles_file = 'xl/styles.xml'
self.style_list = etree.parse(styles_file)
sheet_xml_object = etree.parse(sheet_file_name)
# Removing NaN values
v_nan_tags = sheet_xml_object.getroot().xpath(
"//*[local-name()='c']/*[local-name()='v' and text()='NaN']"
)
for v_nan_tag in v_nan_tags:
c_nan_tag = v_nan_tag.xpath("ancestor::*[local-name()='c']")
self.print_log("Found NaN value in cell {0}".format(c_nan_tag[0].get("r")))
v_nan_tag.text = "0"
# If not found sharedStrings, then looking for inlineStr c tags
if not len(self.shared_strings):
c_tags = sheet_xml_object.getroot().xpath(
"//*[local-name()='sheetData']/*[local-name()='row']/*[local-name()='c'][@t='inlineStr']"
)
for c_tag in c_tags:
is_tag = c_tag.xpath("*[local-name()='is']")
t_tag = c_tag.xpath("*[local-name()='is']/*[local-name()='t']")
if len(t_tag):
cur_inline_string = t_tag[0].text
if cur_inline_string and cur_inline_string[0] == '=':
self.print_log(
'Found formula -> {0} in row {1}'.format(cur_inline_string, c_tag.get('r'))
)
right_formula = convert_rc_formula(cur_inline_string[1:], c_tag.get('r'))
if right_formula:
c_tag.remove(is_tag[0])
# Generate formula
self.gen_formula_tag(c_tag, right_formula)
# Set format to formula's cell
if '@' in cur_inline_string[1:]:
c_tag.attrib['s'] = self.set_format(c_tag.get('s'), get_cell_format(cur_inline_string[1:]))
else:
c_tags = sheet_xml_object.getroot().xpath(
"//*[local-name()='sheetData']/*[local-name()='row']/*[local-name()='c'][@t='s']"
)
for c_tag in c_tags:
v_tag = c_tag.xpath("*[local-name()='v']")
if self.shared_strings[int(v_tag[0].text)]:
cur_shared_string = self.shared_strings[int(v_tag[0].text)]
if cur_shared_string[0] == '=':
self.print_log(
'Found formula -> {0} in row {1}'.format(cur_shared_string, c_tag.get('r'))
)
right_formula = convert_rc_formula(cur_shared_string[1:], c_tag.get('r'))
if right_formula:
c_tag.remove(v_tag[0])
# Generate formula
self.gen_formula_tag(c_tag, right_formula)
# Set format to formula's cell
if '@' in cur_shared_string[1:]:
c_tag.attrib['s'] = self.set_format(c_tag.get('s'), get_cell_format(cur_shared_string[1:]))
# Save changes in styles.xml
self.save_xml_to_file(self.style_list, styles_file)
# Set sheet styles
sh_num = int(re.compile(r'\d+').findall(sheet_file_name)[-1])
if self.print_view:
sheet_xml_object = self.set_print_view(sheet_xml_object)
if sh_num <= len(self.fix_area):
sheet_xml_object = self.set_fixed_area(sheet_xml_object, int(self.fix_area[sh_num-1][0]), int(self.fix_area[sh_num-1][1]))
# Save changes in sheetN.xml
self.save_xml_to_file(sheet_xml_object, sheet_file_name)
@staticmethod
def gen_formula_tag(c_tag, right_formula):
""" Generate new formula tag """
c_tag.append(etree.Element("f"))
f_tag = c_tag.xpath("*[local-name()='f']")
f_tag[0].text = right_formula
del c_tag.attrib["t"]
def print_log(self, message):
""" Show log messages during work """
if self.show_log:
print(message)
def set_format(self, style_id, new_format):
""" Set formula's cell format """
new_format = new_format.replace("'", '"')
# Find current common format
cell_xfs = self.style_list.getroot().xpath(
"//*[local-name()='cellXfs']"
)[0]
current_xf = deepcopy(cell_xfs.xpath("*[local-name()='xf']")[int(style_id)])
# Append copied common format
cell_xfs.append(current_xf)
# Save last item's id as new style_id
style_id = cell_xfs.attrib['count']
# Increase cellXfs' count
cell_xfs.attrib['count'] = str(int(cell_xfs.get('count')) + 1)
# Get new common format
current_xf = cell_xfs.xpath("*[local-name()='xf']")[-1]
# Edit numFmts block
num_fmts = self.style_list.getroot().xpath(
"//*[local-name()='numFmts'][@count]"
)[0]
# Check on existing current style
exists_fmt = num_fmts.xpath(
"""*[local-name()='numFmt'][@formatCode='[$-010419]{0}']""".format(new_format)
)
if not exists_fmt:
# Add new numFmt
num_fmts.append(etree.Element('numFmt'))
new_item = num_fmts.xpath("*[local-name()='numFmt']")[-1]
new_item.attrib['numFmtId'] = str(style_id)
new_item.attrib['formatCode'] = """[$-010419]{0}""".format(new_format)
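            # e.g. (illustrative) a new_format of '# ##0.00' is stored here as the
            # formatCode '[$-010419]# ##0.00', i.e. the format string with a locale prefix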
# Increase numFmts count
num_fmts.attrib['count'] = str(int(num_fmts.get('count')) + 1)
# Set format number's id to new common style
current_xf.attrib["numFmtId"] = str(
exists_fmt[0].get('numFmtId') if exists_fmt else style_id
)
return style_id
def set_print_view(self, sheet_object):
""" Set pageSetup-tag """
# Set fixToPage property to True
sheet_pr = sheet_object.getroot().xpath("//*[local-name()='sheetPr']")
if not len(sheet_pr):
sheet_object.getroot().xpath("//")[0].insert(0, etree.Element('sheetPr'))
sheet_pr = sheet_object.getroot().xpath("//*[local-name()='sheetPr']")[0]
else:
sheet_pr = sheet_pr[0]
sheet_pr.append(etree.Element('pageSetUpPr', {'fitToPage': '1'}))
# Set orientation to landscape and fit to width and height to True
page_setup = sheet_object.getroot().xpath("//*[local-name()='pageSetup']")[0]
page_setup.attrib['orientation'] = self.landscape
page_setup.attrib['fitToWidth'] = self.fit_to_width
page_setup.attrib['fitToHeight'] = self.fit_to_height
return sheet_object
@staticmethod
def set_fixed_area(sheet_object, col=0, row=0):
""" Set fixed area to sheet """
# Get sheetViews tag
sheet_views = sheet_object.getroot().xpath("//*[local-name()='sheetViews']")
if not len(sheet_views):
sheet_object.getroot().xpath("//")[0].insert(0, etree.Element('sheetViews'))
sheet_views = sheet_object.getroot().xpath("//*[local-name()='sheetViews']")[0]
else:
sheet_views = sheet_views[0]
# Get sheetView tag
cur_sheet_view = sheet_views.xpath("*[local-name()='sheetView']")
if not len(cur_sheet_view):
sheet_views.insert(0, etree.Element('sheetView'))
cur_sheet_view = sheet_views.xpath("*[local-name()='sheetView']")[0]
else:
cur_sheet_view = cur_sheet_view[0]
# Add new pane to fix current area
cur_sheet_view.append(etree.Element('pane', {
'xSplit': str(col),
'ySplit': str(row),
'topLeftCell': "{col}{row}".format(**dict(
col=col2str(col+1, run=1),
row=row+1,
)),
'activePane': "bottomRight",
'state': "frozen",
}))
return sheet_object
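    # Illustrative result (assumes the col2str helper maps column 2 to "B"):
    # set_fixed_area(sheet, col=1, row=1) appends a pane element equivalent to
    #   <pane xSplit="1" ySplit="1" topLeftCell="B2" activePane="bottomRight" state="frozen"/>
    # which freezes the first column and the first row of the sheet.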
@staticmethod
def save_xml_to_file(xml_object, file_name):
""" Save edited XML-object to source-file """
        with open(file_name, "w") as file_handler:
            file_handler.write(etree.tostring(xml_object, pretty_print=True))
if __name__ == '__main__':
file_name = 'KeyIndicatorsTT.xlsx'
ParseXlsx(file_name, show_log=True, run=True)
os.stat(file_name) | marat-/python-reporting-services | parse_xlsx_xml.py | Python | apache-2.0 | 13,350 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, api, models
from math import ceil
class stock_print_stock_voucher(models.TransientModel):
_name = 'stock.print_stock_voucher'
_description = "Print Stock Voucher"
@api.model
def _get_picking(self):
active_id = self._context.get('active_id', False)
return self.env['stock.picking'].browse(active_id)
@api.model
def _get_book(self):
picking = self._get_picking()
return picking.picking_type_id.book_id
picking_id = fields.Many2one(
'stock.picking',
default=_get_picking,
required=True,
)
printed = fields.Boolean(
compute='_get_printed',
)
book_id = fields.Many2one(
'stock.book', 'Book', default=_get_book,
)
next_voucher_number = fields.Integer(
'Next Voucher Number',
related='book_id.sequence_id.number_next_actual', readonly=True,
)
estimated_number_of_pages = fields.Integer(
'Number of Pages',
)
lines_per_voucher = fields.Integer(
'Lines Per Voucher',
related='book_id.lines_per_voucher',
)
@api.depends('picking_id', 'picking_id.voucher_ids')
def _get_printed(self):
printed = False
if self.picking_id.voucher_ids:
printed = True
self.printed = printed
@api.onchange('book_id', 'picking_id')
def get_estimated_number_of_pages(self):
lines_per_voucher = self.lines_per_voucher
if lines_per_voucher == 0:
estimated_number_of_pages = 1
else:
operations = len(self.picking_id.pack_operation_ids)
estimated_number_of_pages = ceil(
float(operations) / float(lines_per_voucher)
)
self.estimated_number_of_pages = estimated_number_of_pages
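        # Worked example (illustrative values): a picking with 25 pack operations and a
        # book configured for 10 lines per voucher gives ceil(25 / 10.0) = 3 pages.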
@api.multi
def do_print_voucher(self):
return self.picking_id.do_print_voucher()
@api.one
def assign_numbers(self):
self.picking_id.assign_numbers(
self.estimated_number_of_pages, self.book_id)
@api.multi
def do_print_and_assign(self):
self.assign_numbers()
return self.do_print_voucher()
@api.multi
def do_clean(self):
self.picking_id.voucher_ids.unlink()
self.picking_id.book_id = False
| sysadminmatmoz/ingadhoc | stock_voucher/wizard/stock_print_remit.py | Python | agpl-3.0 | 2,589 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations:
"""LoadBalancerFrontendIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerFrontendIPConfigurationListResult"]:
"""Gets all the load balancer frontend IP configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.LoadBalancerFrontendIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerFrontendIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
frontend_ip_configuration_name: str,
**kwargs: Any
) -> "_models.FrontendIPConfiguration":
"""Gets load balancer frontend IP configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param frontend_ip_configuration_name: The name of the frontend IP configuration.
:type frontend_ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.FrontendIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'} # type: ignore
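    # Hedged usage sketch (resource names are assumptions, not part of this module):
    #
    #   async with NetworkManagementClient(credential, subscription_id) as client:
    #       ops = client.load_balancer_frontend_ip_configurations
    #       config = await ops.get("my-rg", "my-lb", "my-frontend")
    #       async for item in ops.list("my-rg", "my-lb"):
    #           print(item.name)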
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_load_balancer_frontend_ip_configurations_operations.py | Python | mit | 8,931 |
import nltk
import filemanager
import multiprocessing
import os
import ConfigParser
from assistant import Assistant, Messenger
from nltk.corpus import wordnet
resources_dir = 'resources\\'
login_creds = ConfigParser.SafeConfigParser()
if os.path.isfile(resources_dir + 'login_creds.cfg'):
login_creds.read(resources_dir + 'login_creds.cfg')
else:
print "No logins... creating now"
    with open(resources_dir + 'login_creds.cfg', 'w') as new_login_creds:
        login_creds.write(new_login_creds)
def fb_worker(email, password):
messenger = Messenger(email, password)
messenger.listen()
return
def check_for_word(word, verblist):
    if word in verblist:
return True
target = wordnet.synsets(word)
for synonyms in target:
new_list = [str(x) for x in synonyms.lemma_names()]
if any(i in new_list for i in verblist):
return True
return False
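# Illustrative behaviour (WordNet lookups; exact results depend on the corpus version):
#   check_for_word('open', ['open', 'respond'])   -> True (direct match)
#   check_for_word('launch', ['open', 'start'])   -> True if a synset of 'launch' shares a lemma such as 'start'
#   check_for_word('banana', ['open', 'respond']) -> False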
if __name__ == '__main__':
use_speech = False
nlp_debug = False
jarvis = Assistant(use_speech)
jarvis.say('I have been fully loaded')
input = ''
while (input != 'Goodbye JARVIS'):
try:
input = jarvis.get_input()
if not input == '':
words = nltk.word_tokenize(input)
tagged = nltk.pos_tag(words)
verbs = []
proper_nouns = []
pronouns = []
has_question_word = False
has_question = False
for word in tagged:
if 'VB' in word[1]:
verbs.append(word[0].lower())
elif word[1] == 'NNP':
proper_nouns.append(word[0].lower())
elif 'PRP' in word[1]:
pronouns.append(word[0].lower())
elif word[1][0] == 'W':
has_question_word = True
has_question = has_question_word and len(pronouns) == 0
if nlp_debug:
print 'Tags: {}'.format(tagged)
print 'Verbs: {}'.format(verbs)
if not has_question:
if check_for_word('open', verbs):
jarvis.say(filemanager.try_open_executable(words, tagged))
elif check_for_word('respond', verbs):
if "facebook" in proper_nouns:
if not login_creds.has_section('Facebook'):
login_creds.add_section('Facebook')
login_creds.set('Facebook', 'email', raw_input('Enter your FB email: '))
login_creds.set('Facebook', 'password', raw_input('Enter your FB password: '))
with open(resources_dir + 'login_creds.cfg', 'wb') as configfile:
login_creds.write(configfile)
fb_process = multiprocessing.Process(target = fb_worker, args = (login_creds.get('Facebook', 'email'), login_creds.get('Facebook', 'password')))
fb_process.daemon = True
fb_process.start()
jarvis.say('Answering your Facebook messages.')
else:
jarvis.respond(input)
else:
if not jarvis.search_wolfram(input):
jarvis.respond(input)
except Exception as e:
print e
try:
fb_process.terminate()
fb_process.join()
except NameError:
pass
break
| omn0mn0m/JARVIS | jarvis/jarvis.py | Python | mit | 3,867 |
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, util
from string import Template
def is_manually_generated(f_name):
return f_name in {'control_ping_reply'}
class_reference_template = Template("""jclass ${ref_name}Class;
""")
find_class_invocation_template = Template("""
${ref_name}Class = (jclass)(*env)->NewGlobalRef(env, (*env)->FindClass(env, "org/openvpp/jvpp/dto/${class_name}"));
if ((*env)->ExceptionCheck(env)) {
(*env)->ExceptionDescribe(env);
return JNI_ERR;
}""")
find_class_template = Template("""
${ref_name}Class = (jclass)(*env)->NewGlobalRef(env, (*env)->FindClass(env, "${class_name}"));
if ((*env)->ExceptionCheck(env)) {
(*env)->ExceptionDescribe(env);
return JNI_ERR;
}""")
class_cache_template = Template("""
$class_references
static int cache_class_references(JNIEnv* env) {
$find_class_invocations
return 0;
}""")
def generate_class_cache(func_list):
class_references = []
find_class_invocations = []
for f in func_list:
c_name = f['name']
class_name = util.underscore_to_camelcase_upper(c_name)
ref_name = util.underscore_to_camelcase(c_name)
if util.is_ignored(c_name):
continue
if util.is_reply(class_name):
class_references.append(class_reference_template.substitute(
ref_name=ref_name))
find_class_invocations.append(find_class_invocation_template.substitute(
ref_name=ref_name,
class_name=class_name))
elif util.is_notification(c_name):
class_references.append(class_reference_template.substitute(
ref_name=util.add_notification_suffix(ref_name)))
find_class_invocations.append(find_class_invocation_template.substitute(
ref_name=util.add_notification_suffix(ref_name),
class_name=util.add_notification_suffix(class_name)))
# add exception class to class cache
ref_name = 'callbackException'
class_name = 'org/openvpp/jvpp/VppCallbackException'
class_references.append(class_reference_template.substitute(
ref_name=ref_name))
find_class_invocations.append(find_class_template.substitute(
ref_name=ref_name,
class_name=class_name))
return class_cache_template.substitute(
class_references="".join(class_references), find_class_invocations="".join(find_class_invocations))
# TODO: cache method and field identifiers to achieve better performance
# https://jira.fd.io/browse/HONEYCOMB-42
request_class_template = Template("""
jclass requestClass = (*env)->FindClass(env, "org/openvpp/jvpp/dto/${java_name_upper}");""")
request_field_identifier_template = Template("""
jfieldID ${java_name}FieldId = (*env)->GetFieldID(env, requestClass, "${java_name}", "${jni_signature}");
${jni_type} ${java_name} = (*env)->Get${jni_getter}(env, request, ${java_name}FieldId);
""")
u8_struct_setter_template = Template("""
mp->${c_name} = ${java_name};""")
u16_struct_setter_template = Template("""
mp->${c_name} = clib_host_to_net_u16(${java_name});""")
u32_struct_setter_template = Template("""
mp->${c_name} = clib_host_to_net_u32(${java_name});""")
i32_struct_setter_template = Template("""
    mp->${c_name} = clib_host_to_net_i32(${java_name});""")
u64_struct_setter_template = Template("""
mp->${c_name} = clib_host_to_net_u64(${java_name});""")
u8_array_struct_setter_template = Template("""
{
jsize cnt = (*env)->GetArrayLength (env, ${java_name});
size_t max_size = ${field_length};
if (max_size != 0 && cnt > max_size) cnt = max_size;
(*env)->GetByteArrayRegion(env, ${java_name}, 0, cnt, (jbyte *)mp->${c_name});
}
""")
u32_array_struct_setter_template = Template("""
jint * ${java_name}ArrayElements = (*env)->GetIntArrayElements(env, ${java_name}, NULL);
{
size_t _i;
jsize cnt = (*env)->GetArrayLength (env, ${java_name});
size_t max_size = ${field_length};
if (max_size != 0 && cnt > max_size) cnt = max_size;
for (_i = 0; _i < cnt; _i++) {
mp->${c_name}[_i] = clib_host_to_net_u32(${java_name}ArrayElements[_i]);
}
}
(*env)->ReleaseIntArrayElements (env, ${java_name}, ${java_name}ArrayElements, 0);
""")
vl_api_ip4_fib_counter_t_array_struct_setter_template = Template("""
// vl_api_ip4_fib_counter_t_array_field_setter_template FIXME""")
vl_api_ip6_fib_counter_t_array_struct_setter_template = Template("""
// vl_api_ip6_fib_counter_t_array_field_setter_template FIXME""")
struct_setter_templates = {'u8': u8_struct_setter_template,
'u16': u16_struct_setter_template,
'u32': u32_struct_setter_template,
'i32': u32_struct_setter_template,
'u64': u64_struct_setter_template,
'u8[]': u8_array_struct_setter_template,
'u32[]': u32_array_struct_setter_template,
'vl_api_ip4_fib_counter_t[]': vl_api_ip4_fib_counter_t_array_struct_setter_template,
'vl_api_ip6_fib_counter_t[]': vl_api_ip6_fib_counter_t_array_struct_setter_template
}
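# Illustrative substitution (field names are made up): rendering the u32 template with
# c_name='sw_if_index' and java_name='swIfIndex' produces the C line
#     mp->sw_if_index = clib_host_to_net_u32(swIfIndex);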
jni_impl_template = Template("""
/**
* JNI binding for sending ${c_name} vpe.api message.
* Generated based on $inputfile preparsed data:
$api_data
*/
JNIEXPORT jint JNICALL Java_org_openvpp_jvpp_JVppImpl_${java_name}0
(JNIEnv * env, jclass clazz$args) {
vppjni_main_t *jm = &vppjni_main;
vl_api_${c_name}_t * mp;
u32 my_context_id;
int rv;
rv = vppjni_sanity_check (jm);
if (rv) return rv;
my_context_id = vppjni_get_context_id (jm);
$request_class
$field_identifiers
M(${c_name_uppercase}, ${c_name});
mp->context = clib_host_to_net_u32 (my_context_id);
$struct_setters
S;
if ((*env)->ExceptionCheck(env)) {
return JNI_ERR;
}
return my_context_id;
}""")
def generate_jni_impl(func_list, inputfile):
jni_impl = []
for f in func_list:
f_name = f['name']
camel_case_function_name = util.underscore_to_camelcase(f_name)
if is_manually_generated(f_name) or util.is_reply(camel_case_function_name) \
or util.is_ignored(f_name) or util.is_just_notification(f_name):
continue
arguments = ''
request_class = ''
field_identifiers = ''
struct_setters = ''
f_name_uppercase = f_name.upper()
if f['args']:
arguments = ', jobject request'
camel_case_function_name_upper = util.underscore_to_camelcase_upper(f_name)
request_class = request_class_template.substitute(java_name_upper=camel_case_function_name_upper)
# field identifiers
for t in zip(f['types'], f['args']):
jni_type = t[0]
java_field_name = util.underscore_to_camelcase(t[1])
jni_signature = util.jni_2_signature_mapping[jni_type]
jni_getter = util.jni_field_accessors[jni_type]
field_identifiers += request_field_identifier_template.substitute(
jni_type=jni_type,
java_name=java_field_name,
jni_signature=jni_signature,
jni_getter=jni_getter)
# field setters
for t in zip(f['c_types'], f['args'], f['lengths']):
c_type = t[0]
c_name = t[1]
field_length = t[2][0]
# check if we are processing variable length array:
if t[2][1]:
field_length = util.underscore_to_camelcase(t[2][0])
java_field_name = util.underscore_to_camelcase(c_name)
struct_setter_template = struct_setter_templates[c_type]
struct_setters += struct_setter_template.substitute(
c_name=c_name,
java_name=java_field_name,
field_length=field_length)
jni_impl.append(jni_impl_template.substitute(
inputfile=inputfile,
api_data=util.api_message_to_javadoc(f),
java_name=camel_case_function_name,
c_name_uppercase=f_name_uppercase,
c_name=f_name,
request_class=request_class,
field_identifiers=field_identifiers,
struct_setters=struct_setters,
args=arguments))
return "\n".join(jni_impl)
dto_field_id_template = Template("""
jfieldID ${java_name}FieldId = (*env)->GetFieldID(env, ${class_ref_name}Class, "${java_name}", "${jni_signature}");""")
default_dto_field_setter_template = Template("""
(*env)->Set${jni_setter}(env, dto, ${java_name}FieldId, mp->${c_name});
""")
variable_length_array_value_template = Template("""mp->${length_var_name}""")
variable_length_array_template = Template("""clib_net_to_host_${length_field_type}(${value})""")
u16_dto_field_setter_template = Template("""
(*env)->Set${jni_setter}(env, dto, ${java_name}FieldId, clib_net_to_host_u16(mp->${c_name}));
""")
u32_dto_field_setter_template = Template("""
(*env)->Set${jni_setter}(env, dto, ${java_name}FieldId, clib_net_to_host_u32(mp->${c_name}));
""")
u64_dto_field_setter_template = Template("""
(*env)->Set${jni_setter}(env, dto, ${java_name}FieldId, clib_net_to_host_u64(mp->${c_name}));
""")
u8_array_dto_field_setter_template = Template("""
jbyteArray ${java_name} = (*env)->NewByteArray(env, ${field_length});
(*env)->SetByteArrayRegion(env, ${java_name}, 0, ${field_length}, (const jbyte*)mp->${c_name});
(*env)->SetObjectField(env, dto, ${java_name}FieldId, ${java_name});
""")
u32_array_dto_field_setter_template = Template("""
{
jintArray ${java_name} = (*env)->NewIntArray(env, ${field_length});
jint * ${java_name}ArrayElements = (*env)->GetIntArrayElements(env, ${java_name}, NULL);
unsigned int _i;
for (_i = 0; _i < ${field_length}; _i++) {
${java_name}ArrayElements[_i] = clib_net_to_host_u32(mp->${c_name}[_i]);
}
(*env)->ReleaseIntArrayElements(env, ${java_name}, ${java_name}ArrayElements, 0);
(*env)->SetObjectField(env, dto, ${java_name}FieldId, ${java_name});
}
""")
# For each u64 array we get its elements. Then we convert values to host byte order.
# All changes to jlong* buffer are written to jlongArray (isCopy is set to NULL)
u64_array_dto_field_setter_template = Template("""
{
jlongArray ${java_name} = (*env)->NewLongArray(env, ${field_length});
jlong * ${java_name}ArrayElements = (*env)->GetLongArrayElements(env, ${java_name}, NULL);
unsigned int _i;
for (_i = 0; _i < ${field_length}; _i++) {
${java_name}ArrayElements[_i] = clib_net_to_host_u64(mp->${c_name}[_i]);
}
(*env)->ReleaseLongArrayElements(env, ${java_name}, ${java_name}ArrayElements, 0);
(*env)->SetObjectField(env, dto, ${java_name}FieldId, ${java_name});
}
""")
dto_field_setter_templates = {'u8': default_dto_field_setter_template,
'u16': u16_dto_field_setter_template,
'u32': u32_dto_field_setter_template,
'i32': u32_dto_field_setter_template,
'u64': u64_dto_field_setter_template,
'f64': default_dto_field_setter_template, #fixme
'u8[]': u8_array_dto_field_setter_template,
'u32[]': u32_array_dto_field_setter_template,
'u64[]': u64_array_dto_field_setter_template
}
# code fragment for checking result of the operation before sending request reply
callback_err_handler_template = Template("""
// for negative result don't send callback message but send error callback
if (mp->retval<0) {
CallOnError("${handler_name}",mp->context,mp->retval);
return;
}
if (mp->retval == VNET_API_ERROR_IN_PROGRESS) {
clib_warning("Result in progress");
return;
}
""")
msg_handler_template = Template("""
/**
* Handler for ${handler_name} vpe.api message.
* Generated based on $inputfile preparsed data:
$api_data
*/
static void vl_api_${handler_name}_t_handler (vl_api_${handler_name}_t * mp)
{
vppjni_main_t * jm = &vppjni_main;
JNIEnv *env = jm->jenv;
$err_handler
jmethodID constructor = (*env)->GetMethodID(env, ${class_ref_name}Class, "<init>", "()V");
jmethodID callbackMethod = (*env)->GetMethodID(env, jm->callbackClass, "on${dto_name}", "(Lorg/openvpp/jvpp/dto/${dto_name};)V");
jobject dto = (*env)->NewObject(env, ${class_ref_name}Class, constructor);
$dto_setters
(*env)->CallVoidMethod(env, jm->callback, callbackMethod, dto);
}""")
def generate_msg_handlers(func_list, inputfile):
handlers = []
for f in func_list:
handler_name = f['name']
dto_name = util.underscore_to_camelcase_upper(handler_name)
ref_name = util.underscore_to_camelcase(handler_name)
if is_manually_generated(handler_name) or util.is_ignored(handler_name):
continue
if not util.is_reply(dto_name) and not util.is_notification(handler_name):
continue
if util.is_notification(handler_name):
dto_name = util.add_notification_suffix(dto_name)
ref_name = util.add_notification_suffix(ref_name)
dto_setters = ''
err_handler = ''
# dto setters
for t in zip(f['c_types'], f['types'], f['args'], f['lengths']):
c_type = t[0]
jni_type = t[1]
c_name = t[2]
field_length = t[3][0]
# check if we are processing variable length array
if t[3][1]:
length_var_name = t[3][0]
length_field_type = f['c_types'][f['args'].index(length_var_name)]
field_length = variable_length_array_value_template.substitute(length_var_name=length_var_name)
if length_field_type != 'u8': # we need net to host conversion:
field_length = variable_length_array_template.substitute(
length_field_type=length_field_type, value=field_length)
# for retval don't generate setters and generate retval check
if util.is_retval_field(c_name):
err_handler = callback_err_handler_template.substitute(
handler_name=handler_name
)
continue
java_field_name = util.underscore_to_camelcase(c_name)
jni_signature = util.jni_2_signature_mapping[jni_type]
jni_setter = util.jni_field_accessors[jni_type]
dto_setters += dto_field_id_template.substitute(
java_name=java_field_name,
class_ref_name=ref_name,
jni_signature=jni_signature)
dto_setter_template = dto_field_setter_templates[c_type]
dto_setters += dto_setter_template.substitute(
java_name=java_field_name,
jni_signature=jni_signature,
c_name=c_name,
jni_setter=jni_setter,
field_length=field_length)
handlers.append(msg_handler_template.substitute(
inputfile=inputfile,
api_data=util.api_message_to_javadoc(f),
handler_name=handler_name,
dto_name=dto_name,
class_ref_name=ref_name,
dto_setters=dto_setters,
err_handler=err_handler))
return "\n".join(handlers)
handler_registration_template = Template("""_(${upercase_name}, ${name}) \\
""")
def generate_handler_registration(func_list):
handler_registration = ["#define foreach_vpe_api_msg \\\n"]
for f in func_list:
name = f['name']
camelcase_name = util.underscore_to_camelcase(f['name'])
if (not util.is_reply(camelcase_name) and not util.is_notification(name)) or util.is_ignored(name):
continue
handler_registration.append(handler_registration_template.substitute(
name=name,
upercase_name=name.upper()))
return "".join(handler_registration)
jvpp_c_template = Template("""/**
* This file contains JNI bindings for jvpp Java API.
* It was generated by jvpp_c_gen.py based on $inputfile
* (python representation of vpe.api generated by vppapigen).
*/
void CallOnError(const char* call, int context, int retval);
// JAVA class reference cache
$class_cache
// JNI bindings
$jni_implementations
// Message handlers
$msg_handlers
// Registration of message handlers in vlib
$handler_registration
""")
def generate_jvpp(func_list, inputfile):
""" Generates jvpp C file """
print "Generating jvpp C"
class_cache = generate_class_cache(func_list)
jni_impl = generate_jni_impl(func_list, inputfile)
msg_handlers = generate_msg_handlers(func_list, inputfile)
handler_registration = generate_handler_registration(func_list)
jvpp_c_file = open("jvpp_gen.h", 'w')
jvpp_c_file.write(jvpp_c_template.substitute(
inputfile=inputfile,
class_cache=class_cache,
jni_implementations=jni_impl,
msg_handlers=msg_handlers,
handler_registration=handler_registration))
jvpp_c_file.flush()
jvpp_c_file.close()
| muharif/vpp | vpp-api/java/jvpp/gen/jvpp_c_gen.py | Python | apache-2.0 | 18,185 |
import datetime
import logging
import discord
from discord.ext import commands
from alexBot.classes import GuildData
from alexBot.tools import Cog
log = logging.getLogger(__name__)
class VoiceStats(Cog):
@Cog.listener()
async def on_voice_state_update(
self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState
):
if before.channel is not None and after.channel is not None: # check that joined or left a voice call
return
channel = before.channel or after.channel
# ?? can we gather data from this guild?
gd = await self.bot.db.get_guild_data(channel.guild.id)
if not gd.config.collectVoiceData:
return
# ?? are we getting an event for someone leaving?
if before.channel:
LEAVING = True
else:
LEAVING = False
# ?? were they the last person?
if len([m for m in channel.members if not m.bot]) == 0:
LAST = True
else:
LAST = False
if not LEAVING and len([m for m in after.channel.members if not m.bot]) == 1:
FIRST = True
else:
FIRST = False
if LEAVING and LAST:
# definitly ending of a call
await self.ending_a_call(channel, gd)
if not LEAVING and FIRST:
await self.starting_a_call(channel, gd)
log.debug(f"{LAST=}, {LEAVING=}, {FIRST=}")
async def starting_a_call(self, channel: discord.VoiceChannel, guildData: GuildData):
log.debug(f"starting a call: {channel=}")
if guildData.voiceStat.currently_running:
log.debug("second call started in guild")
return
guildData.voiceStat.last_started = datetime.datetime.now()
guildData.voiceStat.currently_running = True
await self.bot.db.save_guild_data(channel.guild.id, guildData)
async def ending_a_call(self, channel: discord.VoiceChannel, gd: GuildData):
log.debug(f"ending a call: {channel=}")
guild = channel.guild
if self.any_other_voice_chats(guild):
log.debug("late return: other VC in guild")
return # the call continues in another channel
if not gd.voiceStat.currently_running:
# odd state, ignore
return
current_session_length = datetime.datetime.now() - gd.voiceStat.last_started
if gd.voiceStat.longest_session < current_session_length:
gd.voiceStat.longest_session = current_session_length
gd.voiceStat.average_duration_raw = (
(gd.voiceStat.total_sessions * gd.voiceStat.average_duration_raw) + current_session_length.total_seconds()
) / (gd.voiceStat.total_sessions + 1)
gd.voiceStat.total_sessions += 1
gd.voiceStat.currently_running = False
await self.bot.db.save_guild_data(channel.guild.id, gd)
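        # Worked example (illustrative numbers): with 3 recorded sessions averaging 600 s,
        # a new 900 s session updates the average to (3 * 600 + 900) / (3 + 1) = 675 s
        # before total_sessions is incremented.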
@commands.command()
async def voiceStats(self, ctx: commands.Context):
"""tells you how long your average, longest, and current voice sessions is."""
vd = (await self.bot.db.get_guild_data(ctx.guild.id)).voiceStat
embed = discord.Embed()
if self.any_other_voice_chats(ctx.guild):
embed.add_field(name="Current Session Length", value=datetime.datetime.now() - vd.last_started)
embed.add_field(name="longest session", value=vd.longest_session)
embed.add_field(name="Average Session Length", value=vd.average_duration)
embed.add_field(name="Total Sessions", value=vd.total_sessions)
await ctx.send(embed=embed)
@staticmethod
def any_other_voice_chats(guild: discord.Guild) -> bool:
return any([len([m for m in vc.members if not m.bot]) > 0 for vc in guild.voice_channels])
def setup(bot):
bot.add_cog(VoiceStats(bot))
| mralext20/alex-bot | alexBot/cogs/voiceStats.py | Python | mit | 3,833 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import gast
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import builtin_functions
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import decorators
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import error_handlers
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import side_effect_guards
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import errors
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
# TODO(mdan): Might we not need any renaming at all?
def is_whitelisted_for_graph(o):
"""Check whether an entity is whitelisted for use in graph mode.
Examples of whitelisted entities include all members of the tensorflow
package.
Args:
o: A Python entity.
Returns:
Boolean
"""
m = tf_inspect.getmodule(o)
for prefix, in config.DEFAULT_UNCOMPILED_MODULES:
if m.__name__.startswith(prefix):
return True
if hasattr(o, 'autograph_info__'):
return True
return False
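# Illustrative check (assumes the 'tensorflow' prefix is listed in
# config.DEFAULT_UNCOMPILED_MODULES):
#
#   is_whitelisted_for_graph(tf.constant)       # True: module starts with 'tensorflow'
#   is_whitelisted_for_graph(my_user_function)  # False unless it carries autograph_info__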
def entity_to_graph(o, program_ctx, arg_values, arg_types):
"""Compile a Python entity into equivalent TensorFlow.
The function will also recursively compile all the entities that `o`
references, updating `dependency_cache`.
This function is reentrant, and relies on dependency_cache to avoid
generating duplicate code.
Args:
o: A Python entity.
program_ctx: A ProgramContext object.
arg_values: A dict containing value hints for symbols like function
parameters.
arg_types: A dict containing type hints for symbols like function
parameters.
Returns:
A tuple (ast, new_name, namespace):
* ast: An AST representing an entity with interface equivalent to `o`,
but which when executed it creates TF a graph.
* new_name: The symbol name under which the new entity can be found.
* namespace: A dict mapping all symbols visible to the converted entity,
keyed by their symbol name.
Raises:
ValueError: if the entity type is not supported.
"""
if program_ctx.options.verbose:
logging.info('Converting {}'.format(o))
if tf_inspect.isclass(o):
node, name, ns = class_to_graph(o, program_ctx)
elif tf_inspect.isfunction(o):
# TODO(mdan): This is not a reliable mechanism.
# The most reliable way is to check the source code, the AST will contain
# a Lambda node instead of a FunctionDef
if o.__name__ == '<lambda>':
raise NotImplementedError(
'lambda functions are not yet supported; declare the function'
' using def instead: %s' % o)
else:
node, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)
elif tf_inspect.ismethod(o):
node, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)
# TODO(mdan,yashkatariya): Remove when object conversion is implemented.
elif hasattr(o, '__class__'):
raise NotImplementedError(
'Object conversion is not yet supported. If you are '
'trying to convert code that uses an existing object, '
'try including the creation of that object in the '
'conversion. For example, instead of converting the method '
'of a class, try converting the entire class instead. '
'See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/'
'contrib/autograph/README.md#using-the-functional-api '
'for more information.')
else:
raise ValueError(
'Entity "%s" has unsupported type "%s". Only functions and classes are '
'supported for now.' % (o, type(o)))
# TODO(mdan): This is temporary. it should be created using a converter.
# TODO(mdan): The attribute should be added with a helper, not directly.
# The helper can ensure there are no collisions.
template = '''
entity.autograph_info__ = {}
'''
node.extend(templates.replace(template, entity=name))
program_ctx.add_to_cache(o, node)
if program_ctx.options.verbose:
logging.info('Compiled output of {}:\n\n{}\n'.format(
o, compiler.ast_to_source(node)))
if program_ctx.options.recursive:
while True:
candidate = None
for obj in program_ctx.name_map.keys():
if obj not in program_ctx.dependency_cache:
candidate = obj
break
if candidate is None:
break
if (hasattr(candidate, 'im_class') and
getattr(candidate, 'im_class') not in program_ctx.partial_types):
# Class members are converted with their objects, unless they're
# only converted partially.
continue
entity_to_graph(candidate, program_ctx, {}, {})
return node, name, ns
def class_to_graph(c, program_ctx):
"""Specialization of `entity_to_graph` for classes."""
converted_members = {}
method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
members = tf_inspect.getmembers(c, predicate=method_filter)
if not members:
raise ValueError('Cannot convert %s: it has no member methods.' % c)
class_namespace = {}
for _, m in members:
# Only convert the members that are directly defined by the class.
if inspect_utils.getdefiningclass(m, c) is not c:
continue
node, _, namespace = function_to_graph(
m,
program_ctx=program_ctx,
arg_values={},
arg_types={'self': (c.__name__, c)},
owner_type=c,
rewrite_errors=False)
if class_namespace is None:
class_namespace = namespace
else:
class_namespace.update(namespace)
converted_members[m] = node[0]
namer = program_ctx.new_namer(class_namespace)
class_name = namer.compiled_class_name(c.__name__, c)
# TODO(mdan): This needs to be explained more thoroughly.
# Process any base classes: if the superclass if of a whitelisted type, an
# absolute import line is generated. Otherwise, it is marked for conversion
# (as a side effect of the call to namer.compiled_class_name() followed by
# program_ctx.update_name_map(namer)).
output_nodes = []
renames = {}
base_names = []
for base in c.__bases__:
if isinstance(object, base):
base_names.append('object')
continue
if is_whitelisted_for_graph(base):
alias = namer.new_symbol(base.__name__, ())
output_nodes.append(
gast.ImportFrom(
module=base.__module__,
names=[gast.alias(name=base.__name__, asname=alias)],
level=0))
else:
# This will trigger a conversion into a class with this name.
alias = namer.compiled_class_name(base.__name__, base)
base_names.append(alias)
renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)
program_ctx.update_name_map(namer)
# Generate the definition of the converted class.
bases = [gast.Name(n, gast.Load(), None) for n in base_names]
class_def = gast.ClassDef(
class_name,
bases=bases,
keywords=[],
body=list(converted_members.values()),
decorator_list=[])
# Make a final pass to replace references to the class or its base classes.
# Most commonly, this occurs when making super().__init__() calls.
# TODO(mdan): Making direct references to superclass' superclass will fail.
class_def = qual_names.resolve(class_def)
renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)
class_def = ast_util.rename_symbols(class_def, renames)
output_nodes.append(class_def)
return output_nodes, class_name, class_namespace
def _add_reserved_symbol(namespace, name, entity):
if name not in namespace:
namespace[name] = entity
elif namespace[name] != entity:
raise ValueError('The name "%s" is reserved and may not be used.' % name)
ag_internal = None
def _add_self_references(namespace, autograph_module):
"""Adds namespace references to the module that exposes the api itself."""
global ag_internal
if ag_internal is None:
# Craft a module that exposes parts of the external API as well as certain
# internal modules.
ag_internal = imp.new_module('autograph')
ag_internal.converted_call = autograph_module.converted_call
ag_internal.ConversionOptions = converter.ConversionOptions
ag_internal.utils = utils
ag_internal.function_scope = function_wrapping.function_scope
ag_internal.rewrite_graph_construction_error = (
errors.rewrite_graph_construction_error)
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
ag_internal.__dict__.update(operators.__dict__)
_add_reserved_symbol(namespace, 'ag__', ag_internal)
def function_to_graph(f,
program_ctx,
arg_values,
arg_types,
owner_type=None,
rewrite_errors=True):
"""Specialization of `entity_to_graph` for callable functions."""
node, source = parser.parse_entity(f)
node = node.body[0]
origin_info.resolve(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = program_ctx.new_namer(namespace)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
namespace=namespace,
arg_values=arg_values,
arg_types=arg_types,
owner_type=owner_type)
context = converter.EntityContext(namer, entity_info, program_ctx)
node = node_to_graph(node, context, rewrite_errors=rewrite_errors)
# TODO(mdan): This somewhat duplicates the call rename logic in call_trees.py
new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)
if not did_rename:
new_name = f.__name__
if node.name != f.__name__:
raise NotImplementedError('Strange corner case. Send us offending code!')
node.name = new_name
program_ctx.update_name_map(namer)
# TODO(mdan): Use this at compilation.
return [node], new_name, namespace
def node_to_graph(node, context, rewrite_errors=True):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
rewrite_errors: Boolean, whether or not to rewrite the error traceback.
Returns:
A tuple (node, deps):
* node: A Python ast node, representing the converted code.
* deps: A set of strings, the fully qualified names of entity
dependencies that this node has.
"""
# TODO(mdan): Insert list_comprehensions somewhere.
node = converter.standard_analysis(node, context, is_initial=True)
# Past this point, line numbers are no longer accurate so we ignore the
# source.
# TODO(mdan): Is it feasible to reconstruct intermediate source code?
context.info.source_code = None
node = converter.apply_(node, context, decorators)
node = converter.apply_(node, context, directives)
node = converter.apply_(node, context, break_statements)
node = converter.apply_(node, context, asserts)
# Note: sequencing continue canonicalization before for loop one avoids
# dealing with the extra loop increment operation that the for
# canonicalization creates.
node = converter.apply_(node, context, continue_statements)
context.info.namespace['len'] = len
node = converter.apply_(node, context, return_statements)
node = converter.apply_(node, context, lists)
node = converter.apply_(node, context, slices)
node = converter.apply_(node, context, builtin_functions)
node = converter.apply_(node, context, call_trees)
node = converter.apply_(node, context, control_flow)
node = converter.apply_(node, context, conditional_expressions)
node = converter.apply_(node, context, logical_expressions)
node = converter.apply_(node, context, side_effect_guards)
node = converter.apply_(node, context, function_scopes)
if rewrite_errors:
node = converter.apply_(node, context, error_handlers)
return node
| girving/tensorflow | tensorflow/python/autograph/impl/conversion.py | Python | apache-2.0 | 14,250 |
from os.path import join
import pytest
from cobra.io import load_json_model, write_sbml_model
def test_load_json_model_valid(data_directory, tmp_path):
"""Test loading a valid annotation from JSON."""
path_to_file = join(data_directory, "valid_annotation_format.json")
model = load_json_model(path_to_file)
expected = {
"bigg.reaction": [["is", "PFK26"]],
"kegg.reaction": [["is", "R02732"]],
"rhea": [["is", "15656"]],
}
for metabolite in model.metabolites:
assert metabolite.annotation == expected
path_to_output = join(str(tmp_path), "valid_annotation_output.xml")
write_sbml_model(model, path_to_output)
def test_load_json_model_invalid(data_directory):
"""Test that loading an invalid annotation from JSON raises TypeError"""
path = join(data_directory, "invalid_annotation_format.json")
with pytest.raises(TypeError):
model = load_json_model(path)
| opencobra/cobrapy | src/cobra/test/test_io/test_annotation_format.py | Python | gpl-2.0 | 944 |
import json
DMCA_ERROR = '''
<div class="alert alert-info" role="alert">
This file has been the subject of a DMCA take down
and is unable to be rendered by the Open Science Framework
</div>
<style>.file-download{{display: none;}}</style>
'''
# Note: the style is for disabling download buttons
STATUS_CODE_ERROR_MAP = {
461: DMCA_ERROR
}
class RenderFailureException(Exception):
    '''An exception for temporary errors when attempting to render a file,
    e.g. 500s, 400s, and similar transient failures.
'''
def __init__(self, http_status_code, **additional):
additional['status_code'] = http_status_code
msg = json.dumps(additional)
super(RenderFailureException, self).__init__(msg)
class RenderNotPossibleException(Exception):
    '''An exception indicating an unavoidable render error
    that should therefore be cached, e.g. Dropbox's DMCA take downs.
'''
def __init__(self, msg):
self.renderable_error = msg
super(RenderNotPossibleException, self).__init__(msg)
def error_message_or_exception(http_status_code, **info):
err = STATUS_CODE_ERROR_MAP.get(http_status_code)
if not err:
raise RenderFailureException(http_status_code)
raise RenderNotPossibleException(err.format(**info))
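# Hedged usage sketch (call sites assumed):
#   error_message_or_exception(500)                 # raises RenderFailureException
#   error_message_or_exception(461, file_name='x')  # raises RenderNotPossibleException
#                                                   # carrying the cacheable DMCA notice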
| GaryKriebel/osf.io | framework/render/exceptions.py | Python | apache-2.0 | 1,252 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import re
import llnl.util.tty as tty
from llnl.util.lang import pretty_date
from llnl.util.filesystem import working_dir
from llnl.util.tty.colify import colify_table
import spack
from spack.util.executable import which
from spack.cmd import spack_is_git_repo
description = "show contributors to packages"
section = "developer"
level = "long"
def setup_parser(subparser):
view_group = subparser.add_mutually_exclusive_group()
view_group.add_argument(
'-t', '--time', dest='view', action='store_const', const='time',
default='time', help='sort by last modification date (default)')
view_group.add_argument(
'-p', '--percent', dest='view', action='store_const', const='percent',
help='sort by percent of code')
view_group.add_argument(
'-g', '--git', dest='view', action='store_const', const='git',
help='show git blame output instead of summary')
subparser.add_argument(
'package_name', help='name of package to show contributions for, '
'or path to a file in the spack repo')
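# Example invocations (illustrative package name):
#   spack blame zlib            # contributors sorted by last modification date
#   spack blame --percent zlib  # sorted by share of lines contributed
#   spack blame --git zlib      # raw `git blame` output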
def blame(parser, args):
# make sure this is a git repo
if not spack_is_git_repo():
tty.die("This spack is not a git clone. Can't use 'spack blame'")
git = which('git', required=True)
# Get name of file to blame
blame_file = None
if os.path.isfile(args.package_name):
path = os.path.realpath(args.package_name)
if path.startswith(spack.prefix):
blame_file = path
if not blame_file:
pkg = spack.repo.get(args.package_name)
blame_file = pkg.module.__file__.rstrip('c') # .pyc -> .py
# get git blame for the package
with working_dir(spack.prefix):
if args.view == 'git':
git('blame', blame_file)
return
else:
output = git('blame', '--line-porcelain', blame_file, output=str)
lines = output.split('\n')
# Histogram authors
counts = {}
emails = {}
last_mod = {}
total_lines = 0
for line in lines:
match = re.match(r'^author (.*)', line)
if match:
author = match.group(1)
match = re.match(r'^author-mail (.*)', line)
if match:
email = match.group(1)
match = re.match(r'^author-time (.*)', line)
if match:
mod = int(match.group(1))
last_mod[author] = max(last_mod.setdefault(author, 0), mod)
# ignore comments
if re.match(r'^\t[^#]', line):
counts[author] = counts.setdefault(author, 0) + 1
emails.setdefault(author, email)
total_lines += 1
if args.view == 'time':
rows = sorted(
counts.items(), key=lambda t: last_mod[t[0]], reverse=True)
else: # args.view == 'percent'
rows = sorted(counts.items(), key=lambda t: t[1], reverse=True)
# Print a nice table with authors and emails
table = [['LAST_COMMIT', 'LINES', '%', 'AUTHOR', 'EMAIL']]
for author, nlines in rows:
table += [[
pretty_date(last_mod[author]),
nlines,
round(nlines / float(total_lines) * 100, 1),
author,
emails[author]]]
table += [[''] * 5]
table += [[pretty_date(max(last_mod.values())), total_lines, '100.0'] +
[''] * 3]
colify_table(table)
| skosukhin/spack | lib/spack/spack/cmd/blame.py | Python | lgpl-2.1 | 4,597 |
from fractions import Fraction  # used by num(); genast.nodes may also re-export it
from genast.nodes import *
def expr(val, tail):
if isinstance(val, SumNode):
return val
else:
return SumNode(mult(val, []), *tail)
def mult(val, tail):
if isinstance(val, MultNode):
return val
else:
return MultNode(power(val), *tail)
def sign(s):
if isinstance(s, str):
return SignNode(s)
elif isinstance(s,SignNode):
return s
def mult_sub(s, e):
return MultSub(sign(s), power(e))
def sum_sub(s, e):
return SumSub(sign(s), mult(e, []))
def power(val):
if isinstance(val, PowerNode):
return val
else:
return PowerNode(funcall(val))
def funcall(val):
if isinstance(val, FuncallNode):
return val
elif isinstance(val, NumberNode):
return FuncallNode(None, num(val))
    else:
        # Wrap plain numeric values as numbers; anything else becomes a nested expression.
        number = num(val)
        if number is not None:
            return FuncallNode(None, number)
        return FuncallNode(None, expr(val, []))
def num(val):
if isinstance(val, NumberNode):
return val
elif isinstance(val, (int, float, Fraction)):
return NumberNode(val)
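# Illustrative wrapping chain: expr(3, []) builds
# SumNode(MultNode(PowerNode(FuncallNode(None, NumberNode(3))))),
# i.e. each helper lifts a bare value one level up the expression grammar.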
| dasdy/genexpr | genast/wrappers.py | Python | mit | 1,045 |
pytest_plugins = [
"tests.fixtures",
]
| andreroggeri/pynubank | tests/conftest.py | Python | mit | 43 |