"""
Functions for ecommerce
"""
from base64 import b64encode
import hashlib
import hmac
from itertools import chain
import logging
from urllib.parse import quote_plus
import uuid
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.exceptions import ValidationError
from edx_api.client import EdxApi
from backends.constants import COURSEWARE_BACKEND_URL
from courses.models import (
CourseRun,
Program,
)
from dashboard.api_edx_cache import (
CachedEdxDataApi,
CachedEdxUserData,
)
from dashboard.models import ProgramEnrollment
from dashboard.utils import MMTrack, get_mmtrack
from dashboard.api import has_to_pay_for_exam
from ecommerce.constants import REFERENCE_NUMBER_PREFIX
from ecommerce.exceptions import (
EcommerceEdxApiException,
EcommerceException,
ParseException,
)
from ecommerce.models import (
Coupon,
Line,
Order,
RedeemedCoupon,
)
from financialaid.api import get_formatted_course_price
from financialaid.models import (
FinancialAid,
FinancialAidStatus,
TierProgram
)
from micromasters.utils import now_in_utc
from profiles.api import get_social_auth
ISO_8601_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
log = logging.getLogger(__name__)
def get_purchasable_course_run(course_key, user):
"""
    Gets a course run, or raises Http404 if it is not purchasable. To be purchasable, a course
    run must not already be purchased, must belong to a live program that has financial aid
    enabled, must have a current financial aid application for the user, and must have a valid price.
Args:
course_key (str):
An edX course key
user (User):
The purchaser of the course run
Returns:
CourseRun: A course run
"""
# Make sure it's connected to a live program, it has a valid price, and the user is enrolled in the program already
try:
course_run = get_object_or_404(
CourseRun,
edx_course_key=course_key,
course__program__live=True,
course__program__financial_aid_availability=True,
)
except Http404:
log.warning("Course run %s is not purchasable", course_key)
raise
if not FinancialAid.objects.filter(
tier_program__current=True,
tier_program__program__course__courserun=course_run,
user=user,
status__in=FinancialAidStatus.TERMINAL_STATUSES,
).exists():
log.warning("Course run %s has no attached financial aid for user %s", course_key, user.username)
raise ValidationError(
"Course run {} does not have a current attached financial aid application".format(course_key)
)
# Make sure it's not already purchased
if Line.objects.filter(
order__status__in=Order.FULFILLED_STATUSES,
order__user=user,
course_key=course_run.edx_course_key,
).exists():
mmtrack = get_mmtrack(user, course_run.course.program)
if not has_to_pay_for_exam(mmtrack, course_run.course):
log.warning("Course run %s is already purchased by user %s", course_key, user)
raise ValidationError("Course run {} is already purchased".format(course_key))
return course_run
@transaction.atomic
def create_unfulfilled_order(course_key, user):
"""
    Create a new unfulfilled Order for a purchasable course run. If the course run is not
    purchasable, this raises Http404.
Args:
course_key (str):
A course key
user (User):
The purchaser of the course run
Returns:
Order: A newly created Order for the CourseRun with the given course_id
"""
course_run = get_purchasable_course_run(course_key, user)
price, coupon = calculate_run_price(course_run, user)
if price < 0:
log.error(
"Price to be charged for course run %s for user %s is less than zero: %s",
course_key,
user.username,
price,
)
raise ImproperlyConfigured("Price to be charged is less than zero")
order = Order.objects.create(
status=Order.CREATED,
total_price_paid=price,
user=user,
)
Line.objects.create(
order=order,
course_key=course_key,
description='Seat for {}'.format(course_run.title),
price=price,
)
if coupon is not None:
redeemed_coupon = RedeemedCoupon(order=order, coupon=coupon)
redeemed_coupon.save_and_log(user)
order.save_and_log(user)
return order
def generate_cybersource_sa_signature(payload):
"""
Generate an HMAC SHA256 signature for the CyberSource Secure Acceptance payload
Args:
payload (dict): The payload to be sent to CyberSource
Returns:
str: The signature
"""
# This is documented in certain CyberSource sample applications:
# http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_SOP/html/wwhelp/wwhimpl/js/html/wwhelp.htm#href=creating_profile.05.6.html
keys = payload['signed_field_names'].split(',')
message = ','.join('{}={}'.format(key, payload[key]) for key in keys)
digest = hmac.new(
settings.CYBERSOURCE_SECURITY_KEY.encode('utf-8'),
msg=message.encode('utf-8'),
digestmod=hashlib.sha256,
).digest()
return b64encode(digest).decode('utf-8')
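# Verification sketch for the response side. CyberSource echoes back both
# 'signed_field_names' and 'signature', so the receiver can rebuild the same
# message and compare digests in constant time. is_signature_valid is a
# hypothetical helper, not part of the original module.
def is_signature_valid(response_payload):
    """Return True if a Secure Acceptance response carries a valid signature (sketch)"""
    expected = generate_cybersource_sa_signature(response_payload)
    return hmac.compare_digest(expected, response_payload['signature'])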
def make_dashboard_receipt_url(dashboard_url, course_key, status):
"""
    Generate the URL the user is redirected to after checkout (receipt or cancel page)
Args:
dashboard_url (str): The absolute url for the dashboard
course_key (str): An edX course key
status (str): The order receipt page status, either 'cancel' or 'receipt'
Returns:
str:
The URL for the order receipt page
"""
return "{dashboard_url}?status={status}&course_key={course_key}".format(
dashboard_url=dashboard_url,
status=status,
course_key=quote_plus(course_key),
)
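# For example (hypothetical values):
#   make_dashboard_receipt_url('https://mm.example.com/dashboard/',
#                              'course-v1:MITx+6.00x+2017', 'receipt')
# returns
#   'https://mm.example.com/dashboard/?status=receipt&course_key=course-v1%3AMITx%2B6.00x%2B2017'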
def generate_cybersource_sa_payload(order, dashboard_url, ip_address=None):
"""
Generates a payload dict to send to CyberSource for Secure Acceptance
Args:
order (Order): An order
        dashboard_url (str): The absolute url for the dashboard
ip_address (str): The user's IP address
Returns:
dict: the payload to send to CyberSource via Secure Acceptance
"""
# http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_WM/Secure_Acceptance_WM.pdf
# Section: API Fields
# Course key is used only to show the confirmation message to the user
course_key = ""
line = order.line_set.first()
if line is not None:
course_key = line.course_key
course_run = CourseRun.objects.get(edx_course_key=course_key)
# NOTE: be careful about max length here, many (all?) string fields have a max
# length of 255. At the moment none of these fields should go over that, due to database
# constraints or other reasons
payload = {
'access_key': settings.CYBERSOURCE_ACCESS_KEY,
'amount': str(order.total_price_paid),
'consumer_id': order.user.username,
'currency': 'USD',
'locale': 'en-us',
'item_0_code': 'course',
'item_0_name': '{}'.format(course_run.title),
'item_0_quantity': 1,
'item_0_sku': '{}'.format(course_key),
'item_0_tax_amount': '0',
'item_0_unit_price': str(order.total_price_paid),
'line_item_count': 1,
'override_custom_cancel_page': make_dashboard_receipt_url(dashboard_url, course_key, 'cancel'),
'override_custom_receipt_page': make_dashboard_receipt_url(dashboard_url, course_key, 'receipt'),
'reference_number': make_reference_id(order),
'profile_id': settings.CYBERSOURCE_PROFILE_ID,
'signed_date_time': now_in_utc().strftime(ISO_8601_FORMAT),
'transaction_type': 'sale',
'transaction_uuid': uuid.uuid4().hex,
'unsigned_field_names': '',
'merchant_defined_data1': 'course',
'merchant_defined_data2': '{}'.format(course_run.title),
'merchant_defined_data3': '{}'.format(course_key),
"customer_ip_address": ip_address if ip_address else None,
}
field_names = sorted(list(payload.keys()) + ['signed_field_names'])
payload['signed_field_names'] = ','.join(field_names)
payload['signature'] = generate_cybersource_sa_signature(payload)
return payload
def make_reference_id(order):
"""
Make a reference id
Args:
order (Order):
An order
Returns:
str:
A reference number for use with CyberSource to keep track of orders
"""
return "{}{}-{}".format(REFERENCE_NUMBER_PREFIX, settings.CYBERSOURCE_REFERENCE_PREFIX, order.id)
def get_new_order_by_reference_number(reference_number):
"""
Parse a reference number received from CyberSource and lookup the corresponding Order.
Args:
reference_number (str):
A string which contains the order id and the instance which generated it
Returns:
Order:
An order
"""
if not reference_number.startswith(REFERENCE_NUMBER_PREFIX):
raise ParseException("Reference number must start with {}".format(REFERENCE_NUMBER_PREFIX))
reference_number = reference_number[len(REFERENCE_NUMBER_PREFIX):]
try:
order_id_pos = reference_number.rindex('-')
except ValueError:
raise ParseException("Unable to find order number in reference number")
try:
order_id = int(reference_number[order_id_pos + 1:])
except ValueError:
raise ParseException("Unable to parse order number")
prefix = reference_number[:order_id_pos]
if prefix != settings.CYBERSOURCE_REFERENCE_PREFIX:
log.error("CyberSource prefix doesn't match: %s != %s", prefix, settings.CYBERSOURCE_REFERENCE_PREFIX)
raise ParseException("CyberSource prefix doesn't match")
try:
return Order.objects.get(id=order_id)
except Order.DoesNotExist:
raise EcommerceException("Unable to find order {}".format(order_id))
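# Round-trip sketch: parsing inverts make_reference_id, so for any persisted
# order the following holds (assuming the settings do not change in between):
#   assert get_new_order_by_reference_number(make_reference_id(order)) == order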
def enroll_user_on_success(order):
"""
Enroll user after they made a successful purchase.
Args:
order (Order): An order to be fulfilled
Returns:
None
"""
line_qset = order.line_set.all()
courseware_backend = CourseRun.objects.get(edx_course_key=line_qset.first().course_key).courseware_backend
user_social = get_social_auth(order.user, courseware_backend)
enrollments_client = EdxApi(user_social.extra_data, COURSEWARE_BACKEND_URL[courseware_backend]).enrollments
existing_enrollments = enrollments_client.get_student_enrollments()
exceptions = []
enrollments = []
for line in line_qset:
course_key = line.course_key
try:
if not existing_enrollments.is_enrolled_in(course_key):
enrollments.append(enrollments_client.create_audit_student_enrollment(course_key))
except Exception as ex: # pylint: disable=broad-except
log.exception(
"Error creating audit enrollment for course key %s for user %s",
course_key,
order.user.username,
)
exceptions.append(ex)
for enrollment in enrollments:
CachedEdxDataApi.update_cached_enrollment(
order.user,
enrollment,
enrollment.course_id,
index_user=True,
)
if exceptions:
raise EcommerceEdxApiException(exceptions)
def is_coupon_redeemable_for_run(coupon, user, course_key):
"""
Returns true if the coupon is redeemable for the user for a course run.
Args:
coupon (Coupon): A coupon
user (django.contrib.auth.models.User): A user
course_key (str): An edX course key
Returns:
bool:
            True if the coupon is redeemable by the user for the specific course run with that course key
"""
if not is_coupon_redeemable(coupon, user):
return False
return course_key in coupon.course_keys
def is_coupon_redeemable(coupon, user):
"""
Returns true if the coupon is redeemable for the user, for any relevant course run.
Args:
coupon (Coupon): A coupon
user (django.contrib.auth.models.User): A user
Returns:
bool:
True if the coupon is redeemable by the user for some course run
"""
if not Program.objects.filter(
programenrollment__user=user,
programenrollment__program__course__courserun__edx_course_key__in=coupon.course_keys,
live=True,
).exists():
return False
if (
not coupon.is_valid or # coupon must be enabled and within valid date range
not coupon.user_has_redemptions_left(user) # coupon must not be used up
):
return False
if coupon.coupon_type == Coupon.DISCOUNTED_PREVIOUS_COURSE:
        # We validate in clean() that content_object is a Course if coupon_type is DISCOUNTED_PREVIOUS_COURSE
course = coupon.content_object
program = course.program
edx_user_data = CachedEdxUserData(user, program=program)
mmtrack = MMTrack(
user,
program,
edx_user_data,
)
# For this coupon type the user must have already purchased a course run on edX
        return any(
            mmtrack.has_verified_enrollment(run.edx_course_key)
            for run in course.courserun_set.not_discontinued()
        )
return True
def pick_coupons(user):
"""
Choose the coupons which would be used in redemptions by the user. There should be at most one coupon
per program in the output.
The heuristic is currently:
- choose attached coupons over automatic coupons
- choose the coupon which has been most recently attached, or most recently modified
Args:
user (django.contrib.auth.models.User): A user
Returns:
list of Coupon: The coupons which will be used by the user when redeeming runs in a program
"""
sorted_attached_coupons = Coupon.user_coupon_qset(user).order_by('-usercoupon__updated_on')
sorted_automatic_coupons = Coupon.is_automatic_qset().order_by('-updated_on')
    # At this point there should only be coupons the user has attached (opted into by clicking
    # a link) or automatic coupons, of which there should only be a few. So the iterations below
    # should not touch many rows in the DB.
coupons = []
# Only one coupon per program
program_ids = set()
for coupon in chain(sorted_attached_coupons, sorted_automatic_coupons):
program_id = coupon.program.id
if program_id not in program_ids and is_coupon_redeemable(coupon, user):
coupons.append(coupon)
program_ids.add(program_id)
return coupons
def calculate_coupon_price(coupon, price, course_key):
"""
Calculate the adjusted price given a coupon
Args:
coupon (Coupon): A coupon
price (decimal.Decimal): A price
course_key (str): An edX course key
Returns:
decimal.Decimal: An adjusted price
"""
new_price = price
if course_key in coupon.course_keys:
if coupon.amount_type == Coupon.PERCENT_DISCOUNT:
new_price = price * (1-coupon.amount)
elif coupon.amount_type == Coupon.FIXED_DISCOUNT:
new_price = price - coupon.amount
elif coupon.amount_type == Coupon.FIXED_PRICE:
new_price = coupon.amount
if new_price < 0:
new_price = 0
elif new_price > price:
new_price = price
return new_price
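# Worked example (hypothetical Decimal values): with price == Decimal('100'),
# a PERCENT_DISCOUNT coupon with amount == Decimal('0.25') yields
# 100 * (1 - 0.25) == Decimal('75.00'); a FIXED_DISCOUNT of Decimal('30')
# yields Decimal('70'); a FIXED_PRICE of Decimal('50') yields Decimal('50').
# The final clamp keeps every result within [0, price].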
def calculate_run_price(course_run, user):
"""
Calculate the price of a course given the coupons and financial aid available to the user.
Args:
course_run (CourseRun): A course run
user (django.contrib.auth.models.User): A user
Returns:
(decimal.Decimal, Coupon):
            The adjusted price of the course, and the coupon used if any
"""
program = course_run.course.program
enrollment = get_object_or_404(ProgramEnrollment, program=program, user=user)
price = get_formatted_course_price(enrollment)['price']
coupons = [coupon for coupon in pick_coupons(user) if coupon.program == program]
if not coupons:
# There is no coupon for this program
return price, None
coupon = coupons[0]
if course_run.edx_course_key not in coupon.course_keys:
# coupon does not apply to this particular course run
return price, None
price = calculate_coupon_price(coupon, price, course_run.edx_course_key)
return price, coupon
def validate_prices():
"""
Validate prices and financial aid discounts
Returns:
list: List of validation errors
"""
errors = []
programs = Program.objects.filter(live=True)
for program in programs:
if program.financial_aid_availability:
tier = TierProgram.objects.filter(program=program, current=True).order_by("-discount_amount").first()
if tier:
if tier.discount_amount > program.price:
errors.append('Discount is higher than course price for program {0}'.format(program.title))
if not TierProgram.objects.filter(discount_amount=0, program=program, current=True).exists():
errors.append('Could not find 0 discount TierProgram for program {0}'.format(program.title))
if not TierProgram.objects.filter(income_threshold=0, program=program, current=True).exists():
errors.append(
'Could not find 0 income_threshold TierProgram for program {0}'.format(program.title)
)
else:
errors.append('Could not find current TierProgram for program {0}'.format(program.title))
return errors
| {
"content_hash": "6e0e55ae7427627be6b12ac978e95732",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 159,
"avg_line_length": 34.78886756238004,
"alnum_prop": 0.6534068965517241,
"repo_name": "mitodl/micromasters",
"id": "62b7e1904ccac6163905450ece7e6273c461fc3e",
"size": "18125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecommerce/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ApplicationInsightsManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for ApplicationInsightsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2019-10-17-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2019-10-17-preview") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
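# A minimal usage sketch. DefaultAzureCredential comes from the separate
# azure-identity package, which is an assumption here; any TokenCredential
# implementation works the same way.
#
#     from azure.identity import DefaultAzureCredential
#     config = ApplicationInsightsManagementClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="00000000-0000-0000-0000-000000000000",
#     )
#     assert config.api_version == "2019-10-17-preview"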
| {
"content_hash": "9119685497a2060f2940a52a81ca5057",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 125,
"avg_line_length": 48.2,
"alnum_prop": 0.7095435684647303,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c743c45383d55e1265bd047df5c23d3de1693d6f",
"size": "3601",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2019_10_17_preview/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from pymongo import MongoClient
from twisted.trial.unittest import TestCase
from tests.utils import ObjectMaker
from vusion.persist import ShortcodeManager, Shortcode
class TestShortcodeManager(TestCase, ObjectMaker):
def setUp(self):
self.database_name = 'test_vusion_db'
c = MongoClient(w=1)
db = c[self.database_name]
self.manager = ShortcodeManager(db, 'shortcodes')
self.clearData()
def tearDown(self):
self.clearData()
def clearData(self):
self.manager.drop()
def test_get_shortcode(self):
self.manager.save_document(
Shortcode(**self.mkobj_shortcode(code='8181',
international_prefix='256')))
self.manager.save_document(
Shortcode(**self.mkobj_shortcode(code='8282',
international_prefix='256')))
self.manager.save_document(
Shortcode(**self.mkobj_shortcode(code='8181',
international_prefix='255')))
self.manager.save_document(
Shortcode(**self.mkobj_shortcode_international(code='+318181')))
shortcode = self.manager.get_shortcode('8181', '+25511111')
self.assertEqual(shortcode['international-prefix'], '255')
shortcode = self.manager.get_shortcode('8181', '+25611111')
self.assertEqual(shortcode['international-prefix'], '256')
shortcode = self.manager.get_shortcode('+318181', '+25611111')
self.assertEqual(shortcode['international-prefix'], '31')
| {
"content_hash": "55ab5d2e3a39c5eff8f5f0442f26e950",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 82,
"avg_line_length": 36.68888888888889,
"alnum_prop": 0.5935796486977589,
"repo_name": "texttochange/vusion-backend",
"id": "ce4d06f71942d50a44548ba13c01266b8f3d2feb",
"size": "1651",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "vusion/persist/shortcode/tests/test_shortcode_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1510"
},
{
"name": "Python",
"bytes": "1204678"
},
{
"name": "Shell",
"bytes": "798"
}
],
"symlink_target": ""
} |
from django.conf import settings as django_settings
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from gnotify import settings
class NotificationType(models.Model):
"""
Notification types are added on-the-fly by the
applications adding new notifications"""
key = models.CharField(
max_length=128,
primary_key=True,
verbose_name=_(u'unique key'),
unique=True,
)
label = models.CharField(
max_length=128,
verbose_name=_(u'verbose name'),
blank=True,
null=True,
)
content_type = models.ForeignKey(ContentType, blank=True, null=True)
def __unicode__(self):
return self.key
class Meta:
db_table = settings.DB_TABLE_PREFIX + '_notificationtype'
verbose_name = _(u'type')
verbose_name_plural = _(u'types')
class Settings(models.Model):
user = models.ForeignKey(
django_settings.AUTH_USER_MODEL
)
interval = models.SmallIntegerField(
choices=settings.INTERVALS,
verbose_name=_(u'interval'),
default=settings.INTERVALS_DEFAULT,
)
def __unicode__(self):
obj_name = _(u"Settings for %s") % self.user.username
return unicode(obj_name)
class Meta:
db_table = settings.DB_TABLE_PREFIX + '_settings'
verbose_name = _(u'settings')
verbose_name_plural = _(u'settings')
class Subscription(models.Model):
settings = models.ForeignKey(
Settings,
)
notification_type = models.ForeignKey(
NotificationType,
)
object_id = models.CharField(
max_length=64,
null=True,
blank=True,
help_text=_(u'Leave this blank to subscribe to any kind of object'),
)
send_emails = models.BooleanField(
default=True,
)
latest = models.ForeignKey(
'Notification',
null=True,
blank=True,
related_name='latest_for',
)
def __unicode__(self):
obj_name = _("Subscription for: %s") % str(self.settings.user.username)
return unicode(obj_name)
class Meta:
db_table = settings.DB_TABLE_PREFIX + '_subscription'
verbose_name = _(u'subscription')
verbose_name_plural = _(u'subscriptions')
class Notification(models.Model):
subscription = models.ForeignKey(
Subscription,
null=True,
blank=True,
on_delete=models.SET_NULL,
)
message = models.TextField()
url = models.CharField(
blank=True,
null=True,
verbose_name=_(u'link for notification'),
max_length=200,
)
is_viewed = models.BooleanField(default=False)
is_emailed = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
occurrences = models.PositiveIntegerField(
default=1,
verbose_name=_(u'occurrences'),
help_text=_(u'If the same notification was fired multiple times with no intermediate notifications'),
)
@classmethod
def create_notifications(cls, key, **kwargs):
if not key or not isinstance(key, str):
raise KeyError('No notification key (string) specified.')
object_id = kwargs.pop('object_id', None)
objects_created = []
subscriptions = Subscription.objects.filter(Q(notification_type__key=key) |
Q(notification_type__key=None),)
if object_id:
subscriptions = subscriptions.filter(Q(object_id=object_id) |
Q(object_id=None))
subscriptions = subscriptions.prefetch_related('latest', 'settings')
subscriptions = subscriptions.order_by('settings__user')
prev_user = None
for subscription in subscriptions:
# Don't alert the same user several times even though overlapping
# subscriptions occur.
if subscription.settings.user == prev_user:
continue
# Check if it's the same as the previous message
latest = subscription.latest
if latest and (latest.message == kwargs.get('message', None) and latest.url == kwargs.get('url', None) and latest.is_viewed is False):
# Both message and URL are the same, and it hasn't been viewed
# so just increment occurrence count.
latest.occurrences = latest.occurrences + 1
latest.save()
else:
# Insert a new notification
new_obj = cls.objects.create(subscription=subscription, **kwargs)
objects_created.append(new_obj)
subscription.latest = new_obj
subscription.save()
prev_user = subscription.settings.user
return objects_created
def __unicode__(self):
return "%s: %s" % (str(self.subscription.settings.user), self.message)
class Meta:
db_table = settings.DB_TABLE_PREFIX + '_notification'
verbose_name = _(u'notification')
verbose_name_plural = _(u'notifications')
ordering = ('-id',)
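# A minimal usage sketch (the key, message and url are hypothetical; a
# NotificationType row with this key must already have subscribers):
#
#     Notification.create_notifications(
#         str('article_edited'),  # must be a str, see the isinstance check above
#         message='Article 42 was edited',
#         url='/articles/42/',
#         object_id='42',
#     )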
| {
"content_hash": "25b8249cb3ec2beda7366ad4a7cba875",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 146,
"avg_line_length": 32.29447852760736,
"alnum_prop": 0.6054331306990881,
"repo_name": "indexofire/gork",
"id": "7e43d1adb6a27efab2bb7c3934fb0181c733828b",
"size": "5288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gork/contrib/gnotify/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199039"
},
{
"name": "JavaScript",
"bytes": "89817"
},
{
"name": "Python",
"bytes": "1120919"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lab', '0025_auto_20160404_0534'),
]
operations = [
migrations.AddField(
model_name='people',
name='display_priority',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='peopletype',
name='count_in_one_row',
field=models.IntegerField(choices=[(1, '4 in a row'), (2, '6 in a row')], default=2),
),
]
| {
"content_hash": "96ee87296017f342e994dc2205fcbca5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 25.782608695652176,
"alnum_prop": 0.5632377740303541,
"repo_name": "skbly7/serc",
"id": "56bf8907f53361834e69b099aea77b8208871d4d",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/lab/migrations/0026_auto_20160404_0630.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "167250"
},
{
"name": "GCC Machine Description",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "127197"
},
{
"name": "JavaScript",
"bytes": "276437"
},
{
"name": "Python",
"bytes": "659443"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
"""
Provide reverse functions that return fully qualified URLs
"""
from __future__ import unicode_literals
from django.core.urlresolvers import reverse as django_reverse
from django.utils.functional import lazy
def reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
"""
Same as `django.core.urlresolvers.reverse`, but optionally takes a request
and returns a fully qualified URL, using the request to get the base URL.
"""
if format is not None:
kwargs = kwargs or {}
kwargs['format'] = format
url = django_reverse(viewname, args=args, kwargs=kwargs, **extra)
if request:
return request.build_absolute_uri(url)
return url
reverse_lazy = lazy(reverse, str)
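# Example (a sketch; 'user-detail' is a hypothetical URL pattern name):
#
#     url = reverse('user-detail', kwargs={'pk': 1}, request=request)
#     # -> 'http://example.com/users/1/' instead of the relative '/users/1/'
#
# Passing format='json' merges {'format': 'json'} into kwargs, so the URL
# pattern must accept an optional format suffix.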
| {
"content_hash": "3f77a019365d2d631dfa602dc50ab358",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 82,
"avg_line_length": 33.34782608695652,
"alnum_prop": 0.6844850065189049,
"repo_name": "hfercc/mese2014",
"id": "ddfc14edbc2abf338218cef44e6d2b6dcab5566e",
"size": "767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/rest_framework/reverse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103122"
},
{
"name": "JavaScript",
"bytes": "1054910"
},
{
"name": "Python",
"bytes": "1121791"
},
{
"name": "Shell",
"bytes": "2381"
}
],
"symlink_target": ""
} |
"""Default AppConfig for dal_select2."""
from django.apps import AppConfig
# from django.core import checks
class DefaultApp(AppConfig):
"""Default app for dal_select2."""
name = 'dal_select2'
def ready(self):
"""Register select2_submodule_check."""
| {
"content_hash": "730a496a5c646cf47b4a9ec6c49690a7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.6788321167883211,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "83750e8da4f29ba969e72ddc76f6267332891b69",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dal_select2/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
} |
import unittest, netCDF4, tempfile, os
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
cache_size = 10000
cache_nelems = 100
cache_preempt = 0.5
cache_size2 = 20000
cache_nelems2 = 200
cache_preempt2 = 1.0
class RefCountTestCase(unittest.TestCase):
def setUp(self):
nc = netCDF4.Dataset(file_name, mode='w', format='NETCDF4')
d = nc.createDimension('fred', 2000)
# can only change cache size in createVariable (not nelems or preemption)
# this change lasts only as long as file is open.
v = nc.createVariable('frank','f',('fred',),chunk_cache=15000)
size, nelems, preempt = v.get_var_chunk_cache()
assert(size==15000)
self.file=file_name
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing methods for accessing and changing chunk cache"""
        # change cache parameters before opening file.
netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt)
nc = netCDF4.Dataset(self.file, mode='r')
# check to see that chunk cache parameters were changed.
assert(netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt))
# change cache parameters for variable, check
nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2)
assert(nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2))
nc.close()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "7278d9b221514186ddab03e6755e1210",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 97,
"avg_line_length": 38.292682926829265,
"alnum_prop": 0.6535031847133758,
"repo_name": "Unidata/netcdf4-python",
"id": "129ce7128857abe471a7c66d8c3076ab6e4b4faf",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/tst_chunk_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "425"
},
{
"name": "Cython",
"bytes": "334106"
},
{
"name": "Python",
"bytes": "296829"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
} |
import logging as python_logging
import logging.config
log_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
}
}
def setup_logging():
logging.config.dictConfig(log_config)
def getLogger(name):
return python_logging.getLogger(name)
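# Usage sketch (the module name is illustrative):
#
#     setup_logging()
#     log = getLogger('schedule.worker')
#     log.info('worker started')
#     # emits e.g. "2017-01-01 12:00:00,000 [INFO] schedule.worker: worker started"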
| {
"content_hash": "6c859b1835325704b9f84a662625cc6c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 20.228571428571428,
"alnum_prop": 0.4957627118644068,
"repo_name": "LostProperty/schedule",
"id": "ddd3d04e5528f3c1badde2e552f2aa243eda55b4",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "schedule/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12441"
},
{
"name": "Shell",
"bytes": "1972"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import skdata
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SciKit Data'
copyright = u"2016, Ivan Ogasawara"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = skdata.__version__
# The full version, including alpha/beta/rc tags.
release = skdata.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skdatadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'skdata.tex',
u'SciKit Data Documentation',
u'Ivan Ogasawara', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'skdata',
     u'SciKit Data Analysis Documentation',
[u'Ivan Ogasawara'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'skdata',
     u'SciKit Data Analysis Documentation',
u'Ivan Ogasawara',
'skdata',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "3c6f8aca7caaa0859d585f3d664a21f8",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 30.68846153846154,
"alnum_prop": 0.7024689810753227,
"repo_name": "OpenDataScienceLab/skdata",
"id": "66405fc3ffb1fabaf9d26e0fd385b77e573c1044",
"size": "8420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "209055"
},
{
"name": "Makefile",
"bytes": "2288"
},
{
"name": "Python",
"bytes": "34508"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ManagementConfig(AppConfig):
name = 'rdmo.management'
verbose_name = _('Management')
| {
"content_hash": "eb29f35409e6e220b3dfa2ff8cc848fe",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.7526315789473684,
"repo_name": "rdmorganiser/rdmo",
"id": "b2a1f9ab096656bfa69aa1250dd24a21208618de",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdmo/management/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
} |
"""
WSGI config for dreact project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dreact.settings")
application = get_wsgi_application()
| {
"content_hash": "99a79599aac58bedffd4c00426ecb244",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.375,
"alnum_prop": 0.7692307692307693,
"repo_name": "KyoungRan/Django_React_ex",
"id": "b807f82befceda140dec6f16a7788ffdec10b472",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "03-djreact-urang/dreact/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "67258"
},
{
"name": "JavaScript",
"bytes": "1486739"
},
{
"name": "Python",
"bytes": "6392221"
},
{
"name": "Shell",
"bytes": "11272"
}
],
"symlink_target": ""
} |
""" crmngr utility module """
# stdlib
from fnmatch import fnmatchcase
import sys
def truncate(string, max_len=1000):
    """returns str(string), truncated with a marker so the result is at most max_len chars"""
    string = str(string)
    if len(string) > max_len:
        # '...TRUNCATED' is 12 chars, so slice to max_len - 12 to stay within the limit
        return string[:max_len - 12] + '...TRUNCATED'
    return string
def fnlistmatch(value, patterns):
"""match a value against a list of fnmatch patterns.
returns True if any pattern matches.
"""
for pattern in patterns:
if fnmatchcase(value, pattern):
return True
return False
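# e.g. (hypothetical patterns) fnlistmatch('role::webserver', ['role::*', 'profile::*'])
# returns True because the first pattern matches; fnmatchcase is case-sensitive,
# so 'ROLE::webserver' would not match either pattern.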
def query_yes_no(question, default="yes"):
"""Asks a yes/no question via and returns the answer as bool."""
valid = {"yes": True, "y": True, "ye": True, "j": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' (or 'y' or 'n').")
| {
"content_hash": "399af6c4a62189445f12dcf67f616820",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.5761834319526628,
"repo_name": "vshn/crmngr",
"id": "21e9ec7bd789fdebf4dd45359712e95293650438",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crmngr/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "92419"
}
],
"symlink_target": ""
} |
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework, BITCORED_PROC_WAIT_TIMEOUT
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
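        # e.g. on regtest the default relay fee is 0.00001 BTC/kB (an
        # assumption; the actual value comes from getnetworkinfo above),
        # giving feeTolerance == 2 * 0.00001 / 1000 == 0.00000002 BTC.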
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-5, "changeAddress must be a valid bitcore address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.stop_node(2)
self.stop_node(3)
self.nodes[1].encryptwallet("test")
self.bitcored_processes[1].wait(timeout=BITCORED_PROC_WAIT_TIMEOUT)
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_jsonrpc(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
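        # the raw hex above decodes as: version=1, no vins, one zero-value vout
        # whose script is OP_RETURN PUSH(4) "test" (6a0474657374), locktime=0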
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
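        # each funded tx has exactly two outputs: index changepos is the change,
        # so index 1 - changepos is the payment output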
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| {
"content_hash": "a3fb9e2fd202148a29ecfe86da97a58a",
"timestamp": "",
"source": "github",
"line_count": 720,
"max_line_length": 223,
"avg_line_length": 44.583333333333336,
"alnum_prop": 0.5596884735202492,
"repo_name": "LIMXTEC/BitCore",
"id": "9dc52dbe3d3e1b9ed8d2ef161c92574c99b888bc",
"size": "32314",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.15",
"path": "test/functional/fundrawtransaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28452"
},
{
"name": "C",
"bytes": "1354576"
},
{
"name": "C++",
"bytes": "5399655"
},
{
"name": "CSS",
"bytes": "5502"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "193655"
},
{
"name": "Makefile",
"bytes": "114143"
},
{
"name": "Objective-C",
"bytes": "31778"
},
{
"name": "Objective-C++",
"bytes": "7230"
},
{
"name": "Python",
"bytes": "1252960"
},
{
"name": "QMake",
"bytes": "754"
},
{
"name": "Shell",
"bytes": "56783"
}
],
"symlink_target": ""
} |
from .compat import collections_abc
class DirectedGraph(object):
"""A graph structure with directed edges."""
def __init__(self):
self._vertices = set()
self._forwards = {} # <key> -> Set[<key>]
self._backwards = {} # <key> -> Set[<key>]
def __iter__(self):
return iter(self._vertices)
def __len__(self):
return len(self._vertices)
def __contains__(self, key):
return key in self._vertices
def copy(self):
"""Return a shallow copy of this graph."""
other = DirectedGraph()
other._vertices = set(self._vertices)
other._forwards = {k: set(v) for k, v in self._forwards.items()}
other._backwards = {k: set(v) for k, v in self._backwards.items()}
return other
def add(self, key):
"""Add a new vertex to the graph."""
if key in self._vertices:
raise ValueError("vertex exists")
self._vertices.add(key)
self._forwards[key] = set()
self._backwards[key] = set()
def remove(self, key):
"""Remove a vertex from the graph, disconnecting all edges from/to it."""
self._vertices.remove(key)
for f in self._forwards.pop(key):
self._backwards[f].remove(key)
for t in self._backwards.pop(key):
self._forwards[t].remove(key)
def connected(self, f, t):
return f in self._backwards[t] and t in self._forwards[f]
def connect(self, f, t):
"""Connect two existing vertices.
Nothing happens if the vertices are already connected.
"""
if t not in self._vertices:
raise KeyError(t)
self._forwards[f].add(t)
self._backwards[t].add(f)
def iter_edges(self):
for f, children in self._forwards.items():
for t in children:
yield f, t
def iter_children(self, key):
return iter(self._forwards[key])
def iter_parents(self, key):
return iter(self._backwards[key])
class _FactoryIterableView(object):
"""Wrap an iterator factory returned by `find_matches()`.
Calling `iter()` on this class would invoke the underlying iterator
factory, making it a "collection with ordering" that can be iterated
through multiple times, but lacks random access methods presented in
built-in Python sequence types.
"""
def __init__(self, factory):
self._factory = factory
def __bool__(self):
try:
next(self._factory())
except StopIteration:
return False
return True
__nonzero__ = __bool__ # XXX: Python 2.
def __iter__(self):
return self._factory()
def for_preference(self):
"""Provide an candidate iterable for `get_preference()`"""
return self._factory()
def excluding(self, candidate):
"""Create a new `Candidates` instance excluding `candidate`."""
def factory():
return (c for c in self._factory() if c != candidate)
return type(self)(factory)
class _SequenceIterableView(object):
"""Wrap an iterable returned by find_matches().
This is essentially just a proxy to the underlying sequence that provides
the same interface as `_FactoryIterableView`.
"""
def __init__(self, sequence):
self._sequence = sequence
def __bool__(self):
return bool(self._sequence)
__nonzero__ = __bool__ # XXX: Python 2.
def __iter__(self):
return iter(self._sequence)
def __len__(self):
return len(self._sequence)
def for_preference(self):
"""Provide an candidate iterable for `get_preference()`"""
return self._sequence
def excluding(self, candidate):
"""Create a new instance excluding `candidate`."""
return type(self)([c for c in self._sequence if c != candidate])
def build_iter_view(matches):
"""Build an iterable view from the value returned by `find_matches()`."""
if callable(matches):
return _FactoryIterableView(matches)
if not isinstance(matches, collections_abc.Sequence):
matches = list(matches)
return _SequenceIterableView(matches)
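# Illustrative usage sketch (an addition, not part of the original module);
# it exercises DirectedGraph and build_iter_view with stand-in string vertices.
# The relative import above means this only runs when the package is importable.
if __name__ == "__main__":
    g = DirectedGraph()
    g.add("a")
    g.add("b")
    g.connect("a", "b")
    assert g.connected("a", "b")
    assert list(g.iter_edges()) == [("a", "b")]
    # a plain list is wrapped in a sequence view; excluding() filters it
    view = build_iter_view(["x", "y"])
    assert list(view.excluding("x")) == ["y"]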
| {
"content_hash": "882260eaab6ef920f5d6046033f2c278",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 81,
"avg_line_length": 29.356643356643357,
"alnum_prop": 0.5971891376846117,
"repo_name": "kennethreitz/pipenv",
"id": "479aad5dc176370dedefe63d0cc77a9168b1f7ec",
"size": "4198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/vendor/resolvelib/structs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
} |
__author__ = 'jmartinez'
__version__ = '1.0.2'
import os
import click
import requests
import requirements
import shutil
import subprocess
import sys
import tarfile
import tempfile
import zipfile
from datetime import date
from glob import glob as ls
from jinja2 import Environment, FileSystemLoader
from uuid import uuid4
from pkg_resources import RequirementParseError
def find_dependencies(directory):
cwd = os.getcwd()
os.chdir(directory)
dependencies = set()
single_version_externally_managed = True
_requirements = ['{}/requirements.txt'.format(directory)]
try:
subprocess.check_output([sys.executable, 'setup.py', 'egg_info'])
_requirements.extend(ls('{}/*.egg-info/requires.txt'.format(directory)))
except subprocess.CalledProcessError:
click.echo('egg_info not supported by {}'.format(directory))
single_version_externally_managed = False
try:
for _requirement in _requirements:
if not os.path.isfile(_requirement):
continue
click.echo('Reading requirements file {}'.format(_requirement))
with open(_requirement, 'r') as f:
try:
for req in requirements.parse(f):
if req.specs:
dependencies.add('{} {}'.format(req.name,
''.join([ '{} {}'.format(o, v)
for o,v in req.specs])))
else:
dependencies.add(req.name)
except RequirementParseError:
pass
finally:
os.chdir(cwd)
shutil.rmtree(directory)
return dependencies, single_version_externally_managed
def extract_files(_file):
def extract_tar(_file, tmpdir):
tar = tarfile.open(_file)
os.makedirs(tmpdir)
os.chdir(tmpdir)
tar.extractall()
def extract_zip(_file, tmpdir):
with zipfile.ZipFile(_file) as _zip:
os.makedirs(tmpdir)
os.chdir(tmpdir)
_zip.extractall()
name, extension = os.path.splitext(_file)
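    # splitext only strips the final suffix, so rebuild compound extensions
    # such as .tar.gz / .tar.bz2 before dispatching on them below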
if extension in ['.gz', '.bz2']:
_, _extension = os.path.splitext(name)
extension = '{}{}'.format(_extension, extension)
name = name.replace(_extension, '')
tmpdir = "{}/{}".format(tempfile.gettempdir(), uuid4())
cwd = os.getcwd()
if extension in ['.tar.gz', '.tgz', '.tar.bz2']:
extract_tar(_file, tmpdir)
elif extension == '.zip':
extract_zip(_file, tmpdir)
else:
raise RuntimeError('I dunno how to handle {} files'.format(extension))
os.chdir(cwd)
return os.path.join(tmpdir, name)
def create(meta):
os.mkdir(meta['package_name'])
click.echo('Downloading {}'.format(meta['source']))
cwd = os.getcwd()
os.chdir(meta['package_name'])
_file = download_file(meta['source'])
click.echo('Extracting {}'.format(_file))
extracted = extract_files(_file)
click.echo('Searching for extra dependencies on {}'.format(extracted))
extra_deps, single_version_externally_managed = find_dependencies(extracted)
if not single_version_externally_managed:
meta.update({'single_version_externally_managed': False})
meta['requires'].extend(extra_deps)
os.chdir(cwd)
env = Environment(loader=FileSystemLoader('{}/templates'.format(
os.path.dirname(os.path.abspath(__file__)))))
spec = env.get_template('python-spec.tmpl')
rendered = spec.render(meta)
with open('{name}/{name}.spec'.format(
name=meta['package_name']), 'w') as spec:
spec.write(rendered)
def download_file(url):
local_filename = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return local_filename
def build_metadata(pypi):
    click.echo('Building metadata for the package generation...')
meta = {'name': pypi['info']['name']}
meta.update({'source':
next((url['url'] for url in pypi['urls']
if 'source' in url['python_version']
and ('tar.gz' in url['url'] or
'zip' in url['url'] or
'tar.bz2' in url['url'])), '')
})
if meta['source'] == '':
click.echo("Cannot determine download URL... "
"Check Pypi: https://pypi.python.org/pypi/{}/"
.format(meta['name']))
sys.exit(3)
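    # infer the version from the sdist filename: take the part after the last
    # '-' and strip the archive suffix (assumes name-version.<ext> naming)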
meta.update(
{'version': (meta['source'].split('-')[-1].
replace('.tar.gz', '').
replace('.zip', '').
replace('.tar.bz2', ''))}
)
meta.update({'url': pypi['info']['home_page']})
meta.update({'summary': pypi['info']['summary']})
meta.update({'license': pypi['info']['license']})
meta.update({'requires': pypi['info'].get('requires', [])})
meta.update({'author': pypi['info']['author']})
meta.update({'description': pypi['info'].get(
'description', meta['summary'])}
)
meta.update({'me': os.getenv('PACKAGER_NAME')})
meta.update({'email': os.getenv('PACKAGER_EMAIL')})
meta.update({'date': date.today().strftime("%a %b %d %Y")})
meta.update({'single_version_externally_managed': True})
return meta
def read_pypi(name, version=None):
click.echo('Trying to fetch pypi information about {}...'.format(name))
if not version:
url = "https://pypi.python.org/pypi/{}/json".format(name)
else:
url = "https://pypi.python.org/pypi/{}/{}/json".format(name, version)
result = requests.get(url)
if result.status_code == 200:
pypi = result.json()
else:
click.echo("Package or release not found on {}".format(url))
sys.exit(2)
return build_metadata(pypi)
def run(name, python_prefix, recursive):
meta = read_pypi(name)
package_name = '{}-{}'.format(python_prefix, meta['name'])
if os.path.isdir(package_name):
click.echo('{} already exists, skipping...'.format(package_name))
return
meta.update({'python_prefix': python_prefix})
meta.update({'package_name': package_name})
create(meta)
if recursive:
for dep in meta['requires']:
            ## Todo:
            # Refactor here to actually enforce the version
dep = dep.replace('>', ' ').replace('<', ' ').\
replace('=', '/').split()[0]
run(dep, python_prefix, recursive)
@click.command()
@click.argument('name')
@click.option('--python_prefix', '-p', default='python35')
@click.option('--recursive', '-r', is_flag=True)
def cli(name, python_prefix, recursive):
if os.path.isdir(name):
click.echo("Package {} alreayd exists".format(name))
sys.exit(1)
run(name, python_prefix, recursive)
sys.exit(0)
if __name__ == '__main__':
cli()
| {
"content_hash": "f656f954f4992be1db6233ec5d7869c5",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 32.985849056603776,
"alnum_prop": 0.5792935792935793,
"repo_name": "ncode/spec-from-pypi",
"id": "fa0564a1a938d102782d703389505c6d48081ed4",
"size": "6993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specfrompypi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7641"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, request
from flask_flatpages import FlatPages,pygments_style_defs
from flask_frozen import Freezer
import sys
BLOG_URL = 'http://blog.zeromake.com'
DEBUG = True
FLATPAGES_AUTO_RELOAD = DEBUG
FLATPAGES_EXTENSION = '.md'
FLATPAGES_MARKDOWN_EXTENSIONS = ['markdown.extensions.extra','markdown.extensions.codehilite','pymdownx.github', 'markdown.extensions.toc', 'markdown.extensions.tables']
app = Flask(__name__)
app.config.from_object(__name__)
flatpages = FlatPages(app)
freezer = Freezer(app)
@app.route('/')
def index():
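    # only flatpages carrying a 'date' meta field are listed, newest first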
pages = [p for p in flatpages if 'date' in p.meta]
# print(pages[0].meta)
pages = sorted(pages, key=lambda page: page.meta['date'], reverse = True)
return render_template('index.html', pages=pages)
@app.route('/pages/<path:path>/')
def page(path):
page = flatpages.get_or_404(path)
# print(dir(page))
    return render_template('page.html', page=page, root_url=BLOG_URL)
@app.template_filter('md5')
def str_to_md5(s):
import hashlib
m = hashlib.md5()
s = str(s)
m.update(s.encode('utf8'))
return m.hexdigest()
@app.route('/css/pygments.css')
def pygments_css():
return pygments_style_defs('xcode'), 200, {'Content-Type': 'text/css'}
# ['colorful', 'fruity', 'emacs', 'pastie', 'default', 'rrt', 'igor',
# 'bw', 'perldoc', 'paraiso-light', 'tango', 'monokai', 'vs', 'xcode', 'trac',
# 'borland', 'algol_nu', 'paraiso-dark', 'algol', 'autumn', 'manni', 'lovelace',
# 'native', 'murphy', 'vim', 'friendly']
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == "make":
freezer.freeze()
else:
app.run('0.0.0.0',port=8000)
| {
"content_hash": "17e26e70ce30827c0bded12b522c9e4f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 169,
"avg_line_length": 34.82,
"alnum_prop": 0.6318207926479035,
"repo_name": "zeromake/zero_blog",
"id": "159f20604bf7d3506560434af8f9176438935c24",
"size": "1785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13131"
},
{
"name": "HTML",
"bytes": "3224"
},
{
"name": "Nginx",
"bytes": "799"
},
{
"name": "Python",
"bytes": "1785"
},
{
"name": "Shell",
"bytes": "590"
}
],
"symlink_target": ""
} |
"""
A variety of app callables used to test WEB-INF interactions.
"""
def test_import_from_lib_python(environ, start_response):
from test_lib import some_libs
writer = start_response("200 OK", [])
return ["Factorial 10 is %d" % some_libs.factorial(10)]
def test_import_from_zip_file(environ, start_response):
from module_in_zipfile import lib_function
writer = start_response("200 OK", [])
return [lib_function()]
def test_execed_import_in_pth(environ, start_response):
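    # presumably a line in a .pth file exec's "import math" at startup, so
    # finding it in sys.modules proves the exec'd import ran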
writer = start_response("200 OK", [])
import sys
    if 'math' in sys.modules:
return ["pass"]
else:
return ["fail"]
| {
"content_hash": "7f8342b5a4197cffe2251b1b267ff363",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 30.545454545454547,
"alnum_prop": 0.6369047619047619,
"repo_name": "pjenvey/modjy-pjenvey",
"id": "4dcc952e6e94b87e230e00192e0151cf35688d03",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_apps_dir/web_inf_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "48708"
},
{
"name": "Python",
"bytes": "48804"
}
],
"symlink_target": ""
} |
"""Simple command-line example for Moderator.
Command-line application that exercises the Google Moderator API.
Usage:
$ python moderator.py
You can also get help on all the command-line flags the program understands
by running:
$ python moderator.py --help
To get detailed log output run:
$ python moderator.py --logging_level=DEBUG
"""
__author__ = '[email protected] (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
FLOW = OAuth2WebServerFlow(
client_id='433807057907.apps.googleusercontent.com',
client_secret='jigtZpMApkRxncxikFpR+SFg',
scope='https://www.googleapis.com/auth/moderator',
user_agent='moderator-cmdline-sample/1.0')
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
    print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('moderator.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("moderator", "v1", http=http)
try:
# Create a new Moderator series.
series_body = {
"description": "Share and rank tips for eating healthy and cheap!",
"name": "Eating Healthy & Cheap",
"videoSubmissionAllowed": False
}
series = service.series().insert(body=series_body).execute()
print "Created a new series"
# Create a new Moderator topic in that series.
topic_body = {
"description": "Share your ideas on eating healthy!",
"name": "Ideas",
"presenter": "liz"
}
topic = service.topics().insert(seriesId=series['id']['seriesId'],
body=topic_body).execute()
print "Created a new topic"
# Create a new Submission in that topic.
submission_body = {
"attachmentUrl": "http://www.youtube.com/watch?v=1a1wyc5Xxpg",
"attribution": {
"displayName": "Bashan",
"location": "Bainbridge Island, WA"
},
"text": "Charlie Ayers @ Google"
}
submission = service.submissions().insert(seriesId=topic['id']['seriesId'],
topicId=topic['id']['topicId'], body=submission_body).execute()
print "Inserted a new submisson on the topic"
# Vote on that newly added Submission.
vote_body = {
"vote": "PLUS"
}
service.votes().insert(seriesId=topic['id']['seriesId'],
submissionId=submission['id']['submissionId'],
body=vote_body)
print "Voted on the submission"
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "3fe2609512ca7fad02cd012c04d7f11e",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 33.796992481203006,
"alnum_prop": 0.6843159065628476,
"repo_name": "MapofLife/MOL",
"id": "b7da058f2f810e296d1a50015e861e9436749847",
"size": "5123",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "earthengine/google-api-python-client/samples/moderator/moderator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83354"
},
{
"name": "CSS",
"bytes": "245523"
},
{
"name": "JavaScript",
"bytes": "1302309"
},
{
"name": "PHP",
"bytes": "613"
},
{
"name": "Perl",
"bytes": "2100"
},
{
"name": "Python",
"bytes": "1953387"
},
{
"name": "R",
"bytes": "52"
},
{
"name": "SQL",
"bytes": "21299"
},
{
"name": "Shell",
"bytes": "3146"
}
],
"symlink_target": ""
} |
import glob
def main():
file_list = glob.glob("include/flatbuffers/*.h", recursive=False)
for filename in file_list:
print(filename)
with open(filename) as infile:
lines = infile.readlines()
begin_index = index_any(lines, [
"namespace flatbuffers {\n",
"namespace flexbuffers {\n",
"namespace reflection {\n",
])
assert begin_index >= 0
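        # list.insert pushes the earlier insert down one slot, so the file ends
        # up opening with "namespace Effekseer {" followed by "namespace Data {"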
lines.insert(begin_index, "namespace Data {\n")
lines.insert(begin_index, "namespace Effekseer {\n")
end_index = index_any(lines, [
"} // namespace flatbuffers\n",
"} // namespace flexbuffers\n",
"} // namespace reflection\n",
])
assert end_index >= 0
lines.insert(end_index + 1, "} // namespace Data\n")
lines.insert(end_index + 2, "} // namespace Effekseer\n")
with open(filename, mode="w", newline="\n") as outfile:
outfile.writelines(lines)
def index_any(lines: list, key_texts: list) -> int:
for key in key_texts:
if key in lines:
return lines.index(key)
return -1
if __name__ == "__main__":
main()
| {
"content_hash": "9ce8129571b6c15b07d30b19ed6384e4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 32.41025641025641,
"alnum_prop": 0.5174050632911392,
"repo_name": "effekseer/Effekseer",
"id": "1204aaabbf40393c92e57dfe03e8f20d859e1a11",
"size": "1264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dev/Cpp/3rdParty/flatbuffers/ReplaceNamespace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "31004"
},
{
"name": "C",
"bytes": "2906155"
},
{
"name": "C#",
"bytes": "1833372"
},
{
"name": "C++",
"bytes": "4042188"
},
{
"name": "CMake",
"bytes": "101152"
},
{
"name": "Dockerfile",
"bytes": "1075"
},
{
"name": "GLSL",
"bytes": "215212"
},
{
"name": "HLSL",
"bytes": "980762"
},
{
"name": "HTML",
"bytes": "2753"
},
{
"name": "JavaScript",
"bytes": "6147"
},
{
"name": "Objective-C",
"bytes": "17847"
},
{
"name": "Objective-C++",
"bytes": "13682"
},
{
"name": "Python",
"bytes": "47026"
},
{
"name": "SWIG",
"bytes": "13524"
},
{
"name": "Shell",
"bytes": "11492"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
LONG_DESCRIPTION = """
Mustache template engine for Django 1.8 and newer, with support for Django context processors.
"""
def readme():
try:
readme = open("README.md")
except IOError:
return LONG_DESCRIPTION
return readme.read()
setup(
name='django-mustache',
use_scm_version=True,
author='S. Andrew Sheppard',
author_email='[email protected]',
url='https://github.com/wq/django-mustache',
license='MIT',
packages=['django_mustache'],
description=LONG_DESCRIPTION.strip(),
long_description=readme(),
long_description_content_type='text/markdown',
install_requires=[
'Django>=1.8',
'pystache',
],
classifiers=[
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Topic :: Text Processing :: Markup :: HTML',
],
test_suite='tests',
tests_require=[
'djangorestframework'
],
setup_requires=[
'setuptools_scm',
],
)
| {
"content_hash": "e6dffcbe59ad5f73ba8febd4c7b8751f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 94,
"avg_line_length": 28.80952380952381,
"alnum_prop": 0.5757575757575758,
"repo_name": "wq/django-mustache",
"id": "28fa930a6c1d84533613e15ba81f56df02fda58d",
"size": "1815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1275"
},
{
"name": "Python",
"bytes": "11693"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
from contextlib import redirect_stdout
from io import StringIO
from pathlib import Path
import lief
from utils import get_sample
lief.logging.set_level(lief.logging.LOGGING_LEVEL.INFO)
def test_change_note(tmp_path: Path):
etterlog = lief.parse(get_sample('ELF/ELF64_x86-64_binary_etterlog.bin'))
build_id = etterlog[lief.ELF.NOTE_TYPES.BUILD_ID]
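    # replace the build-id payload with a 500-byte deterministic pattern
    # (values wrap at 0xFF)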
new_desc = [i & 0xFF for i in range(500)]
build_id.description = new_desc
output = tmp_path / "etterlog"
etterlog.write(output.as_posix())
etterlog_updated = lief.parse(output.as_posix())
assert etterlog[lief.ELF.NOTE_TYPES.BUILD_ID] == etterlog_updated[lief.ELF.NOTE_TYPES.BUILD_ID]
def test_remove_note(tmp_path: Path):
etterlog = lief.parse(get_sample('ELF/ELF64_x86-64_binary_etterlog.bin'))
output = tmp_path / "etterlog"
print(output)
build_id = etterlog[lief.ELF.NOTE_TYPES.BUILD_ID]
assert build_id is not None
etterlog -= build_id
etterlog.write(output.as_posix())
etterlog_updated = lief.parse(output.as_posix())
assert lief.ELF.NOTE_TYPES.BUILD_ID not in etterlog_updated
def test_add_note(tmp_path: Path):
etterlog = lief.parse(get_sample('ELF/ELF64_x86-64_binary_etterlog.bin'))
output = tmp_path / "etterlog"
note = lief.ELF.Note("Foo", lief.ELF.NOTE_TYPES.GOLD_VERSION, [123])
etterlog += note
etterlog.write(output.as_posix())
etterlog_updated = lief.parse(output.as_posix())
assert lief.ELF.NOTE_TYPES.GOLD_VERSION in etterlog_updated
# The string printed is largely irrelevant, but running print ensures no regression occurs in a previous Note::dump segfault
# https://github.com/lief-project/LIEF/issues/300
with StringIO() as temp_stdout:
with redirect_stdout(temp_stdout):
print(etterlog)
def test_android_note(tmp_path: Path):
ndkr16 = lief.parse(get_sample('ELF/ELF64_AArch64_piebinary_ndkr16.bin'))
output = tmp_path / "etterlog"
note = ndkr16.get(lief.ELF.NOTE_TYPES.ABI_TAG)
details = note.details
assert details.sdk_version == 21
assert details.ndk_version[:4] == "r16b"
assert details.ndk_build_number[:7] == "4479499"
details.sdk_version = 15
details.ndk_version = "r15c"
details.ndk_build_number = "123456"
note = ndkr16.get(lief.ELF.NOTE_TYPES.ABI_TAG).details
assert note.sdk_version == 15
assert note.ndk_version[:4] == "r15c"
assert note.ndk_build_number[:6] == "123456"
ndkr16.write(output.as_posix())
ndkr15 = lief.parse(output.as_posix())
note = ndkr15.get(lief.ELF.NOTE_TYPES.ABI_TAG).details
assert note.sdk_version == 15
assert note.ndk_version[:4] == "r15c"
assert note.ndk_build_number[:6] == "123456"
def test_issue_816(tmp_path: Path):
elf = lief.parse(get_sample('ELF/elf_notes_issue_816.bin'))
output = tmp_path / "elf_notes_issue_816"
assert len(elf.notes) == 40
elf.write(output.as_posix())
new = lief.parse(output.as_posix())
assert len(new.notes) == 40
| {
"content_hash": "34132a6ef677cde073daa7c80d488618",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 128,
"avg_line_length": 31.375,
"alnum_prop": 0.6835989375830013,
"repo_name": "lief-project/LIEF",
"id": "8f00eef236f1fcafb1498e23b74c3ed10e3d9a94",
"size": "3034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/elf/test_notes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "115380"
},
{
"name": "C++",
"bytes": "5516502"
},
{
"name": "CMake",
"bytes": "185657"
},
{
"name": "Dockerfile",
"bytes": "994"
},
{
"name": "Objective-C",
"bytes": "736"
},
{
"name": "Python",
"bytes": "305524"
},
{
"name": "Shell",
"bytes": "21907"
},
{
"name": "SourcePawn",
"bytes": "130615"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0007_contact_recruiter'),
]
operations = [
migrations.AddField(
model_name='contact',
name='phone_number_quick_copy',
field=models.CharField(blank=True, db_index=True, max_length=255, null=True),
),
]
| {
"content_hash": "6fbceb419eb9223bb283a89c48fe12a8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 89,
"avg_line_length": 24.5,
"alnum_prop": 0.5994897959183674,
"repo_name": "RobSpectre/garfield",
"id": "d4c74fb282069ae5a637abda5bb7211e05a99381",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "garfield/contacts/migrations/0008_contact_phone_number_quick_copy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7324"
},
{
"name": "Python",
"bytes": "339908"
}
],
"symlink_target": ""
} |
import unittest
import omniture
import os
from datetime import date
import pandas
import datetime
import requests_mock
creds = {}
creds['username'] = os.environ['OMNITURE_USERNAME']
creds['secret'] = os.environ['OMNITURE_SECRET']
test_report_suite = 'omniture.api-gateway'
class ReportTest(unittest.TestCase):
def setUp(self):
self.analytics = omniture.authenticate(creds['username'], creds['secret'])
def tearDown(self):
self.analytics = None
def test_basic_report(self):
""" Make sure a basic report can be run
"""
response = self.analytics.suites[test_report_suite].report.run()
self.assertIsInstance(response.data, list, "Something went wrong with the report")
#Timing Info
self.assertIsInstance(response.timing['queue'], float, "waitSeconds info is missing")
self.assertIsInstance(response.timing['execution'], float, "Execution info is missing")
#Raw Reports
self.assertIsInstance(response.report, dict, "The raw report hasn't been populated")
#Check Metrics
self.assertIsInstance(response.metrics, list, "The metrics weren't populated")
self.assertEqual(response.metrics[0].id,"pageviews", "Wrong Metric")
#Check Elements
self.assertIsInstance(response.elements, list, "The elements is the wrong type")
self.assertEqual(response.elements[0].id,"datetime", "There are elements when there shouldn't be")
#check time range
checkdate = date.today().strftime("%a. %e %h. %Y")
self.assertEqual(response.period, checkdate)
        #check segments
self.assertIsNone(response.segments)
#Check Data
self.assertIsInstance(response.data, list, "Data isn't getting populated right")
self.assertIsInstance(response.data[0] , dict, "The data isn't getting into the dict")
self.assertIsInstance(response.data[0]['datetime'], datetime.datetime, "The date isn't getting populated in the data")
self.assertIsInstance(response.data[0]['pageviews'], int, "The pageviews aren't getting populated in the data")
def test_ranked_report(self):
""" Make sure the ranked report is being processed
"""
ranked = self.analytics.suites[test_report_suite].report.element("page").metric("pageviews").metric("visits")
queue = []
queue.append(ranked)
response = omniture.sync(queue)
for report in response:
#Check Data
self.assertIsInstance(report.data, list, "Data isn't getting populated right")
self.assertIsInstance(report.data[0] , dict, "The data isn't getting into the dict")
self.assertIsInstance(report.data[0]['page'], str, "The page isn't getting populated in the data")
self.assertIsInstance(report.data[0]['pageviews'], int, "The pageviews aren't getting populated in the data")
self.assertIsInstance(report.data[0]['visits'], int, "The visits aren't getting populated in the data")
def test_trended_report(self):
"""Make sure the trended reports are being processed corretly"""
trended = self.analytics.suites[test_report_suite].report.element("page").metric("pageviews").granularity('hour').run()
self.assertIsInstance(trended.data, list, "Treneded Reports don't work")
self.assertIsInstance(trended.data[0] , dict, "The data isn't getting into the dict")
self.assertIsInstance(trended.data[0]['datetime'], datetime.datetime, "The date isn't getting propulated correctly")
self.assertIsInstance(trended.data[0]['page'], str, "The page isn't getting populated in the data")
self.assertIsInstance(trended.data[0]['pageviews'], int, "The pageviews aren't getting populated in the data")
def test_dataframe(self):
"""Make sure the pandas data frame object can be generated"""
trended = self.analytics.suites[test_report_suite].report.element("page").metric("pageviews").granularity('hour').run()
self.assertIsInstance(trended.dataframe, pandas.DataFrame, "Data Frame Object doesn't work")
def test_segments_id(self):
""" Make sure segments can be added """
suite = self.analytics.suites[test_report_suite]
report = suite.report.filter(suite.segments[0]).run()
self.assertEqual(report.segments[0], suite.segments[0], "The segments don't match")
@unittest.skip("skip inline segments because checked in Query")
def test_inline_segment(self):
""" Make sure inline segments work """
#pretty poor check but need to make it work with any report suite
report = self.analytics.suites[0].report.element('page').metric('pageviews').metric('visits').filter(element='browser', selected=["::unspecified::"]).run()
self.assertIsInstance(report.data, list, "inline segments don't work")
@requests_mock.mock()
def test_multiple_classifications(self, m):
"""Makes sure the report can parse multiple classifications correctly since they have the same element ID"""
#load sample file
path = os.path.dirname(__file__)
with open(path+'/mock_objects/multi_classifications.json') as data_file:
json_response = data_file.read()
with open(path+'/mock_objects/Report.Queue.json') as queue_file:
ReportQueue = queue_file.read()
#setup mock object
m.post('https://api.omniture.com/admin/1.4/rest/?method=Company.GetReportSuites', text=json_response)
m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Get', text=json_response)
m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Queue', text=ReportQueue)
report = self.analytics.suites[0].report\
.element('evar2',classification="Classification 1", disable_validation=True)\
.element('evar2',classification="Classification 2", disable_validation=True)\
report = report.run()
self.assertTrue('evar2 | Classification 1' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
self.assertTrue('evar2 | Classification 2' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
@requests_mock.mock()
def test_mixed_classifications(self, m):
"""Makes sure the report can parse reports with classifications and
regular dimensionscorrectly since they have the same element ID"""
#load sample file
path = os.path.dirname(__file__)
with open(path+'/mock_objects/mixed_classifications.json') as data_file:
json_response = data_file.read()
with open(path+'/mock_objects/Report.Queue.json') as queue_file:
ReportQueue = queue_file.read()
#setup mock object
m.post('https://api.omniture.com/admin/1.4/rest/?method=Company.GetReportSuites', text=json_response)
m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Get', text=json_response)
m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Queue', text=ReportQueue)
report = self.analytics.suites[0].report\
.element('evar3',classification="Classification 1", disable_validation=True)\
            .element('evar5', disable_validation=True)
report = report.run()
self.assertTrue('evar3 | Classification 1' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
self.assertTrue('evar5' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "a1f60414a340b5fa559bc86afddaebf5",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 163,
"avg_line_length": 48.65605095541401,
"alnum_prop": 0.6729938473622202,
"repo_name": "vmAggies/omniture-master",
"id": "27d5aac3bbb9245874eadcbab7c58334b66ce874",
"size": "7658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testReports.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115224"
},
{
"name": "Shell",
"bytes": "123"
}
],
"symlink_target": ""
} |
import os
from io import open as io_open
__all__ = ["__version__"]
# major, minor, patch, -extra
version_info = 4, 23, 4
# Nice string for the version
__version__ = '.'.join(map(str, version_info))
# auto -extra based on commit hash (if not tagged as release)
scriptdir = os.path.dirname(__file__)
gitdir = os.path.abspath(os.path.join(scriptdir, "..", ".git"))
if os.path.isdir(gitdir): # pragma: nocover
extra = None
# Open config file to check if we are in tqdm project
with io_open(os.path.join(gitdir, "config"), 'r') as fh_config:
if 'tqdm' in fh_config.read():
# Open the HEAD file
with io_open(os.path.join(gitdir, "HEAD"), 'r') as fh_head:
extra = fh_head.readline().strip()
# in a branch => HEAD points to file containing last commit
if 'ref:' in extra:
# reference file path
ref_file = extra[5:]
branch_name = ref_file.rsplit('/', 1)[-1]
ref_file_path = os.path.abspath(os.path.join(gitdir, ref_file))
# check that we are in git folder
# (by stripping the git folder from the ref file path)
if os.path.relpath(
ref_file_path, gitdir).replace('\\', '/') != ref_file:
# out of git folder
extra = None
else:
# open the ref file
with io_open(ref_file_path, 'r') as fh_branch:
commit_hash = fh_branch.readline().strip()
extra = commit_hash[:8]
if branch_name != "master":
extra += '.' + branch_name
# detached HEAD mode, already have commit hash
else:
extra = extra[:8]
# Append commit hash (and branch) to version string if not tagged
if extra is not None:
try:
with io_open(os.path.join(gitdir, "refs", "tags",
'v' + __version__)) as fdv:
if fdv.readline().strip()[:8] != extra[:8]:
__version__ += '-' + extra
except Exception as e:
if "No such file" not in str(e):
raise
| {
"content_hash": "a692480a2935d79e3dde82c7b9efe15d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 39.36206896551724,
"alnum_prop": 0.49846692947875604,
"repo_name": "ryfeus/lambda-packs",
"id": "0292d1e2e28523a187bcfc51ae198e54895e9ec5",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Spacy/source2.7/tqdm/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from timeit import timeit
from memory_profiler import profile
from elementpath.regex import UNICODE_CATEGORIES, UnicodeSubset
def run_timeit(stmt='pass', setup='pass', number=1000):
seconds = timeit(stmt, setup=setup, number=number)
print("{}: {}s".format(stmt, seconds))
@profile
def unicode_subset_objects():
return [UnicodeSubset('\U00020000-\U0002A6D6') for _ in range(10000)]
if __name__ == '__main__':
print('*' * 62)
print("*** Memory and timing profile of UnicodeSubset class ***")
print("***" + ' ' * 56 + "***")
print("*** Note: save ~28% of memory with __slots__ (from v2.2.3) ***")
print('*' * 62)
print()
unicode_subset_objects()
subset = UNICODE_CATEGORIES['C']
SETUP = 'from __main__ import subset'
NUMBER = 10000
run_timeit('1328 in subset # True ', SETUP, NUMBER)
run_timeit('1329 in subset # False', SETUP, NUMBER)
run_timeit('72165 in subset # True ', SETUP, NUMBER)
run_timeit('72872 in subset # False', SETUP, NUMBER)
| {
"content_hash": "9855258b070ba5a95dba292120f67569",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 30.264705882352942,
"alnum_prop": 0.6248785228377065,
"repo_name": "sissaschool/elementpath",
"id": "4116700f3d47d94c96d6690d90ba2a4df40a5bc8",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiling/profile_unicode_subsets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1405428"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import Context
_mail_context_processors = None
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses MailContext.
def get_mail_processors():
global _mail_context_processors
if _mail_context_processors is None:
processors = []
for path in getattr(settings, 'CAMPAIGN_CONTEXT_PROCESSORS', ()):
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = __import__(module, {}, {}, [attr])
except ImportError, e:
raise ImproperlyConfigured('Error importing campaign processor module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable campaign processor' % (module, attr))
processors.append(func)
_mail_context_processors = tuple(processors)
return _mail_context_processors
class MailContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CAMPAIGN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, subscriber, dict=None, processors=None):
Context.__init__(self, dict)
if processors is None:
processors = ()
else:
processors = tuple(processors)
for processor in get_mail_processors() + processors:
self.update(processor(subscriber))
self.update({'recipient': subscriber})
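# Illustrative usage, not part of the original module: assuming a project
# defines CAMPAIGN_CONTEXT_PROCESSORS and a hypothetical processor like the
# one below, MailContext merges each processor's dict with the subscriber.
#
#     def unsubscribe_link(subscriber):
#         return {'unsubscribe_url': '/unsubscribe/%s/' % subscriber.pk}
#
#     context = MailContext(subscriber, processors=[unsubscribe_link])
#     # context['recipient'] is the subscriber; 'unsubscribe_url' is now set.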
| {
"content_hash": "f7617d23906186032a3a53a697132e61",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 125,
"avg_line_length": 41.883720930232556,
"alnum_prop": 0.6413103831204886,
"repo_name": "philippbosch/django-campaign",
"id": "e821301297b9788f7f24738fd4193c399e4e9480",
"size": "1844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campaign/context.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25780"
}
],
"symlink_target": ""
} |
class Subject(object):
def __init__(self):
self._observers = []
def attach(self, observer):
        if observer not in self._observers:
self._observers.append(observer)
def detach(self, observer):
try:
self._observers.remove(observer)
except ValueError:
pass
def notify(self, modifier=None):
for observer in self._observers:
if modifier != observer:
observer.update(self)
# Example usage
class Data(Subject):
def __init__(self, name=''):
Subject.__init__(self)
self.name = name
self._data = 0
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
self.notify()
class HexViewer:
def update(self, subject):
print('HexViewer: Subject %s has data 0x%x' %
(subject.name, subject.data))
class DecimalViewer:
def update(self, subject):
print('DecimalViewer: Subject %s has data %d' %
(subject.name, subject.data))
# Example usage...
def main():
data1 = Data('Data 1')
data2 = Data('Data 2')
view1 = DecimalViewer()
view2 = HexViewer()
data1.attach(view1)
data1.attach(view2)
data2.attach(view2)
data2.attach(view1)
print("Setting Data 1 = 10")
data1.data = 10
print("Setting Data 2 = 15")
data2.data = 15
print("Setting Data 1 = 3")
data1.data = 3
print("Setting Data 2 = 5")
data2.data = 5
print("Detach HexViewer from data1 and data2.")
data1.detach(view2)
data2.detach(view2)
print("Setting Data 1 = 10")
data1.data = 10
print("Setting Data 2 = 15")
data2.data = 15
if __name__ == '__main__':
main()
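# A note on notify(modifier=...), illustrated here rather than taken from the
# original file: passing the object that made the change lets that observer
# skip its own update, e.g. (hypothetical):
#
#     data1.notify(modifier=view1)  # view2 is updated, view1 is not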
| {
"content_hash": "e20b9f5da73e52d26eb71850504fb32a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 55,
"avg_line_length": 22.59493670886076,
"alnum_prop": 0.5719887955182072,
"repo_name": "JiangKlijna/design-pattern",
"id": "400d80c4de6676d43f6508851519c15e0f095eb2",
"size": "1805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ObserverPattern/Observer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "27199"
},
{
"name": "Python",
"bytes": "25097"
}
],
"symlink_target": ""
} |
'''
@author: Sergio Rojas
@contact: [email protected]
--------------------------
Content licensed under
Atribución-NoComercial-CompartirIgual 3.0 Venezuela (CC BY-NC-SA 3.0 VE)
http://creativecommons.org/licenses/by-nc-sa/3.0/ve/
Created on April 23, 2016
'''
def myinput(par):
"""
    This function reads input from the keyboard
    without worrying about whether we are running
    Python 2 or Python 3.
"""
import sys
prueba = True
while prueba:
if sys.version[0]=="2":
a = raw_input('\t Escriba la respuesta y presione ENTER/RETURN:--> : ')
elif sys.version[0]=="3":
a = input('\t Escriba la respuesta y presione ENTER/RETURN:--> : ')
if par == 'int':
try:
prueba = False
a = int(a)
            except ValueError:
prueba = True
print("NO es correcta la entrada '" + str(a) + "'")
print("Por favor ingrese un numero entero: ")
elif par == 'float':
try:
prueba = False
a = float(a)
            except ValueError:
prueba = True
print("NO es correcta la entrada '" + str(a) + "'")
print("Por favor ingrese un numero real usando punto: ")
else:
prueba = False
return a
print('\n Ingresa tu nombre: ')
nombre = myinput('str')
print('\n Ingresa tu edad, {0:s}:'.format(nombre))
edad = myinput('int')
print('\n Ingresa tu estatura en metros, {0:s}:'.format(nombre))
altura = myinput('float')
print('\n Ingresa tu peso en kilogramos, {0:s}:'.format(nombre))
peso = myinput('float')
str1 = '\n {0:s} de {1:d} a~nos, tiene la'.format(nombre, edad)
str2 = 'estatura de {0:3.2f} m y pesa {1:5.2f} kgs.\n'.format(altura, peso)
print(' *** {0:s} {1:s} *** '.format(str1, str2))
| {
"content_hash": "86481e51513bd67f227433e966e2870b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 83,
"avg_line_length": 31.137931034482758,
"alnum_prop": 0.5454042081949059,
"repo_name": "rojassergio/Aprendiendo-a-programar-en-Python-con-mi-computador",
"id": "922cacd0fd67b1f60db1dd4ba216ec9c408d1f65",
"size": "1807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Programas_Capitulo_06/Cap06_pagina_152_leer_datos_teclado_mejorado.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "260843"
},
{
"name": "Jupyter Notebook",
"bytes": "9588"
},
{
"name": "Python",
"bytes": "71033"
},
{
"name": "TeX",
"bytes": "4965"
}
],
"symlink_target": ""
} |
from zeit.cms.browser.resources import Resource, Library
import zeit.cms.browser.resources
lib = Library('zeit.imp', 'resources')
Resource('ui4w.css')
Resource('ui4w.js', depends=[ui4w_css])
Resource('imp.css')
Resource('imp.js', depends=[
zeit.cms.browser.resources.base,
ui4w_js, imp_css])
| {
"content_hash": "e1392c6f5dacb81918ef756826db07d3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 27.454545454545453,
"alnum_prop": 0.7251655629139073,
"repo_name": "ZeitOnline/zeit.imp",
"id": "85e1e105f528d36315717fb300ac1a215e331a64",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/imp/browser/resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6245"
},
{
"name": "JavaScript",
"bytes": "168959"
},
{
"name": "Python",
"bytes": "54413"
}
],
"symlink_target": ""
} |
from morphforge.morphology.core import MorphologyTree, Section, Region
import numpy
from morphforge import units
from morphforge.core.misc import is_float, is_int
import morphforge
from morphforge.units import qty
# pylint: disable=E1103
# pylint tries to work out the return value of _convert_to_unit,
# but does it wrong and makes complaints.
def _convert_to_unit(o, default_unit):
assert not isinstance(default_unit, units.Quantity)
if isinstance(o, units.Quantity):
return o.rescale(default_unit)
elif is_float(o) or is_int(o):
return o * morphforge.units.parse_unit_str(default_unit)#.rescale(default_unit)
elif isinstance(o, (str, unicode)) and ':' in o:
return qty(o).rescale(default_unit)
else:
raise ValueError()
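# Illustrative behaviour of _convert_to_unit, not from the original file
# (values assume the morphforge unit registry is available):
#
#     _convert_to_unit(20.0, default_unit="um")    # float -> 20.0 * um
#     _convert_to_unit('5:mm', default_unit="um")  # ':'-style string -> 5000.0 um
#
# Anything else (e.g. a plain string without ':') raises ValueError.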
class MorphologyBuilder(object):
""" Class to build simple neuron morphologies """
@classmethod
def get_single_section_soma(cls, rad=None, area=None):
assert (rad or area) and not (rad and area)
if area:
area = _convert_to_unit(area, default_unit="um2").rescale("um2").magnitude
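            # invert the sphere surface-area formula A = 4*pi*r**2 to recover the radius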
rad = numpy.power((area / (4.0 * numpy.pi)), 1.0 / 2.0)
else:
assert isinstance(rad, int) or isinstance(rad, float)
rad = _convert_to_unit(rad, default_unit="um").rescale("um").magnitude
soma_region = Region("soma")
dummysection = Section(region=None, x=0.0, y=0.0, z=0.0, r=rad)
dummysection.create_distal_section(region=soma_region, x=rad * 2.0, y=0.0, z=0.0, r=rad, idtag="soma")
cell = MorphologyTree("SimpleSomaMorph", dummysection=dummysection, metadata={})
return cell
@classmethod
def get_soma_axon_morph(cls, axon_length=1000.0, axon_radius=0.3, soma_radius=20.0, axon_sections=10):
soma_region = Region("soma")
axon_region = Region("axon")
axon_section_length = float(axon_length) / float(axon_sections)
dummy_root = Section(region=None, x=0.0, y=0.0, z=0.0, r=soma_radius)
soma = dummy_root.create_distal_section(region=soma_region, x=soma_radius * 2.0, y=0.0, z=0.0, r=soma_radius, idtag="soma")
prev_section = soma
for x in range(1, axon_sections):
axon = prev_section.create_distal_section(region=axon_region, x=x * axon_section_length + 2.0 * soma_radius, y=0, z=0, r=axon_radius, idtag="axon_%d" % x)
prev_section = axon
cell = MorphologyTree('SimpleSomaAxonMorph',
dummysection=dummy_root, metadata={})
return cell
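# Illustrative usage, not part of the original file (assumes morphforge is
# installed and importable):
#
#     soma_only = MorphologyBuilder.get_single_section_soma(rad=20.0)
#     soma_axon = MorphologyBuilder.get_soma_axon_morph(axon_length=500.0)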
| {
"content_hash": "afe393908a0eab42f7c25d8074f35db1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 166,
"avg_line_length": 39.36923076923077,
"alnum_prop": 0.6451738960531458,
"repo_name": "mikehulluk/morphforge",
"id": "0d8a40481f18e4efd6fca42ae6175ac350ee2b44",
"size": "4098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morphforge/morphology/builders/morphologybuilder.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "4818"
},
{
"name": "C",
"bytes": "1499"
},
{
"name": "Makefile",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "1557833"
},
{
"name": "Shell",
"bytes": "14"
},
{
"name": "XSLT",
"bytes": "94266"
}
],
"symlink_target": ""
} |
"""
Modules related to structure used throughout PTYSH.
"""
import dbus
from collections import OrderedDict
from yaml import Loader
from yaml import MappingNode
class _Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Singleton(_Singleton("Singleton", (object,), {})): pass
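# Illustrative behaviour, not in the original file: any class deriving from
# Singleton yields one shared instance per class, e.g.
#
#     Status() is Status()  # -> True; the second call returns the cached instance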
class Status(Singleton):
"""
    Class for checking status in PTYSH.
    You can check the login (enable) status, the depth of the node, and the name of the node.
"""
ROOT_DEPTH = 0
CONFIGURE_DEPTH = 1
def __init__(self):
self._login = False
self._module_depth = self.ROOT_DEPTH
self._current_node = []
self._debug = False
@property
def login(self):
return self._login
@login.setter
def login(self, state):
self._login = state
@property
def module_depth(self):
return self._module_depth
def increase_module_depth(self):
self._module_depth += 1
def decrease_module_depth(self):
self._module_depth -= 1
@property
def current_node(self):
return "" if len(self._current_node) == 0 else self._current_node[-1]
def push_current_node(self, node_name):
self._current_node.append(node_name)
def pop_current_node(self):
self._current_node.pop()
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, state):
self._debug = state
class PtyshDbus(object):
"""
The wrapper class of dbus used in PTYSH.
DBus introspect, property, and method functions are implemented.
"""
def __init__(self, service_name, object_path):
"""
Initialize bus and object path.
"""
self.bus = None
self.bus_object = None
try:
self.bus = dbus.SystemBus()
self.bus_object = self.bus.get_object(service_name, object_path)
except Exception as e:
self.dbus_exception_handler(e)
def dbus_introspect(self):
"""
Show introspect information.
"""
try:
iface = dbus.Interface(self.bus_object, dbus.INTROSPECTABLE_IFACE)
result = iface.Introspect()
except Exception as e:
self.dbus_exception_handler(e)
else:
return result
def dbus_get_property(self, property_interface, property_name=None):
"""
Show property information.
If no property name is given, it shows all available properties.
"""
try:
properties = dbus.Interface(self.bus_object, dbus.PROPERTIES_IFACE)
if property_name:
result = properties.Get(property_interface, property_name)
else:
result = properties.GetAll(property_interface)
except Exception as e:
self.dbus_exception_handler(e)
else:
return result
def dbus_method_call(self, method_name, method_interface, *args):
"""
Show or set the result of method call.
"""
try:
method = self.bus_object.get_dbus_method(method_name, method_interface)
if args:
result = method(*args)
else:
result = method()
except Exception as e:
self.dbus_exception_handler(e)
else:
return result
def dbus_exception_handler(self, exception):
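        # NOTE (added): IoControl is not imported in this file; it is assumed
        # to come from PTYSH's I/O module elsewhere in the package.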
if Status().debug:
IoControl().print_message(exception)
else:
IoControl().print_message("There was a problem sending the dbus message.")
raise Exception
class OrderedDictYAMLLoader(Loader):
"""
    When loading a YAML file, use OrderedDict to maintain the order of the loaded settings.
    PyYAML does not support OrderedDict out of the box, so I created a Loader to make OrderedDict available.
This source code was referenced in the gist below.
- https://gist.github.com/enaeseth/844388
"""
def __init__(self, *args, **kwargs):
Loader.__init__(self, *args, **kwargs)
self.add_constructor(u"tag:yaml.org,2002:map", type(self).construct_yaml_map)
self.add_constructor(u"tag:yaml.org,2002:omap", type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
if value is not None:
data.update(value)
def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
return None
self.flatten_mapping(node)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except:
return None
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
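# Illustrative usage, not part of the original file (assumes PyYAML's `yaml`
# module is importable alongside this Loader; the file name is hypothetical):
#
#     import yaml
#     with open('modules.yaml') as f:
#         data = yaml.load(f, Loader=OrderedDictYAMLLoader)
#     # data is an OrderedDict whose keys keep the file's ordering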
| {
"content_hash": "3e1d69073972425997958399303da0c6",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 106,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.5924198250728863,
"repo_name": "IPOT/PTYSH",
"id": "0081788a6540311d6624dee1f5768300bd9e131b",
"size": "5170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ptysh/structure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18514"
}
],
"symlink_target": ""
} |
from modules.Configuration import config
from models.scf import Frame, ReferenceFrame, Verb, database
from modules.Plotter import Plotter
from filter import Filter
## This class is used for evaluating system performance
# @author Adriano Zanette
# @version 1.0
class Evaluator():
## class constructor
# @author Adriano Zanette
# @version 1.0
# @return Evaluator
def __init__ (self):
self.filter = config.evaluator.filter
self.value = config.evaluator.minValue
self.max = config.evaluator.maxValue
self.increment = config.evaluator.increment
self.operator = config.evaluator.operator
self.output = config.evaluator.output
self.verbList = config.evaluator.verbList
self.values = []
self.precisionValues = []
self.recallValues = []
self.fmeasureValues = []
## add values to a result vector to build a graph
# @author Adriano Zanette
# @version 1.0
# @param value float
# @param precision float
# @param recall float
# @param fmeasure float
def addValue(self, value, precision, recall, fmeasure):
self.values.append(value)
self.precisionValues.append(precision)
self.recallValues.append(recall)
self.fmeasureValues.append(fmeasure)
## draw a verb histogram of scf frequencies
# @author Adriano Zanette
# @version 1.0
# @param verbString String
def verbHistogram(self, verbString):
verb = Verb.get(Verb.verb == verbString)
frequencies = [frame.frequency for frame in verb.frames if frame.frequency > self.value ]
frequencies.sort(reverse=True)
plotter = Plotter()
plotter.drawBars(frequencies, edgecolor="#cccccc")
plotter.title('Verb '+verbString+' Histogram')
plotter.labels("Frames", 'Frequency')
plotter.output()
## evaluates system's performance
# @author Adriano Zanette
# @version 1.0
# @param verbList If passed evaluate only verbs in the list
def evaluate(self, verbList = None):
filterModule = Filter()
self.queries = self.buildQueries(verbList)
        while self.value <= self.max:
filterModule.setComparator(self.filter, self.operator, self.value)
filterModule.filterFrames()
golden = self.countGoldenFrames()
retrieved = self.countNotFilteredFrames()
intersect = self.countIntersection()
#print 'value: %s, ints: %s, retr: %s, gold: %s ' % (str(self.value), str(intersect), str(retrieved), str(golden))
p = self.precision(intersect, retrieved)
r = self.recall(intersect, golden)
f = self.fmeasure(p, r)
#print 'value: %s, p: %s, r: %s, f: %s ' % (str(self.value), str(p), str(r), str(f))
print '%s,%s,%s,%s' % (str(self.value), str(p), str(r), str(f))
self.addValue(self.value, p, r, f)
self.value += self.increment
self.plotEvaluation()
## build queries for future searches
# @author Adriano Zanette
# @version 1.0
# @param verbList If passed restricts SQL only for verbs in the list
# @return Dict Queries for golden, extract and intersection
def buildQueries(self, verbList = None):
verbRestriction = self.buildVerbListRestriction(verbList)
goldenSQL = """ SELECT COUNT(*)
FROM """+ReferenceFrame._meta.db_table + """ AS rf
WHERE """+Verb.id.db_column+""" in
( SELECT DISTINCT("""+Verb.id.db_column+""")
FROM """+Frame._meta.db_table+ """ AS f
WHERE f."""+Frame.filtered.db_column+""" = 0
"""+ verbRestriction +""")"""
intersectionSQL = """ SELECT COUNT(*)
FROM """+ReferenceFrame._meta.db_table + """ AS rf
JOIN """+Frame._meta.db_table+""" AS f
ON f."""+Frame.verb.db_column+""" = rf."""+ReferenceFrame.verb.db_column+"""
AND f."""+Frame.frame.db_column+""" = rf."""+ReferenceFrame.frame.db_column+"""
AND rf."""+Frame.isPassive.db_column+""" = f."""+ReferenceFrame.isPassive.db_column+"""
WHERE f."""+Frame.filtered.db_column+""" = 0 """ + verbRestriction
extractedSQL = "SELECT COUNT(*) FROM "+Frame._meta.db_table + " AS f WHERE "+Frame.filtered.db_column+" = 0 " + verbRestriction
return {'golden': goldenSQL, 'intersection': intersectionSQL, 'extracted': extractedSQL}
## build restriction for verblist
# @author Adriano Zanette
# @version 1.0
# @param verbList If passed builds SQL restriction for verbs in the list
# @return string
def buildVerbListRestriction(self, verbList):
if verbList:
inSQL = ["\'%s\'" % (verb) for verb in verbList]
sqlVerbs = "SELECT "+Verb.id.db_column+" FROM "+Verb._meta.db_table+" WHERE "+Verb.verb.db_column+" in ( "+ (",".join(inSQL)) +")"
verbIds = [ str(row[0]) for row in database.execute_sql(sqlVerbs).fetchall() ]
restriction = " AND f."+Verb.id.db_column+" IN ( "+ ",".join(verbIds) +" ) "
else:
restriction = ""
return restriction
## calculates precision
# @author Adriano Zanette
# @version 1.0
# @param intersect int Number of SCF extracted correct
# @param retrieved int Number of SCF extracted
# @return float
def precision(self, intersect, retrieved):
if intersect == 0 :
return 0
return (float(intersect)/float(retrieved))*100
## calculates recall
# @author Adriano Zanette
# @version 1.0
# @param intersect int Number of SCF extracted correct
# @param golden int Number of reference SCF
# @return float
def recall(self, intersect, golden):
if intersect == 0 :
return 0
return (float(intersect)/float(golden))*100
## calculates f-measure
# @author Adriano Zanette
# @version 1.0
# @param precistion float
# @param recall float
# @return float
def fmeasure(self, precision, recall):
if precision == 0 or recall == 0 :
return 0
return (2*precision*recall)/(precision+recall)
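    # Worked example (illustrative, not from the original file): with
    # precision = 80.0 and recall = 60.0, fmeasure returns
    # (2 * 80 * 60) / (80 + 60) = 9600 / 140 ~= 68.57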
## retrieve the number of golden frames not filtered
# @author Adriano Zanette
# @version 1.0
# @return Integer
def countGoldenFrames(self):
sql = self.queries['golden']
result = database.execute_sql(sql)
return result.fetchone()[0]
## retrieve the size of intersection between golden frames and frames extracted not filtered
# @author Adriano Zanette
# @version 1.0
# @return Integer
def countIntersection(self):
sql = self.queries['intersection']
result = database.execute_sql(sql)
return result.fetchone()[0]
## retrieve the number of frames extracted not filtered
# @author Adriano Zanette
# @version 1.0
# @return Integer
def countNotFilteredFrames(self):
sql = self.queries['extracted']
result = database.execute_sql(sql)
return result.fetchone()[0]
## plot evaluation
# @author Adriano Zanette
# @version 1.0
def plotEvaluation(self):
plotter = Plotter()
plotter.drawLine(self.values, self.precisionValues, 'precision')
plotter.drawLine(self.values, self.recallValues, 'recall')
plotter.drawLine(self.values, self.fmeasureValues, 'fmeasure')
plotter.title('SCFExtractor Evaluation')
plotter.labels("Cutoff", '%')
if self.output:
plotter.output(self.output)
else:
            plotter.show()
| {
"content_hash": "42f2f905a36bec80139efb7e41a49bfe",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 136,
"avg_line_length": 35.770731707317076,
"alnum_prop": 0.6435292513296059,
"repo_name": "adzanette/scf-extractor",
"id": "ab7de331800778dc95d27f812ee0d641035223df",
"size": "7333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scf-extractor/modules/Evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "51698"
},
{
"name": "PHP",
"bytes": "131430"
},
{
"name": "Python",
"bytes": "423162"
}
],
"symlink_target": ""
} |
from torchvision.datasets import CIFAR100
import numpy as np
class CIFAR100N(CIFAR100):
"""
Extends CIFAR100 dataset to yield index of element in addition to image and target label.
"""
def __init__(self,
root,
train=True,
transform=None,
target_transform=None,
download=False,
rand_fraction=0.0):
super(CIFAR100N, self).__init__(root=root,
train=train,
transform=transform,
target_transform=target_transform,
download=download)
assert (rand_fraction <= 1.0) and (rand_fraction >= 0.0)
self.rand_fraction = rand_fraction
if self.rand_fraction > 0.0:
self.targets = self.corrupt_fraction_of_data()
def corrupt_fraction_of_data(self):
"""Corrupts fraction of train data by permuting image-label pairs."""
# Check if we are not corrupting test data
assert self.train is True, 'We should not corrupt test data.'
rearrange = []
length = len(self.targets)//4
start_next = 0
new_labels = []
for i in range(4):
nr_corrupt_instances = start_next + int(np.floor(length * self.rand_fraction))
print('Randomizing {} fraction of data == {} / {}'.format(self.rand_fraction,
nr_corrupt_instances-start_next ,
length))
# We will corrupt the top fraction data points
corrupt_label = self.targets[start_next:nr_corrupt_instances]
clean_label = self.targets[nr_corrupt_instances:start_next + length]
# Corrupting data
np.random.seed(111)
rand_idx = np.random.permutation(np.arange(start_next,nr_corrupt_instances))
corrupt_label = np.array(corrupt_label)[rand_idx-start_next]
# Adding corrupt and clean data back together
new_labels.extend(corrupt_label)
new_labels.extend(clean_label)
start_next += length
return np.array(new_labels)
def __getitem__(self, index):
"""
Args:
index (int): index of element to be fetched
Returns:
tuple: (sample, target, index) where index is the index of this sample in dataset.
"""
img, target = super().__getitem__(index)
#return img, target, index
return img, target
| {
"content_hash": "25c4d24eb537e1c971549c5eebab9439",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 103,
"avg_line_length": 39.01428571428571,
"alnum_prop": 0.5173928963749542,
"repo_name": "google-research/understanding-curricula",
"id": "8c60ede866c80f1538c72bfcbda25f5265b7fb11",
"size": "3307",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "utils/cifar_label.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60120"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import sys, os, subprocess, errno, hashlib, threading, signal, gzip
from mparts.rpc import RPCServer, RPCProxy
LOG_COMMANDS = False
__all__ = ["CAPTURE", "STDERR", "DISCARD",
"CHECKED", "UNCHECKED"]
CAPTURE = "\0CAPTURE"
STDERR = "\0STDERR"
DISCARD = "\0DISCARD"
CHECKED = "CHECKED"
UNCHECKED = "UNCHECKED"
popenLock = threading.Lock()
class Process(object):
def __init__(self, cmd, p, dw):
self.__cmd = cmd
self.__p = p
self.__dw = dw
if p.stdout:
self.__stdout = []
self.__stdoutCond = threading.Condition()
self.__stdoutClosed = False
t = threading.Thread(target = self.__reader)
t.setDaemon(True)
t.start()
else:
self.__stdout = None
self.__waitLock = threading.Lock()
def __reader(self):
fd = self.__p.stdout.fileno()
while True:
buf = os.read(fd, 65536)
with self.__stdoutCond:
if len(buf) == 0:
self.__stdoutClosed = True
self.__p.stdout.close()
else:
self.__stdout.append(buf)
self.__stdoutCond.notifyAll()
if len(buf) == 0:
break
def stdinWrite(self, s):
self.__p.stdin.write(s)
def stdinClose(self):
self.__p.stdin.close()
def __stdoutRead(self, fn, pred = None):
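        # Contract (added commentary): pred(s) says whether enough output has
        # accumulated to satisfy the caller; fn(s) splits it into (consumed,
        # leftover), and any leftover is pushed back for the next read.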
        if self.__stdout is None:
raise ValueError("stdout of %s is not being captured" % self.__cmd)
s = ""
while True:
with self.__stdoutCond:
while len(self.__stdout) == 0:
if self.__stdoutClosed:
return s
self.__stdoutCond.wait()
s += "".join(self.__stdout)
self.__stdout[:] = []
            if pred is None or pred(s):
read, unread = fn(s)
if unread != "":
self.__stdout.append(unread)
return read
def stdoutRead(self):
return self.__stdoutRead(lambda s: (s, ""))
def stdoutReadline(self):
def split(s):
nl = s.index("\n")
return s[:nl+1], s[nl+1:]
return self.__stdoutRead(split, lambda s: "\n" in s)
def getCode(self):
return self.__p.returncode
def kill(self, sig = signal.SIGINT):
os.kill(self.__p.pid, sig)
def wait(self, check=True, poll=False):
"""Wait for this process to exit, returning its exit code (or
-N if it died by signal N). Unlike UNIX wait, this wait is
idempotent. If check is True, raise a ValueError if the
return code is non-zero. If poll is True, don't block."""
# We serialize through a lock because the underlying wait call
# will only return successfully to one concurrent wait and we
# want to support multiple waits. In practice, this occurs
# when we're waiting on something, then the client gets a
# KeyboardInterrupt and tells us to wait again in another
# request (and thus thread) as part of shutting down.
with self.__waitLock:
            if self.__p.returncode is not None:
code = self.__p.returncode
elif poll:
code = self.__p.poll()
else:
code = self.__p.wait()
if self.__p.stdin:
self.__p.stdin.close()
if self.__dw:
# XXX It's possible I could just dup the pipe FD
# and have only one death-pipe write FD, but I
# don't know what the signal ownership semantics
# are.
os.close(self.__dw)
self.__dw = None
if check and code:
if code < 0:
msg = "signal %d" % (-code)
else:
msg = "status %d" % code
raise ValueError("%s exited with %s" % (self.__cmd, msg))
return code
SHELL_SPECIALS = set("\\'\"`<>|; \t\n()[]?#$^&*=")
def shellEscape(s):
if not set(s).intersection(SHELL_SPECIALS):
# No quoting necessary
return s
    if "'" not in s:
# Single quoting works
return "'" + s + "'"
# Have to double quote. See man bash QUOTING.
s = (s.replace("\\", "\\\\").replace("\"", "\\\"").replace("$", "\\$")
.replace("`", "\\`").replace("!", "\\!"))
return '"' + s + '"'
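# Illustrative behaviour, not from the original file:
#
#     shellEscape('plain')        # -> plain          (no quoting needed)
#     shellEscape('two words')    # -> 'two words'    (single quotes suffice)
#     shellEscape("it's $HOME")   # -> "it's \$HOME"  (double quotes, escaped)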
class RemoteHost(object):
def init(self, rootDir, cwd, name):
# XXX This is terrible. Since we're sharing our root tree
# between a regular user and root and most file operations,
# including cleaning up the tree, are done as the regular
# user, the root user has to permit this. This is obviously
# not the right answer.
if os.getuid() == 0:
os.umask(0)
self.__rootDir = rootDir
lcwd = self.__safePath(os.path.join(rootDir, cwd.lstrip("/")))
self.__makedirs(lcwd)
os.chdir(lcwd)
self.__makedirs(os.path.join(rootDir, "out"))
self.__name = name
def __safePath(self, p):
if not os.path.normpath(p).startswith(self.__rootDir):
raise ValueError("The path %r is not in the remote root %r" %
(p, self.__rootDir))
return p
def __makedirs(self, path):
try:
os.makedirs(path)
        except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def listOutFiles(self):
res = []
base = os.path.join(self.__rootDir, "out")
for dirpath, dirnames, filenames in os.walk(base):
for n in filenames:
abspath = os.path.join(dirpath, n)
relpath = abspath[len(base):].lstrip("/")
res.append(relpath)
return res
def __toOutFile(self, desc, noCheck = False):
if desc == CAPTURE:
return subprocess.PIPE
elif desc == DISCARD:
return file("/dev/null", "w")
else:
desc = os.path.expanduser(desc)
if not noCheck:
desc = self.__safePath(desc)
self.__makedirs(os.path.dirname(desc))
return file(desc, "a")
def run(self, cmd, stdin = DISCARD, stdout = DISCARD, stderr = STDERR,
cwd = None, shell = False, addEnv = {},
wait = CHECKED, exitSig = signal.SIGINT, noCheck = False):
# Set up stdin/stdout/stderr
assert stderr != CAPTURE, "stderr capture not implemented"
if stdin == DISCARD:
pstdin = file("/dev/null")
elif stdin == CAPTURE:
pstdin = subprocess.PIPE
elif stdin == STDERR:
raise ValueError("Illegal stdin %s" % stdin)
else:
pstdin = file(os.path.expanduser(stdin), "r")
if stdout == stderr == STDERR:
# subprocess is really finicky. If you pass the same
# FD for both stdout and stderr, it will get closed.
pstdout = os.dup(2)
else:
pstdout = self.__toOutFile(stdout, noCheck)
if stderr == STDERR:
pstderr = None
else:
pstderr = self.__toOutFile(stderr, noCheck)
# Expand user
cmd = map(os.path.expanduser, cmd)
# Set up environment variables
env = os.environ.copy()
for k, v in addEnv.iteritems():
env[k] = os.path.expanduser(v)
# Set up death pipe
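        # (added commentary) The child keeps the read end open with O_ASYNC
        # delivery of exitSig (see preexec below); once every write end is
        # closed -- because this process exits or wait() closes dw -- the pipe
        # hits EOF, becomes readable, and the kernel signals the child.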
if exitSig:
dr, dw = os.pipe()
def preexec():
import fcntl, struct
flags = fcntl.fcntl(dr, fcntl.F_GETFL)
O_ASYNC = 020000
fcntl.fcntl(dr, fcntl.F_SETFL, flags | O_ASYNC)
fcntl.fcntl(dr, fcntl.F_SETOWN, os.getpid())
fcntl.fcntl(dr, fcntl.F_SETSIG, exitSig)
os.close(dw)
else:
dr = dw = None
preexec = None
# Create subprocess
if LOG_COMMANDS:
print >> sys.stderr, \
"=%s= %s" % (self.__name, " ".join(map(shellEscape, cmd)))
try:
# Ugh. Popen as of Python 2.6 isn't thread-safe. See
# Python issue 2320. A better solution would be
# close_fds=True, but that fails for some reason (XXX
# track down).
with popenLock:
p = subprocess.Popen(cmd, stdin = pstdin, stdout = pstdout,
stderr = pstderr, preexec_fn = preexec,
shell = shell, cwd = cwd, env = env)
except:
if dw:
os.close(dw)
raise
finally:
if dr:
os.close(dr)
# Return Process object
pobj = Process(cmd, p, dw)
if wait:
pobj.wait(wait == CHECKED)
return RPCProxy(pobj)
def procList(self):
procs = {}
for pid in os.listdir("/proc"):
if not pid.isdigit():
continue
info = {}
try:
info["cmdline"] = file(os.path.join("/proc", pid, "cmdline")).read().split("\0")
info["exe"] = os.readlink(os.path.join("/proc", pid, "exe"))
info["status"] = {}
for l in file(os.path.join("/proc", pid, "status")):
k, v = l.split(":", 1)
info["status"][k] = v.strip()
            except EnvironmentError as e:
if e.errno == errno.ENOENT or e.errno == errno.EACCES:
continue
raise
procs[int(pid)] = info
return procs
def kill(self, pid, sig):
os.kill(pid, sig)
def writeFile(self, path, data, noCheck = False, append = False):
if not noCheck:
path = self.__safePath(path)
else:
self.__makedirs(os.path.dirname(path))
file(path, "a" if append else "w").write(data)
def readFile(self, path):
return file(path).read()
def readGzipFile(self, path):
f = gzip.open(path, 'rb')
content = f.read()
f.close()
return content
def main():
sys.stdout = sys.stderr
RPCServer(RemoteHost()).serve()
if __name__ == "__main__":
main()
| {
"content_hash": "3ecf28a55fc8de50fb3c093a08ff7a31",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 96,
"avg_line_length": 33.546325878594246,
"alnum_prop": 0.4997142857142857,
"repo_name": "KMU-embedded/mosbench-ext",
"id": "d4c9395780c9659422a29a7248d9a4753f26af6b",
"size": "10500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mparts/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "8491"
},
{
"name": "Awk",
"bytes": "45243"
},
{
"name": "Batchfile",
"bytes": "15130"
},
{
"name": "C",
"bytes": "38923116"
},
{
"name": "C++",
"bytes": "644544"
},
{
"name": "CSS",
"bytes": "38896"
},
{
"name": "DTrace",
"bytes": "12271"
},
{
"name": "Erlang",
"bytes": "312670"
},
{
"name": "Frege",
"bytes": "146785"
},
{
"name": "Groff",
"bytes": "255736"
},
{
"name": "HTML",
"bytes": "1026176"
},
{
"name": "Lex",
"bytes": "149807"
},
{
"name": "Makefile",
"bytes": "368369"
},
{
"name": "Objective-C",
"bytes": "20461"
},
{
"name": "PLpgSQL",
"bytes": "808278"
},
{
"name": "Perl",
"bytes": "336526"
},
{
"name": "Perl6",
"bytes": "11115"
},
{
"name": "Prolog",
"bytes": "11284"
},
{
"name": "Python",
"bytes": "198848"
},
{
"name": "SQLPL",
"bytes": "105796"
},
{
"name": "Shell",
"bytes": "982753"
},
{
"name": "SourcePawn",
"bytes": "6894"
},
{
"name": "TeX",
"bytes": "2582"
},
{
"name": "XS",
"bytes": "4040"
},
{
"name": "XSLT",
"bytes": "10992"
},
{
"name": "Yacc",
"bytes": "569728"
}
],
"symlink_target": ""
} |
import re
import argparse
def read_features(f_file):
"""
Read the feature file and return a nested dictionary of feature values.
"""
f_dict = {}
m = [line.rstrip().split('\t') for line in f_file.read().rstrip().split('\n')]
f_names = m[0][1:]
for line in m[1:]:
if line[0] != 'empty':
seg_vals = {f: v for f,v in zip(f_names, line[1:])}
f_dict[line[0]] = seg_vals
return f_dict
def read_constraints(c_file):
"""
Read the constraint file and return a list of the original constraint strings.
"""
return [line.rstrip().split('\t')[0] for line in c_file.read().rstrip().split('\n')]
def convert_constraints(constraints, f_dict):
"""
Return a list of RE-enabled (segmental) constraints translated from the originals.
"""
def natural_class(vals_features, f_dict):
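        # (added commentary) returns a '|'-joined alternation of every segment
        # whose feature values match all of the given value+feature specs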
seg_list = [segment for segment in f_dict]
if vals_features == '':
return '|'.join(seg_list)
for vf in vals_features:
# Assumes that the first character of a feature specification is its value and that the rest is the feature name.
the_val = vf[0]
the_ft = vf[1:]
for seg in f_dict:
s_vals = f_dict.get(seg, str(seg) + ' not found in feature dict.')
if s_vals[the_ft] != the_val:
if seg in seg_list:
seg_list.remove(seg)
return '|'.join(seg_list)
re_constraints = []
for c in constraints:
c = c.replace('[]', '({})'.format(natural_class('',f_dict)))
        splitcon = re.split(r'([\[\]])', c)
for i in range(1,len(splitcon)-1):
if splitcon[i-1] == '[' and splitcon[i+1] == ']':
if splitcon[i][0] == '^': # complementation operator
splitcon[i-1] = '[^('
splitcon[i+1] = ')]'
vals_features = splitcon[i][1:].split(',')
else:
splitcon[i-1] = '('
splitcon[i+1] = ')'
vals_features = splitcon[i].split(',')
splitcon[i] = natural_class(vals_features,f_dict)
re_constraints.append(''.join(splitcon))
return re_constraints
def main():
parser = argparse.ArgumentParser(description = 'Featural constraint translator')
parser.add_argument('constraint_file_name', help='Name of constraints file')
parser.add_argument('feature_file_name', help='Name of feature file')
parser.add_argument('outfile', help='Name of output file')
args = parser.parse_args()
with open(args.constraint_file_name) as c_file:
with open(args.feature_file_name) as f_file:
features = read_features(f_file)
constraints = read_constraints(c_file)
converted = convert_constraints(constraints, features)
with open(args.outfile, 'w') as outfile:
outfile.write('\n'.join(converted))
print("Output file created.")
return 0
if __name__ == '__main__':
main()
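# Illustrative end-to-end example, not part of the original file. Given a
# feature file defining segments p (voice '-') and b (voice '+'), the
# constraint "[+voice][-voice]" would translate to the regex "(b)(p)", and
# "[]" expands to the disjunction of all segments, e.g. "(p|b)".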
| {
"content_hash": "c04076f130f7644e5c93804b6a7c7ffb",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 125,
"avg_line_length": 35.275862068965516,
"alnum_prop": 0.5536005213424569,
"repo_name": "bhallen/constraint-translator",
"id": "c6ae50ef9f5f3dfded98e8d251cd3f86c78b9e92",
"size": "3116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "translate.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2959"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from urllib.parse import urlparse
import requests
from requests.exceptions import ConnectTimeout, ReadTimeout
import simplejson
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from standup.auth0.models import IdToken
class Auth0LookupError(Exception):
pass
def renew_id_token(id_token):
"""Renews id token and returns id token or None
:arg str id_token: the id token to renew
:returns: ``id_token`` or ``None``
"""
url = 'https://{}/delegation'.format(settings.AUTH0_DOMAIN)
response = requests.post(url, json={
'client_id': settings.AUTH0_CLIENT_ID,
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'id_token': id_token,
'api_type': 'app',
}, timeout=settings.AUTH0_PATIENCE_TIMEOUT)
try:
result = response.json()
except simplejson.JSONDecodeError:
        # This can happen if the response was something like a 502 error
return
# If the response.status_code is not 200, it's still JSON but it won't have a id_token.
return result.get('id_token')
def get_path(url):
"""Takes a url and returns path + querystring"""
parsed = urlparse(url)
if parsed.query:
return '%s?%s' % (parsed.path, parsed.query)
return parsed.path
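# Illustrative behaviour, not from the original file:
#
#     get_path('https://example.com/a/b?c=1')  # -> '/a/b?c=1'
#     get_path('https://example.com/a/b')      # -> '/a/b'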
class ValidateIDToken(object):
"""For users authenticated with an id_token, we need to check that it's still valid. For example,
    the user could have been blocked (e.g. after leaving the company); if so, we need to ask the
    user to log in again.
"""
exception_paths = (
get_path(settings.AUTH0_CALLBACK),
)
def process_request(self, request):
if (
request.method != 'POST' and
not request.is_ajax() and
request.user.is_active and
request.path not in self.exception_paths
):
# Look up expiration in session and see if the id_token needs to be renewed.
id_token_expiration = request.session.get('id_token_expiration', None)
            if id_token_expiration and id_token_expiration > datetime.utcnow():
                # Token is not due for renewal yet; skip the delegation call.
                return
# Either no expiration in session or token needs to be renewed, so renew it
# now.
try:
token = IdToken.objects.get(user=request.user)
except IdToken.DoesNotExist:
# If there is no IdToken, then this isn't a mozilla.com address and we're fine.
return
if token.id_token:
try:
id_token = renew_id_token(token.id_token)
except (ConnectTimeout, ReadTimeout):
messages.error(
request,
'Unable to validate your authentication with Auth0. '
                        'This can happen when there is a temporary network '
'problem. Please sign in again.'
)
# Log the user out because their id_token didn't renew and send them to
# home page.
logout(request)
return HttpResponseRedirect(reverse(settings.AUTH0_SIGNIN_VIEW))
if id_token:
# Save new token.
token.id_token = id_token
token.save()
# Re-up the session.
request.session['id_token_expiration'] = (
datetime.utcnow() + timedelta(seconds=settings.AUTH0_RENEW_ID_TOKEN_EXPIRY_SECONDS)
)
else:
# If we don't have a new id_token, then it's not valid anymore. We log the user
# out and send them to the home page.
logout(request)
messages.error(
request,
'Unable to validate your authentication with Auth0. '
'This is most likely due to an expired authentication '
'session. You have to sign in again.'
)
return HttpResponseRedirect(reverse(settings.AUTH0_SIGNIN_VIEW))
| {
"content_hash": "9abbb30a790446541530e3514b615c31",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 107,
"avg_line_length": 36.02479338842975,
"alnum_prop": 0.5774260151410874,
"repo_name": "willkg/standup",
"id": "d077e51697fe58616998695a376df534755b20eb",
"size": "4359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "standup/auth0/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33331"
},
{
"name": "HTML",
"bytes": "19203"
},
{
"name": "JavaScript",
"bytes": "979"
},
{
"name": "Makefile",
"bytes": "2446"
},
{
"name": "Python",
"bytes": "98929"
},
{
"name": "Shell",
"bytes": "3335"
}
],
"symlink_target": ""
} |
"""
main module for dft basis sets
"""
__author__ = "Fenglai Liu"
import sys
import os
import infor
import generateAngBasis
import generateBasis
import derivorder
# setting the basis set order
maxLChoice = 6
if len(sys.argv) == 2:
maxLChoice = int(sys.argv[1])
elif len(sys.argv) > 2:
    print "Wrong argv list! We only support zero or one argument! Please check it!\n"
    sys.exit()
infor.setBasisSetOrder()
infor.setMaxL(maxLChoice)
# print out the angular part of code
generateAngBasis.generateCode()
# print out the basis set code
for i in range(1, 5):
    generateBasis.generateCode(i)
# finally, we try to print out the derivatives information
# used in the program
count = 1
for i in range(1, 5):
    dlist = derivorder.derivOrderGeneration(i)
    for var in dlist:
        v = "DERIV_" + var
        line = "UInt " + v + " = " + str(count) + ";"
        print line
        count = count + 1
| {
"content_hash": "eb097da69db96c2a9763a015849dd561",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 21.071428571428573,
"alnum_prop": 0.6994350282485876,
"repo_name": "murfreesboro/dftints",
"id": "8bb9ceac16545dae174296942579a410940ccf5b",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25516"
},
{
"name": "Shell",
"bytes": "32"
}
],
"symlink_target": ""
} |
'''There is a story I like about PG Wodehouse, author of the Jeeves
and Wooster novels. He had an unusual method of editing his own work.
He would write (long hand) a page of his new book. And read it, and
place the page on the wall of his study, and start on the next page.
But the page stuck to the wall was not at some random location - it
was, perhaps obviously, in order going horizontally around the room,
but it was also vertically where he thought it should be relative to
the *quality* he expected of himself.::
"Pages would start near the floor and slowly work their way up
until they touched the picture rail, when they were good enough
for publication"
This jibes marvellously with how I think of software development. It
is focused on the code itself - the rating system is not in a
notebook, spreadsheet or bug tracker, it is a natural extension of where
and how the code is stored.
So I intend to update and release my todo-inator idea.
There are two parts - the rating of the code, by the author. this will be a simple
five star system ::
# rate: * * * * *
# lifecycle: <prototype/PoC>, <pre-release>, maturing, mature, retiring
# TODO:
And we can then see this rating in a specialised `ls`.
specialised ls - walk over a package source and tell me ::
star rating
todos
new features
I think I will have a second system that correlates to a map of my todos and
some external systems
And some way of recording success / failures
Design
------
walk a source tree, and build a dict of file: {markers...}
return that
Todos
-----
We have a slight finesse on the TODO. Every TODO is assigned a
priority. By default that is 30/100. 100 being the most hair on fire
thing we can imagine, and 1 being lets get around to this before the
heat death of the universe.
but we can alter that priority by putting a {val} after the text
(we do not want to confuse with [x] form of done / not done.)
Future enhancements:
* "TODO-feature: "
* have a store of todos in .todoinator in the current repo,
which lets us compute what has changed ?
#TODO: build a test framework / runner {20}
#TODO: build a lifecycle todo listing {99}
#TODO: build a ticketing system??? at least a list of todos
#TODO: linting etc - a pre-commit check system ala phabricator
'''
# rate: **
# life: prototype
# TODO: build basic walk and parse and report features
import os
import re
import logging
VALID_SUFFIX = ['.py', '.rst']
### config
confd = {'todoinator.priorityregex': r"\{\d+\}"}
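# the default regex matches inline priority tags such as "{45}" in a TODO line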
class TODO(object):
"""
"""
def __init__(self, line, filepath=None):
"""
"""
parsedline, priority = parse_line(line)
self.line = parsedline
self.priority = priority
self.origpath = filepath
try:
absfilepath = os.path.abspath(filepath)
bits = absfilepath.split("/")
            idx = bits.index("projects")  # assume that's there
self.reponame = bits[idx+1]
except ValueError: # cant find projects in path
self.reponame = '?'
def keep_file(filepath):
    """Decide if we keep the filepath, solely by exclusion of end of path
This is primarily to avoid keeping .pyc files
>>> keep_file('/foo.pyc')
False
"""
ignoredpathendings = ['.pyc',]
for ending in ignoredpathendings:
if filepath.endswith(ending):
return False
return True
def walk_tree(rootpath):
"""
"""
ignoredirs = ['.git',]
for dirpath, dirs, files in os.walk(rootpath):
#change dirs to remove unwanted dirs to descend into
        # remember we need to use .remove as dirs seems to just point at
#underlying implementation, so substitution has no effect
for d in ignoredirs:
if d in dirs:
dirs.remove(d)
files = list(filter(keep_file, files))
for file in files:
thisfile = os.path.join(dirpath, file)
yield thisfile
def parse_file(txt):
"""extract todo lines from a file
>>> parse_file("# todo: foo\\n foo")
[' foo']
"""
res = []
for line in txt.split('\n'):
if line.strip().startswith('#'):
#valid possible
for token in ['todo:']:
stpoint = line.lower().find(token)
if stpoint >-1:
res.append(line[stpoint+len(token):])
return res
def parse_line(todoline):
"""extract data from a todo line
>>> parse_line(" some note unadorned")
(' some note unadorned', 30)
>>> parse_line(" some note {88}")
(' some note ', 88)
"""
rgx = re.compile(confd['todoinator.priorityregex'])
vals = rgx.findall(todoline) # returns [] or ['{4}']
if vals:
        # pick the highest numeric priority rather than the lexicographically
        # greatest token (sorted() would rank '{99}' above '{100}')
        token = max(vals, key=lambda v: int(v.strip('{}')))
priority = int(token.replace("{", "").replace("}", ""))
else:
token = ''
priority = 30
return todoline.replace(token, ''), priority
def parse_tree(rootpath):
"""
"""
all_todos = []
textfrag = "TODO\n"
htmlfrag = "<table>"
for filepath in walk_tree(rootpath):
# test if suffix valid (ie dont parse pyc or wheel)
suffix = os.path.splitext(filepath)[1]
if suffix not in VALID_SUFFIX:
continue
try:
todo_list = parse_file(open(filepath).read())
res = sorted([TODO(line, filepath) for line in todo_list], key=lambda t: t.priority, reverse=True)
except IOError:
res = []
        except UnicodeDecodeError:
            logging.error("could not read %s - unicode err", filepath)
            res = []  # otherwise a stale res from the previous file would be re-appended
if res:
all_todos.extend(res)
all_todos = sorted(all_todos, key=lambda t: t.priority, reverse=True)
for todo in all_todos:
textfrag += "{0} {2} ({1})\n".format(todo.priority, todo.reponame, todo.line)
htmlfrag += "<tr><td>%s</td> <td>%s</td> <td>%s</td> </tr>\n" % (todo.priority, todo.reponame, todo.line)
htmlfrag += "</table>"
#######################
path = "/tmp/todo.html"
open(path, 'w').write(htmlfrag)
import webbrowser
#webbrowser.open(path)
print(textfrag)
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=False)
| {
"content_hash": "d97cfdcea88afcbf4c227ddae036a315",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 114,
"avg_line_length": 28.67579908675799,
"alnum_prop": 0.621656050955414,
"repo_name": "mikadosoftware/weaver",
"id": "d191a927499b508556b82b98e3c12f75b4291d42",
"size": "6284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weaver/devtools/todoinator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39536"
},
{
"name": "Shell",
"bytes": "2079"
}
],
"symlink_target": ""
} |
"""
Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_7mode
@interface.volumedriver
class NetApp7modeISCSIDriver(driver.BaseVD,
driver.ConsistencyGroupVD,
driver.ManageableVD,
driver.ExtendVD,
driver.TransferVD,
driver.SnapshotVD):
"""NetApp 7-mode iSCSI volume driver."""
DRIVER_NAME = 'NetApp_iSCSI_7mode_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'iSCSI', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_iscsi(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_iscsi(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
        return self.library.update_consistencygroup(
            group, add_volumes=add_volumes, remove_volumes=remove_volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None):
raise NotImplementedError()
| {
"content_hash": "23d603e6d58a59366fdd2a6ad4d09496",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 76,
"avg_line_length": 38.48760330578512,
"alnum_prop": 0.6465535752630449,
"repo_name": "Nexenta/cinder",
"id": "9ba51d838a82b667f98881274c733c751eb50ce0",
"size": "5345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18007018"
},
{
"name": "Shell",
"bytes": "13543"
}
],
"symlink_target": ""
} |
"""Regression test for zone_importer_lib.py
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import unittest
import os
from roster_config_manager import zone_importer_lib
import roster_core
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
ZONE_FILE = 'test_data/test_zone.db'
REVERSE_ZONE_FILE = 'test_data/test_reverse_zone.db'
REVERSE_IPV6_ZONE_FILE = 'test_data/test_reverse_ipv6_zone.db'
INCLUDES_ZONE_FILE = 'test_data/test_include_zone.db'
BAD_INCLUDES_ZONE_FILE = 'test_data/test_bad_include_zone.db'
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
class TestZoneImport(unittest.TestCase):
def setUp(self):
config_instance = roster_core.Config(file_name=CONFIG_FILE)
db_instance = config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.core_instance = roster_core.Core(u'sharrell', config_instance)
self.core_instance.MakeView(u'external')
self.core_instance.MakeZone(u'sub.university.lcl', u'master',
u'sub.university.lcl.', view_name=u'external')
self.core_instance.MakeZone(u'0.168.192.in-addr.arpa', u'master',
u'0.168.192.in-addr.arpa.',
view_name=u'external')
self.core_instance.MakeZone(u'8.0.e.f.f.3.ip6.arpa', u'master',
u'8.0.e.f.f.3.ip6.arpa.', view_name=u'external')
self.core_instance.MakeReverseRangeZoneAssignment(
u'0.168.192.in-addr.arpa', u'192.168.0/24')
self.core_instance.MakeReverseRangeZoneAssignment(
u'8.0.e.f.f.3.ip6.arpa', u'3ffe:0800:0000:0000:0000:0000:0000:0000/24')
def testReverseZoneToCIDRBlock(self):
importer_instance = zone_importer_lib.ZoneImport(
ZONE_FILE, CONFIG_FILE, u'sharrell', u'external', u'sub.university.lcl')
self.assertRaises(zone_importer_lib.Error,
importer_instance.ReverseZoneToCIDRBlock)
importer_instance.origin = '0.0.0.10.in-addr.arpa.'
self.assertRaises(zone_importer_lib.Error,
importer_instance.ReverseZoneToCIDRBlock)
importer_instance.origin = '0.10.in-addr.arpa.'
self.assertEqual(importer_instance.ReverseZoneToCIDRBlock(), '10.0/16')
importer_instance.origin = '4.5.6.7.8.9.1.f.3.3.0.8.e.f.f.3.ip6.arpa.'
self.assertEqual(importer_instance.ReverseZoneToCIDRBlock(),
'3ffe:8033:f198:7654:0000:0000:0000:0000/64')
importer_instance.origin = '4.8.e.f.f.3.ip6.arpa.'
self.assertEqual(importer_instance.ReverseZoneToCIDRBlock(),
'3ffe:8400:0000:0000:0000:0000:0000:0000/24')
importer_instance.origin = '4.8.e.f.f.z.ip6.arpa.'
self.assertRaises(zone_importer_lib.Error,
importer_instance.ReverseZoneToCIDRBlock)
def testMakeRecordsFromForwardZone(self):
importer_instance = zone_importer_lib.ZoneImport(
ZONE_FILE, CONFIG_FILE, u'sharrell', u'external', u'sub.university.lcl')
importer_instance.MakeRecordsFromZone()
self.assertEquals(self.core_instance.ListRecords(record_type=u'soa'),
[{u'serial_number': 795, u'refresh_seconds': 10800,
'target': u'@',
u'name_server': u'ns.university.lcl.',
u'retry_seconds': 3600, 'ttl': 3600,
u'minimum_seconds': 86400, 'record_type': u'soa',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'admin_email': u'hostmaster.ns.university.lcl.',
u'expiry_seconds': 3600000}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'ns'),
[{'target': u'@',
u'name_server': u'ns.sub.university.lcl.', 'ttl': 3600,
'record_type': u'ns', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl'},
{'target': u'@',
u'name_server': u'ns2.sub.university.lcl.', 'ttl': 3600,
'record_type': u'ns', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'mx'),
[{'target': u'@', 'ttl': 3600,
u'priority': 10, 'record_type': u'mx',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'mail_server': u'mail1.sub.university.lcl.'},
{'target': u'@', 'ttl': 3600,
u'priority': 20, 'record_type': u'mx',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'mail_server': u'mail2.sub.university.lcl.'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'txt'),
[{'target': u'@', 'ttl': 3600,
'record_type': u'txt', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'quoted_text': u'"Contact 1: Stephen Harrell '
u'([email protected])"'}])
records_list = self.core_instance.ListRecords(record_type=u'a')
self.assertTrue({'target': u'localhost', 'ttl': 3600,
'record_type': u'a', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_ip': u'127.0.0.1'} in records_list)
self.assertTrue({'target': u'desktop-1', 'ttl': 3600,
'record_type': u'a', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_ip': u'192.168.1.100'} in records_list)
self.assertTrue({'target': u'@', 'ttl': 3600,
'record_type': u'a', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_ip': u'192.168.0.1'} in records_list)
self.assertEquals(self.core_instance.ListRecords(record_type=u'cname'),
[{'target': u'www', 'ttl': 3600,
'record_type': u'cname', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_host': u'sub.university.lcl.'},
{'target': u'www.data', 'ttl': 3600,
'record_type': u'cname', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_host': u'ns.university.lcl.'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'hinfo'),
[{'target': u'ns2', 'ttl': 3600,
u'hardware': u'PC', 'record_type': u'hinfo',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl', u'os': u'NT'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'aaaa'),
[{'target': u'desktop-1', 'ttl': 3600, 'record_type':
u'aaaa', 'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl', u'assignment_ip':
u'3ffe:0800:0000:0000:02a8:79ff:fe32:1982'}])
def testMakeRecordsFromReverseZone(self):
importer_instance = zone_importer_lib.ZoneImport(
REVERSE_ZONE_FILE, CONFIG_FILE, u'sharrell', u'external',
u'0.168.192.in-addr.arpa')
importer_instance.MakeRecordsFromZone()
self.assertEquals(self.core_instance.ListReverseRangeZoneAssignments(
zone_name=u'0.168.192.in-addr.arpa'),
{u'0.168.192.in-addr.arpa': u'192.168.0/24'})
self.assertEqual(self.core_instance.ListRecords(record_type=u'ptr'),
[{'target': u'1', 'ttl': 86400,
'record_type': u'ptr', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'0.168.192.in-addr.arpa',
u'assignment_host': u'router.university.lcl.'},
{'target': u'11', 'ttl': 86400,
'record_type': u'ptr', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'0.168.192.in-addr.arpa',
u'assignment_host': u'desktop-1.university.lcl.'},
{'target': u'12', 'ttl': 86400,
'record_type': u'ptr', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'0.168.192.in-addr.arpa',
u'assignment_host': u'desktop-2.university.lcl.'}])
def testMakeRecordsFromIPV6ReverseZone(self):
importer_instance = zone_importer_lib.ZoneImport(
REVERSE_IPV6_ZONE_FILE, CONFIG_FILE, u'sharrell', u'external',
u'8.0.e.f.f.3.ip6.arpa')
importer_instance.MakeRecordsFromZone()
self.assertEquals(self.core_instance.ListReverseRangeZoneAssignments(
zone_name=u'8.0.e.f.f.3.ip6.arpa'),
{u'8.0.e.f.f.3.ip6.arpa':
u'3ffe:0800:0000:0000:0000:0000:0000:0000/24'})
for record in self.core_instance.ListRecords(record_type=u'ptr'):
self.assertTrue(record in
[{'target':
u'2.8.9.1.2.3.e.f.f.f.9.7.8.a.2.0.0.0.0.0.0.0.0.0.0.0',
'ttl': 86400, 'record_type': u'ptr', 'view_name': u'external',
'last_user': u'sharrell', 'zone_name':
u'8.0.e.f.f.3.ip6.arpa', u'assignment_host':
u'router.university.lcl.'},
{'target':
u'0.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0',
'ttl': 86400, 'record_type': u'ptr', 'view_name':
u'external', 'last_user': u'sharrell', 'zone_name':
u'8.0.e.f.f.3.ip6.arpa', u'assignment_host':
u'desktop-1.university.lcl.'}])
def testMakeRecordsFromZoneWithIncludes(self):
self.assertRaises(zone_importer_lib.IncludeError, zone_importer_lib.ZoneImport,
BAD_INCLUDES_ZONE_FILE, CONFIG_FILE, u'sharrell', u'external', u'sub.university.lcl')
importer_instance = zone_importer_lib.ZoneImport(
INCLUDES_ZONE_FILE, CONFIG_FILE, u'sharrell', u'external', u'sub.university.lcl')
importer_instance.MakeRecordsFromZone()
self.assertEquals(self.core_instance.ListRecords(record_type=u'soa'),
[{u'serial_number': 795, u'refresh_seconds': 10800,
'target': u'@',
u'name_server': u'ns.university.lcl.',
u'retry_seconds': 3600, 'ttl': 3600,
u'minimum_seconds': 86400, 'record_type': u'soa',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'admin_email': u'hostmaster.ns.university.lcl.',
u'expiry_seconds': 3600000}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'ns'),
[{'target': u'@',
u'name_server': u'ns.sub.university.lcl.', 'ttl': 3600,
'record_type': u'ns', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl'},
{'target': u'@',
u'name_server': u'ns2.sub.university.lcl.', 'ttl': 3600,
'record_type': u'ns', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'mx'),
[{'target': u'@', 'ttl': 3600,
u'priority': 10, 'record_type': u'mx',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'mail_server': u'mail1.sub.university.lcl.'},
{'target': u'@', 'ttl': 3600,
u'priority': 20, 'record_type': u'mx',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'mail_server': u'mail2.sub.university.lcl.'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'txt'),
[{'target': u'@', 'ttl': 3600,
'record_type': u'txt', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'quoted_text': u'"Contact 1: Stephen Harrell '
u'([email protected])"'}])
records_list = self.core_instance.ListRecords(record_type=u'a')
self.assertTrue({'target': u'localhost', 'ttl': 3600,
'record_type': u'a', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_ip': u'127.0.0.1'} in records_list)
self.assertTrue({'target': u'desktop-1', 'ttl': 3600,
'record_type': u'a', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_ip': u'192.168.1.100'} in records_list)
self.assertTrue({'target': u'@', 'ttl': 3600,
'record_type': u'a', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_ip': u'192.168.0.1'} in records_list)
self.assertEquals(self.core_instance.ListRecords(record_type=u'cname'),
[{'target': u'www', 'ttl': 3600,
'record_type': u'cname', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_host': u'sub.university.lcl.'},
{'target': u'www.data', 'ttl': 3600,
'record_type': u'cname', 'view_name': u'external',
'last_user': u'sharrell',
'zone_name': u'sub.university.lcl',
u'assignment_host': u'ns.university.lcl.'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'hinfo'),
[{'target': u'ns2', 'ttl': 3600,
u'hardware': u'PC', 'record_type': u'hinfo',
'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl', u'os': u'NT'}])
self.assertEquals(self.core_instance.ListRecords(record_type=u'aaaa'),
[{'target': u'desktop-1', 'ttl': 3600, 'record_type':
u'aaaa', 'view_name': u'external', 'last_user': u'sharrell',
'zone_name': u'sub.university.lcl', u'assignment_ip':
u'3ffe:0800:0000:0000:02a8:79ff:fe32:1982'}])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "b31b5e59f35f7ee986de7d472d5271ee",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 93,
"avg_line_length": 55.63175675675676,
"alnum_prop": 0.5189773486366672,
"repo_name": "stephenlienharrell/roster-dns-management",
"id": "3a8797ec15ff76926848d4f541405f55376cee79",
"size": "18020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/zone_importer_lib_regtest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2339145"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://repo.maven.apache.org/maven2/org/robolectric/pluginapi/4.3.1/pluginapi-4.3.1.jar'
_FILE_NAME = 'pluginapi-4.3.1.jar'
_FILE_VERSION = '4.3.1'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
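# Usage sketch (hypothetical invocation; the 3pp tooling normally drives this
# script and sets _3PP_VERSION in the environment). Output is roughly:
#
#   $ python fetch.py latest
#   4.3.1
#   $ _3PP_VERSION=4.3.1 python fetch.py get_url
#   {"url": ["https://repo.maven.apache.org/maven2/org/robolectric/pluginapi/4.3.1/pluginapi-4.3.1.jar"], "name": ["pluginapi-4.3.1.jar"], "ext": ".jar"}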
| {
"content_hash": "0db63dfed9715b0d25401dab352fc728",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 102,
"avg_line_length": 22.145833333333332,
"alnum_prop": 0.6077140169332079,
"repo_name": "ric2b/Vivaldi-browser",
"id": "100d93a0c8cf74e7b732f228a087a7885db337b6",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/third_party/android_deps/libs/org_robolectric_pluginapi/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
if sys.version_info < (2, 7): # pragma: no cover
unittest_mod = 'unittest2'
else: # pragma: no cover
unittest_mod = 'unittest'
unittest = __import__(unittest_mod)
__all__ = ('unittest',)
| {
"content_hash": "a1e8be91f81ef127750933444c3dae6f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.6285714285714286,
"repo_name": "malept/pyoath-toolkit",
"id": "da7950eaaa41d4e0fd0a71ad489e56e70c77374a",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oath_toolkit/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5945"
},
{
"name": "Python",
"bytes": "121071"
},
{
"name": "Shell",
"bytes": "728"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'name': "arakhne",
'packages': [
"arakhne",
"arakhne.common",
"arakhne.corpus",
"arakhne.corpus.files",
"arakhne.doc",
"arakhne.settings",
"arakhne.setup",
"arakhne.stopwords"
],
'install_requires': ["nltk", "cltk"],
'version': "0.3.1",
'description': "Arakhne Classical Text Loom for Corpus-Based Analytics",
'author': "David J. Thomas",
'author_email': "[email protected]",
'url': "https://github.com/thePortus/arakhne",
'download_url': "https://github.com/thePortus/arakhne/archive/master.zip",
'keywords': [
"nlp",
"nltk",
"cltk",
],
'classifiers': [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Environment :: Other Environment",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Linguistic",
"Topic :: Sociology :: History"
],
}
setup(**config)
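# Typical install sketch from a source checkout:
#
#   $ pip install .        # or: python setup.py sdist && pip install dist/*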
| {
"content_hash": "40b75f8d726f66a50cec69ac1e8b7f72",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 30.133333333333333,
"alnum_prop": 0.5685840707964602,
"repo_name": "thePortus/arakhne",
"id": "b4d3230896d2747e99a426f32c480de06628fc81",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84723"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='django-generic-scaffold',
version='0.5.6',
description='Generic scaffolding for Django',
long_description=readme(),
author='Serafeim Papastefanos',
author_email='[email protected]',
license='MIT',
url='https://github.com/spapas/django-generic-scaffold/',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['tests.*', 'tests',]),
install_requires=['Django >=1.8', 'six'],
classifiers=[
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Framework :: Django :: 4.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
)
| {
"content_hash": "b37772e65a4bca11d67fba4f7d8daafc",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 61,
"avg_line_length": 33.89795918367347,
"alnum_prop": 0.5647200481637568,
"repo_name": "spapas/django-generic-scaffold",
"id": "6d049596fde380b79a5a61e1b03cd15a9215fa5d",
"size": "1683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "Python",
"bytes": "34361"
}
],
"symlink_target": ""
} |
""" Elements """
import errors
from utils import Utils
NL = "\n"
class Element:
"""Element."""
def __init__( self, name, parent=None):
self.name = name
self.params = []
self.parent = parent
self.content = []
def __call__( self, *args, **kwargs ):
self.params = args
return self
def add (self, line):
self.content.append(line)
if self.parent is not None:
self.parent.add(line)
return line
def __str__( self ):
return ''.join(self.content)
class UlElement(Element):
"""Ul Element."""
def __init__( self, parent=None):
Element.__init__(self, "ul", parent)
def __call__( self, *args, **kwargs ):
self.params = args
self.add(NL)
for arg in args:
line = "* " + Utils.html_rest(arg)
self.add(line)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
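# Minimal usage sketch (assuming Utils.html_rest just normalizes inline markup
# in each item): UlElement()(u'first', u'second') accumulates roughly
#
#   * first
#   * second
#
# in its content, which __str__ then joins into reStructuredText.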
class OlElement(Element):
"""Ol Element."""
def __init__( self, parent=None):
Element.__init__(self, "ol", parent)
def __call__( self, *args, **kwargs ):
self.params = args
self.add(NL)
i = 0
for arg in args:
i +=1
line = str(i) + ". " + Utils.html_rest(arg)
self.add(line)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class FlistElement(Element):
"""Field List Element.
:Date: 2001-08-16
:Version: 1
:Authors: - Me
- Myself
- I
:Indentation: Since the field marker may be quite long, the second
and subsequent lines of the field body do not have to line up
"""
def __init__( self, parent=None):
Element.__init__(self, "flist", parent)
def __call__( self, *args, **kwargs ):
self.params = args
if len(kwargs) == 0:
raise errors.DocumentError("No list fields.")
self.add(NL)
if len(kwargs) > 1:
for field in sorted(kwargs):
value = Utils.html_rest(kwargs[field])
self.add(':'+ field +': ' + value)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class HrElement(Element):
"""Hr or Transaition Element . A transition marker is a horizontal line
of 4 or more repeated punctuation ----- """
def __init__( self, parent=None):
Element.__init__(self, "hr", parent)
def __call__( self, *args, **kwargs ):
self.params = "----"
self.add(NL)
self.add('-----------')
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class PElement(Element):
"""Paragraph """
def __init__( self, parent=None):
Element.__init__(self, "p", parent)
def __call__( self, *args, **kwargs ):
text = ""
if len(kwargs) != 0:
text = kwargs.get('text', "")
elif len(args) != 0:
text = args[0]
text = Utils.html_rest(text)
text = Utils.br_rest(text)
self.add(NL)
self.add(text)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class PreElement(Element):
"""Pre - Literal Block """
def __init__( self, parent=None):
Element.__init__(self, "pre", parent)
def __call__( self, *args, **kwargs ):
text = ""
if len(kwargs) != 0:
text = kwargs.get('text', "")
elif len(args) != 0:
text = args[0]
self.add(NL)
self.add('::')
self.add(NL)
self.add(NL)
self.add(' ')
self.add(text)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class LineblockElement(Element):
"""Line Block
| These lines are
| broken exactly like in
| the source file.
"""
def __init__( self, parent=None):
Element.__init__(self, "pre", parent)
def __call__( self, *args, **kwargs ):
block = ""
if len(args) != 0:
self.add(NL)
for arg in args:
block += "| " + arg + NL
self.add(block)
return self
def __str__( self ):
return Element.__str__(self)
class CommentElement(Element):
"""Comment
.. This text will not be shown
Second line
"""
def __init__( self, parent=None):
Element.__init__(self, "comment", parent)
def __call__( self, *args, **kwargs ):
        text = None
        if len(kwargs) != 0:
text = kwargs.get('text', "")
elif len(args) > 0:
text = args[0]
if text is None:
raise errors.InvalidElementError("text")
self.add(NL)
self.add(NL)
self.add('.. ' + text)
self.add(NL)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class ImageElement(Element):
"""Image Element.
.. image::images/ball1.tiff
:height: 100px
:width: 200 px
:scale: 50 %
:alt: alternate text
:align: right
"""
def __init__( self, parent=None):
Element.__init__(self, "image", parent)
def __call__( self, *args, **kwargs ):
        src = None
if len(kwargs) != 0:
src = kwargs.get('src', None)
elif len(args) > 0:
src = args[0]
if src is None:
raise errors.InvalidElementError("src")
self.add(NL)
self.add('.. image:: ' + src)
if len(kwargs) > 1:
for option in sorted(kwargs):
if option != "src":
self.add(NL)
self.add(' :'+option+': ' + kwargs[option])
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class TitleElement(Element):
"""
Titles
# with overline, for parts
* with overline, for chapters
=, for sections
-, for subsections
^, for subsubsections
", for paragraphs
"""
def __init__( self, parent = None):
Element.__init__(self, "title", parent)
def __call__( self, *args, **kwargs ):
text = ""
char = "*"
underline = ""
if len(kwargs) != 0:
text = kwargs.get('text', "")
char = kwargs.get('type', "*")
elif len(args) > 0:
text = args[0]
if len(args) > 1:
char = args[1]
underline = str(char) * len(text)
self.params = [text, underline]
self.add(NL)
self.add(text)
self.add(NL)
self.add(underline)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class LinkElement(Element):
"""
Link Element
`Python <http://www.python.org/>`_
"""
def __init__( self, parent=None):
Element.__init__(self, "link", parent)
def __call__( self, *args, **kwargs ):
text = ""
href = ""
if len(kwargs) != 0:
href = kwargs.get('href', "")
text = kwargs.get('text', "")
elif len(args) != 0:
href = args[0]
text = args[1]
self.params = [text, href]
self.add(NL)
self.add("`%s <%s>`_" % ( text, href ))
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class DirectiveElement(Element):
"""
Directive Element
.. note:: Note
This is content
"""
def __init__( self, parent=None):
Element.__init__(self, "directive", parent)
def __call__( self, *args, **kwargs ):
type = ""
title = ""
text = ""
if len(kwargs) != 0:
type = kwargs.get('type', "")
title = kwargs.get('title', "")
text = kwargs.get('text', "")
elif len(args) > 0:
type = args[0]
if len(args) > 1:
title = args[1]
if len(args) > 2:
text = args[2]
self.params = [type, title, text]
self.add(NL)
self.add(".. %s:: %s" % ( type, title))
self.add(NL)
if len(kwargs) > 1:
for option in sorted(kwargs):
if option != "type" and option != "text" and option != "title":
self.add(' :'+option+': ' + kwargs[option])
self.add(NL)
self.add(NL)
self.add(' ' + text)
self.add(NL)
return self
def __str__( self ):
return Element.__str__(self)
class TableElement(Element):
"""
Table Element
"""
def __init__( self, parent=None):
Element.__init__(self, "table", parent)
def __call__( self, *args, **kwargs ):
data = None
if len(args) > 0:
data = args[0]
if len(data) > 0:
self.params = args
table = self.make_table(data)
self.add(NL)
self.add(table)
self.add(NL)
return self
def make_table(self, grid):
max_cols = [max(out) for out in map(list, zip(*[[len(item) for item in row] for row in grid]))]
rst = self.table_div(max_cols, 1)
for i, row in enumerate(grid):
header_flag = False
if i == 0 or i == len(grid)-1: header_flag = True
rst += self.normalize_row(row,max_cols)
rst += self.table_div(max_cols, header_flag )
return rst
def table_div(self, max_cols, header_flag=1):
out = ""
if header_flag == 1:
style = "="
else:
style = "-"
for max_col in max_cols:
out += max_col * style + " "
out += "\n"
return out
def normalize_row(self, row, max_cols):
r = ""
for i, max_col in enumerate(max_cols):
r += row[i] + (max_col - len(row[i]) + 1) * " "
return r + "\n"
def __str__( self ):
return Element.__str__(self)
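# Sketch of make_table's output: column widths are the longest cell in each
# column, and the first and last rows are treated as header rows ('='
# dividers) while interior rows get '-' dividers. For example,
# TableElement().make_table([['a', 'bb'], ['1', '2']]) yields roughly:
#
#   = ==
#   a bb
#   = ==
#   1 2
#   = ==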
| {
"content_hash": "64664a3eaaed43042572a2e42126c733",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 103,
"avg_line_length": 21.04527559055118,
"alnum_prop": 0.45814236273501074,
"repo_name": "svilborg/simplerestler",
"id": "fbf525fa9ad00fa59dd1dcfa78a5a4a22a295312",
"size": "10691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplerestler/element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21335"
}
],
"symlink_target": ""
} |
from scrapy_kafka.spiders import ListeningKafkaSpider
from ..items import DmozItem
class CustomKafkaSpider(ListeningKafkaSpider):
name = "dmoz_kafka"
allowed_domains = ["dmoz.org"]
def parse(self, response):
for sel in response.xpath('//ul/li'):
item = DmozItem()
item['title'] = sel.xpath('a/text()').extract()
item['link'] = sel.xpath('a/@href').extract()
item['desc'] = sel.xpath('text()').extract()
yield item
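# Once the Kafka connection settings described in the scrapy_kafka
# documentation are configured in the project's settings.py, this spider runs
# like any other and parses each page whose URL arrives on the consumed topic:
#
#   $ scrapy crawl dmoz_kafka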
| {
"content_hash": "eea9f794f195182b2d5dab731e7a7d17",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 33.13333333333333,
"alnum_prop": 0.5975855130784709,
"repo_name": "dfdeshom/scrapy-kafka",
"id": "c26aa5a698f4b2c7208e48dc45fbbb2afefec89a",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/spiders/custom_kafka_spider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8107"
}
],
"symlink_target": ""
} |
import nova.conf
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = nova.conf.CONF
class FlavorsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
sample_dir = 'flavors'
def _get_flags(self):
f = super(FlavorsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavor_swap.Flavor_swap')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.flavor_disabled.Flavor_disabled')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.flavor_access.Flavor_access')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.flavorextradata.Flavorextradata')
return f
def test_flavors_get(self):
response = self._do_get('flavors/1')
self._verify_response('flavor-get-resp', {}, response, 200)
def test_flavors_list(self):
response = self._do_get('flavors')
self._verify_response('flavors-list-resp', {}, response, 200)
def test_flavors_detail(self):
response = self._do_get('flavors/detail')
self._verify_response('flavors-detail-resp', {}, response, 200)
class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
all_extensions = True
sample_dir = None
def _get_flags(self):
f = super(FlavorsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
return f
| {
"content_hash": "807922daca07f7029b8243dffedbc448",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 38.395348837209305,
"alnum_prop": 0.6529376135675349,
"repo_name": "bigswitch/nova",
"id": "6c10dbe61c30ac114972ad07f86fd0efcfa15c0f",
"size": "2283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_flavors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17220528"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
import requests
from bs4 import BeautifulSoup
from .common import get_text_if_exists
def get_admin_announces():
"""Get announces from the administration office of University of Tokyo"""
url = "http://www.c.u-tokyo.ac.jp/zenki/news/kyoumu/index.html"
host = url.split("/zenki", 2)[0]
res = requests.get(url)
soup = BeautifulSoup(res.text)
# updated
updated = soup.find('p', {'id': 'update'})
updated = updated.text.strip().split(u':')[1]
# announces
    data = []
    newslist = soup.find("div", id="newslist2").find('dl')
for line in newslist.find_all(['dt', 'dd']):
if line == "\n":
continue
if str(line).startswith("<dt>"):
imgs = line.find_all('img')
date = get_text_if_exists(line.contents[0]).strip()
data.append(
{"date" : date,
"kind_image" : host + imgs[0].attrs["src"],
"grade_image": host + imgs[1].attrs["src"],
},
)
elif str(line).startswith("<dd>"):
href = line.contents[0].attrs['href']
if not href.startswith('http'):
href = host + line.contents[0].attrs["href"]
data[len(data)-1]["href"] = href
data[len(data)-1]["announce"] = line.contents[0].string
return updated, data
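# Usage sketch:
#
#   updated, announces = get_admin_announces()
#   # 'updated' is the page's last-updated string; each entry in 'announces'
#   # is a dict with 'date', 'kind_image', 'grade_image', 'href' and
#   # 'announce' keys.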
| {
"content_hash": "6b0279c76d1e515fbd98ccf58680ce49",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 34.25,
"alnum_prop": 0.5343065693430656,
"repo_name": "wkentaro/utaskweb",
"id": "dee01d84b35eee56c1804272fe396c2c323ccce2",
"size": "1420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utaskweb/kyomu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7343"
}
],
"symlink_target": ""
} |
"""Generate XML and HTML test reports."""
try:
from mako.runtime import Context
from mako.template import Template
from mako import exceptions
except ImportError:
  pass  # Mako is not installed; that's OK, HTML report generation is skipped.
import os
import string
import xml.etree.cElementTree as ET
def _filter_msg(msg, output_format):
"""Filters out nonprintable and illegal characters from the message."""
if output_format in ['XML', 'HTML']:
# keep whitespaces but remove formfeed and vertical tab characters
# that make XML report unparseable.
filtered_msg = filter(
lambda x: x in string.printable and x != '\f' and x != '\v',
msg.decode('UTF-8', 'ignore'))
if output_format == 'HTML':
      filtered_msg = filtered_msg.replace('"', '&quot;')
return filtered_msg
else:
return msg
def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
suite_name='tests'):
"""Generate JUnit-like XML report."""
root = ET.Element('testsuites')
testsuite = ET.SubElement(root, 'testsuite', id='1', package=suite_package,
name=suite_name)
for shortname, results in resultset.iteritems():
for result in results:
xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
if result.elapsed_time:
xml_test.set('time', str(result.elapsed_time))
ET.SubElement(xml_test, 'system-out').text = _filter_msg(result.message,
'XML')
if result.state == 'FAILED':
ET.SubElement(xml_test, 'failure', message='Failure')
elif result.state == 'TIMEOUT':
ET.SubElement(xml_test, 'error', message='Timeout')
tree = ET.ElementTree(root)
tree.write(xml_report, encoding='UTF-8')
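# Minimal usage sketch (the result objects only need .state, .message and
# .elapsed_time attributes; JobResult below is a stand-in, not the real type):
#
#   import collections
#   JobResult = collections.namedtuple('JobResult', 'state message elapsed_time')
#   resultset = {'bins/foo_test': [JobResult('PASSED', 'ok', 1.2)]}
#   render_junit_xml_report(resultset, 'report.xml')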
def render_interop_html_report(
client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
resultset, num_failures, cloud_to_prod, prod_servers, http2_interop):
"""Generate HTML report for interop tests."""
template_file = 'tools/run_tests/interop_html_report.template'
try:
mytemplate = Template(filename=template_file, format_exceptions=True)
except NameError:
print 'Mako template is not installed. Skipping HTML report generation.'
return
except IOError as e:
print 'Failed to find the template %s: %s' % (template_file, e)
return
sorted_test_cases = sorted(test_cases)
sorted_auth_test_cases = sorted(auth_test_cases)
sorted_http2_cases = sorted(http2_cases)
sorted_client_langs = sorted(client_langs)
sorted_server_langs = sorted(server_langs)
sorted_prod_servers = sorted(prod_servers)
args = {'client_langs': sorted_client_langs,
'server_langs': sorted_server_langs,
'test_cases': sorted_test_cases,
'auth_test_cases': sorted_auth_test_cases,
'http2_cases': sorted_http2_cases,
'resultset': resultset,
'num_failures': num_failures,
'cloud_to_prod': cloud_to_prod,
'prod_servers': sorted_prod_servers,
'http2_interop': http2_interop}
html_report_out_dir = 'reports'
if not os.path.exists(html_report_out_dir):
os.mkdir(html_report_out_dir)
html_file_path = os.path.join(html_report_out_dir, 'index.html')
try:
with open(html_file_path, 'w') as output_file:
mytemplate.render_context(Context(output_file, **args))
except:
print(exceptions.text_error_template().render())
raise
| {
"content_hash": "42491699e81cb5f6777c1f4b1561b458",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 37.57608695652174,
"alnum_prop": 0.6546138270176454,
"repo_name": "arkmaxim/grpc",
"id": "4fe9cf84136f96f2c6c4bd4ef34b10f1787dc97b",
"size": "4986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/run_tests/report_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "32056"
},
{
"name": "C",
"bytes": "5497549"
},
{
"name": "C#",
"bytes": "1289743"
},
{
"name": "C++",
"bytes": "1795452"
},
{
"name": "CMake",
"bytes": "38175"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "338062"
},
{
"name": "M4",
"bytes": "36831"
},
{
"name": "Makefile",
"bytes": "635104"
},
{
"name": "Objective-C",
"bytes": "283782"
},
{
"name": "PHP",
"bytes": "148056"
},
{
"name": "Protocol Buffer",
"bytes": "116293"
},
{
"name": "Python",
"bytes": "1138919"
},
{
"name": "Ruby",
"bytes": "566458"
},
{
"name": "Shell",
"bytes": "54583"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
"""efi module."""
from __future__ import print_function
from cStringIO import StringIO
from _ctypes import CFuncPtr as _CFuncPtr
import _efi
import atexit
import binascii
import bits
import bits.cdata
from collections import OrderedDict
from ctypes import *
from efikeys import *
import redirect
import os
import sys as _sys
import traceback
import ttypager
import uuid
known_uuids = {
uuid.UUID('00720665-67eb-4a99-baf7-d3c33a1c7cc9'): 'EFI_TCP4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('02e800be-8f01-4aa6-946b-d71388e1833f'): 'EFI_MTFTP4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('0379be4e-d706-437d-b037-edb82fb772a4'): 'EFI_DEVICE_PATH_UTILITIES_PROTOCOL_GUID',
uuid.UUID('03c4e603-ac28-11d3-9a2d-0090273fc14d'): 'EFI_PXE_BASE_CODE_PROTOCOL_GUID',
uuid.UUID('05ad34ba-6f02-4214-952e-4da0398e2bb9'): 'EFI_DXE_SERVICES_TABLE_GUID',
uuid.UUID('09576e91-6d3f-11d2-8e39-00a0c969723b'): 'EFI_DEVICE_PATH_PROTOCOL_GUID',
uuid.UUID('09576e92-6d3f-11d2-8e39-00a0c969723b'): 'EFI_FILE_INFO_ID',
uuid.UUID('09576e93-6d3f-11d2-8e39-00a0c969723b'): 'EFI_FILE_SYSTEM_INFO_ID',
uuid.UUID('0db48a36-4e54-ea9c-9b09-1ea5be3a660b'): 'EFI_REST_PROTOCOL_GUID',
uuid.UUID('0faaecb1-226e-4782-aace-7db9bcbf4daf'): 'EFI_FTP4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('107a772c-d5e1-11d4-9a46-0090273fc14d'): 'EFI_COMPONENT_NAME_PROTOCOL_GUID',
uuid.UUID('11b34006-d85b-4d0a-a290-d5a571310ef7'): 'PCD_PROTOCOL_GUID',
uuid.UUID('13a3f0f6-264a-3ef0-f2e0-dec512342f34'): 'EFI_PCD_PROTOCOL_GUID',
uuid.UUID('13ac6dd1-73d0-11d4-b06b-00aa00bd6de7'): 'EFI_EBC_INTERPRETER_PROTOCOL_GUID',
uuid.UUID('143b7632-b81b-4cb7-abd3-b625a5b9bffe'): 'EFI_EXT_SCSI_PASS_THRU_PROTOCOL_GUID',
uuid.UUID('151c8eae-7f2c-472c-9e54-9828194f6a88'): 'EFI_DISK_IO2_PROTOCOL_GUID',
uuid.UUID('1682fe44-bd7a-4407-b7c7-dca37ca3922d'): 'EFI_TLS_CONFIGURATION_PROTOCOL_GUID',
uuid.UUID('18a031ab-b443-4d1a-a5c0-0c09261e9f71'): 'EFI_DRIVER_BINDING_PROTOCOL_GUID',
uuid.UUID('1c0c34f6-d380-41fa-a049-8ad06c1a66aa'): 'EFI_EDID_DISCOVERED_PROTOCOL_GUID',
uuid.UUID('1d3de7f0-0807-424f-aa69-11a54e19a46f'): 'EFI_ATA_PASS_THRU_PROTOCOL_GUID',
uuid.UUID('2755590c-6f3c-42fa-9ea4-a3ba543cda25'): 'EFI_DEBUG_SUPPORT_PROTOCOL_GUID',
uuid.UUID('2a534210-9280-41d8-ae79-cada01a2b127'): 'EFI_DRIVER_HEALTH_PROTOCOL_GUID',
uuid.UUID('2c8759d5-5c2d-66ef-925f-b66c101957e2'): 'EFI_IP6_PROTOCOL_GUID',
uuid.UUID('2f707ebb-4a1a-11d4-9a38-0090273fc14d'): 'EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID',
uuid.UUID('31878c87-0b75-11d5-9a4f-0090273fc14d'): 'EFI_SIMPLE_POINTER_PROTOCOL_GUID',
uuid.UUID('31a6406a-6bdf-4e46-b2a2-ebaa89c40920'): 'EFI_HII_IMAGE_PROTOCOL_GUID',
uuid.UUID('330d4706-f2a0-4e4f-a369-b66fa8d54385'): 'EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID',
uuid.UUID('387477c1-69c7-11d2-8e39-00a0c969723b'): 'EFI_SIMPLE_TEXT_INPUT_PROTOCOL_GUID',
uuid.UUID('387477c2-69c7-11d2-8e39-00a0c969723b'): 'EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL_GUID',
uuid.UUID('39b68c46-f7fb-441b-b6ec-16b0f69821f3'): 'EFI_CAPSULE_REPORT_GUID',
uuid.UUID('3ad9df29-4501-478d-b1f8-7f7fe70e50f3'): 'EFI_UDP4_PROTOCOL_GUID',
uuid.UUID('3b95aa31-3793-434b-8667-c8070892e05e'): 'EFI_IP4_CONFIG_PROTOCOL_GUID',
uuid.UUID('3e35c163-4074-45dd-431e-23989dd86b32'): 'EFI_HTTP_UTILITIES_PROTOCOL_GUID',
uuid.UUID('3e745226-9818-45b6-a2ac-d7cd0e8ba2bc'): 'EFI_USB2_HC_PROTOCOL_GUID',
uuid.UUID('41d94cd2-35b6-455a-8258-d4e51334aadd'): 'EFI_IP4_PROTOCOL_GUID',
uuid.UUID('49152e77-1ada-4764-b7a2-7afefed95e8b'): 'EFI_DEBUG_IMAGE_INFO_TABLE_GUID',
uuid.UUID('4c19049f-4137-4dd3-9c10-8b97a83ffdfa'): 'EFI_MEMORY_TYPE_INFORMATION_GUID',
uuid.UUID('4cf5b200-68b8-4ca5-9eec-b23e3f50029a'): 'EFI_PCI_IO_PROTOCOL_GUID',
uuid.UUID('4d330321-025f-4aac-90d8-5ed900173b63'): 'EFI_DRIVER_DIAGNOSTICS_PROTOCOL_GUID',
uuid.UUID('4f948815-b4b9-43cb-8a33-90e060b34955'): 'EFI_UDP6_PROTOCOL_GUID',
uuid.UUID('587e72d7-cc50-4f79-8209-ca291fc1a10f'): 'EFI_HII_CONFIG_ROUTING_PROTOCOL_GUID',
uuid.UUID('59324945-ec44-4c0d-b1cd-9db139df070c'): 'EFI_ISCSI_INITIATOR_NAME_PROTOCOL_GUID',
uuid.UUID('5b1b31a1-9562-11d2-8e3f-00a0c969723b'): 'EFI_LOADED_IMAGE_PROTOCOL_GUID',
uuid.UUID('5b446ed1-e30b-4faa-871a-3654eca36080'): 'EFI_IP4_CONFIG2_PROTOCOL_GUID',
uuid.UUID('5c198761-16a8-4e69-972c-89d67954f81d'): 'EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL_GUID',
uuid.UUID('65530bc7-a359-410f-b010-5aadc7ec2b62'): 'EFI_TCP4_PROTOCOL_GUID',
uuid.UUID('66ed4721-3c98-4d3e-81e3-d03dd39a7254'): 'EFI_UDP6_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('6a1ee763-d47a-43b4-aabe-ef1de2ab56fc'): 'EFI_HII_PACKAGE_LIST_PROTOCOL_GUID',
uuid.UUID('6a7a5cff-e8d9-4f70-bada-75ab3025ce14'): 'EFI_COMPONENT_NAME2_PROTOCOL_GUID',
uuid.UUID('6b30c738-a391-11d4-9a3b-0090273fc14d'): 'EFI_PLATFORM_DRIVER_OVERRIDE_PROTOCOL_GUID',
uuid.UUID('7739f24c-93d7-11d4-9a3a-0090273fc14d'): 'EFI_HOB_LIST_GUID',
uuid.UUID('78247c57-63db-4708-99c2-a8b4a9a61f6b'): 'EFI_MTFTP4_PROTOCOL_GUID',
uuid.UUID('783658a3-4172-4421-a299-e009079c0cb4'): 'EFI_LEGACY_BIOS_PLATFORM_PROTOCOL_GUID',
uuid.UUID('7a59b29b-910b-4171-8242-a85a0df25b5b'): 'EFI_HTTP_PROTOCOL_GUID',
uuid.UUID('7ab33a91-ace5-4326-b572-e7ee33d39f16'): 'EFI_MANAGED_NETWORK_PROTOCOL_GUID',
uuid.UUID('7f1647c8-b76e-44b2-a565-f70ff19cd19e'): 'EFI_DNS6_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('83f01464-99bd-45e5-b383-af6305d8e9e6'): 'EFI_UDP4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('87c8bad7-0595-4053-8297-dede395f5d5b'): 'EFI_DHCP6_PROTOCOL_GUID',
uuid.UUID('8868e871-e4f1-11d3-bc22-0080c73c8881'): 'EFI_ACPI_TABLE_GUID',
uuid.UUID('8a219718-4ef5-4761-91c8-c0f04bda9e56'): 'EFI_DHCP4_PROTOCOL_GUID',
uuid.UUID('8b843e20-8132-4852-90cc-551a4e4a7f1c'): 'EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID',
uuid.UUID('8d59d32b-c655-4ae9-9b15-f25904992a43'): 'EFI_ABSOLUTE_POINTER_PROTOCOL_GUID',
uuid.UUID('9042a9de-23dc-4a38-96fb-7aded080516a'): 'EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID',
uuid.UUID('937fe521-95ae-4d1a-8929-48bcd90ad31a'): 'EFI_IP6_CONFIG_PROTOCOL_GUID',
uuid.UUID('952cb795-ff36-48cf-a249-4df486d6ab8d'): 'EFI_TLS_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('964e5b21-6459-11d2-8e39-00a0c969723b'): 'EFI_BLOCK_IO_PROTOCOL_GUID',
uuid.UUID('964e5b22-6459-11d2-8e39-00a0c969723b'): 'EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID',
uuid.UUID('9d9a39d8-bd42-4a73-a4d5-8ee94be11380'): 'EFI_DHCP4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('9e23d768-d2f3-4366-9fc3-3a7aba864374'): 'EFI_VLAN_CONFIG_PROTOCOL_GUID',
uuid.UUID('9fb9a8a1-2f4a-43a6-889c-d0f7b6c47ad5'): 'EFI_DHCP6_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('a3979e64-ace8-4ddc-bc07-4d66b8fd0977'): 'EFI_IPSEC2_PROTOCOL_GUID',
uuid.UUID('a4c751fc-23ae-4c3e-92e9-4964cf63f349'): 'EFI_UNICODE_COLLATION_PROTOCOL2_GUID',
uuid.UUID('a77b2472-e282-4e9f-a245-c2c0e27bbcc1'): 'EFI_BLOCK_IO2_PROTOCOL_GUID',
uuid.UUID('ae3d28cc-e05b-4fa1-a011-7eb55a3f1401'): 'EFI_DNS4_PROTOCOL_GUID',
uuid.UUID('b625b186-e063-44f7-8905-6a74dc6f52b4'): 'EFI_DNS4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('b9d4c360-bcfb-4f9b-9298-53c136982258'): 'EFI_FORM_BROWSER2_PROTOCOL_GUID',
uuid.UUID('bb25cf6f-f1d4-11d2-9a0c-0090273fc1fd'): 'EFI_SERIAL_IO_PROTOCOL_GUID',
uuid.UUID('bc62157e-3e33-4fec-9920-2d3b36d750df'): 'EFI_LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID',
uuid.UUID('bd8c1056-9f36-44ec-92a8-a6337f817986'): 'EFI_EDID_ACTIVE_PROTOCOL_GUID',
uuid.UUID('bdc8e6af-d9bc-4379-a72a-e0c4e75dae1c'): 'EFI_HTTP_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('bf0a78ba-ec29-49cf-a1c9-7ae54eab6a51'): 'EFI_MTFTP6_PROTOCOL_GUID',
uuid.UUID('c51711e7-b4bf-404a-bfb8-0a048ef1ffe4'): 'EFI_IP4_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('c68ed8e2-9dc6-4cbd-9d94-db65acc5c332'): 'EFI_SMM_COMMUNICATION_PROTOCOL_GUID',
uuid.UUID('ca37bc1f-a327-4ae9-828a-8c40d8506a17'): 'EFI_DNS6_PROTOCOL_GUID',
uuid.UUID('ce345171-ba0b-11d2-8e4f-00a0c969723b'): 'EFI_DISK_IO_PROTOCOL_GUID',
uuid.UUID('ce5e5929-c7a3-4602-ad9e-c9daf94ebfcf'): 'EFI_IPSEC_CONFIG_PROTOCOL_GUID',
uuid.UUID('d42ae6bd-1352-4bfb-909a-ca72a6eae889'): 'LZMAF86_CUSTOM_DECOMPRESS_GUID',
uuid.UUID('d8117cfe-94a6-11d4-9a3a-0090273fc14d'): 'EFI_DECOMPRESS_PROTOCOL_GUID',
uuid.UUID('d9760ff3-3cca-4267-80f9-7527fafa4223'): 'EFI_MTFTP6_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('db47d7d3-fe81-11d3-9a35-0090273fc14d'): 'EFI_FILE_SYSTEM_VOLUME_LABEL_ID',
uuid.UUID('dd9e7534-7762-4698-8c14-f58517a625aa'): 'EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL_GUID',
uuid.UUID('dfb386f7-e100-43ad-9c9a-ed90d08a5e12'): 'EFI_IPSEC_PROTOCOL_GUID',
uuid.UUID('e9ca4775-8657-47fc-97e7-7ed65a084324'): 'EFI_HII_FONT_PROTOCOL_GUID',
uuid.UUID('eb338826-681b-4295-b356-2b364c757b09'): 'EFI_FTP4_PROTOCOL_GUID',
uuid.UUID('eb9d2d2f-2d88-11d3-9a16-0090273fc14d'): 'MPS_TABLE_GUID',
uuid.UUID('eb9d2d30-2d88-11d3-9a16-0090273fc14d'): 'ACPI_TABLE_GUID',
uuid.UUID('eb9d2d31-2d88-11d3-9a16-0090273fc14d'): 'SMBIOS_TABLE_GUID',
uuid.UUID('eb9d2d32-2d88-11d3-9a16-0090273fc14d'): 'SAL_SYSTEM_TABLE_GUID',
uuid.UUID('eba4e8d2-3858-41ec-a281-2647ba9660d0'): 'EFI_DEBUGPORT_PROTOCOL_GUID',
uuid.UUID('ec20eb79-6c1a-4664-9a0d-d2e4cc16d664'): 'EFI_TCP6_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('ec835dd3-fe0f-617b-a621-b350c3e13388'): 'EFI_IP6_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('ee4e5898-3914-4259-9d6e-dc7bd79403cf'): 'LZMA_CUSTOM_DECOMPRESS_GUID',
uuid.UUID('ef9fc172-a1b2-4693-b327-6d32fc416042'): 'EFI_HII_DATABASE_PROTOCOL_GUID',
uuid.UUID('f2fd1544-9794-4a2c-992e-e5bbcf20e394'): 'SMBIOS3_TABLE_GUID',
uuid.UUID('f36ff770-a7e1-42cf-9ed2-56f0f271f44c'): 'EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('f44c00ee-1f2c-4a00-aa09-1c9f3e0800a3'): 'EFI_ARP_SERVICE_BINDING_PROTOCOL_GUID',
uuid.UUID('f4b427bb-ba21-4f16-bc4e-43e416ab619c'): 'EFI_ARP_PROTOCOL_GUID',
uuid.UUID('f4ccbfb7-f6e0-47fd-9dd4-10a8f150c191'): 'EFI_SMM_BASE2_PROTOCOL_GUID',
uuid.UUID('f541796d-a62e-4954-a775-9584f61b9cdd'): 'EFI_TCG_PROTOCOL_GUID',
uuid.UUID('fc1bcdb0-7d31-49aa-936a-a4600d9dd083'): 'EFI_CRC32_GUIDED_SECTION_EXTRACTION_GUID',
uuid.UUID('ffe06bdd-6107-46a6-7bb2-5a9c7ec5275c'): 'EFI_ACPI_TABLE_PROTOCOL_GUID',
}
# Create each of the values above as a constant referring to the corresponding UUID.
globals().update(map(reversed, known_uuids.iteritems()))
class Protocol(bits.cdata.Struct):
"""Base class for EFI protocols. Derived classes must have a uuid.UUID named guid."""
@classmethod
def from_handle(cls, handle):
"""Retrieve an instance of this protocol from an EFI handle"""
p = cls.from_address(get_protocol(handle, cls.guid))
p._handle = handle
return p
ptrsize = sizeof(c_void_p)
if ptrsize == 4:
EFIFUNCTYPE = CFUNCTYPE
else:
_efi_functype_cache = {}
def EFIFUNCTYPE(restype, *argtypes):
"""EFIFUNCTYPE(restype, *argtypes) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
"""
try:
return _efi_functype_cache[(restype, argtypes)]
except KeyError:
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = 0
_efi_functype_cache[(restype, argtypes)] = CFunctionType
return CFunctionType
# Define UEFI data types
BOOLEAN = c_bool
CHAR8 = c_char
CHAR16 = c_wchar
EFI_BROWSER_ACTION = c_ulong
EFI_BROWSER_ACTION_REQUEST = c_ulong
EFI_EVENT = c_void_p
EFI_GUID = bits.cdata.GUID
EFI_HANDLE = c_void_p
EFI_HII_DATABASE_NOTIFY_TYPE = c_ulong
EFI_HII_HANDLE = c_void_p
EFI_INTERFACE_TYPE = c_ulong
EFI_KEY = c_ulong
EFI_LOCATE_SEARCH_TYPE = c_ulong
EFI_MEMORY_TYPE = c_ulong
EFI_PHYSICAL_ADDRESS = c_uint64
EFI_QUESTION_ID = c_uint16
EFI_STATUS = c_ulong
EFI_STRING = c_wchar_p
EFI_STRING_ID = c_uint16
EFI_TIMER_DELAY = c_ulong
EFI_TPL = c_ulong
EFI_VIRTUAL_ADDRESS = c_uint64
INT8 = c_int8
INT16 = c_int16
INT32 = c_int32
INT64 = c_int64
UINTN = c_ulong
UINT8 = c_uint8
UINT16 = c_uint16
UINT32 = c_uint32
UINT64 = c_uint64
def FUNC(*argtypes, **kw):
"""FUNC(*argtypes, ret=EFI_STATUS) -> function prototype.
ret: the result type (defaults to EFI_STATUS)
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
"""
ret = kw.pop("ret", EFI_STATUS)
if kw:
raise TypeError("Invalid keyword arguments")
return EFIFUNCTYPE(ret, *argtypes)
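# Sketch: FUNC builds an EFI-calling-convention prototype analogous to ctypes'
# CFUNCTYPE. A function taking one UINTN and returning the default EFI_STATUS
# could be wrapped from a raw entry-point address (address assumed valid):
#
#   prototype = FUNC(UINTN)
#   fn = prototype(address)   # foreign function at 'address'
#   status = fn(42)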
def compute_crc(buf, offset):
before_buffer = (UINT8 * offset).from_buffer(buf)
zero = (UINT8 * 4)()
after_buffer = (UINT8 * (sizeof(buf) - offset - 4)).from_buffer(buf, offset + 4)
crc = binascii.crc32(before_buffer)
crc = binascii.crc32(zero, crc)
return binascii.crc32(after_buffer, crc)
def table_crc(table):
th = TableHeader.from_buffer(table)
buf = (UINT8 * th.HeaderSize).from_buffer(table)
crc = compute_crc(buf, TableHeader.CRC32.offset)
return th.CRC32 == c_uint32(crc).value
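# UEFI defines a table's CRC32 over HeaderSize bytes with the CRC32 field
# itself zeroed; compute_crc reproduces that by hashing the bytes before and
# after the field with four zero bytes in between. Verification sketch
# (assuming HeaderSize matches sizeof(EFI_SYSTEM_TABLE), defined below):
#
#   buf = (UINT8 * sizeof(EFI_SYSTEM_TABLE)).from_address(_efi._system_table)
#   assert table_crc(buf)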
class EFI_DEVICE_PATH_PROTOCOL(Protocol):
guid = EFI_DEVICE_PATH_PROTOCOL_GUID
_fields_ = [
("Type", UINT8),
("SubType", UINT8),
("Length", UINT8 * 2),
]
class EFI_DEVICE_PATH_TO_TEXT_PROTOCOL(Protocol):
guid = EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID
_fields_ = [
("ConvertDeviceNodeToText", FUNC(POINTER(EFI_DEVICE_PATH_PROTOCOL), BOOLEAN, BOOLEAN, ret=POINTER(CHAR16))),
("ConvertDevicePathToText", FUNC(POINTER(EFI_DEVICE_PATH_PROTOCOL), BOOLEAN, BOOLEAN, ret=POINTER(CHAR16))),
]
def _helper(self, method, path):
ucs2_string_ptr = method(path, 0, 0)
try:
s = wstring_at(ucs2_string_ptr)
finally:
check_status(system_table.BootServices.contents.FreePool(ucs2_string_ptr))
return s
def device_path_text(self, path):
"""Convert the specified device path to text."""
return self._helper(self.ConvertDevicePathToText, path)
def device_node_text(self, path):
"""Convert the specified device node to text."""
return self._helper(self.ConvertDeviceNodeToText, path)
class EFI_HII_TIME(bits.cdata.Struct):
_fields_ = [
('Hour', UINT8),
('Minute', UINT8),
('Second', UINT8),
]
class EFI_HII_DATE(bits.cdata.Struct):
_fields_ = [
('Year', UINT16),
('Month', UINT8),
('Day', UINT8),
]
class EFI_IFR_TYPE_VALUE(bits.cdata.Union):
_fields_ = [
('u8', UINT8),
('u16', UINT16),
('u32', UINT32),
('u64', UINT64),
('b', BOOLEAN),
('time', EFI_HII_TIME),
('date', EFI_HII_DATE),
('string', EFI_STRING_ID),
]
class EFI_HII_CONFIG_ACCESS_PROTOCOL(Protocol):
"""EFI HII Configuration Access Protocol"""
guid = EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID
EFI_HII_CONFIG_ACCESS_PROTOCOL._fields_ = [
('ExtractConfig', FUNC(POINTER(EFI_HII_CONFIG_ACCESS_PROTOCOL), EFI_STRING, POINTER(EFI_STRING), POINTER(EFI_STRING))),
('RouteConfig', FUNC(POINTER(EFI_HII_CONFIG_ACCESS_PROTOCOL), EFI_STRING, POINTER(EFI_STRING))),
('Callback', FUNC(POINTER(EFI_HII_CONFIG_ACCESS_PROTOCOL), EFI_BROWSER_ACTION, EFI_QUESTION_ID, UINT8, POINTER(EFI_IFR_TYPE_VALUE), POINTER(EFI_BROWSER_ACTION_REQUEST))),
]
class EFI_HII_CONFIG_ROUTING_PROTOCOL(Protocol):
"""EFI HII Configuration Routing Protocol"""
guid = EFI_HII_CONFIG_ROUTING_PROTOCOL_GUID
EFI_HII_CONFIG_ROUTING_PROTOCOL._fields_ = [
('ExtractConfig', FUNC(POINTER(EFI_HII_CONFIG_ROUTING_PROTOCOL), EFI_STRING, POINTER(EFI_STRING), POINTER(EFI_STRING))),
('ExportConfig', FUNC(POINTER(EFI_HII_CONFIG_ROUTING_PROTOCOL), POINTER(EFI_STRING))),
('RouteConfig', FUNC(POINTER(EFI_HII_CONFIG_ROUTING_PROTOCOL), EFI_STRING, POINTER(EFI_STRING))),
('BlockToConfig', FUNC(POINTER(EFI_HII_CONFIG_ROUTING_PROTOCOL), EFI_STRING, POINTER(UINT8), UINTN, POINTER(EFI_STRING), POINTER(EFI_STRING))),
('ConfigToBlock', FUNC(POINTER(EFI_HII_CONFIG_ROUTING_PROTOCOL), POINTER(EFI_STRING), POINTER(UINT8), POINTER(UINTN), POINTER(EFI_STRING))),
('GetAltConfig', FUNC(POINTER(EFI_HII_CONFIG_ROUTING_PROTOCOL), EFI_STRING, POINTER(EFI_GUID), EFI_STRING, POINTER(EFI_DEVICE_PATH_PROTOCOL), EFI_STRING, POINTER(EFI_STRING))),
]
class EFI_HII_PACKAGE_LIST_HEADER(bits.cdata.Struct):
_fields_ = [
('PackageListGuid', EFI_GUID),
        ('PackageLength', UINT32),
]
class EFI_KEY_DESCRIPTOR(bits.cdata.Struct):
_fields_ = [
('Key', EFI_KEY),
('Unicode', CHAR16),
('ShiftedUnicode', CHAR16),
('AltGrUnicode', CHAR16),
('ShiftedAltGrUnicode', CHAR16),
('Modifier', UINT16),
('AffectedAttribute', UINT16),
]
class EFI_HII_KEYBOARD_LAYOUT(bits.cdata.Struct):
_fields_ = [
('LayoutLength', UINT16),
('Guid', EFI_GUID),
('LayoutDescriptorStringOffset', UINT32),
('DescriptorCount', UINT8),
('Descriptors', POINTER(EFI_KEY_DESCRIPTOR)),
]
class EFI_HII_DATABASE_PROTOCOL(Protocol):
"""EFI HII Database Protocol"""
guid = EFI_HII_DATABASE_PROTOCOL_GUID
EFI_HII_DATABASE_PROTOCOL._fields_ = [
('NewPackageList', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), POINTER(EFI_HII_PACKAGE_LIST_HEADER), EFI_HANDLE, POINTER(EFI_HANDLE))),
('RemovePackageList', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), EFI_HII_HANDLE)),
('UpdatePackageList', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), EFI_HII_HANDLE, POINTER(EFI_HII_PACKAGE_LIST_HEADER))),
('ListPackageLists', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), UINT8, POINTER(EFI_GUID), POINTER(UINTN), POINTER(EFI_HII_HANDLE))),
('ExportPackageLists', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), EFI_HII_HANDLE, POINTER(UINTN), POINTER(EFI_HII_PACKAGE_LIST_HEADER))),
('RegisterPackageNotify', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), UINT8, POINTER(EFI_GUID), EFI_HII_DATABASE_NOTIFY_TYPE, POINTER(EFI_HANDLE))),
('UnregisterPackageNotify', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), EFI_HANDLE)),
    ('FindKeyboardLayouts', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), POINTER(UINT16))),
('GetKeyboardLayouts', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), POINTER(EFI_GUID), POINTER(UINT16), POINTER(EFI_HII_KEYBOARD_LAYOUT))),
('SetKeyboardLayout', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), POINTER(EFI_GUID))),
('GetPackageListHandle', FUNC(POINTER(EFI_HII_DATABASE_PROTOCOL), EFI_HII_HANDLE, POINTER(EFI_HANDLE))),
]
class EFI_INPUT_KEY(bits.cdata.Struct):
_fields_ = [
("ScanCode", UINT16),
("UnicodeChar", CHAR16),
]
class EFI_SIMPLE_TEXT_INPUT_PROTOCOL(Protocol):
"""EFI Simple Text Input Protocol"""
guid = EFI_SIMPLE_TEXT_INPUT_PROTOCOL_GUID
EFI_SIMPLE_TEXT_INPUT_PROTOCOL._fields_ = [
('Reset', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_PROTOCOL), BOOLEAN)),
('ReadKeyStroke', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_PROTOCOL), POINTER(EFI_INPUT_KEY))),
('WaitForKey', EFI_EVENT),
]
EFI_SHIFT_STATE_VALID = 0x80000000
EFI_RIGHT_SHIFT_PRESSED = 0x00000001
EFI_LEFT_SHIFT_PRESSED = 0x00000002
EFI_RIGHT_CONTROL_PRESSED = 0x00000004
EFI_LEFT_CONTROL_PRESSED = 0x00000008
EFI_RIGHT_ALT_PRESSED = 0x00000010
EFI_LEFT_ALT_PRESSED = 0x00000020
EFI_RIGHT_LOGO_PRESSED = 0x00000040
EFI_LEFT_LOGO_PRESSED = 0x00000080
EFI_MENU_KEY_PRESSED = 0x00000100
EFI_SYS_REQ_PRESSED = 0x00000200
EFI_KEY_TOGGLE_STATE = UINT8
EFI_TOGGLE_STATE_VALID = 0x80
EFI_KEY_STATE_EXPOSED = 0x40
EFI_SCROLL_LOCK_ACTIVE = 0x01
EFI_NUM_LOCK_ACTIVE = 0x02
EFI_CAPS_LOCK_ACTIVE = 0x04
class EFI_KEY_STATE(bits.cdata.Struct):
_fields_ = [
("KeyShiftState", UINT32),
("KeyToggleState", EFI_KEY_TOGGLE_STATE),
]
class EFI_KEY_DATA(bits.cdata.Struct):
_fields_ = [
("Key", EFI_INPUT_KEY),
("KeyState", EFI_KEY_STATE),
]
EFI_KEY_NOTIFY_FUNCTION = FUNC(POINTER(EFI_KEY_DATA))
class EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL(Protocol):
"""EFI Simple Text Input Ex Protocol"""
guid = EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL_GUID
EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL._fields_ = [
('Reset', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL), BOOLEAN)),
('ReadKeyStrokeEx', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL), POINTER(EFI_KEY_DATA))),
('WaitForKeyEx', EFI_EVENT),
('SetState', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL), POINTER(EFI_KEY_TOGGLE_STATE))),
('RegisterKeyNotify', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL), POINTER(EFI_KEY_DATA), EFI_KEY_NOTIFY_FUNCTION, POINTER(c_void_p))),
('UnregisterKeyNotify', FUNC(POINTER(EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL), c_void_p)),
]
class SIMPLE_TEXT_OUTPUT_MODE(bits.cdata.Struct):
"""Decode the SIMPLE_TEXT_OUTPUT_MODE structure"""
_fields_ = [
('MaxMode', INT32),
('Mode', INT32),
('Attribute', INT32),
('CursorColumn', INT32),
('CursorRow', INT32),
('CursorVisible', BOOLEAN),
]
class EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL(Protocol):
"""EFI Simple Text Output Protocol"""
guid = EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL_GUID
EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL._fields_ = [
('Reset', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), BOOLEAN)),
('OutputString', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), EFI_STRING)),
('TestString', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), EFI_STRING)),
('QueryMode', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), UINTN, POINTER(UINTN), POINTER(UINTN))),
('SetMode', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), UINTN)),
('SetAttribute', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), UINTN)),
('ClearScreen', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL))),
('SetCursorPosition', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), UINTN, UINTN)),
('EnableCursor', FUNC(POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL), BOOLEAN)),
('Mode', POINTER(SIMPLE_TEXT_OUTPUT_MODE)),
]
class ConfigurationTable(bits.cdata.Struct):
"""Decode the EFI Configuration Table"""
_fields_ = [
('VendorGuid', EFI_GUID),
('VendorTable', c_void_p),
]
_formats = {
'VendorGuid' : bits.cdata._format_guid,
}
class EFI_MEMORY_DESCRIPTOR(bits.cdata.Struct):
_fields_ = [
('Type', UINT32),
('PhysicalStart', EFI_PHYSICAL_ADDRESS),
('VirtualStart', EFI_VIRTUAL_ADDRESS),
('NumberOfPages', UINT64),
('Attribute', UINT64),
]
class EFI_OPEN_PROTOCOL_INFORMATION_ENTRY(bits.cdata.Struct):
_fields_ = [
('AgentHandle', EFI_HANDLE),
('ControllerHandle', EFI_HANDLE),
('Attributes', UINT32),
('OpenCount', UINT32),
]
class TableHeader(bits.cdata.Struct):
"""Decode the EFI Table Header"""
_fields_ = [
('Signature', UINT64),
('Revision', UINT32),
('HeaderSize', UINT32),
('CRC32', UINT32),
('Reserved', UINT32),
]
EFI_ALLOCATE_TYPE = UINTN
AllocateAnyPages, AllocateMaxAddress, AllocateAddress, MaxAllocateType = range(4)
EFI_EVENT_NOTIFY = FUNC(EFI_EVENT, c_void_p, ret=None)
class EFI_BOOT_SERVICES(bits.cdata.Struct):
"""Decode the EFI Boot Services"""
_fields_ = [
('Hdr', TableHeader),
('RaiseTPL', FUNC(EFI_TPL, ret=EFI_TPL)),
('RestoreTPL', FUNC(EFI_TPL, ret=None)),
('AllocatePages', FUNC(EFI_ALLOCATE_TYPE, EFI_MEMORY_TYPE, UINTN, POINTER(EFI_PHYSICAL_ADDRESS))),
('FreePages', FUNC(EFI_PHYSICAL_ADDRESS, UINTN)),
('GetMemoryMap', FUNC(POINTER(UINTN), POINTER(EFI_MEMORY_DESCRIPTOR), POINTER(UINTN), POINTER(UINTN), POINTER(UINT32))),
('AllocatePool', FUNC(EFI_MEMORY_TYPE, UINTN, POINTER(c_void_p))),
('FreePool', FUNC(c_void_p)),
('CreateEvent', FUNC(UINT32, EFI_TPL, EFI_EVENT_NOTIFY, c_void_p, POINTER(EFI_EVENT))),
('SetTimer', FUNC(EFI_EVENT, EFI_TIMER_DELAY, UINT64)),
('WaitForEvent', FUNC(UINTN, POINTER(EFI_EVENT), POINTER(UINTN))),
('SignalEvent', FUNC(EFI_EVENT)),
('CloseEvent', FUNC(EFI_EVENT)),
('CheckEvent', FUNC(EFI_EVENT)),
('InstallProtocolInterface', FUNC(POINTER(EFI_HANDLE), POINTER(EFI_GUID), EFI_INTERFACE_TYPE, c_void_p)),
('ReinstallProtocolInterface', FUNC(EFI_HANDLE, POINTER(EFI_GUID), c_void_p, c_void_p)),
('UninstallProtocolInterface', FUNC(EFI_HANDLE, POINTER(EFI_GUID), c_void_p)),
('HandleProtocol', FUNC(EFI_HANDLE, POINTER(EFI_GUID), POINTER(c_void_p))),
('Reserved', c_void_p),
('RegisterProtocolNotify', FUNC(POINTER(EFI_GUID), EFI_EVENT, POINTER(c_void_p))),
('LocateHandle', FUNC(EFI_LOCATE_SEARCH_TYPE, POINTER(EFI_GUID), c_void_p, POINTER(UINTN), POINTER(EFI_HANDLE))),
('LocateDevicePath', FUNC(POINTER(EFI_GUID), POINTER(POINTER(EFI_DEVICE_PATH_PROTOCOL)), POINTER(EFI_HANDLE))),
('InstallConfigurationTable', FUNC(POINTER(EFI_GUID), c_void_p)),
('LoadImage', FUNC(BOOLEAN, EFI_HANDLE, POINTER(EFI_DEVICE_PATH_PROTOCOL), c_void_p, UINTN, POINTER(EFI_HANDLE))),
('StartImage', FUNC(EFI_HANDLE, POINTER(UINTN), POINTER(POINTER(CHAR16)))),
('Exit', FUNC(EFI_HANDLE, EFI_STATUS, UINTN, POINTER(CHAR16))),
('UnloadImage', FUNC(EFI_HANDLE)),
('ExitBootServices', FUNC(EFI_HANDLE, UINTN)),
('GetNextMonotonicCount', FUNC(POINTER(UINT64))),
('Stall', FUNC(UINTN)),
('SetWatchdogTimer', FUNC(UINTN, UINT64, UINTN, POINTER(CHAR16))),
('ConnectController', FUNC(EFI_HANDLE, POINTER(EFI_HANDLE), POINTER(EFI_DEVICE_PATH_PROTOCOL), BOOLEAN)),
('DisconnectController', FUNC(EFI_HANDLE, EFI_HANDLE, EFI_HANDLE)),
('OpenProtocol', FUNC(EFI_HANDLE, POINTER(EFI_GUID), POINTER(c_void_p), EFI_HANDLE, EFI_HANDLE, UINT32)),
('CloseProtocol', FUNC(EFI_HANDLE, POINTER(EFI_GUID), EFI_HANDLE, EFI_HANDLE)),
('OpenProtocolInformation', FUNC(EFI_HANDLE, POINTER(EFI_GUID), POINTER(POINTER(EFI_OPEN_PROTOCOL_INFORMATION_ENTRY)), POINTER(UINTN))),
('ProtocolsPerHandle', FUNC(EFI_HANDLE, POINTER(POINTER(POINTER(EFI_GUID))), POINTER(UINTN))),
('LocateHandleBuffer', FUNC(EFI_LOCATE_SEARCH_TYPE, POINTER(EFI_GUID), c_void_p, POINTER(UINTN), POINTER(POINTER(EFI_HANDLE)))),
('LocateProtocol', FUNC(POINTER(EFI_GUID), c_void_p, POINTER(c_void_p))),
('InstallMultipleProtocolInterfaces', c_void_p),
('UninstallMultipleProtocolInterfaces', c_void_p),
('CalculateCrc32', FUNC(c_void_p, UINTN, POINTER(UINT32))),
('CopyMem', FUNC(c_void_p, c_void_p, UINTN)),
('SetMem', FUNC(c_void_p, UINTN, UINT8)),
('CreateEventEx', FUNC(UINT32, EFI_TPL, EFI_EVENT_NOTIFY, c_void_p, POINTER(EFI_GUID), POINTER(EFI_EVENT))),
]
class EFI_TIME(bits.cdata.Struct):
_fields_ = [
('Year', UINT16),
('Month', UINT8),
('Day', UINT8),
('Hour', UINT8),
('Minute', UINT8),
('Second', UINT8),
('Pad1', UINT8),
('Nanosecond', UINT32),
('TimeZone', INT16),
('Daylight', UINT8),
('Pad2', UINT8),
]
class EFI_TIME_CAPABILITIES(bits.cdata.Struct):
_fields_ = [
('Resolution', UINT32),
('Accuracy', UINT32),
('SetsToZero', BOOLEAN),
]
EFI_RESET_TYPE = UINTN
EfiResetCold, EfiResetWarm, EfiResetShutdown = range(3)
class EFI_CAPSULE_HEADER(bits.cdata.Struct):
_fields_ = [
('CapsuleGuid', EFI_GUID),
('HeaderSize', UINT32),
('Flags', UINT32),
('CapsuleImageSize', UINT32),
]
class EFI_RUNTIME_SERVICES(bits.cdata.Struct):
"""Decode the EFI Runtime Services"""
_fields_ = [
('Hdr', TableHeader),
('GetTime', FUNC(POINTER(EFI_TIME), POINTER(EFI_TIME_CAPABILITIES))),
('SetTime', FUNC(POINTER(EFI_TIME))),
('GetWakeupTime', FUNC(POINTER(BOOLEAN), POINTER(BOOLEAN), POINTER(EFI_TIME))),
('SetWakeupTime', FUNC(BOOLEAN, POINTER(EFI_TIME))),
('SetVirtualAddressMap', FUNC(UINTN, UINTN, UINT32, POINTER(EFI_MEMORY_DESCRIPTOR))),
('ConvertPointer', FUNC(UINTN, POINTER(c_void_p))),
('GetVariable', FUNC(EFI_STRING, POINTER(EFI_GUID), POINTER(UINT32), POINTER(UINTN), c_void_p)),
('GetNextVariableName', FUNC(POINTER(UINTN), POINTER(CHAR16), POINTER(EFI_GUID))),
('SetVariable', FUNC(EFI_STRING, POINTER(EFI_GUID), UINT32, UINTN, c_void_p)),
('GetNextHighMonotonicCount', FUNC(POINTER(UINT32))),
('ResetSystem', FUNC(EFI_RESET_TYPE, EFI_STATUS, UINTN, c_void_p)),
('UpdateCapsule', FUNC(POINTER(POINTER(EFI_CAPSULE_HEADER)), UINTN, EFI_PHYSICAL_ADDRESS)),
('QueryCapsuleCapabilities', FUNC(POINTER(POINTER(EFI_CAPSULE_HEADER)), UINTN, UINT64, EFI_RESET_TYPE)),
('QueryVariableInfo', FUNC(UINT32, POINTER(UINT64), POINTER(UINT64), POINTER(UINT64))),
]
class EFI_SYSTEM_TABLE(bits.cdata.Struct):
"""Decode the EFI System Table."""
_fields_ = [
('Hdr', TableHeader),
('FirmwareVendor', EFI_STRING),
('FirmwareRevision', UINT32),
('ConsoleInHandle', EFI_HANDLE),
('ConIn', POINTER(EFI_SIMPLE_TEXT_INPUT_PROTOCOL)),
('ConsoleOutHandle', EFI_HANDLE),
('ConOut', POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL)),
('StandardErrorHandle', EFI_HANDLE),
('StdErr', POINTER(EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL)),
('RuntimeServices', POINTER(EFI_RUNTIME_SERVICES)),
('BootServices', POINTER(EFI_BOOT_SERVICES)),
('NumberOfTableEntries', UINTN),
('ConfigurationTablePtr', POINTER(ConfigurationTable)),
]
@property
def ConfigurationTable(self):
ptr = cast(self.ConfigurationTablePtr, c_void_p)
return (ConfigurationTable * self.NumberOfTableEntries).from_address(ptr.value)
@property
def ConfigurationTableDict(self):
return OrderedDict((t.VendorGuid, t.VendorTable) for t in self.ConfigurationTable)
system_table = EFI_SYSTEM_TABLE.from_address(_efi._system_table)
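# Illustrative sketch (not part of the original module): ConfigurationTableDict
# maps each VendorGuid to its VendorTable pointer, so a well-known table can be
# looked up directly; vendor_guid is assumed to be one of the GUID constants
# defined elsewhere in this module.
def _example_find_vendor_table(vendor_guid):
    return system_table.ConfigurationTableDict.get(vendor_guid)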
TPL_APPLICATION = 4
TPL_CALLBACK = 8
TPL_NOTIFY = 16
TPL_HIGH_LEVEL = 31
EVT_TIMER = 0x80000000
EVT_RUNTIME = 0x40000000
EVT_NOTIFY_WAIT = 0x100
EVT_NOTIFY_SIGNAL = 0x200
class event_signal(object):
"""A wrapper around an EFI_EVENT of type EVT_NOTIFY_SIGNAL
Used for cases that should busy-loop calling some function until complete.
The caller must ensure that the event does not get signaled after the
event_signal gets destroyed."""
def __init__(self, abort=None):
self.signaled = False
self.closed = False
self.event = create_event(self._set_signaled, abort=abort)
def _set_signaled(self):
self.signaled = True
def close(self):
if not self.closed:
close_event(self.event)
self.closed = True
def __del__(self):
self.close()
def __enter__(self):
if self.closed:
raise ValueError("Cannot enter context with closed event")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
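# Illustrative sketch (not part of the original module): busy-poll an
# asynchronous EFI operation until its completion event fires.  start_io and
# poll are placeholder callables standing in for protocol methods such as
# EFI_TCP4_PROTOCOL.Transmit and Poll.
def _example_busy_wait(start_io, poll):
    with event_signal() as signal:
        start_io(signal.event)  # e.g. stash signal.event in a completion token
        while not signal.signaled:
            poll()  # let the protocol make progress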
class EFI_LOADED_IMAGE_PROTOCOL(Protocol):
"""EFI Loaded Image Protocol"""
guid = EFI_LOADED_IMAGE_PROTOCOL_GUID
_fields_ = [
('Revision', UINT32),
('ParentHandle', EFI_HANDLE),
('SystemTable', POINTER(EFI_SYSTEM_TABLE)),
('DeviceHandle', EFI_HANDLE),
('FilePath', POINTER(EFI_DEVICE_PATH_PROTOCOL)),
('Reserved', c_void_p),
('LoadOptionsSize', UINT32),
('LoadOptions', c_void_p),
('ImageBase', c_void_p),
('ImageSize', UINT64),
('ImageCodeType', EFI_MEMORY_TYPE),
('ImageDataType', EFI_MEMORY_TYPE),
('Unload', FUNC(EFI_HANDLE)),
]
class EFI_FILE_PROTOCOL(bits.cdata.Struct):
"""EFI File Protocol"""
pass
EFI_FILE_PROTOCOL._fields_ = [
('Revision', UINT64),
('Open', FUNC(POINTER(EFI_FILE_PROTOCOL), POINTER(POINTER(EFI_FILE_PROTOCOL)), EFI_STRING, UINT64, UINT64)),
('Close', FUNC(POINTER(EFI_FILE_PROTOCOL))),
('Delete', FUNC(POINTER(EFI_FILE_PROTOCOL))),
('Read', FUNC(POINTER(EFI_FILE_PROTOCOL), POINTER(UINTN), c_void_p)),
('Write', FUNC(POINTER(EFI_FILE_PROTOCOL), POINTER(UINTN), c_void_p)),
('GetPosition', FUNC(POINTER(EFI_FILE_PROTOCOL), POINTER(UINT64))),
('SetPosition', FUNC(POINTER(EFI_FILE_PROTOCOL), UINT64)),
('GetInfo', FUNC(POINTER(EFI_FILE_PROTOCOL), POINTER(EFI_GUID), POINTER(UINTN), c_void_p)),
('SetInfo', FUNC(POINTER(EFI_FILE_PROTOCOL), POINTER(EFI_GUID), UINTN, c_void_p)),
('Flush', FUNC(POINTER(EFI_FILE_PROTOCOL))),
]
class EFI_SIMPLE_FILE_SYSTEM_PROTOCOL(Protocol):
"""EFI Simple File System Protocol"""
guid = EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID
@property
def root(self):
root_ptr = POINTER(EFI_FILE_PROTOCOL)()
check_status(self.OpenVolume(byref(self), byref(root_ptr)))
return efi_file(root_ptr.contents)
EFI_SIMPLE_FILE_SYSTEM_PROTOCOL._fields_ = [
('Revision', UINT64),
('OpenVolume', FUNC(POINTER(EFI_SIMPLE_FILE_SYSTEM_PROTOCOL), POINTER(POINTER(EFI_FILE_PROTOCOL)))),
]
def make_UCS2_name_property():
"""Create a variable-sized UCS2-encoded name property at the end of the structure
Automatically resizes the structure and updates the field named "Size"
when set."""
def _get_name(self):
return wstring_at(addressof(self) + sizeof(self.__class__))
def _set_name(self, name):
b = create_unicode_buffer(name)
resize(self, sizeof(self.__class__) + sizeof(b))
memmove(addressof(self) + sizeof(self.__class__), addressof(b), sizeof(b))
        self.Size = sizeof(self.__class__) + sizeof(b)
return property(_get_name, _set_name)
class EFI_FILE_INFO(bits.cdata.Struct):
"""EFI File Info"""
_fields_ = [
('Size', UINT64),
('FileSize', UINT64),
('PhysicalSize', UINT64),
('CreateTime', EFI_TIME),
('LastAccessTime', EFI_TIME),
('ModificationTime', EFI_TIME),
('Attribute', UINT64),
]
FileName = make_UCS2_name_property()
class EFI_FILE_SYSTEM_INFO(bits.cdata.Struct):
"""EFI File System Info"""
_pack_ = 4
_fields_ = [
('Size', UINT64),
('ReadOnly', BOOLEAN),
('_pad', UINT8 * 7),
('VolumeSize', UINT64),
('FreeSpace', UINT64),
('BlockSize', UINT32),
]
VolumeLabel = make_UCS2_name_property()
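# Illustrative sketch (not part of the original module): assigning the
# variable-length name property resizes the instance and updates Size to cover
# the fixed struct plus the NUL-terminated UCS-2 name.
def _example_file_info_name():
    info = EFI_FILE_INFO()
    info.FileName = "startup.nsh"
    assert info.Size == sizeof(EFI_FILE_INFO) + sizeof(create_unicode_buffer("startup.nsh"))
    return info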
class efi_file(object):
"""A file-like object for an EFI file"""
def __init__(self, file_protocol):
self.file_protocol = file_protocol
self.closed = False
def _check_closed(self):
if self.closed:
raise ValueError("I/O operation on closed file")
def __del__(self):
self.close()
def close(self):
if not self.closed:
check_status(self.file_protocol.Close(byref(self.file_protocol)))
self.closed = True
def delete(self):
self._check_closed()
try:
check_status(self.file_protocol.Delete(byref(self.file_protocol)))
finally:
self.closed = True
# Context management protocol
def __enter__(self):
if self.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def flush(self):
self._check_closed()
check_status(self.file_protocol.Flush(byref(self.file_protocol)))
def read(self, size=-1):
self._check_closed()
if size < 0:
try:
size = self.file_info.FileSize - self.tell()
            except EFIException:
size = self.file_info.FileSize
size = UINTN(size)
buf = create_string_buffer(0)
resize(buf, size.value)
check_status(self.file_protocol.Read(byref(self.file_protocol), byref(size), byref(buf)))
if size.value != sizeof(buf):
resize(buf, size.value)
return buf.raw
def seek(self, offset, whence=0):
self._check_closed()
if whence == 0:
pos = offset
elif whence == 1:
pos = self.tell() + offset
elif whence == 2:
pos = self.file_info.FileSize + offset
else:
raise ValueError("seek: whence makes no sense: {}".format(whence))
check_status(self.file_protocol.SetPosition(byref(self.file_protocol), pos))
def tell(self):
self._check_closed()
pos = c_uint64()
check_status(self.file_protocol.GetPosition(byref(self.file_protocol), byref(pos)))
return pos.value
def write(self, s):
self._check_closed()
buf = create_string_buffer(s, len(s))
size = UINTN(sizeof(buf))
check_status(self.file_protocol.Write(byref(self.file_protocol), byref(size), byref(buf)))
def open(self, name, mode, attrib=0):
self._check_closed()
new_protocol = POINTER(EFI_FILE_PROTOCOL)()
check_status(self.file_protocol.Open(byref(self.file_protocol), byref(new_protocol), name, mode, attrib))
return efi_file(new_protocol.contents)
def create(self, name, attrib=0):
"""Create a file. Shorthand for open with read/write/create."""
return self.open(name, EFI_FILE_MODE_CREATE | EFI_FILE_MODE_READ | EFI_FILE_MODE_WRITE, attrib)
def mkdir(self, name, attrib=0):
"""Make a directory. Shorthand for create EFI_FILE_DIRECTORY.
attrib, if specified, provides additional attributes beyond EFI_FILE_DIRECTORY."""
return self.create(name, EFI_FILE_DIRECTORY | attrib)
def _get_info(self, information_type_guid, info):
self._check_closed()
guid = EFI_GUID(information_type_guid)
size = UINTN()
status = self.file_protocol.GetInfo(byref(self.file_protocol), byref(guid), byref(size), 0)
if status != EFI_BUFFER_TOO_SMALL:
check_status(status)
resize(info, size.value)
check_status(self.file_protocol.GetInfo(byref(self.file_protocol), byref(guid), byref(size), byref(info)))
return info
def get_file_info(self):
return self._get_info(EFI_FILE_INFO_ID, EFI_FILE_INFO())
def get_file_system_info(self):
return self._get_info(EFI_FILE_SYSTEM_INFO_ID, EFI_FILE_SYSTEM_INFO())
def get_volume_label(self):
return self._get_info(EFI_FILE_SYSTEM_VOLUME_LABEL_ID, create_unicode_buffer(0)).value
def _set_info(self, information_type_guid, info):
self._check_closed()
guid = EFI_GUID(information_type_guid)
        check_status(self.file_protocol.SetInfo(byref(self.file_protocol), byref(guid), sizeof(info), byref(info)))
def set_file_info(self, info):
self._set_info(EFI_FILE_INFO_ID, info)
def set_file_system_info(self, info):
self._set_info(EFI_FILE_SYSTEM_INFO_ID, info)
def set_volume_label(self, label):
buf = create_unicode_buffer(label)
self._set_info(EFI_FILE_SYSTEM_VOLUME_LABEL_ID, buf)
file_info = property(get_file_info, set_file_info)
file_system_info = property(get_file_system_info, set_file_system_info)
volume_label = property(get_volume_label, set_volume_label)
EFI_FILE_MODE_READ = 0x0000000000000001
EFI_FILE_MODE_WRITE = 0x0000000000000002
EFI_FILE_MODE_CREATE = 0x8000000000000000
EFI_FILE_READ_ONLY = 0x0000000000000001
EFI_FILE_HIDDEN = 0x0000000000000002
EFI_FILE_SYSTEM = 0x0000000000000004
EFI_FILE_RESERVED = 0x0000000000000008
EFI_FILE_DIRECTORY = 0x0000000000000010
EFI_FILE_ARCHIVE = 0x0000000000000020
def make_service_binding_protocol(name):
"""Create a protocol class for an EFI_SERVICE_BINDING_PROTOCOL
name should be the name of the class without the leading EFI_ or trailing
_SERVICE_BINDING_PROTOCOL. The corresponding GUID and child protocol
should already exist."""
sbp_name = "EFI_{}_SERVICE_BINDING_PROTOCOL".format(name)
guid = globals()[sbp_name + "_GUID"]
cls = type(sbp_name, (Protocol,), dict(guid=guid))
cls._fields_ = [
('CreateChild', FUNC(POINTER(cls), POINTER(EFI_HANDLE))),
('DestroyChild', FUNC(POINTER(cls), EFI_HANDLE)),
]
child_protocol = globals()["EFI_{}_PROTOCOL".format(name)]
def child(self):
handle = EFI_HANDLE()
check_status(self.CreateChild(self, byref(handle)))
return child_protocol.from_handle(handle)
cls.child = child
globals()[sbp_name] = cls
class EFI_IPv4_ADDRESS(bits.cdata.Struct):
_fields_ = [
('Addr', UINT8*4),
]
def __str__(self):
return "{}.{}.{}.{}".format(*self.Addr)
class EFI_IP4_CONFIG_DATA(bits.cdata.Struct):
_fields_ = [
('DefaultProtocol', UINT8),
('AcceptAnyProtocol', BOOLEAN),
('AcceptIcmpErrors', BOOLEAN),
('AcceptBroadcast', BOOLEAN),
('AcceptPromiscuous', BOOLEAN),
('UseDefaultAddress', BOOLEAN),
('StationAddress', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
('TypeOfService', UINT8),
('TimeToLive', UINT8),
('DoNotFragment', BOOLEAN),
('RawData', BOOLEAN),
('ReceiveTimeout', UINT32),
('TransmitTimeout', UINT32),
]
class EFI_IP4_ROUTE_TABLE(bits.cdata.Struct):
_fields_ = [
('SubnetAddress', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
('GatewayAddress', EFI_IPv4_ADDRESS),
]
class EFI_IP4_IPCONFIG_DATA(bits.cdata.Struct):
_fields_ = [
('StationAddress', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
('RouteTableSize', UINT32),
('RouteTable', POINTER(EFI_IP4_ROUTE_TABLE)),
]
class EFI_IP4_ICMP_TYPE(bits.cdata.Struct):
_fields_ = [
('Type', UINT8),
('Code', UINT8),
]
class EFI_IP4_MODE_DATA(bits.cdata.Struct):
_fields_ = [
('IsStarted', BOOLEAN),
('MaxPacketSize', UINT32),
('ConfigData', EFI_IP4_CONFIG_DATA),
('IsConfigured', BOOLEAN),
('GroupCount', UINT32),
('GroupTable', POINTER(EFI_IPv4_ADDRESS)),
('RouteCount', UINT32),
('RouteTable', POINTER(EFI_IP4_ROUTE_TABLE)),
('IcmpTypeCount', UINT32),
('IcmpTypeList', POINTER(EFI_IP4_ICMP_TYPE)),
]
class EFI_IP4_CONFIG_PROTOCOL(Protocol):
guid = EFI_IP4_CONFIG_PROTOCOL_GUID
EFI_IP4_CONFIG_PROTOCOL._fields_ = [
('Start', FUNC(POINTER(EFI_IP4_CONFIG_PROTOCOL), EFI_EVENT, EFI_EVENT)),
('Stop', FUNC(POINTER(EFI_IP4_CONFIG_PROTOCOL))),
('GetData', FUNC(POINTER(EFI_IP4_CONFIG_PROTOCOL), POINTER(UINTN), POINTER(EFI_IP4_IPCONFIG_DATA))),
]
class EFI_MAC_ADDRESS(bits.cdata.Struct):
_fields_ = [
('Addr', UINT8*32),
]
EFI_IP4_CONFIG2_DATA_TYPE = UINT32
(
Ip4Config2DataTypeInterfaceInfo,
Ip4Config2DataTypePolicy,
Ip4Config2DataTypeManualAddress,
Ip4Config2DataTypeGateway,
Ip4Config2DataTypeDnsServer,
Ip4Config2DataTypeMaximum,
) = range(6)
class EFI_IP4_CONFIG2_INTERFACE_INFO(bits.cdata.Struct):
_fields_ = [
('Name', CHAR16 * 32),
('IfType', UINT8),
('HwAddressSize', UINT32),
('HwAddress', EFI_MAC_ADDRESS),
('StationAddress', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
('RouteTableSize', UINT32),
('RouteTable', POINTER(EFI_IP4_ROUTE_TABLE)),
]
EFI_IP4_CONFIG2_POLICY = UINT32
(
Ip4Config2PolicyStatic,
Ip4Config2PolicyDhcp,
Ip4Config2PolicyMax,
) = range(3)
class EFI_IP4_CONFIG2_MANUAL_ADDRESS(bits.cdata.Struct):
_fields_ = [
('Address', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
]
class EFI_IP4_CONFIG2_PROTOCOL(Protocol):
guid = EFI_IP4_CONFIG2_PROTOCOL_GUID
EFI_IP4_CONFIG2_PROTOCOL._fields_ = [
('SetData', FUNC(POINTER(EFI_IP4_CONFIG2_PROTOCOL), EFI_IP4_CONFIG2_DATA_TYPE, UINTN, c_void_p)),
('GetData', FUNC(POINTER(EFI_IP4_CONFIG2_PROTOCOL), EFI_IP4_CONFIG2_DATA_TYPE, POINTER(UINTN), c_void_p)),
('RegisterDataNotify', FUNC(POINTER(EFI_IP4_CONFIG2_PROTOCOL), EFI_IP4_CONFIG2_DATA_TYPE, EFI_EVENT)),
('UnregisterDataNotify', FUNC(POINTER(EFI_IP4_CONFIG2_PROTOCOL), EFI_IP4_CONFIG2_DATA_TYPE, EFI_EVENT)),
]
class EFI_DNS4_CACHE_ENTRY(bits.cdata.Struct):
_fields_ = [
('HostName', c_wchar_p),
('IpAddress', POINTER(EFI_IPv4_ADDRESS)),
('Timeout', UINT32),
]
class EFI_DNS4_CONFIG_DATA(bits.cdata.Struct):
_fields_ = [
('DnsServerListCount', UINTN),
('DnsServerList', POINTER(EFI_IPv4_ADDRESS)),
('UseDefaultSetting', BOOLEAN),
('EnableDnsCache', BOOLEAN),
('Protocol', UINT8),
('StationIp', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
('LocalPort', UINT16),
('RetryCount', UINT32),
('RetryInterval', UINT32),
]
class EFI_DNS4_MODE_DATA(bits.cdata.Struct):
_fields_ = [
('DnsConfigData', EFI_DNS4_CONFIG_DATA),
('DnsServerCount', UINT32),
('DnsServerList', POINTER(EFI_IPv4_ADDRESS)),
('DnsCacheCount', UINT32),
('DnsCacheList', POINTER(EFI_DNS4_CACHE_ENTRY)),
]
class DNS_HOST_TO_ADDR_DATA(bits.cdata.Struct):
_fields_ = [
('IpCount', UINT32),
('IpList', POINTER(EFI_IPv4_ADDRESS)),
]
class DNS_ADDR_TO_HOST_DATA(bits.cdata.Struct):
_fields_ = [
('HostName', c_wchar_p),
]
class DNS_RESOURCE_RECORD(bits.cdata.Struct):
_fields_ = [
('QName', c_char_p),
('QType', UINT16),
('QClass', UINT16),
('TTL', UINT32),
('DataLength', UINT16),
('RData', POINTER(CHAR8)),
]
class DNS_GENERAL_LOOKUP_DATA(bits.cdata.Struct):
_fields_ = [
('RRCount', UINTN),
('RRList', POINTER(DNS_RESOURCE_RECORD)),
]
class EFI_DNS4_RSP_DATA(bits.cdata.Union):
_fields_ = [
('H2AData', POINTER(DNS_HOST_TO_ADDR_DATA)),
('A2HData', POINTER(DNS_ADDR_TO_HOST_DATA)),
('GLookupData', POINTER(DNS_GENERAL_LOOKUP_DATA)),
]
class EFI_DNS4_COMPLETION_TOKEN(bits.cdata.Struct):
_fields_ = [
('Event', EFI_EVENT),
('Status', EFI_STATUS),
('RetryCount', UINT32),
('RetryInterval', UINT32),
('RspData', EFI_DNS4_RSP_DATA),
]
class EFI_DNS4_PROTOCOL(Protocol):
guid = EFI_DNS4_PROTOCOL_GUID
EFI_DNS4_PROTOCOL._fields_ = [
('GetModeData', FUNC(POINTER(EFI_DNS4_PROTOCOL), POINTER(EFI_DNS4_MODE_DATA))),
('Configure', FUNC(POINTER(EFI_DNS4_PROTOCOL), POINTER(EFI_DNS4_CONFIG_DATA))),
('HostNameToIp', FUNC(POINTER(EFI_DNS4_PROTOCOL), c_wchar_p, POINTER(EFI_DNS4_COMPLETION_TOKEN))),
('IpToHostName', FUNC(POINTER(EFI_DNS4_PROTOCOL), EFI_IPv4_ADDRESS, POINTER(EFI_DNS4_COMPLETION_TOKEN))),
('GeneralLookUp', FUNC(POINTER(EFI_DNS4_PROTOCOL), c_char_p, UINT16, UINT16, POINTER(EFI_DNS4_COMPLETION_TOKEN))),
('UpdateDnsCache', FUNC(POINTER(EFI_DNS4_PROTOCOL), BOOLEAN, BOOLEAN, EFI_DNS4_CACHE_ENTRY)),
('Poll', FUNC(POINTER(EFI_DNS4_PROTOCOL))),
('Cancel', FUNC(POINTER(EFI_DNS4_PROTOCOL), POINTER(EFI_DNS4_COMPLETION_TOKEN))),
]
make_service_binding_protocol("DNS4")
EFI_TCP4_CONNECTION_STATE = UINT32
tcp4_connection_states = {
0: 'Tcp4StateClosed',
1: 'Tcp4StateListen',
2: 'Tcp4StateSynSent',
3: 'Tcp4StateSynReceived',
4: 'Tcp4StateEstablished',
5: 'Tcp4StateFinWait1',
6: 'Tcp4StateFinWait2',
7: 'Tcp4StateClosing',
8: 'Tcp4StateTimeWait',
9: 'Tcp4StateCloseWait',
10: 'Tcp4StateLastAck',
}
globals().update(map(reversed, tcp4_connection_states.items()))
class EFI_TCP4_ACCESS_POINT(bits.cdata.Struct):
_fields_ = [
('UseDefaultAddress', BOOLEAN),
('StationAddress', EFI_IPv4_ADDRESS),
('SubnetMask', EFI_IPv4_ADDRESS),
('StationPort', UINT16),
('RemoteAddress', EFI_IPv4_ADDRESS),
('RemotePort', UINT16),
('ActiveFlag', BOOLEAN),
]
class EFI_TCP4_OPTION(bits.cdata.Struct):
_fields_ = [
('ReceiveBufferSize', UINT32),
('SendBufferSize', UINT32),
('MaxSynBackLog', UINT32),
('ConnectionTimeout', UINT32),
('DataRetries', UINT32),
('FinTimeout', UINT32),
('TimeWaitTimeout', UINT32),
('KeepAliveProbes', UINT32),
('KeepAliveTime', UINT32),
('KeepAliveInterval', UINT32),
('EnableNagle', BOOLEAN),
('EnableTimeStamp', BOOLEAN),
('EnableWindowScaling', BOOLEAN),
('EnableSelectiveAck', BOOLEAN),
('EnablePathMtuDiscovery', BOOLEAN),
]
class EFI_TCP4_CONFIG_DATA(bits.cdata.Struct):
_fields_ = [
('TypeOfService', UINT8),
('TimeToLive', UINT8),
('AccessPoint', EFI_TCP4_ACCESS_POINT),
('ControlOption', POINTER(EFI_TCP4_OPTION)),
]
class EFI_MANAGED_NETWORK_CONFIG_DATA(bits.cdata.Struct):
_fields_ = [
('ReceivedQueueTimeoutValue', UINT32),
('TransmitQueueTimeoutValue', UINT32),
('ProtocolTypeFilter', UINT16),
('EnableUnicastReceive', BOOLEAN),
('EnableMulticastReceive', BOOLEAN),
('EnableBroadcastReceive', BOOLEAN),
('EnablePromiscuousReceive', BOOLEAN),
('FlushQueuesOnReset', BOOLEAN),
('EnableReceiveTimestamps', BOOLEAN),
('DisableBackgroundPolling', BOOLEAN),
]
MAX_MCAST_FILTER_CNT = 16
class EFI_SIMPLE_NETWORK_MODE(bits.cdata.Struct):
_fields_ = [
('State', UINT32),
('HwAddressSize', UINT32),
('MediaHeaderSize', UINT32),
('MaxPacketSize', UINT32),
('NvRamSize', UINT32),
('NvRamAccessSize', UINT32),
('ReceiveFilterMask', UINT32),
('ReceiveFilterSetting', UINT32),
('MaxMCastFilterCount', UINT32),
('MCastFilterCount', UINT32),
('MCastFilter', EFI_MAC_ADDRESS * MAX_MCAST_FILTER_CNT),
('CurrentAddress', EFI_MAC_ADDRESS),
('BroadcastAddress', EFI_MAC_ADDRESS),
('PermanentAddress', EFI_MAC_ADDRESS),
('IfType', UINT8),
('MacAddressChangeable', BOOLEAN),
('MultipleTxSupported', BOOLEAN),
('MediaPresentSupported', BOOLEAN),
('MediaPresent', BOOLEAN),
]
class EFI_TCP4_COMPLETION_TOKEN(bits.cdata.Struct):
_fields_ = [
('Event', EFI_EVENT),
('Status', EFI_STATUS),
]
class EFI_TCP4_CONNECTION_TOKEN(bits.cdata.Struct):
_fields_ = [
('CompletionToken', EFI_TCP4_COMPLETION_TOKEN),
]
class EFI_TCP4_LISTEN_TOKEN(bits.cdata.Struct):
_fields_ = [
('CompletionToken', EFI_TCP4_COMPLETION_TOKEN),
('NewChildHandle', EFI_HANDLE),
]
class EFI_TCP4_FRAGMENT_DATA(bits.cdata.Struct):
_fields_ = [
('FragmentLength', UINT32),
('FragmentBuffer', c_void_p),
]
# FIXME: Support variable-length array for FragmentTable
class EFI_TCP4_RECEIVE_DATA(bits.cdata.Struct):
_fields_ = [
('UrgentFlag', BOOLEAN),
('DataLength', UINT32),
('FragmentCount', UINT32),
('FragmentTable', EFI_TCP4_FRAGMENT_DATA * 1),
]
# FIXME: Support variable-length array for FragmentTable
class EFI_TCP4_TRANSMIT_DATA(bits.cdata.Struct):
_fields_ = [
('Push', BOOLEAN),
('Urgent', BOOLEAN),
('DataLength', UINT32),
('FragmentCount', UINT32),
('FragmentTable', EFI_TCP4_FRAGMENT_DATA * 1),
]
class EFI_TCP4_RECEIVE_TRANSMIT_DATA(bits.cdata.Union):
_fields_ = [
('RxData', POINTER(EFI_TCP4_RECEIVE_DATA)),
('TxData', POINTER(EFI_TCP4_TRANSMIT_DATA)),
]
class EFI_TCP4_IO_TOKEN(bits.cdata.Struct):
_fields_ = [
('CompletionToken', EFI_TCP4_COMPLETION_TOKEN),
('Packet', EFI_TCP4_RECEIVE_TRANSMIT_DATA),
]
class EFI_TCP4_CLOSE_TOKEN(bits.cdata.Struct):
_fields_ = [
('CompletionToken', EFI_TCP4_COMPLETION_TOKEN),
('AbortOnClose', BOOLEAN),
]
class EFI_TCP4_PROTOCOL(Protocol):
guid = EFI_TCP4_PROTOCOL_GUID
EFI_TCP4_PROTOCOL._fields_ = [
('GetModeData', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_CONNECTION_STATE), POINTER(EFI_TCP4_CONFIG_DATA), POINTER(EFI_IP4_MODE_DATA), POINTER(EFI_MANAGED_NETWORK_CONFIG_DATA), POINTER(EFI_SIMPLE_NETWORK_MODE))),
('Configure', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_CONFIG_DATA))),
('Routes', FUNC(POINTER(EFI_TCP4_PROTOCOL), BOOLEAN, POINTER(EFI_IPv4_ADDRESS), POINTER(EFI_IPv4_ADDRESS), POINTER(EFI_IPv4_ADDRESS))),
('Connect', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_CONNECTION_TOKEN))),
('Accept', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_LISTEN_TOKEN))),
('Transmit', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_IO_TOKEN))),
('Receive', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_IO_TOKEN))),
('Close', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_CLOSE_TOKEN))),
('Cancel', FUNC(POINTER(EFI_TCP4_PROTOCOL), POINTER(EFI_TCP4_COMPLETION_TOKEN))),
('Poll', FUNC(POINTER(EFI_TCP4_PROTOCOL))),
]
make_service_binding_protocol("TCP4")
EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL = 0x00000001
EFI_OPEN_PROTOCOL_GET_PROTOCOL = 0x00000002
EFI_OPEN_PROTOCOL_TEST_PROTOCOL = 0x00000004
EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER = 0x00000008
EFI_OPEN_PROTOCOL_BY_DRIVER = 0x00000010
EFI_OPEN_PROTOCOL_EXCLUSIVE = 0x00000020
def locate_handles(protocol_guid=None):
"""Locate handles supporting a given protocol, or all handles if protocol_guid is None"""
if protocol_guid is not None:
guid = EFI_GUID(protocol_guid)
guid_ref = byref(guid)
search_type = ByProtocol
else:
guid_ref = None
search_type = AllHandles
size = UINTN(0)
status = system_table.BootServices.contents.LocateHandle(search_type, guid_ref, None, byref(size), None)
if status != EFI_BUFFER_TOO_SMALL:
check_status(status)
    handles = (EFI_HANDLE * (size.value // sizeof(EFI_HANDLE)))()
check_status(system_table.BootServices.contents.LocateHandle(search_type, guid_ref, None, byref(size), handles))
return handles
def get_protocol(handle, protocol_guid):
"""Get the given protocol of the given handle
Uses OpenProtocol with the BITS image handle, so CloseProtocol is
optional."""
guid = EFI_GUID(protocol_guid)
protocol_addr = c_void_p()
check_status(system_table.BootServices.contents.OpenProtocol(handle, byref(guid), byref(protocol_addr), _efi._image_handle, None, EFI_OPEN_PROTOCOL_GET_PROTOCOL))
return protocol_addr.value
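# Illustrative sketch (not part of the original module): enumerate every handle
# publishing a given protocol and decode it, using the Loaded Image protocol as
# the example.
def _example_list_loaded_images():
    for handle in locate_handles(EFI_LOADED_IMAGE_PROTOCOL_GUID):
        img = EFI_LOADED_IMAGE_PROTOCOL.from_handle(handle)
        print("image at {:#x} size {:#x}".format(img.ImageBase or 0, img.ImageSize))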
# EFI errors have the high bit set; use the pointer size to compute the
# position of that bit for this build.
EFI_ERROR = 1 << (8*ptrsize - 1)
efi_status_decode = {
0: 'EFI_SUCCESS',
1: 'EFI_WARN_UNKNOWN_GLYPH',
2: 'EFI_WARN_DELETE_FAILURE',
3: 'EFI_WARN_WRITE_FAILURE',
4: 'EFI_WARN_BUFFER_TOO_SMALL',
5: 'EFI_WARN_STALE_DATA',
EFI_ERROR | 1: 'EFI_LOAD_ERROR',
EFI_ERROR | 2: 'EFI_INVALID_PARAMETER',
EFI_ERROR | 3: 'EFI_UNSUPPORTED',
EFI_ERROR | 4: 'EFI_BAD_BUFFER_SIZE',
EFI_ERROR | 5: 'EFI_BUFFER_TOO_SMALL',
EFI_ERROR | 6: 'EFI_NOT_READY',
EFI_ERROR | 7: 'EFI_DEVICE_ERROR',
EFI_ERROR | 8: 'EFI_WRITE_PROTECTED',
EFI_ERROR | 9: 'EFI_OUT_OF_RESOURCES',
EFI_ERROR | 10: 'EFI_VOLUME_CORRUPTED',
EFI_ERROR | 11: 'EFI_VOLUME_FULL',
EFI_ERROR | 12: 'EFI_NO_MEDIA',
EFI_ERROR | 13: 'EFI_MEDIA_CHANGED',
EFI_ERROR | 14: 'EFI_NOT_FOUND',
EFI_ERROR | 15: 'EFI_ACCESS_DENIED',
EFI_ERROR | 16: 'EFI_NO_RESPONSE',
EFI_ERROR | 17: 'EFI_NO_MAPPING',
EFI_ERROR | 18: 'EFI_TIMEOUT',
EFI_ERROR | 19: 'EFI_NOT_STARTED',
EFI_ERROR | 20: 'EFI_ALREADY_STARTED',
EFI_ERROR | 21: 'EFI_ABORTED',
EFI_ERROR | 22: 'EFI_ICMP_ERROR',
EFI_ERROR | 23: 'EFI_TFTP_ERROR',
EFI_ERROR | 24: 'EFI_PROTOCOL_ERROR',
EFI_ERROR | 25: 'EFI_INCOMPATIBLE_VERSION',
EFI_ERROR | 26: 'EFI_SECURITY_VIOLATION',
EFI_ERROR | 27: 'EFI_CRC_ERROR',
EFI_ERROR | 28: 'EFI_END_OF_MEDIA',
EFI_ERROR | 31: 'EFI_END_OF_FILE',
EFI_ERROR | 32: 'EFI_INVALID_LANGUAGE',
EFI_ERROR | 33: 'EFI_COMPROMISED_DATA',
EFI_ERROR | 34: 'EFI_IP_ADDRESS_CONFLICT',
EFI_ERROR | 100: 'EFI_NETWORK_UNREACHABLE',
EFI_ERROR | 101: 'EFI_HOST_UNREACHABLE',
EFI_ERROR | 102: 'EFI_PROTOCOL_UNREACHABLE',
EFI_ERROR | 103: 'EFI_PORT_UNREACHABLE',
EFI_ERROR | 104: 'EFI_CONNECTION_FIN',
EFI_ERROR | 105: 'EFI_CONNECTION_RESET',
EFI_ERROR | 106: 'EFI_CONNECTION_REFUSED',
}
# Create each of the values above as a constant referring to the corresponding EFI status code.
globals().update(map(reversed, efi_status_decode.items()))
class EFIException(Exception):
def __str__(self):
return "[Error {:#x}] {}".format(self.args[0], efi_status_decode.get(self.args[0], "Unknown EFI error"))
def check_status(status):
"""Check an EFI status value, and raise an exception if not EFI_SUCCESS
To check non-status values that may have the error bit set, use check_error_value instead."""
if status != EFI_SUCCESS:
raise EFIException(status)
def check_error_value(value):
"""Check a value that may have the error bit set
Raises an exception if the error bit is set; otherwise, returns the value."""
if value & EFI_ERROR:
raise EFIException(value)
return value
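# Illustrative examples (not part of the original module):
#   check_status(EFI_SUCCESS) is a no-op, while check_status(EFI_NOT_FOUND)
#   raises EFIException; on a 64-bit build its str() reads
#   "[Error 0x800000000000000e] EFI_NOT_FOUND".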
def loaded_image():
return EFI_LOADED_IMAGE_PROTOCOL.from_handle(_efi._image_handle)
def get_boot_fs():
return EFI_SIMPLE_FILE_SYSTEM_PROTOCOL.from_handle(loaded_image().DeviceHandle).root
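# Illustrative sketch (not part of the original module): open a file by name on
# the volume BITS booted from and read its contents; "startup.nsh" is an
# assumed example path.
def _example_read_boot_file(name="startup.nsh"):
    with get_boot_fs().open(name, EFI_FILE_MODE_READ) as f:
        return f.read()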
def print_variables():
name = create_unicode_buffer("")
size = UINTN(sizeof(name))
guid = EFI_GUID()
while True:
status = system_table.RuntimeServices.contents.GetNextVariableName(byref(size), name, byref(guid))
if status == EFI_NOT_FOUND:
break
if status == EFI_BUFFER_TOO_SMALL:
resize(name, size.value)
continue
check_status(status)
print(name.value, guid)
data, attributes, data_size = get_variable(name, guid)
print("attributes={:#x} size={} data:".format(attributes, data_size))
print(bits.dumpmem(data.raw))
def get_variable(name, guid):
attribute = UINT32(0)
data = create_string_buffer(1)
size = UINTN(sizeof(data))
while True:
status = system_table.RuntimeServices.contents.GetVariable(name, byref(guid), byref(attribute), byref(size), data)
if status == EFI_NOT_FOUND:
break
if status == EFI_BUFFER_TOO_SMALL:
resize(data, size.value)
continue
check_status(status)
return data, attribute.value, size.value
def print_configurationtables():
for tbl in system_table.ConfigurationTable:
print(tbl)
def log_efi_info():
with redirect.logonly():
try:
print()
print("EFI system information:")
print("Firmware Vendor:", system_table.FirmwareVendor)
print("Firmware Revision: {:#x}".format(system_table.FirmwareRevision))
print("Supported EFI configuration table UUIDs:")
for t in system_table.ConfigurationTable:
print(t.VendorGuid, known_uuids.get(t.VendorGuid.uuid, ''))
print()
        except Exception:
print("Error printing EFI information:")
import traceback
traceback.print_exc()
EFI_LOCATE_SEARCH_TYPE = UINTN
AllHandles, ByRegisterNotify, ByProtocol = range(3)
def show_available_protocols():
with ttypager.page():
# Retrieve the list of all handles from the handle database
handle_count = UINTN()
handle_buffer = POINTER(EFI_HANDLE)()
check_status(system_table.BootServices.contents.LocateHandleBuffer(AllHandles, None, None, byref(handle_count), byref(handle_buffer)))
try:
handles = [handle_buffer[i] for i in range(handle_count.value)]
finally:
check_status(system_table.BootServices.contents.FreePool(handle_buffer))
protocols = set()
for handle in handles:
# Retrieve the list of all the protocols on each handle
guids_buffer = POINTER(POINTER(EFI_GUID))()
protocol_count = UINTN()
check_status(system_table.BootServices.contents.ProtocolsPerHandle(handle, byref(guids_buffer), byref(protocol_count)))
try:
guids = set(guids_buffer[i].contents.uuid for i in range(protocol_count.value))
finally:
check_status(system_table.BootServices.contents.FreePool(guids_buffer))
protocols |= guids
print('EFI protocols in use (count={})'.format(len(protocols)))
for protocol in sorted(protocols):
print(protocol, known_uuids.get(protocol, ''))
def save_tables(decode=True):
"""Save all EFI tables to files.
Warning: All files in the /efi_tables directory will be deleted!"""
root = get_boot_fs()
tables_dir = root.mkdir("efi_tables")
# delete all files in \efi_tables directory
if "efi_tables" in os.listdir("/"):
print("Deleting old files...")
for f in os.listdir("/efi_tables"):
print("Deleting {}...".format(f), end='')
tables_dir.open(f, EFI_FILE_MODE_READ | EFI_FILE_MODE_WRITE).delete()
print("Done")
tables = dict(
systemtable=system_table,
configurationtable=system_table.ConfigurationTable,
runtimeservices=system_table.RuntimeServices.contents,
bootservices=system_table.BootServices.contents)
    for name, table in tables.items():
fname = "{}.bin".format(name)
print("Saving {}...".format(fname))
tables_dir.create(fname).write(bits.cdata.to_bytes(table))
if decode:
        for name, table in tables.items():
fname = "{}.txt".format(name)
print("Saving {}...".format(fname), end='')
if name == 'configurationtable':
out = StringIO()
for tbl in system_table.ConfigurationTable:
print(tbl, file=out)
tables_dir.create(fname).write(out.getvalue())
out.close()
else:
tables_dir.create(fname).write(str(table))
print("Done")
created_explore_efi_cfg = False
def create_explore_efi_cfg():
global created_explore_efi_cfg
if created_explore_efi_cfg:
return
cfg = ""
cfg += 'menuentry "EFI tables" {\n'
cfg += " configfile (python)/explore_efi_tables.cfg\n"
cfg += '}\n\n'
bits.pyfs.add_static("explore_efi.cfg", cfg)
created_explore_efi_cfg = True
create_explore_efi_cfg()
created_explore_efi_tables_cfg = False
def create_explore_efi_tables_cfg():
global created_explore_efi_tables_cfg
if created_explore_efi_tables_cfg:
return
cfg = ""
cfg += 'menuentry "Save all EFI tables (raw and decoded) to files" {\n'
cfg += ' echo "Saving all EFI tables (raw and decoded) to files..."\n'
cfg += ' py "import efi"\n'
cfg += " py 'efi.save_tables()'\n"
cfg += ' echo "Done."\n'
cfg += " py 'from bits import pause ; pause.pause()'\n"
cfg += '}\n\n'
cfg += 'menuentry "Save all EFI tables (raw only) to files" {\n'
cfg += ' echo "Saving all EFI tables (raw only) to files..."\n'
cfg += ' py "import efi"\n'
cfg += " py 'efi.save_tables(decode=False)'\n"
cfg += ' echo "Done."\n'
cfg += " py 'from bits import pause ; pause.pause()'\n"
cfg += '}\n\n'
cfg += 'menuentry "Display EFI System Table" {\n'
cfg += ' py "import efi, ttypager"\n'
cfg += ' py "with ttypager.page(): print efi.system_table"\n'
cfg += '}\n\n'
cfg += 'menuentry "Display EFI Configuration Table" {\n'
cfg += ' py "import efi, ttypager"\n'
cfg += ' py "with ttypager.page(): efi.print_configurationtables()"\n'
cfg += '}\n\n'
cfg += 'menuentry "Decode EFI RuntimeServices" {\n'
cfg += ' py "import efi, ttypager"\n'
cfg += ' py "with ttypager.page(): print efi.system_table.RuntimeServices.contents"\n'
cfg += '}\n\n'
cfg += 'menuentry "Decode EFI BootServices" {\n'
cfg += ' py "import efi, ttypager"\n'
cfg += ' py "with ttypager.page(): print efi.system_table.BootServices.contents"\n'
cfg += '}\n\n'
bits.pyfs.add_static("explore_efi_tables.cfg", cfg)
created_explore_efi_tables_cfg = True
create_explore_efi_tables_cfg()
def hii_export_config():
"""Export current configuration (the entire HII database) as a null-terminated string."""
# Only one handle supports the EFI HII Configuration Routing Protocol
handle = locate_handles(EFI_HII_CONFIG_ROUTING_PROTOCOL_GUID)[0]
cfg_ptr = c_void_p(0)
hii = EFI_HII_CONFIG_ROUTING_PROTOCOL.from_handle(handle)
check_status(hii.ExportConfig(byref(hii), byref(cfg_ptr)))
try:
cfg = wstring_at(cfg_ptr)
finally:
check_status(system_table.BootServices.contents.FreePool(cfg_ptr.value))
return cfg
def hii_export_config_pager():
with ttypager.page():
print(hii_export_config())
def hii_extract_config():
"""Extract the current configuration as a null-terminated string."""
# FIXME: this could return multiple handles
handle = locate_handles(EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID)[0]
request = 0
progress_ptr = c_void_p(0)
results_ptr = c_void_p(0)
hii = EFI_HII_CONFIG_ACCESS_PROTOCOL.from_handle(handle)
check_status(hii.ExtractConfig(byref(hii), request, byref(progress_ptr), byref(results_ptr)))
try:
progress = results = None
if progress_ptr:
progress = wstring_at(progress_ptr)
if results_ptr:
results = wstring_at(results_ptr)
finally:
check_status(system_table.BootServices.contents.FreePool(results_ptr.value))
with ttypager.page():
print("Progress:\n")
print(progress)
print()
print("Results\n")
print(results)
EFI_HII_PACKAGE_TYPE_ALL = 0x00
EFI_HII_PACKAGE_TYPE_GUID = 0x01
EFI_HII_PACKAGE_FORMS = 0x02
EFI_HII_PACKAGE_STRINGS = 0x04
EFI_HII_PACKAGE_FONTS = 0x05
EFI_HII_PACKAGE_IMAGES = 0x06
EFI_HII_PACKAGE_SIMPLE_FONTS = 0x07
EFI_HII_PACKAGE_DEVICE_PATH = 0x08
EFI_HII_PACKAGE_KEYBOARD_LAYOUT = 0x09
EFI_HII_PACKAGE_ANIMATIONS = 0x0A
EFI_HII_PACKAGE_END = 0xDF
EFI_HII_PACKAGE_TYPE_SYSTEM_BEGIN = 0xE0
EFI_HII_PACKAGE_TYPE_SYSTEM_END = 0xFF
def hii_list_package_lists():
"""List the handles of the package lists within the HII database."""
handle = locate_handles(EFI_HII_DATABASE_PROTOCOL_GUID)[0]
HandleBufferLength = UINTN(0)
hii = EFI_HII_DATABASE_PROTOCOL.from_handle(handle)
status = hii.ListPackageLists(byref(hii), EFI_HII_PACKAGE_TYPE_ALL, 0, byref(HandleBufferLength), 0)
if status != EFI_BUFFER_TOO_SMALL:
check_status(status)
assert HandleBufferLength.value % sizeof(c_void_p) == 0
    Handles = (c_void_p * (HandleBufferLength.value // sizeof(c_void_p)))()
    check_status(hii.ListPackageLists(byref(hii), EFI_HII_PACKAGE_TYPE_ALL, 0, byref(HandleBufferLength), byref(Handles)))
with ttypager.page():
print("HandleBufferLength={}\n".format(HandleBufferLength.value))
for handle in Handles:
print("{:#x}".format(handle))
def hii_export_package_lists():
"""Exports the contents of one or all package lists in the HII database into a buffer."""
handle = locate_handles(EFI_HII_DATABASE_PROTOCOL_GUID)[0]
buf_size = UINTN(0)
hii = EFI_HII_DATABASE_PROTOCOL.from_handle(handle)
status = hii.ExportPackageLists(byref(hii), 0, byref(buf_size), 0)
if status != EFI_BUFFER_TOO_SMALL:
check_status(status)
buf = create_string_buffer(buf_size.value)
    check_status(hii.ExportPackageLists(byref(hii), 0, byref(buf_size), byref(buf)))
with ttypager.page():
print("buf_size={}\n".format(buf_size.value))
print(bits.dumpmem(buf.raw))
EFI_PCI_IO_PROTOCOL_WIDTH = UINTN
(
    EfiPciIoWidthUint8,
    EfiPciIoWidthUint16,
    EfiPciIoWidthUint32,
    EfiPciIoWidthUint64,
    EfiPciIoWidthFifoUint8,
    EfiPciIoWidthFifoUint16,
    EfiPciIoWidthFifoUint32,
    EfiPciIoWidthFifoUint64,
    EfiPciIoWidthFillUint8,
    EfiPciIoWidthFillUint16,
    EfiPciIoWidthFillUint32,
    EfiPciIoWidthFillUint64,
    EfiPciIoWidthMaximum,
) = range(13)
EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION = UINTN
(
    EfiPciIoAttributeOperationGet,
    EfiPciIoAttributeOperationSet,
    EfiPciIoAttributeOperationEnable,
    EfiPciIoAttributeOperationDisable,
    EfiPciIoAttributeOperationSupported,
    EfiPciIoAttributeOperationMaximum,
) = range(6)
EFI_PCI_IO_PROTOCOL_OPERATION = UINTN
(
    EfiPciIoOperationBusMasterRead,
    EfiPciIoOperationBusMasterWrite,
    EfiPciIoOperationBusMasterCommonBuffer,
    EfiPciIoOperationMaximum,
) = range(4)
class EFI_PCI_IO_PROTOCOL(Protocol):
"""EFI PCI IO Protocol"""
guid = EFI_PCI_IO_PROTOCOL_GUID
EFI_PCI_IO_PROTOCOL_IO_MEM = FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_PCI_IO_PROTOCOL_WIDTH, UINT8, UINT64, UINTN, c_void_p)
class EFI_PCI_IO_PROTOCOL_ACCESS(bits.cdata.Struct):
"""EFI PCI IO Protocol Access"""
_fields_ = [
('Read', EFI_PCI_IO_PROTOCOL_IO_MEM),
('Write', EFI_PCI_IO_PROTOCOL_IO_MEM),
]
EFI_PCI_IO_PROTOCOL_CONFIG = FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_PCI_IO_PROTOCOL_WIDTH, UINT32, UINTN, c_void_p)
class EFI_PCI_IO_PROTOCOL_CONFIG_ACCESS(bits.cdata.Struct):
"""EFI PCI IO Protocol Config Access Protocol"""
_fields_ = [
('Read', EFI_PCI_IO_PROTOCOL_CONFIG),
('Write', EFI_PCI_IO_PROTOCOL_CONFIG),
]
EFI_PCI_IO_PROTOCOL_POLL_IO_MEM = FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_PCI_IO_PROTOCOL_WIDTH, UINT8, UINT64, UINT64, UINT64, UINT64, POINTER(UINT64))
EFI_PCI_IO_PROTOCOL._fields_ = [
('PollMem', EFI_PCI_IO_PROTOCOL_POLL_IO_MEM),
('PollIo', EFI_PCI_IO_PROTOCOL_POLL_IO_MEM),
('Mem', EFI_PCI_IO_PROTOCOL_ACCESS),
('Io', EFI_PCI_IO_PROTOCOL_ACCESS),
('Pci', EFI_PCI_IO_PROTOCOL_CONFIG_ACCESS),
('CopyMem', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_PCI_IO_PROTOCOL_WIDTH, UINT8, UINT64, UINT8, UINT64, UINTN)),
('Map', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_PCI_IO_PROTOCOL_OPERATION, c_void_p, POINTER(UINTN), POINTER(EFI_PHYSICAL_ADDRESS), POINTER(c_void_p))),
('Unmap', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), c_void_p)),
('AllocateBuffer', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_ALLOCATE_TYPE, EFI_MEMORY_TYPE, UINTN, POINTER(c_void_p), UINT64)),
('FreeBuffer', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), UINTN, c_void_p)),
('Flush', FUNC(POINTER(EFI_PCI_IO_PROTOCOL))),
('GetLocation', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), POINTER(UINTN), POINTER(UINTN), POINTER(UINTN), POINTER(UINTN))),
('Attributes', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION, UINT64, POINTER(UINT64))),
('GetBarAttributes', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), UINT8, POINTER(UINT64), POINTER(c_void_p))),
('SetBarAttributes', FUNC(POINTER(EFI_PCI_IO_PROTOCOL), UINT64, UINT8, POINTER(UINT64), POINTER(UINT64))),
('RomSize', UINT64),
('RomImage', c_void_p),
]
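# Illustrative sketch (not part of the original module): read the 16-bit
# VendorID at PCI config-space offset 0 through an EFI_PCI_IO_PROTOCOL instance
# obtained via from_handle (see list_pci_devices below).
def _example_pci_vendor_id(pci_io):
    vid = UINT16()
    check_status(pci_io.Pci.Read(byref(pci_io), EfiPciIoWidthUint16, 0, 1, byref(vid)))
    return vid.value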
_event_handlers = {}
_event_exiting = False
def _event_callback(event_value):
global _event_handlers
_event_handlers[event_value][0]()
_efi._set_event_callback(_event_callback)
def create_event(handler, timer=False, tpl=TPL_CALLBACK, abort=None):
"""Create an EFI_EVENT with the specified Python handler
The event always has type EVT_NOTIFY_SIGNAL. Pass timer=True to
additionally use EVT_TIMER.
tpl specifies the TPL for the callback: either TPL_CALLBACK (default) or
TPL_NOTIFY.
abort provides a callback to be called if cleaning up all events before
exiting Python. Supply an abort callback if you need to tell some EFI
object not to touch this event or associated data.
Returns the EFI_EVENT. Do not close directly; always call
efi.close_event."""
global _event_handlers, _event_exiting
if _event_exiting:
raise RuntimeError("Attempt to create_event while cleaning up events")
type = EVT_NOTIFY_SIGNAL
if timer:
type |= EVT_TIMER
event = EFI_EVENT()
notify = cast(c_void_p(_efi._c_event_callback), EFI_EVENT_NOTIFY)
# Safe to create before adding to handlers; nothing can signal it yet
check_status(system_table.BootServices.contents.CreateEvent(type, tpl, notify, None, byref(event)))
_event_handlers[event.value] = handler, abort
return event
def close_event(event):
"""Close an EFI_EVENT created by efi.create_event"""
global _event_handlers
_event_handlers[event.value] # raise KeyError if not found
check_status(system_table.BootServices.contents.CloseEvent(event))
del _event_handlers[event.value]
@atexit.register
def close_all_events():
global _event_handlers, _event_exiting
_event_exiting = True
    for event_value, (handler, abort) in list(_event_handlers.items()):
if abort is not None:
try:
abort()
except Exception as e:
print("Exception occurred during event abort function:")
print(traceback.format_exc())
try:
close_event(EFI_EVENT(event_value))
except Exception as e:
pass
class event_set(object):
def __init__(self):
self.s = set()
def create_event(self, *args, **kwargs):
e = create_event(*args, **kwargs)
self.s.add(e.value)
return e
def close_event(self, e):
self.s.remove(e.value)
close_event(e)
def close_all(self):
        for ev in list(self.s):
self.close_event(EFI_EVENT(ev))
_key_handlers = {}
# The C code packs the important bits of EFI_KEY_DATA into one pointer-sized
# value to avoid memory allocation.
def _unpack_key_data(d):
key_data = EFI_KEY_DATA()
if d & (1 << 16):
key_data.Key.ScanCode = d & 0xffff
else:
key_data.Key.UnicodeChar = chr(d & 0xffff)
key_data.KeyState.KeyShiftState = EFI_SHIFT_STATE_VALID | ((d >> 17) & 0x3ff)
if d & (1 << 28):
key_data.KeyState.KeyToggleState |= EFI_SCROLL_LOCK_ACTIVE
if d & (1 << 29):
key_data.KeyState.KeyToggleState |= EFI_NUM_LOCK_ACTIVE
if d & (1 << 30):
key_data.KeyState.KeyToggleState |= EFI_CAPS_LOCK_ACTIVE
if d & (1 << 31):
key_data.KeyState.KeyToggleState |= EFI_KEY_STATE_EXPOSED
key_data.KeyState.KeyToggleState |= EFI_TOGGLE_STATE_VALID
return key_data
def _key_callback(key_data_packed):
global _key_handlers
key_data = _unpack_key_data(key_data_packed)
shift = 0
if key_data.KeyState.KeyShiftState & EFI_SHIFT_STATE_VALID:
shift = key_data.KeyState.KeyShiftState & ~EFI_SHIFT_STATE_VALID
_key_handlers[key_data.Key.ScanCode, key_data.Key.UnicodeChar, shift][1]()
_efi._set_key_callback(_key_callback, sizeof(EFI_KEY_DATA))
def register_key_handler(handler, code=0, char='\0', shift=0):
global _key_handlers
if (code == 0 and char == '\0') or (code != 0 and char != '\0'):
raise ValueError("Must provide exactly one of code and char")
shift &= ~EFI_SHIFT_STATE_VALID
dict_key = code, char, shift
if dict_key in _key_handlers:
raise RuntimeError("Duplicate key handler for key", dict_key)
stiex = EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL.from_handle(system_table.ConsoleInHandle)
handle = c_void_p()
notify = cast(c_void_p(_efi._c_key_callback), EFI_KEY_NOTIFY_FUNCTION)
key_data = EFI_KEY_DATA()
key_data.Key.ScanCode = code
key_data.Key.UnicodeChar = char
if shift:
key_data.KeyState.KeyShiftState = EFI_SHIFT_STATE_VALID | shift
check_status(stiex.RegisterKeyNotify(stiex, byref(key_data), notify, byref(handle)))
_key_handlers[dict_key] = (handle, handler)
def unregister_key_handler(code=0, char='\0', shift=0):
global _key_handlers
shift &= ~EFI_SHIFT_STATE_VALID
handle, handler = _key_handlers[(code, char, shift)]
stiex = EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL.from_handle(system_table.ConsoleInHandle)
check_status(stiex.UnregisterKeyNotify(stiex, handle))
del _key_handlers[(code, char, shift)]
@atexit.register
def unregister_all_key_handlers():
global _key_handlers
    for k in list(_key_handlers):
unregister_key_handler(*k)
def raise_KeyboardInterrupt():
raise KeyboardInterrupt()
def register_keyboard_interrupt_handler():
for char in ('c', '\x03'):
for shift in (EFI_LEFT_CONTROL_PRESSED, EFI_RIGHT_CONTROL_PRESSED):
register_key_handler(raise_KeyboardInterrupt, char=char, shift=shift)
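# Illustrative sketch (hedged, not part of the original module): bind a Python
# callback to the F10 key; 0x14 is the UEFI scan code for F10, though firmware
# support for key notifications varies.
def _example_bind_f10(callback):
    register_key_handler(callback, code=0x14)
    # ... and later, to release it:
    # unregister_key_handler(code=0x14)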
def list_pci_devices():
SegmentNumber = UINTN()
BusNumber = UINTN()
DeviceNumber = UINTN()
FunctionNumber = UINTN()
handles = locate_handles(EFI_PCI_IO_PROTOCOL_GUID)
for handle in handles:
pci_io = EFI_PCI_IO_PROTOCOL.from_handle(handle)
check_status(pci_io.GetLocation(byref(pci_io), byref(SegmentNumber), byref(BusNumber), byref(DeviceNumber), byref(FunctionNumber)))
print("{}:{}:{}:{}".format(SegmentNumber.value, BusNumber.value, DeviceNumber.value, FunctionNumber.value))
def exit(status=0):
if hasattr(_sys, "exitfunc"):
_sys.exitfunc()
system_table.BootServices.contents.Exit(_efi._image_handle, status, 0, None)
| {
"content_hash": "3f83a9ed717415293f5c314eceebe53f",
"timestamp": "",
"source": "github",
"line_count": 1928,
"max_line_length": 313,
"avg_line_length": 40.350103734439834,
"alnum_prop": 0.6569959508965871,
"repo_name": "biosbits/bits",
"id": "306f114619f9696578d82d8237bdbccecd06b17a",
"size": "79364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/efi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "11674"
},
{
"name": "C",
"bytes": "291011"
},
{
"name": "C++",
"bytes": "11894"
},
{
"name": "Makefile",
"bytes": "10177"
},
{
"name": "Python",
"bytes": "738083"
},
{
"name": "Shell",
"bytes": "3247"
}
],
"symlink_target": ""
} |
"""
Barbican defined mime-types
"""
import six
from barbican.common import utils
# Supported content types
# Note: These types may be provided by clients.
PLAIN_TEXT = ['text/plain',
'text/plain;charset=utf-8',
'text/plain; charset=utf-8']
PLAIN_TEXT_CHARSETS = ['utf-8']
BINARY = ['application/octet-stream',
'application/pkcs8']
SUPPORTED = PLAIN_TEXT + BINARY
# Normalizes client types to internal types.
INTERNAL_CTYPES = {'text/plain': 'text/plain',
'text/plain;charset=utf-8': 'text/plain',
'text/plain; charset=utf-8': 'text/plain',
'application/octet-stream': 'application/octet-stream',
'application/pkcs8': 'application/pkcs8',
'application/aes': 'application/aes'}
# Maps mime-types used to specify secret data formats to the types that can
# be requested for secrets via GET calls.
# Note: Raw client types are converted into the 'INTERNAL_CTYPES' types
# which are then used as the keys to the 'CTYPES_MAPPINGS' below.
CTYPES_PLAIN = {'default': 'text/plain'}
CTYPES_BINARY = {'default': 'application/octet-stream'}
CTYPES_PKCS8 = {'default': 'application/pkcs8'}
CTYPES_AES = {'default': 'application/aes'}
CTYPES_MAPPINGS = {'text/plain': CTYPES_PLAIN,
'application/octet-stream': CTYPES_BINARY,
'application/pkcs8': CTYPES_PKCS8,
'application/aes': CTYPES_AES}
# Supported encodings
ENCODINGS = ['base64']
# Maps normalized content-types to supported encoding(s)
CTYPES_TO_ENCODINGS = {'text/plain': None,
'application/octet-stream': ['base64', 'binary'],
'application/pkcs8': ['base64', 'binary'],
'application/aes': None}
def normalize_content_type(mime_type):
"""Normalize the supplied content-type to an internal form."""
stripped = list(six.moves.map(lambda x: x.strip(), mime_type.split(';')))
mime = stripped[0].lower()
if len(stripped) > 1:
# mime type includes charset
charset_type = stripped[1].lower()
if '=' not in charset_type:
# charset is malformed
return mime_type
else:
charset = list(six.moves.map(lambda x: x.strip(),
charset_type.split('=')))[1]
if charset not in PLAIN_TEXT_CHARSETS:
# unsupported charset
return mime_type
return INTERNAL_CTYPES.get(mime, mime_type)
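# Illustrative examples (not part of the original module):
#   normalize_content_type('text/plain; CHARSET=utf-8')   -> 'text/plain'
#   normalize_content_type('text/plain; charset=latin-1') -> returned unchanged,
#   since latin-1 is not a supported plain-text charset.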
def is_supported(mime_type):
normalized_type = normalize_content_type(mime_type)
return normalized_type in SUPPORTED
def is_base64_encoding_supported(mime_type):
if is_supported(mime_type):
encodings = CTYPES_TO_ENCODINGS[INTERNAL_CTYPES[mime_type]]
return encodings and ('base64' in encodings)
return False
def is_content_type_with_encoding_supported(content_type, content_encoding):
if not is_supported(content_type):
return False
normalized_type = normalize_content_type(content_type)
encodings = CTYPES_TO_ENCODINGS[INTERNAL_CTYPES[normalized_type]]
if encodings:
return content_encoding in encodings
else:
return content_encoding is None
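# Illustrative examples (not part of the original module):
#   is_content_type_with_encoding_supported('application/octet-stream', 'base64') -> True
#   is_content_type_with_encoding_supported('text/plain', None)                   -> True
#   is_content_type_with_encoding_supported('text/plain', 'base64')               -> False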
def get_supported_encodings(content_type):
normalized_type = normalize_content_type(content_type)
return CTYPES_TO_ENCODINGS[INTERNAL_CTYPES[normalized_type]]
def is_base64_processing_needed(content_type, content_encoding):
content_encodings = utils.get_accepted_encodings_direct(content_encoding)
if content_encodings:
if 'base64' not in content_encodings:
return False
if is_supported(content_type):
encodings = CTYPES_TO_ENCODINGS[INTERNAL_CTYPES[content_type]]
return encodings and 'base64' in encodings
return False
def use_binary_content_as_is(content_type, content_encoding):
"""Checks if headers are valid to allow binary content as-is."""
content_encodings = utils.get_accepted_encodings_direct(content_encoding)
if content_encodings:
if 'binary' not in content_encodings:
return False
if is_supported(content_type):
encodings = CTYPES_TO_ENCODINGS[INTERNAL_CTYPES.get(content_type)]
return encodings and 'binary' in encodings
return INTERNAL_CTYPES.get(content_type) in BINARY
def augment_fields_with_content_types(secret):
"""Add content-types and encodings information to a Secret's fields.
Generate a dict of content types based on the data associated
with the specified secret.
:param secret: The models.Secret instance to add 'content_types' to.
"""
fields = secret.to_dict_fields()
if not secret.secret_store_metadata:
return fields
content_type = secret.secret_store_metadata.get('content_type')
if content_type and content_type.value in CTYPES_MAPPINGS:
fields.update(
{'content_types': CTYPES_MAPPINGS[content_type.value]}
)
return fields
| {
"content_hash": "199e4cdab8407987f30d78f009c7cfae",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 78,
"avg_line_length": 35.836879432624116,
"alnum_prop": 0.6544626954284584,
"repo_name": "openstack/barbican",
"id": "ab7c8f0608cbad2ed0980b7f8992f719a0475602",
"size": "5643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbican/plugin/util/mime_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "1586"
},
{
"name": "Mako",
"bytes": "979"
},
{
"name": "Python",
"bytes": "2626403"
},
{
"name": "Shell",
"bytes": "43567"
}
],
"symlink_target": ""
} |
"""
Main implementation of Flamyngo webapp and all processing.
"""
import json
import os
import re
from functools import wraps
import pandas as pd
import plotly
import plotly.express as px
from flask import Response, make_response, render_template, request
from flask.json import jsonify
from monty.json import jsanitize
from monty.serialization import loadfn
from pymongo import MongoClient
from ruamel.yaml import YAML
from flamyngo.app import app
SETTINGS = loadfn(os.environ["FLAMYNGO"])
APP_TITLE = SETTINGS.get("title", "Flamyngo")
HELPTXT = SETTINGS.get("help", "")
TEMPLATE_FOLDER = SETTINGS.get("template_folder", "templates")
DB_SETTINGS = SETTINGS["db"]
if "connection_string" in DB_SETTINGS:
connect_string = DB_SETTINGS["connection_string"]
else:
if "username" in DB_SETTINGS:
connect_string = (
f'mongodb://{DB_SETTINGS["username"]}:{DB_SETTINGS["password"]}@'
f'{DB_SETTINGS["host"]}:{DB_SETTINGS["port"]}/{DB_SETTINGS["database"]}'
)
else:
connect_string = f'mongodb://{DB_SETTINGS["host"]}:{DB_SETTINGS["port"]}/{DB_SETTINGS["database"]}'
CONN = MongoClient(connect_string)
DB = CONN[DB_SETTINGS["database"]]
CNAMES = [f'{d["name"]}' for d in SETTINGS["collections"]]
CSETTINGS = {d["name"]: d for d in SETTINGS["collections"]}
AUTH_USER = SETTINGS.get("AUTH_USER", None)
AUTH_PASSWD = SETTINGS.get("AUTH_PASSWD", None)
API_KEY = SETTINGS.get("API_KEY", None)
def check_auth(username, password):
"""
This function is called to check if a username /
password combination is valid.
"""
if AUTH_USER is None:
return True
return username == AUTH_USER and password == AUTH_PASSWD
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
"Could not verify your access level for that URL. You have to login "
"with proper credentials",
401,
{"WWW-Authenticate": 'Basic realm="Login Required"'},
)
def requires_auth(f):
"""
Check for authentication.
"""
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
api_key = request.headers.get("API_KEY") or request.args.get("API_KEY")
if (API_KEY is not None) and api_key == API_KEY:
return f(*args, **kwargs)
if (AUTH_USER is not None) and (
not auth or not check_auth(auth.username, auth.password)
):
return authenticate()
return f(*args, **kwargs)
return decorated
def get_mapped_name(settings, name):
"""
The following allows used of mapped names in search criteria.
"""
name_mappings = {v: k for k, v in settings.get("aliases", {}).items()}
return name_mappings.get(name, name)
def process_search_string_regex(search_string, settings):
"""
Process search string with regex
"""
criteria = {}
for regex in settings["query"]:
if re.match(regex[1], search_string):
criteria[regex[0]] = {"$regex": str(process(search_string, regex[2]))}
break
if not criteria:
clean_search_string = search_string.strip()
if clean_search_string[0] != "{" or clean_search_string[-1] != "}":
clean_search_string = "{" + clean_search_string + "}"
criteria = json.loads(clean_search_string)
criteria = {get_mapped_name(settings, k): v for k, v in criteria.items()}
return criteria
def process_search_string(search_string, settings):
"""
Process search string with query.
"""
criteria = {}
for regex in settings["query"]:
if re.match(regex[1], search_string):
criteria[regex[0]] = process(search_string, regex[2])
break
if not criteria:
clean_search_string = search_string.strip()
if clean_search_string[0] != "{" or clean_search_string[-1] != "}":
clean_search_string = "{" + clean_search_string + "}"
criteria = json.loads(clean_search_string)
criteria = {get_mapped_name(settings, k): v for k, v in criteria.items()}
return criteria
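# Illustrative example (hedged; the settings shown are an assumption, not part
# of this module): with settings = {"query": [["task_id", r"^mp-\d+$", "str"]],
# "aliases": {}}, the input "mp-1234" matches the regex and yields
# {"task_id": "mp-1234"}; any non-matching input is parsed as a JSON criteria
# dict, so '"nelements": 2' yields {"nelements": 2}.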
@app.route("/", methods=["GET"])
@requires_auth
def index():
"""
Index page.
"""
return make_response(
render_template(
"index.html", collections=CNAMES, helptext=HELPTXT, app_title=APP_TITLE
)
)
@app.route("/autocomplete", methods=["GET"])
@requires_auth
def autocomplete():
"""
Autocomplete if allowed.
"""
if SETTINGS.get("autocomplete"):
terms = []
criteria = {}
search_string = request.args.get("term")
cname = request.args.get("collection").split(":")[0]
collection = DB[cname]
settings = CSETTINGS[cname]
# if search looks like a special query, autocomplete values
for regex in settings["query"]:
if re.match(regex[1], search_string):
criteria[regex[0]] = {"$regex": str(process(search_string, regex[2]))}
projection = {regex[0]: 1}
results = collection.find(criteria, projection)
if results:
terms = [term[regex[0]] for term in results]
# if search looks like a query dict, autocomplete keys
if not criteria and search_string[0:2] == '{"':
if search_string.count('"') % 2 != 0:
                parts = search_string.split('"')
                previous = parts[:-1]
                last = parts[-1]
                # get list of autocomplete keys from settings
                # generic alternative: use a schema analyzer like variety.js
results = _search_dict(settings["autocomplete_keys"], last)
if results:
terms = ['"'.join(previous + [term]) + '":' for term in results]
return jsonify(matching_results=jsanitize(list(set(terms))))
return jsonify(matching_results=[])
@app.route("/query", methods=["GET"])
@requires_auth
def query():
"""
Process query search.
"""
cname = request.args.get("collection").split(":")[0]
settings = CSETTINGS[cname]
search_string = request.args.get("search_string")
projection = [t[0].split(".")[0] for t in settings["summary"]]
fields = None
results = None
mapped_names = None
error_message = None
try:
if search_string.strip() != "":
criteria = process_search_string(search_string, settings)
criteria.update(settings.get("filter_criteria", {}))
results = []
for r in DB[cname].find(criteria, projection=projection):
processed = []
mapped_names = {}
fields = []
for m in settings["summary"]:
if len(m) == 2:
k, v = m
else:
raise ValueError("Invalid summary settings!")
mapped_k = settings.get("aliases", {}).get(k, k)
val = _get_val(k, r, v.strip())
val = val if val is not None else ""
mapped_names[k] = mapped_k
processed.append(val)
fields.append(mapped_k)
results.append(processed)
if not results:
error_message = "No results!"
else:
error_message = "No results!"
except Exception as ex:
error_message = str(ex)
try:
sort_key, sort_mode = settings["sort"]
sort_index = fields.index(sort_key)
except Exception:
sort_index = 0
sort_mode = "asc"
return make_response(
render_template(
"index.html",
collection_name=cname,
sort_index=sort_index,
sort_mode=sort_mode,
results=results,
fields=fields,
search_string=search_string,
mapped_names=mapped_names,
unique_key=settings["unique_key"],
active_collection=cname,
collections=CNAMES,
error_message=error_message,
helptext=HELPTXT,
app_title=APP_TITLE,
)
)
@app.route("/plot", methods=["GET"])
@requires_auth
def plot():
"""
Plot data.
"""
cname = request.args.get("collection")
if not cname:
return make_response(render_template("plot.html", collections=CNAMES))
cname = cname.split(":")[0]
plot_type = request.args.get("plot_type") or "scatter"
search_string = request.args.get("search_string")
xaxis = request.args.get("xaxis")
yaxis = request.args.get("yaxis")
settings = CSETTINGS[cname]
xaxis_mapped = get_mapped_name(settings, xaxis)
yaxis_mapped = get_mapped_name(settings, yaxis)
projection = [xaxis_mapped, yaxis_mapped]
if search_string.strip() != "":
criteria = process_search_string(search_string, settings)
data = []
for r in DB[cname].find(criteria, projection=projection):
x = _get_val(xaxis_mapped, r, None)
y = _get_val(yaxis_mapped, r, None)
if x and y:
data.append([x, y])
else:
data = []
df = pd.DataFrame(data, columns=[xaxis, yaxis])
if plot_type == "scatter":
fig = px.scatter(df, x=xaxis, y=yaxis)
else:
fig = px.bar(df, x=xaxis, y=yaxis)
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return make_response(
render_template(
"plot.html",
collection=cname,
search_string=search_string,
plot_type=plot_type,
xaxis=xaxis,
yaxis=yaxis,
active_collection=cname,
collections=CNAMES,
app_title=APP_TITLE,
graphJSON=graphJSON,
)
)
@app.route("/<string:collection_name>/unique_ids")
@requires_auth
def get_ids(collection_name):
"""
Returns unique ids
"""
settings = CSETTINGS[collection_name]
doc = DB[collection_name].distinct(settings["unique_key"])
return jsonify(jsanitize(doc))
@app.route("/<string:collection_name>/doc/<string:uid>")
@requires_auth
def get_doc(collection_name, uid):
"""
Returns document.
"""
return make_response(
render_template(
"doc.html", collection_name=collection_name, doc_id=uid, app_title=APP_TITLE
)
)
@app.route("/<string:collection_name>/doc/<string:uid>/<string:field>")
@requires_auth
def get_doc_field(collection_name, uid, field):
"""
Get doc field.
"""
settings = CSETTINGS[collection_name]
criteria = {settings["unique_key"]: process(uid, settings["unique_key_type"])}
doc = DB[collection_name].find_one(criteria, projection=[field])
return Response(str(doc[field]), mimetype="text/plain")
@app.route("/<string:collection_name>/doc/<string:uid>/json")
@requires_auth
def get_doc_json(collection_name, uid):
"""
Get doc json.
"""
settings = CSETTINGS[collection_name]
projection = {k: False for k in settings.get("doc_exclude", [])}
criteria = {settings["unique_key"]: process(uid, settings["unique_key_type"])}
doc = DB[collection_name].find_one(criteria, projection=projection)
return jsonify(jsanitize(doc))
@app.route("/<string:collection_name>/doc/<string:uid>/yaml")
@requires_auth
def get_doc_yaml(collection_name, uid):
"""
Get doc yaml.
"""
settings = CSETTINGS[collection_name]
projection = {k: False for k in settings.get("doc_exclude", [])}
criteria = {settings["unique_key"]: process(uid, settings["unique_key_type"])}
doc = DB[collection_name].find_one(criteria, projection=projection)
yml = YAML()
yml.default_flow_style = False
from io import StringIO
s = StringIO()
yml.dump(jsanitize(doc), s)
response = make_response(s.getvalue(), 200)
response.mimetype = "text/plain"
return response
def process(val, vtype):
"""
Value processing and formatting.
"""
if vtype:
if vtype.startswith("%"):
return vtype % val
toks = vtype.rsplit(".", 1)
if len(toks) == 1:
func = globals()["__builtins__"][toks[0]]
else:
mod = __import__(toks[0], globals(), locals(), [toks[1]], 0)
func = getattr(mod, toks[1])
return func(val)
try:
if float(val) == int(val):
return int(val)
return float(val)
except Exception:
try:
return float(val)
except Exception:
            # val is a plain string.
return val
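# Illustrative behaviour of `process` (examples are not from the original source):
#   process("42", None)      -> 42       (numeric strings are coerced to int)
#   process("4.2", None)     -> 4.2      (falls back to float)
#   process("abc", None)     -> "abc"    (non-numeric strings pass through)
#   process(0.5, "%.1f%%")   -> "0.5%"   (a vtype starting with "%" is a format string)
#   process("7", "int")      -> 7        (a bare name is looked up among builtins,
#                                         which assumes __builtins__ is a dict here)
# A dotted vtype such as "decimal.Decimal" imports the module and applies the attribute.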
def _get_val(k, d, processing_func):
toks = k.split(".")
try:
val = d[toks[0]]
for t in toks[1:]:
try:
val = val[t]
except TypeError:
# Handle integer indices
val = val[int(t)]
val = process(val, processing_func)
except Exception:
# Return the base value if we cannot descend into the data.
val = None
return val
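# Illustrative sketch (not from the original source): `_get_val` walks a dotted
# key through nested dicts/lists, e.g. with d = {"a": {"b": [10, 20]}}:
#   _get_val("a.b.1", d, None)  -> 20    (integer tokens index into sequences)
#   _get_val("a.c", d, None)    -> None  (missing keys yield None)
# The third argument is forwarded to `process` for optional type coercion.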
def _search_dict(dictionary, substr):
result = []
for key in dictionary:
if substr.lower() in key.lower():
result.append(key)
return result
if "additional_endpoints" in SETTINGS:
for rule, endpoint in SETTINGS["additional_endpoints"].items():
toks = endpoint.rsplit(".", 1)
if len(toks) == 1:
func = globals()["__builtins__"][toks[0]]
else:
mod = __import__(toks[0], globals(), locals(), [toks[1]], 0)
func = getattr(mod, toks[1])
app.add_url_rule(rule, view_func=func)
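# Illustrative only (the module path below is hypothetical): SETTINGS["additional_endpoints"]
# maps a URL rule to a dotted callable path, e.g.
#   SETTINGS["additional_endpoints"] = {"/about": "myplugins.views.about"}
# A bare name without a dot is resolved from Python builtins instead.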
if __name__ == "__main__":
app.run(debug=True)
| {
"content_hash": "185530a04eb40d5c442a3b10be83a900",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 107,
"avg_line_length": 30.032894736842106,
"alnum_prop": 0.5809419496166484,
"repo_name": "materialsvirtuallab/flamyngo",
"id": "3324f647024b39851cc48701920b0434236c0c5f",
"size": "13695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flamyngo/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3026"
},
{
"name": "HTML",
"bytes": "10374"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Python",
"bytes": "19393"
}
],
"symlink_target": ""
} |
from alad_support import *
from results_support import *
from test_setups import *
"""
python pyalad/alad.py
"""
if __name__ == '__main__':
args = get_command_args(debug=False, debug_args=None)
configure_logger(args)
opts = Opts(args)
logger.debug(opts.str_opts())
# print opts.str_opts()
set_seed(opts.randseed)
alad_results = alad(opts)
opts.fid = 0
opts.runidx = 0
write_sequential_results_to_csv(alad_results, opts)
print "completed alad %s for %s" % (opts.detector_type_str(), opts.dataset,)
| {
"content_hash": "4afa6f2b027bd0969bfed839f20b31fb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 17.870967741935484,
"alnum_prop": 0.6407942238267148,
"repo_name": "shubhomoydas/pyaad",
"id": "aa41e1fbab88754a01ad184c4759466ba0e9b07c",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyalad/alad.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "472606"
},
{
"name": "R",
"bytes": "9471"
},
{
"name": "Shell",
"bytes": "26877"
}
],
"symlink_target": ""
} |
from coalib.bears.GlobalBear import GlobalBear
from coalib.results.Result import Result
class ProcessingGlobalTestRawFileBear(GlobalBear):
USE_RAW_FILES = True
def run(self):
for filename in self.file_dict:
return [Result.from_values('GlobalTestRawBear',
'test message',
filename)]
| {
"content_hash": "902c57bf09e12986964244c2adb59bfb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 30.153846153846153,
"alnum_prop": 0.5867346938775511,
"repo_name": "IPMITMO/statan",
"id": "7934476a9204d6e3cfa54a787fc3d2870d9f6b12",
"size": "392",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "coala/tests/processes/section_executor_test_files/ProcessingGlobalTestRawFileBear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "101"
},
{
"name": "Batchfile",
"bytes": "10931"
},
{
"name": "C",
"bytes": "28190"
},
{
"name": "C#",
"bytes": "45474"
},
{
"name": "C++",
"bytes": "335"
},
{
"name": "CSS",
"bytes": "6631"
},
{
"name": "Go",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "1564"
},
{
"name": "Java",
"bytes": "592"
},
{
"name": "JavaScript",
"bytes": "472227"
},
{
"name": "Makefile",
"bytes": "15304"
},
{
"name": "PHP",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "2312447"
},
{
"name": "Ruby",
"bytes": "447"
},
{
"name": "Shell",
"bytes": "12706"
}
],
"symlink_target": ""
} |
import os, sys, hashlib, binascii, time, decimal, logging, locale, re, io
import difflib, json, inspect, tempfile, shutil
import apsw, pytest, requests
from requests.auth import HTTPBasicAuth
CURR_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(CURR_DIR, '..')))
from lib import (config, api, util, exceptions, reddcoin, blocks)
from lib import (send, order, rddpay, issuance, broadcast, bet, dividend, burn, cancel, callback, rps, rpsresolve)
from lib.exceptions import ConsensusError
import reddcraftd
from fixtures.params import DEFAULT_PARAMS as DP
from fixtures.scenarios import UNITEST_FIXTURE, INTEGRATION_SCENARIOS, standard_scenarios_params
import bitcoin as reddcoinlib
import binascii
D = decimal.Decimal
# Set test environment
os.environ['TZ'] = 'EST'
time.tzset()
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
COUNTERPARTYD_OPTIONS = {
'testcoin': False,
'backend_rpc_ssl_verify': False,
'data_dir': tempfile.gettempdir(),
'rpc_port': 9999,
'rpc_password': 'pass',
'backend_rpc_port': 8888,
'backend_rpc_password': 'pass'
}
def dump_database(db):
    # TEMPORARY
    # The .dump command misbehaves when apsw.Shell is given a 'db' argument instead of 'args',
    # but this approach stays ~20x faster than running the scenario with a file-backed db.
db_filename = tempfile.gettempdir() + '/tmpforbackup.db'
remove_database_files(db_filename)
filecon = apsw.Connection(db_filename)
with filecon.backup("main", db, "main") as backup:
backup.step()
output = io.StringIO()
shell = apsw.Shell(stdout=output, args=(db_filename,))
#shell = apsw.Shell(stdout=output, db=db)
shell.process_command(".dump")
lines = output.getvalue().split('\n')[8:]
new_data = '\n'.join(lines)
    # Move a ';' that ended up on its own line back onto the closing parenthesis.
    new_data = re.sub(r'\)[\n\s]+;', ');', new_data)
    # apsw oddness: the following line is not always generated!
new_data = new_data.replace('-- The values of various per-database settings\n', '')
remove_database_files(db_filename)
return new_data
def restore_database(database_filename, dump_filename):
remove_database_files(database_filename)
db = apsw.Connection(database_filename)
cursor = db.cursor()
with open(dump_filename, 'r') as sql_dump:
cursor.execute(sql_dump.read())
cursor.close()
def remove_database_files(database_filename):
for path in [database_filename, '{}-shm'.format(database_filename), '{}-wal'.format(database_filename)]:
if os.path.isfile(path):
os.remove(path)
def insert_block(db, block_index, parse_block=False):
cursor = db.cursor()
block_hash = hashlib.sha512(chr(block_index).encode('utf-8')).hexdigest()
block_time = block_index * 10000000
block = (block_index, block_hash, block_time, None, None)
cursor.execute('''INSERT INTO blocks VALUES (?,?,?,?,?)''', block)
cursor.close()
if parse_block:
blocks.parse_block(db, block_index, block_time)
return block_index, block_hash, block_time
def create_next_block(db, block_index=None, parse_block=False):
cursor = db.cursor()
last_block_index = list(cursor.execute("SELECT block_index FROM blocks ORDER BY block_index DESC LIMIT 1"))[0]['block_index']
if not block_index:
block_index = last_block_index + 1
for index in range(last_block_index + 1, block_index + 1):
inserted_block_index, block_hash, block_time = insert_block(db, index, parse_block=parse_block)
cursor.close()
return inserted_block_index, block_hash, block_time
def insert_raw_transaction(raw_transaction, db, rawtransactions_db):
# one transaction per block
block_index, block_hash, block_time = create_next_block(db)
cursor = db.cursor()
tx_index = block_index - config.BURN_START + 1
tx = reddcoin.decode_raw_transaction(raw_transaction)
tx_hash = hashlib.sha256('{}{}'.format(tx_index,raw_transaction).encode('utf-8')).hexdigest()
#print(tx_hash)
tx['txid'] = tx_hash
if pytest.config.option.saverawtransactions:
save_rawtransaction(rawtransactions_db, tx_hash, raw_transaction, json.dumps(tx))
source, destination, rdd_amount, fee, data = blocks.get_tx_info2(tx, block_index)
transaction = (tx_index, tx_hash, block_index, block_hash, block_time, source, destination, rdd_amount, fee, data, True)
cursor.execute('''INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?)''', transaction)
tx = list(cursor.execute('''SELECT * FROM transactions WHERE tx_index = ?''', (tx_index,)))[0]
cursor.close()
blocks.parse_block(db, block_index, block_time)
return tx
def insert_transaction(transaction, db):
cursor = db.cursor()
block = (transaction['block_index'], transaction['block_hash'], transaction['block_time'], None, None)
cursor.execute('''INSERT INTO blocks VALUES (?,?,?,?,?)''', block)
keys = ",".join(transaction.keys())
cursor.execute('''INSERT INTO transactions ({}) VALUES (?,?,?,?,?,?,?,?,?,?,?)'''.format(keys), tuple(transaction.values()))
cursor.close()
# Table used by the getrawtransaction mock.
# We use the same in-memory database for speed.
def initialise_rawtransactions_db(db):
if pytest.config.option.initrawtransactions:
reddcraftd.set_options(testnet=True, **COUNTERPARTYD_OPTIONS)
cursor = db.cursor()
cursor.execute('DROP TABLE IF EXISTS raw_transactions')
cursor.execute('CREATE TABLE IF NOT EXISTS raw_transactions(tx_hash TEXT UNIQUE, tx_hex TEXT, tx_json TEXT)')
with open(CURR_DIR + '/fixtures/unspent_outputs.json', 'r') as listunspent_test_file:
wallet_unspent = json.load(listunspent_test_file)
for output in wallet_unspent:
txid = binascii.hexlify(reddcoinlib.core.lx(output['txid'])).decode()
tx = reddcoin.decode_raw_transaction(output['txhex'])
cursor.execute('INSERT INTO raw_transactions VALUES (?, ?, ?)', (txid, output['txhex'], json.dumps(tx)))
cursor.close()
def save_rawtransaction(db, tx_hash, tx_hex, tx_json):
cursor = db.cursor()
try:
txid = binascii.hexlify(reddcoinlib.core.lx(tx_hash)).decode()
cursor.execute('''INSERT INTO raw_transactions VALUES (?, ?, ?)''', (txid, tx_hex, tx_json))
    except Exception:
        # Ignore failed inserts (e.g. duplicate tx_hash rows).
        pass
cursor.close()
def getrawtransaction(db, txid):
cursor = db.cursor()
txid = binascii.hexlify(txid).decode()
tx_hex = list(cursor.execute('''SELECT tx_hex FROM raw_transactions WHERE tx_hash = ?''', (txid,)))[0][0]
cursor.close()
return tx_hex
def decoderawtransaction(db, tx_hex):
cursor = db.cursor()
tx_json = list(cursor.execute('''SELECT tx_json FROM raw_transactions WHERE tx_hex = ?''', (tx_hex,)))[0][0]
cursor.close()
return json.loads(tx_json)
def initialise_db(db):
blocks.initialise(db)
insert_block(db, config.BURN_START - 1)
def run_scenario(scenario, rawtransactions_db):
reddcraftd.set_options(database_file=':memory:', testnet=True, **COUNTERPARTYD_OPTIONS)
config.PREFIX = b'TESTXXXX'
config.FIRST_MULTISIG_BLOCK_TESTNET = 1
config.CHECKPOINTS_TESTNET = {}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger_buff = io.StringIO()
handler = logging.StreamHandler(logger_buff)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
asyncio_log = logging.getLogger('asyncio')
asyncio_log.setLevel(logging.ERROR)
db = util.connect_to_db()
initialise_db(db)
raw_transactions = []
for transaction in scenario:
if transaction[0] != 'create_next_block':
module = sys.modules['lib.{}'.format(transaction[0])]
compose = getattr(module, 'compose')
unsigned_tx_hex = reddcoin.transaction(db, compose(db, *transaction[1]), **transaction[2])
raw_transactions.append({transaction[0]: unsigned_tx_hex})
insert_raw_transaction(unsigned_tx_hex, db, rawtransactions_db)
else:
create_next_block(db, block_index=config.BURN_START + transaction[1], parse_block=True)
dump = dump_database(db)
log = logger_buff.getvalue()
db.close()
return dump, log, json.dumps(raw_transactions, indent=4)
def save_scenario(scenario_name, rawtransactions_db):
dump, log, raw_transactions = run_scenario(INTEGRATION_SCENARIOS[scenario_name][0], rawtransactions_db)
with open(CURR_DIR + '/fixtures/scenarios/' + scenario_name + '.new.sql', 'w') as f:
f.writelines(dump)
with open(CURR_DIR + '/fixtures/scenarios/' + scenario_name + '.new.log', 'w') as f:
f.writelines(log)
with open(CURR_DIR + '/fixtures/scenarios/' + scenario_name + '.new.json', 'w') as f:
f.writelines(raw_transactions)
def load_scenario_ouput(scenario_name):
with open(CURR_DIR + '/fixtures/scenarios/' + scenario_name + '.sql', 'r') as f:
dump = ("").join(f.readlines())
with open(CURR_DIR + '/fixtures/scenarios/' + scenario_name + '.log', 'r') as f:
log = ("").join(f.readlines())
with open(CURR_DIR + '/fixtures/scenarios/' + scenario_name + '.json', 'r') as f:
raw_transactions = ("").join(f.readlines())
return dump, log, raw_transactions
def clean_scenario_dump(scenario_name, dump):
dump = dump.replace(standard_scenarios_params[scenario_name]['address1'], 'address1')
dump = dump.replace(standard_scenarios_params[scenario_name]['address2'], 'address2')
dump = re.sub('[a-f0-9]{64}', 'hash', dump)
    dump = re.sub(r"X'[A-F0-9]+',1\);", "'data',1)", dump)
return dump
def check_record(record, reddcraftd_db):
cursor = reddcraftd_db.cursor()
sql = '''SELECT COUNT(*) AS c FROM {} '''.format(record['table'])
sql += '''WHERE '''
bindings = []
conditions = []
for field in record['values']:
if record['values'][field] is not None:
conditions.append('''{} = ?'''.format(field))
bindings.append(record['values'][field])
sql += " AND ".join(conditions)
count = list(cursor.execute(sql, tuple(bindings)))[0]['c']
if count != 1:
print(list(cursor.execute('''SELECT * FROM {} WHERE block_index = ?'''.format(record['table']), (record['values']['block_index'],))))
assert False
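# Illustrative record shape (values are hypothetical): check_record asserts that
# exactly one row matches all non-None fields, e.g.
#   check_record({'table': 'credits',
#                 'values': {'block_index': 310000, 'address': 'address1',
#                            'quantity': 100000000}}, reddcraftd_db)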
def vector_to_args(vector, functions=[]):
args = []
for tx_name in vector:
for method in vector[tx_name]:
for params in vector[tx_name][method]:
error = outputs = records = None
if 'out' in params:
outputs = params['out']
if 'error' in params:
error = params['error']
if 'records' in params:
records = params['records']
if functions == [] or (tx_name + '.' + method) in functions:
args.append((tx_name, method, params['in'], outputs, error, records))
return args
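# Illustrative sketch (hypothetical vector): vector_to_args flattens the nested
# test-vector dict into pytest parameter tuples, e.g.
#   vector = {'send': {'compose': [{'in': ('source', 'dest', 'XCP', 100),
#                                   'out': 'deadbeef'}]}}
#   vector_to_args(vector)
#   -> [('send', 'compose', ('source', 'dest', 'XCP', 100), 'deadbeef', None, None)]
# Passing functions=['send.compose'] keeps only that method's cases.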
def exec_tested_method(tx_name, method, tested_method, inputs, reddcraftd_db):
if tx_name == 'reddcoin' and method == 'transaction':
return tested_method(reddcraftd_db, inputs[0], **inputs[1])
elif tx_name == 'util' and method == 'api':
return tested_method(*inputs)
elif tx_name == 'reddcoin' and method == 'base58_check_decode':
return binascii.hexlify(tested_method(*inputs)).decode('utf-8')
else:
return tested_method(reddcraftd_db, *inputs)
def check_ouputs(tx_name, method, inputs, outputs, error, records, reddcraftd_db):
tested_module = sys.modules['lib.{}'.format(tx_name)]
tested_method = getattr(tested_module, method)
test_outputs = None
if error is not None:
with pytest.raises(getattr(exceptions, error[0])) as exception:
test_outputs = exec_tested_method(tx_name, method, tested_method, inputs, reddcraftd_db)
else:
test_outputs = exec_tested_method(tx_name, method, tested_method, inputs, reddcraftd_db)
if pytest.config.option.gentxhex and method == 'compose':
print('')
tx_params = {
'encoding': 'multisig'
}
if tx_name == 'order' and inputs[1]=='RDD':
print('give rdd')
tx_params['fee_provided'] = DP['fee_provided']
unsigned_tx_hex = reddcoin.transaction(reddcraftd_db, test_outputs, **tx_params)
print(tx_name)
print(unsigned_tx_hex)
if outputs is not None:
assert outputs == test_outputs
if error is not None:
assert str(exception.value) == error[1]
if records is not None:
for record in records:
check_record(record, reddcraftd_db)
def compare_strings(string1, string2):
diff = list(difflib.unified_diff(string1.splitlines(1), string2.splitlines(1), n=0))
if len(diff):
print("\nDifferences:")
print("\n".join(diff))
return len(diff)
def get_block_ledger(db, block_index):
cursor = db.cursor()
debits = list(cursor.execute('''SELECT * FROM debits WHERE block_index = ?''', (block_index,)))
credits = list(cursor.execute('''SELECT * FROM credits WHERE block_index = ?''', (block_index,)))
debits = [json.dumps(m).replace('"', '\'') for m in debits]
credits = [json.dumps(m).replace('"', '\'') for m in credits]
ledger = json.dumps(debits + credits, indent=4)
return ledger
def get_block_txlist(db, block_index):
cursor = db.cursor()
txlist = list(cursor.execute('''SELECT * FROM transactions WHERE block_index = ?''', (block_index,)))
txlist = [json.dumps(m).replace('"', '\'') for m in txlist]
txlist = json.dumps(txlist, indent=4)
return txlist
def reparse(testnet=True):
options = dict(COUNTERPARTYD_OPTIONS)
options.pop('data_dir')
reddcraftd.set_options(database_file=':memory:', testnet=testnet, **options)
if testnet:
config.PREFIX = b'TESTXXXX'
logger = logging.getLogger()
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
memory_db = util.connect_to_db()
initialise_db(memory_db)
prod_db_path = os.path.join(config.DATA_DIR, '{}.{}{}.db'.format(config.XRT_CLIENT, str(config.VERSION_MAJOR), '.testnet' if testnet else ''))
prod_db = apsw.Connection(prod_db_path)
prod_db.setrowtrace(util.rowtracer)
with memory_db.backup("main", prod_db, "main") as backup:
backup.step()
    # Here we don't use blocks.reparse() because it reparses the db inside a transaction (`with db`).
memory_cursor = memory_db.cursor()
for table in blocks.TABLES + ['balances']:
memory_cursor.execute('''DROP TABLE IF EXISTS {}'''.format(table))
blocks.initialise(memory_db)
previous_ledger_hash = None
previous_txlist_hash = None
memory_cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
for block in memory_cursor.fetchall():
try:
logger.info('Block (re‐parse): {}'.format(str(block['block_index'])))
previous_ledger_hash, previous_txlist_hash = blocks.parse_block(memory_db, block['block_index'], block['block_time'],
previous_ledger_hash, block['ledger_hash'],
previous_txlist_hash, block['txlist_hash'])
except ConsensusError as e:
message = str(e)
if message.find('ledger_hash') != -1:
new_ledger = get_block_ledger(memory_db, block['block_index'])
old_ledger = get_block_ledger(prod_db, block['block_index'])
compare_strings(old_ledger, new_ledger)
elif message.find('txlist_hash') != -1:
new_txlist = get_block_txlist(memory_db, block['block_index'])
old_txlist = get_block_txlist(prod_db, block['block_index'])
compare_strings(old_txlist, new_txlist)
raise(e)
| {
"content_hash": "1b8c53cb4c35e8bdbd75a637cedc7861",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 146,
"avg_line_length": 43.25132275132275,
"alnum_prop": 0.6350847146614472,
"repo_name": "Reddcraft/reddcraftd",
"id": "62936173cc50071615dda0129e71a3cf4e9bfe81",
"size": "16351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/util_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5580"
},
{
"name": "Python",
"bytes": "400095"
},
{
"name": "Shell",
"bytes": "5104"
}
],
"symlink_target": ""
} |
from .. import events
from ..entity import entity_method
@events.subscriber
def did_load_object(event):
"""
Sets _parent references for entities in a container when the container
entity is loaded.
"""
if hasattr(event.actor, 'container'):
contents = list(event.actor.container.contents.values())
event.actor.container.contents = {}
for entity in contents:
entity._parent = event.actor
event.actor.container.contents[entity._path] = entity
# FIXME: for testing
if hasattr(entity, 'behavior'):
entity.start_behavior()
@entity_method
def get_contents(self, facet=None):
return (x for x in self.container.contents.values() if facet is None or hasattr(x, facet))
@entity_method
def contains_entity_with_facet(self, facet):
return any(x for x in self.container.contents.values() if hasattr(x, facet))
| {
"content_hash": "4b3866ce52164b9955694182a43c89cc",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 94,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.6633879781420765,
"repo_name": "wirefish/amber",
"id": "89b94d0906afaeb86ec801818bc01ded975491fc",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amber/systems/container.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13812"
},
{
"name": "HTML",
"bytes": "9822"
},
{
"name": "JavaScript",
"bytes": "58425"
},
{
"name": "Python",
"bytes": "175947"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.web.websockets import WebSocketsResource, WebSocketsProtocol, lookupProtocolForFactory
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.internet import protocol
from twisted.application import service, internet
from Queue import Queue
from apiclient.discovery import build
import json
pending = None
languages = {
"en": "English",
"fr": "French",
"es": "Spanish",
"de": "German",
"ja": "Japanese",
"ru": "Russian",
"zh-CN": "Chinese",
}
service = build('translate', 'v2',
developerKey='AIzaSyA6YjQwwDPZ52y8ejL9oemcvAc6rnAwwig')
class Chat(LineReceiver):
def __init__(self):
self.name = None
self.status = 0
self.match = None
self.lang = None
def setMatch(self, match):
self.match = match
print "Sending initial match message to %s" % self.name
self.message("N", self.match.name)
self.message("S", "You're connected to %s (%s)" % (self.match.name, languages[self.match.lang]), self.lang, "en")
# print "Matching %s (%s) to %s (%s)" % (self.name, self.lang, self.match.name, self.match.lang)
def connectionMade(self):
print "Got new client!"
# print pendingUsers.qsize()
# self.transport.write('connected ....\n')
# self.factory.clients.append(self)
def connectionLost(self, reason):
print "Lost a client!"
global pending
if pending == self:
pending = None
if self.match:
self.match.message("E", "Partner has disconnected", self.match.lang, "en")
def dataReceived(self, data):
global pending
if self.status == 0:
self.name = data
self.status = 1
print "Name received: " + self.name
elif self.status == 1:
self.lang = data
print "Lang received: " + self.lang
if pending == None:
print "Pending is empty"
pending = self
print "Pending is now " + self.name
else:
print "Pending is " + pending.name
self.setMatch(pending)
pending = None
self.match.setMatch(self)
self.status = 2
elif self.status == 2:
response = self.match.message("M", data[5:], self.match.lang, self.lang)
if self.lang != self.match.lang and response:
self.message("T" + data[:5], response, "en", "en")
# print "received", repr(data)
def message(self, prefix, message, lang_to="", lang_from=""):
# try:
if lang_to != "" and lang_from != "":
if lang_to != lang_from:
translated = service.translations().list(
source=lang_from,
target=lang_to,
q=[message.decode('utf-8')]
).execute()
translated = json.loads(json.dumps(translated))
message = translated["translations"][0]["translatedText"].encode('utf8')
self.transport.write(prefix + message + '\n')
# print "Writing %s to %s" % (prefix+message, self.name)
return message
# except:
# if self.match:
# self.match.transport.write("EError sending message. Try again." + '\n')
# return False
class ChatFactory(Factory):
protocol = Chat
clients = []
resource = WebSocketsResource(lookupProtocolForFactory(ChatFactory()))
root = Resource()
#serve chat protocol on /ws
root.putChild("chat",resource)
from twisted.application.service import Application
application = Application("chatserver")
internet.TCPServer(8001, Site(root)).setServiceParent(application)
# class ChatFactory(Factory):
# def __init__(self):
# self.users = {} # maps user names to Chat instances
# def buildProtocol(self, addr):
# return Chat(self.users)
# reactor.listenTCP(8001, ChatFactory())
# reactor.run()
| {
"content_hash": "f95a829bedac27b6e23e9f55b58bfa9b",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 121,
"avg_line_length": 31.637037037037036,
"alnum_prop": 0.5876843830484664,
"repo_name": "alexkau/ChatGlobal",
"id": "b28a5d2577baa4581c4f3b75e32ce927d58801ee",
"size": "4294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2884"
},
{
"name": "JavaScript",
"bytes": "11550"
},
{
"name": "Python",
"bytes": "4294"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from facebook import GraphAPIError
from rest_framework import serializers
from user import constants
from user.models import FruitUser
from ..utils import facebook as fcb
class AuthTokenFacebookSerializer(serializers.Serializer):
email = serializers.EmailField(
required=True,
allow_null=False,
max_length=constants.EMAIL_MAX_LENGTH,
)
fcb_id = serializers.CharField(
required=True,
allow_null=False,
max_length=constants.FCB_ID_MAX_LENGTH,
)
default_error_messages = {
"facebook_verification": _("Facebook verification failed: {context}"),
"user_does_not_exist": _("User with email {email} and Facebook ID {fcb_id} does not exist."),
"user_disabled": _("User account with email {email} is disabled."),
}
def validate(self, attrs):
email = attrs.get("email")
fcb_id = attrs.get("fcb_id")
try:
user = FruitUser.objects.get(email__iexact=email, facebook__fcb_id=fcb_id)
except FruitUser.DoesNotExist:
self.fail("user_does_not_exist", email=email, fcb_id=fcb_id)
if not user.is_active:
self.fail("user_disabled", email=email)
fcb_info = user.facebook
try:
fcb_user = fcb.verify_user(fcb_info.fcb_id, fcb_info.fcb_token)
except GraphAPIError as e:
self.fail("facebook_verification", context=e)
return {
"user": user,
"fcb_user": fcb_user,
"fcb_token": fcb_info.fcb_token,
**attrs,
}
def save(self, **kwargs):
fcb_user = self.validated_data["fcb_user"]
user = self.validated_data["user"]
fcb_id = self.validated_data["fcb_id"]
fcb_token = self.validated_data["fcb_token"]
return (fcb.connect_user(fcb_user, user, fcb_id, fcb_token),)
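# Illustrative usage only (standard DRF flow; the values are hypothetical):
#   serializer = AuthTokenFacebookSerializer(data={
#       "email": "user@example.com",
#       "fcb_id": "1234567890",
#   })
#   serializer.is_valid(raise_exception=True)
#   user, = serializer.save()  # note: save() returns a 1-tuple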
| {
"content_hash": "5dff7e33a9eac480e4147b2385db638f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 101,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.615424430641822,
"repo_name": "jsmesami/naovoce",
"id": "7e4723b46ebae7efc590abec615e63df0bf2c3c3",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/naovoce/api/v1/token/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "API Blueprint",
"bytes": "18118"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Python",
"bytes": "170165"
}
],
"symlink_target": ""
} |
import bisect
import re
import token
import tokenize
class ChangeCollector(object):
def __init__(self, text):
self.text = text
self.changes = []
def add_change(self, start, end, new_text=None):
if new_text is None:
new_text = self.text[start:end]
self.changes.append((start, end, new_text))
def get_changed(self):
if not self.changes:
return None
def compare_changes(change1, change2):
return cmp(change1[:2], change2[:2])
self.changes.sort(compare_changes)
pieces = []
last_changed = 0
for change in self.changes:
start, end, text = change
pieces.append(self.text[last_changed:start] + text)
last_changed = end
if last_changed < len(self.text):
pieces.append(self.text[last_changed:])
result = ''.join(pieces)
if result != self.text:
return result
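# Illustrative sketch (not from the original source):
#   collector = ChangeCollector('hello world')
#   collector.add_change(0, 5, 'goodbye')
#   collector.get_changed()   # -> 'goodbye world'
# Changes are applied in order of their (start, end) offsets.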
class SourceLinesAdapter(object):
"""Adapts source to Lines interface
Note: The creation of this class is expensive.
"""
def __init__(self, source_code):
self.code = source_code
self.starts = None
self._initialize_line_starts()
def _initialize_line_starts(self):
self.starts = []
self.starts.append(0)
try:
i = 0
while True:
i = self.code.index('\n', i) + 1
self.starts.append(i)
except ValueError:
pass
self.starts.append(len(self.code) + 1)
def get_line(self, lineno):
return self.code[self.starts[lineno - 1]:
self.starts[lineno] - 1]
def length(self):
return len(self.starts) - 1
def get_line_number(self, offset):
return bisect.bisect(self.starts, offset)
def get_line_start(self, lineno):
return self.starts[lineno - 1]
def get_line_end(self, lineno):
return self.starts[lineno] - 1
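# Illustrative sketch (not from the original source):
#   lines = SourceLinesAdapter('def f():\n    pass\n')
#   lines.get_line(1)          # -> 'def f():'
#   lines.get_line_number(12)  # -> 2 (offset 12 falls inside line 2)
#   lines.get_line_start(2)    # -> 9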
class ArrayLinesAdapter(object):
def __init__(self, lines):
self.lines = lines
def get_line(self, line_number):
return self.lines[line_number - 1]
def length(self):
return len(self.lines)
class LinesToReadline(object):
def __init__(self, lines, start):
self.lines = lines
self.current = start
def readline(self):
if self.current <= self.lines.length():
self.current += 1
return self.lines.get_line(self.current - 1) + '\n'
return ''
def __call__(self):
return self.readline()
class _CustomGenerator(object):
def __init__(self, lines):
self.lines = lines
self.in_string = ''
self.open_count = 0
self.continuation = False
def __call__(self):
size = self.lines.length()
result = []
i = 1
while i <= size:
while i <= size and not self.lines.get_line(i).strip():
i += 1
if i <= size:
start = i
while True:
line = self.lines.get_line(i)
self._analyze_line(line)
if not (self.continuation or self.open_count or
self.in_string) or i == size:
break
i += 1
result.append((start, i))
i += 1
return result
_main_chars = re.compile(r'[\'|"|#|\\|\[|\]|\{|\}|\(|\)]')
def _analyze_line(self, line):
char = None
for match in self._main_chars.finditer(line):
char = match.group()
i = match.start()
if char in '\'"':
if not self.in_string:
self.in_string = char
if char * 3 == line[i:i + 3]:
self.in_string = char * 3
elif self.in_string == line[i:i + len(self.in_string)] and \
not (i > 0 and line[i - 1] == '\\' and
not (i > 1 and line[i - 2] == '\\')):
self.in_string = ''
if self.in_string:
continue
if char == '#':
break
if char in '([{':
self.open_count += 1
elif char in ')]}':
self.open_count -= 1
if line and char != '#' and line.endswith('\\'):
self.continuation = True
else:
self.continuation = False
def custom_generator(lines):
return _CustomGenerator(lines)()
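# Illustrative sketch (not from the original source): logical-line regions span
# statements that continue across physical lines, and blank lines are skipped:
#   lines = ArrayLinesAdapter(['x = (1 +', '     2)', '', 'y = 3'])
#   custom_generator(lines)   # -> [(1, 2), (4, 4)]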
class LogicalLineFinder(object):
def __init__(self, lines):
self.lines = lines
def logical_line_in(self, line_number):
indents = count_line_indents(self.lines.get_line(line_number))
tries = 0
while True:
block_start = get_block_start(self.lines, line_number, indents)
try:
return self._block_logical_line(block_start, line_number)
except IndentationError, e:
tries += 1
if tries == 5:
raise e
lineno = e.lineno + block_start - 1
indents = count_line_indents(self.lines.get_line(lineno))
def generate_starts(self, start_line=1, end_line=None):
for start, end in self.generate_regions(start_line, end_line):
yield start
def generate_regions(self, start_line=1, end_line=None):
# XXX: `block_start` should be at a better position!
block_start = 1
readline = LinesToReadline(self.lines, block_start)
try:
for start, end in self._logical_lines(readline):
real_start = start + block_start - 1
real_start = self._first_non_blank(real_start)
if end_line is not None and real_start >= end_line:
break
real_end = end + block_start - 1
if real_start >= start_line:
yield (real_start, real_end)
except tokenize.TokenError:
pass
def _block_logical_line(self, block_start, line_number):
readline = LinesToReadline(self.lines, block_start)
shifted = line_number - block_start + 1
region = self._calculate_logical(readline, shifted)
start = self._first_non_blank(region[0] + block_start - 1)
if region[1] is None:
end = self.lines.length()
else:
end = region[1] + block_start - 1
return start, end
def _calculate_logical(self, readline, line_number):
last_end = 1
try:
for start, end in self._logical_lines(readline):
if line_number <= end:
return (start, end)
last_end = end + 1
except tokenize.TokenError, e:
current = e.args[1][0]
return (last_end, max(last_end, current - 1))
return (last_end, None)
def _logical_lines(self, readline):
last_end = 1
for current_token in tokenize.generate_tokens(readline):
current = current_token[2][0]
if current_token[0] == token.NEWLINE:
yield (last_end, current)
last_end = current + 1
def _first_non_blank(self, line_number):
current = line_number
while current < self.lines.length():
line = self.lines.get_line(current).strip()
if line and not line.startswith('#'):
return current
current += 1
return current
def tokenizer_generator(lines):
return LogicalLineFinder(lines).generate_regions()
class CachingLogicalLineFinder(object):
def __init__(self, lines, generate=custom_generator):
self.lines = lines
self._generate = generate
_starts = None
@property
def starts(self):
if self._starts is None:
self._init_logicals()
return self._starts
_ends = None
@property
def ends(self):
if self._ends is None:
self._init_logicals()
return self._ends
def _init_logicals(self):
"""Should initialize _starts and _ends attributes"""
size = self.lines.length() + 1
self._starts = [None] * size
self._ends = [None] * size
for start, end in self._generate(self.lines):
self._starts[start] = True
self._ends[end] = True
def logical_line_in(self, line_number):
start = line_number
while start > 0 and not self.starts[start]:
start -= 1
if start == 0:
try:
start = self.starts.index(True, line_number)
except ValueError:
return (line_number, line_number)
return (start, self.ends.index(True, start))
def generate_starts(self, start_line=1, end_line=None):
if end_line is None:
end_line = self.lines.length()
for index in range(start_line, end_line):
if self.starts[index]:
yield index
def get_block_start(lines, lineno, maximum_indents=80):
"""Approximate block start"""
pattern = get_block_start_patterns()
for i in range(lineno, 0, -1):
match = pattern.search(lines.get_line(i))
if match is not None and \
count_line_indents(lines.get_line(i)) <= maximum_indents:
striped = match.string.lstrip()
# Maybe we're in a list comprehension or generator expression
if i > 1 and striped.startswith('if') or striped.startswith('for'):
bracs = 0
for j in range(i, min(i + 5, lines.length() + 1)):
for c in lines.get_line(j):
if c == '#':
break
if c in '[(':
bracs += 1
if c in ')]':
bracs -= 1
if bracs < 0:
break
if bracs < 0:
break
if bracs < 0:
continue
return i
return 1
_block_start_pattern = None
def get_block_start_patterns():
global _block_start_pattern
if not _block_start_pattern:
pattern = '^\\s*(((def|class|if|elif|except|for|while|with)\\s)|'\
'((try|else|finally|except)\\s*:))'
_block_start_pattern = re.compile(pattern, re.M)
return _block_start_pattern
def count_line_indents(line):
indents = 0
for char in line:
if char == ' ':
indents += 1
elif char == '\t':
indents += 8
else:
return indents
return 0
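# Illustrative sketch (not from the original source): each space counts as one
# indent and each tab as eight:
#   count_line_indents('    pass')  # -> 4
#   count_line_indents('\tpass')    # -> 8
#   count_line_indents('')          # -> 0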
def get_string_pattern():
start = r'(\b[uU]?[rR]?)?'
longstr = r'%s"""(\\.|"(?!"")|\\\n|[^"\\])*"""' % start
shortstr = r'%s"(\\.|[^"\\\n])*"' % start
return '|'.join([longstr, longstr.replace('"', "'"),
shortstr, shortstr.replace('"', "'")])
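# Illustrative matches for the pattern above (not from the original source): it
# covers optional u/r prefixes and both quote styles, e.g. "abc", 'abc',
# r"a\b", u'x' and triple-quoted """doc""" blocks.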
def get_comment_pattern():
return r'#[^\n]*'
| {
"content_hash": "9963645443427242ad3342c0316f03ff",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 79,
"avg_line_length": 30.535714285714285,
"alnum_prop": 0.512280701754386,
"repo_name": "sreejithr/emacs.d",
"id": "5dbfbb10e289a9a5db800f29b47c050c24a37962",
"size": "11115",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyenv/emacs/lib/python2.7/site-packages/rope/base/codeanalyze.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "3742112"
},
{
"name": "Python",
"bytes": "1767564"
},
{
"name": "Shell",
"bytes": "251933"
}
],
"symlink_target": ""
} |
__author__ = 'Avinesh_Kumar'
'''
MIT License
Copyright (c) 2017 Avinesh Kumar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import BaseHTTPServer
import time
import HttpServerImpl
import json
from urlparse import urlparse, parse_qs
hostname="127.0.0.1"
port=80
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
keep_values = {}
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
# print self.path
# print urlparse(self.path).query
query_components = parse_qs(urlparse(self.path).query)
# print query_components
res = HttpServerImpl.process_get(query_components)
# print "response: ",res
self.wfile.write(res)
# support JSON post data only.
def do_POST(self):
content_len = int(self.headers.getheader('content-length'))
post_body = self.rfile.read(content_len)
# print "post body: ",post_body
self.send_response(200)
self.end_headers()
data = json.loads(post_body)
res = HttpServerImpl.process_post(data)
self.wfile.write(res)
if __name__ == '__main__':
server = BaseHTTPServer.HTTPServer
httpserver = server((hostname, port), MyHandler)
print time.asctime(), "Server Starts - %s:%s" % (hostname, port)
try:
httpserver.serve_forever()
except KeyboardInterrupt:
pass
httpserver.server_close()
print time.asctime(), "Server Stops - %s:%s" % (hostname, port)
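# Illustrative usage only (the path and keys are hypothetical; the actual
# handlers live in HttpServerImpl):
#   GET  -> query-string parameters arrive as a dict of lists:
#           curl "http://127.0.0.1/status?name=foo&name=bar"
#           calls process_get({'name': ['foo', 'bar']})
#   POST -> the body must be JSON:
#           curl -d '{"name": "foo"}' http://127.0.0.1/status
#           calls process_post({'name': 'foo'})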
| {
"content_hash": "fa4b13f7d803873e670b93a8ca5ecc2c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 34.532467532467535,
"alnum_prop": 0.6991350131628432,
"repo_name": "avinesh09/SimplePyHttpServer",
"id": "b9f9a372efef3d29cfdd2c58aa5050c20b4650a1",
"size": "2659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimplePyHttpServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5156"
}
],
"symlink_target": ""
} |
import sys
import math
import random
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import pymel.core as PyMEL
kNodeName = "cyclone"
kNodeId = OpenMaya.MTypeId(0x10001) # Must be < 0x80000
class cyclone(OpenMayaMPx.MPxNode):
aCurve = OpenMaya.MObject()
aRandomSeed = OpenMaya.MObject()
aDensity = OpenMaya.MObject()
aRadius = OpenMaya.MObject()
aSpin = OpenMaya.MObject()
aPositions = OpenMaya.MObject()
aRotation = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
def postConstructor(self):
OpenMayaMPx.MPxNode.postConstructor(self)
self.setExistWithoutInConnections(True)
self.setExistWithoutOutConnections(True)
def compute(self, plug, data):
#print "Computing %s" % plug.name()
if plug == cyclone.aPositions or plug == cyclone.aRotation:
# Gather inputs
fnCurve = OpenMaya.MFnNurbsCurve(data.inputValue(cyclone.aCurve).asNurbsCurve());
randomSeed = data.inputValue(cyclone.aRandomSeed).asInt()
density = data.inputValue(cyclone.aDensity).asFloat()
radius = data.inputValue(cyclone.aRadius).asFloat()
spin = data.inputValue(cyclone.aSpin).asAngle().asRadians()
# Number of particles to calculate
N = int(density)
#print "Cyclone density %d" % density
# Prepare output positions array
hPositions = data.outputValue(cyclone.aPositions)
fnPositions = OpenMaya.MFnVectorArrayData();
objPositions = fnPositions.create()
positions = fnPositions.array()
positions.setLength(N)
# Prepare output rotations array
hRotation = data.outputValue(cyclone.aRotation)
fnRotation = OpenMaya.MFnVectorArrayData();
objRotation = fnRotation.create()
rotation = fnRotation.array()
rotation.setLength(N)
oldRandomState = random.getstate()
random.seed(randomSeed)
tMax = fnCurve.numSpans()
for i in range(0,N):
t = random.random()*float(tMax)
pt = OpenMaya.MPoint()
fnCurve.getPointAtParam(t, pt)
tangent = fnCurve.tangent(t).normal()
normal = fnCurve.normal(t).normal()
# Position
q = OpenMaya.MQuaternion(random.random()*2.0*math.pi + spin, tangent)
normal = normal.rotateBy(q)
binormal = tangent ^ normal
pt += normal*(radius*random.random())
positions.set(OpenMaya.MVector(pt.x, pt.y, pt.z), i)
# Rotation
            # See Ryan Trowbridge's blog, which derives euler rotations
            # using Maya's Python binding. It's not entirely obvious:
# http://www.rtrowbridge.com/blog/2009/02/python-api-mtransformationmatrixgetrotation-bug/
mInit = [ tangent.x, tangent.y, tangent.z, 0, \
normal.x, normal.y, normal.z, 0, \
binormal.x, binormal.y, binormal.z, 0, \
0, 0, 0, 1]
mRot = OpenMaya.MMatrix()
OpenMaya.MScriptUtil.createMatrixFromList(mInit, mRot)
mTM = OpenMaya.MTransformationMatrix(mRot)
eulerRot = mTM.rotation().asEulerRotation()
# Angles are in radians
rotation.set(OpenMaya.MVector(eulerRot.x, eulerRot.y, eulerRot.z), i)
hPositions.setMObject(objPositions)
hPositions.setClean()
hRotation.setMObject(objRotation)
hRotation.setClean()
random.setstate(oldRandomState)
else:
return OpenMaya.kUnknownParameter
def nodeCreator():
return OpenMayaMPx.asMPxPtr( cyclone() )
def nodeInitializer():
typedAttr = OpenMaya.MFnTypedAttribute()
numericAttr = OpenMaya.MFnNumericAttribute()
unitAttr = OpenMaya.MFnUnitAttribute()
cyclone.aCurve = typedAttr.create("curve", "cur", OpenMaya.MFnData.kNurbsCurve)
typedAttr.setWritable(True)
typedAttr.setReadable(False)
typedAttr.setDisconnectBehavior(OpenMaya.MFnAttribute.kReset)
cyclone.addAttribute(cyclone.aCurve)
cyclone.aRandomSeed = numericAttr.create("randomSeed", "rs", OpenMaya.MFnNumericData.kInt, 17.0)
numericAttr.setMin(1.0)
cyclone.addAttribute(cyclone.aRandomSeed)
cyclone.aDensity = numericAttr.create("density", "den", OpenMaya.MFnNumericData.kFloat, 10.0)
numericAttr.setMin(0.0)
cyclone.addAttribute(cyclone.aDensity)
cyclone.aRadius = numericAttr.create("radius", "r", OpenMaya.MFnNumericData.kFloat, 5.0)
numericAttr.setMin(0.0)
cyclone.addAttribute(cyclone.aRadius)
cyclone.aSpin = unitAttr.create("spin", "sp", OpenMaya.MFnUnitAttribute.kAngle, 0.0)
cyclone.addAttribute(cyclone.aSpin)
cyclone.aPositions = typedAttr.create("positions", "pos", OpenMaya.MFnData.kVectorArray)
typedAttr.setWritable(False)
typedAttr.setReadable(True)
typedAttr.setDisconnectBehavior(OpenMaya.MFnAttribute.kReset)
typedAttr.setDefault(OpenMaya.MFnVectorArrayData().create())
cyclone.addAttribute(cyclone.aPositions)
cyclone.aRotation = typedAttr.create("rotation", "rot", OpenMaya.MFnData.kVectorArray)
typedAttr.setWritable(False)
typedAttr.setReadable(True)
typedAttr.setDisconnectBehavior(OpenMaya.MFnAttribute.kReset)
typedAttr.setDefault(OpenMaya.MFnVectorArrayData().create())
cyclone.addAttribute(cyclone.aRotation)
cyclone.attributeAffects(cyclone.aCurve, cyclone.aPositions)
cyclone.attributeAffects(cyclone.aRandomSeed, cyclone.aPositions)
cyclone.attributeAffects(cyclone.aDensity, cyclone.aPositions)
cyclone.attributeAffects(cyclone.aRadius, cyclone.aPositions)
cyclone.attributeAffects(cyclone.aSpin, cyclone.aPositions)
cyclone.attributeAffects(cyclone.aCurve, cyclone.aRotation)
cyclone.attributeAffects(cyclone.aRandomSeed, cyclone.aRotation)
cyclone.attributeAffects(cyclone.aDensity, cyclone.aRotation)
cyclone.attributeAffects(cyclone.aSpin, cyclone.aRotation)
| {
"content_hash": "2bb15037da5948ec38e6e8c9c64c5ae2",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 97,
"avg_line_length": 32.81065088757396,
"alnum_prop": 0.745356176735798,
"repo_name": "arharvey/Manifest",
"id": "fad94874300f69e0ac863d80f1f5c4ced1cc49b7",
"size": "5562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripted/manifestPlugin/cyclone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "15159"
},
{
"name": "Python",
"bytes": "18077"
}
],
"symlink_target": ""
} |
"""A simple example on how to use keras model for inference."""
import time
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import tensorflow_datasets as tfds
import effnetv2_model
import preprocessing
import utils
FLAGS = flags.FLAGS
def define_flags():
"""Define all flags for binary run."""
flags.DEFINE_string('mode', 'eval', 'Running mode.')
flags.DEFINE_string('image_path', None, 'Location of test image.')
flags.DEFINE_integer('image_size', None, 'Image size.')
flags.DEFINE_string('model_dir', None, 'Location of the checkpoint to run.')
flags.DEFINE_string('model_name', 'efficientnetv2-s', 'Model name to use.')
flags.DEFINE_string('dataset_cfg', 'Imagenet', 'dataset config name.')
flags.DEFINE_string('hparam_str', '', 'k=v,x=y pairs or yaml file.')
flags.DEFINE_bool('debug', False, 'If true, run in eager for debug.')
flags.DEFINE_string('export_dir', None, 'Export or saved model directory')
flags.DEFINE_string('trace_file', '/tmp/a.trace', 'If set, dump trace file.')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_bool('mixed_precision', False, 'If True, use mixed precision.')
def build_tf2_model():
"""Build the tf2 model."""
tf.config.run_functions_eagerly(FLAGS.debug)
if FLAGS.mixed_precision:
# Use 'mixed_float16' if running on GPUs.
policy = tf.keras.mixed_precision.Policy('mixed_float16')
tf.keras.mixed_precision.set_global_policy(policy)
model = effnetv2_model.get_model(
FLAGS.model_name,
FLAGS.hparam_str,
include_top=True,
weights=FLAGS.model_dir or 'imagenet')
model.summary()
return model
def tf2_eval_dataset():
"""Run TF2 benchmark and inference."""
model = build_tf2_model()
isize = FLAGS.image_size or model.cfg.eval.isize
def preprocess_fn(features):
features['image'] = preprocessing.preprocess_image(
features['image'], isize, is_training=False)
return features
@tf.function
def f(x):
return model(x)
top1_acc = tf.keras.metrics.Accuracy()
pbar = tf.keras.utils.Progbar(None)
data = tfds.load('imagenet2012', split='validation')
ds = data.map(preprocess_fn).batch(FLAGS.batch_size)
for i, batch in enumerate(ds.prefetch(tf.data.experimental.AUTOTUNE)):
logits = f(batch['image'])
top1_acc.update_state(batch['label'], tf.argmax(logits, axis=-1))
pbar.update(i, [('top1', top1_acc.result().numpy())])
print('\n top1= {:.4f}'.format(top1_acc.result().numpy()))
def tf2_benchmark():
"""Run TF2 benchmark and inference."""
model = build_tf2_model()
model_cfg = model.cfg
isize = FLAGS.image_size or model.cfg.eval.isize
if FLAGS.export_dir:
tf.saved_model.save(model, FLAGS.export_dir)
model = tf.saved_model.load(FLAGS.export_dir)
batch_size = FLAGS.batch_size
data_dtype = tf.float16 if FLAGS.mixed_precision else tf.float32
imgs = tf.ones((batch_size, isize, isize, 3), dtype=data_dtype)
if model_cfg.model.data_format == 'channels_first':
imgs = tf.transpose(imgs, [0, 3, 1, 2])
@tf.function
def f(x):
return model(x, training=False)
print('starting warmup.')
for _ in range(10): # warmup runs.
f(imgs)
print('start benchmark.')
start = time.perf_counter()
for _ in range(10):
f(imgs)
end = time.perf_counter()
inference_time = (end - start) / 10
print('Per batch inference time: ', inference_time)
print('FPS: ', batch_size / inference_time)
def tf1_benchmark():
"""Run TF1 inference and benchmark."""
# pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
from tensorflow.python.client import timeline
with tf1.Session() as sess:
model = effnetv2_model.EffNetV2Model(FLAGS.model_name, FLAGS.hparam_str)
batch_size = FLAGS.batch_size
run_options = tf1.RunOptions(trace_level=tf1.RunOptions.FULL_TRACE)
run_metadata = tf1.RunMetadata()
isize = FLAGS.image_size or model.cfg.eval.isize
data_dtype = tf.float16 if FLAGS.mixed_precision else tf.float32
inputs = tf.ones((batch_size, isize, isize, 3), data_dtype)
output = model(inputs, training=False)
sess.run(tf1.global_variables_initializer())
print('starting warmup.')
for _ in range(5):
sess.run(output)
print('starting benchmark.')
start = time.perf_counter()
for _ in range(10):
sess.run(output)
end = time.perf_counter()
inference_time = (end - start) / 10
print('Per batch inference time: ', inference_time)
print('FPS: ', batch_size / inference_time)
if FLAGS.trace_file:
sess.run(output, options=run_options, run_metadata=run_metadata)
with tf.io.gfile.GFile(FLAGS.trace_file, 'w') as f:
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
f.write(trace.generate_chrome_trace_format(show_memory=True))
def tf1_export_ema_ckpt():
"""Restore variables from a given checkpoint."""
with tf1.Session() as sess:
model = effnetv2_model.EffNetV2Model(FLAGS.model_name, FLAGS.hparam_str)
batch_size = FLAGS.batch_size
isize = FLAGS.image_size or model.cfg.eval.isize
inputs = tf.ones((batch_size, isize, isize, 3), tf.float32)
_ = model(inputs, training=False)
sess.run(tf1.global_variables_initializer())
if tf.io.gfile.isdir(FLAGS.model_dir):
ckpt_path = tf1.train.latest_checkpoint(FLAGS.model_dir)
else:
ckpt_path = FLAGS.model_dir
ema = tf1.train.ExponentialMovingAverage(decay=0.0)
ema_vars = utils.get_ema_vars()
var_dict = ema.variables_to_restore(ema_vars)
ema_assign_op = ema.apply(ema_vars)
tf1.train.get_or_create_global_step()
sess.run(tf1.global_variables_initializer())
saver = tf1.train.Saver(var_dict, max_to_keep=1)
# Restore all variables from ckpt.
saver.restore(sess, ckpt_path)
print('export model to {}'.format(FLAGS.export_dir))
sess.run(ema_assign_op)
saver = tf1.train.Saver(max_to_keep=1, save_relative_paths=True)
saver.save(sess, FLAGS.export_dir)
def main(_):
if FLAGS.mode == 'tf1export':
tf1_export_ema_ckpt()
elif FLAGS.mode == 'tf1bm':
tf1_benchmark()
elif FLAGS.mode == 'tf2bm':
tf2_benchmark()
elif FLAGS.mode == 'tf2eval':
tf2_eval_dataset()
else:
raise ValueError(f'Invalid mode {FLAGS.mode}')
if __name__ == '__main__':
logging.set_verbosity(logging.ERROR)
define_flags()
app.run(main)
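# Illustrative invocations (flag values are hypothetical):
#   python infer.py --mode=tf2bm --model_name=efficientnetv2-s --batch_size=16
#   python infer.py --mode=tf2eval --model_dir=/tmp/ckpt
#   python infer.py --mode=tf1export --model_dir=/tmp/ckpt --export_dir=/tmp/ema
# Valid modes are tf1export, tf1bm, tf2bm and tf2eval (see main()).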
| {
"content_hash": "716fd6bb93a3dae35c18d60de57ffdab",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 79,
"avg_line_length": 33.46875,
"alnum_prop": 0.6822284469343293,
"repo_name": "google/automl",
"id": "5ea6e3f68bc583bd14eb07ff4eee081ac78bcabd",
"size": "7108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "efficientnetv2/infer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1782347"
},
{
"name": "Python",
"bytes": "1051435"
},
{
"name": "Shell",
"bytes": "1708"
}
],
"symlink_target": ""
} |
from keystoneclient.auth.identity import v2
from keystoneclient import session
from neutronclient.v2_0 import client
import sys
PUBLIC_NET_NAME="CHINATELECOM"
PUBLIC_SUBNET_NAME="CHINATELECOM"
PUBLIC_SUBNET_CIDR="115.238.155.160/27"
SHARED_SUBNET_CIDR="172.30.248.0/22"
START="115.238.155.162"
END="115.238.155.190"
DNS="1.2.4.8 114.114.114.114"
VLAN_ID="2001"
AUTH_URL="http://l7.0.shzh.ustack.in:35357/v2.0/"
USERNAME="admin"
PASSWORD="PASSWORD"
PROJECT_ID="admin"
def get_neutron_client():
neutroncli = client.Client(username=USERNAME, password=PASSWORD, tenant_name=PROJECT_ID, auth_url=AUTH_URL)
return neutroncli
def check_neutron_external_network(neutroncli):
networks = neutroncli.list_networks()['networks']
for network in networks:
if network['name'] == PUBLIC_NET_NAME:
return True
return False
def create_neutron_external_network(neutroncli):
body = {
"network": {
"name": PUBLIC_NET_NAME,
"provider:network_type": "local",
"router:external": True
}
}
return neutroncli.create_network(body)
def create_neutron_shared_network(neutroncli):
body = {
"network": {
"name": "shared",
"provider:network_type": "vlan",
"provider:segmentation_id": VLAN_ID,
"provider:physical_network": "physnet3",
"shared": True,
"uos:rate_limit": 1000
}
}
return neutroncli.create_network(body)
def check_neutron_shared_network(neutroncli):
networks = neutroncli.list_networks()['networks']
for network in networks:
if network['name'] == "shared":
return True
return False
def get_neutron_network_id(neutroncli, network_name=PUBLIC_NET_NAME):
networks = neutroncli.list_networks()['networks']
for network in networks:
if network['name'] == network_name:
return network['id']
return None
def create_neutron_subnet(neutroncli, network, name, cidr, enable_dhcp=False, allocation_pool=None, dns_nameservers=None):
body = {
"subnet": {
"name": name,
"network_id": get_neutron_network_id(neutroncli, network_name=network),
"enable_dhcp": enable_dhcp,
"ip_version": 4,
"cidr": cidr
}
}
if allocation_pool:
body['subnet']['allocation_pools'] = [allocation_pool]
if dns_nameservers:
body['subnet']['dns_nameservers'] = dns_nameservers
    # Use the passed-in client rather than relying on the __main__ global.
    return neutroncli.create_subnet(body)
def check_neutron_subnet(neutroncli, name):
subnets = neutroncli.list_subnets()['subnets']
for subnet in subnets:
if subnet['name'] == name:
return True
return False
def create_neutron_router(neutroncli, name):
body = {
"router": {
"name": name,
"external_gateway_info":{
"network_id": get_neutron_network_id(neutroncli, network_name="shared")
}
}
}
    return neutroncli.create_router(body)
def check_neutron_router(neutroncli, name):
routers = neutroncli.list_routers()['routers']
for router in routers:
if router['name'] == name:
return True
return False
def check_neutron_port(neutroncli, name):
ports = neutroncli.list_ports()['ports']
for port in ports:
if port['name'] == name:
return True
return False
def create_neutron_port(neutroncli, name):
body = {
"port": {
"name": name,
"network_id": get_neutron_network_id(neutroncli, network_name="shared")
}
}
    return neutroncli.create_port(body)
def get_port_id_by_name(neutroncli, name):
    for port in neutroncli.list_ports()["ports"]:
if port['name'] == name:
return port['id']
return None
def get_router_id_by_name(neutroncli, name):
routers = neutroncli.list_routers()['routers']
for router in routers:
if router['name'] == name:
return router['id']
return None
def add_interface_to_router(neutroncli, router_name, port_name):
body = {
"port_id": get_port_id_by_name(neutroncli, port_name)
}
    return neutroncli.add_interface_router(get_router_id_by_name(neutroncli, router_name), body)
if __name__ == "__main__":
neutron_client = get_neutron_client()
if not check_neutron_external_network(neutron_client):
create_neutron_external_network(neutron_client)
if not check_neutron_subnet(neutron_client, "ext_shadow_subnet"):
create_neutron_subnet(neutron_client, PUBLIC_NET_NAME, "ext_shadow_subnet", "240.1.0.0/16")
if not check_neutron_subnet(neutron_client, PUBLIC_SUBNET_NAME):
allocation_pool = {
"start": START,
"end": END
}
create_neutron_subnet(neutron_client, PUBLIC_NET_NAME, PUBLIC_SUBNET_NAME, PUBLIC_SUBNET_CIDR, allocation_pool=allocation_pool)
if not check_neutron_shared_network(neutron_client):
create_neutron_shared_network(neutron_client)
if not check_neutron_subnet(neutron_client, "shared_subnet"):
DNSS = DNS.split(" ")
create_neutron_subnet(neutron_client, "shared", "shared_subnet", SHARED_SUBNET_CIDR, enable_dhcp=True, dns_nameservers=DNSS)
if not check_neutron_router(neutron_client, "shared_router"):
create_neutron_router(neutron_client, "shared_router")
if not check_neutron_port(neutron_client, "router_port"):
create_neutron_port(neutron_client, "router_port")
    try:
        add_interface_to_router(neutron_client, "shared_router", "router_port")
    except Exception:
        # The interface may already be attached to the router.
        pass
| {
"content_hash": "6cc19cd8f903eb7c2ebf53ac7b1209e1",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 135,
"avg_line_length": 32.60233918128655,
"alnum_prop": 0.6462780269058296,
"repo_name": "zhaozhilong1993/demon",
"id": "c4bb0cfb5c710c4caef084eb7ea1f800e9b97dee",
"size": "5594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/initial/neutron.init.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "480"
},
{
"name": "HTML",
"bytes": "162226"
},
{
"name": "Pascal",
"bytes": "3882"
},
{
"name": "Puppet",
"bytes": "616997"
},
{
"name": "Python",
"bytes": "102605"
},
{
"name": "Ruby",
"bytes": "2126480"
},
{
"name": "Shell",
"bytes": "15608"
}
],
"symlink_target": ""
} |
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column
import astropy.units as u
from astropy.units import Quantity
from gammapy.spectrum.models import LogParabola
from gammapy.spectrum import CrabSpectrum
def define_flux_crab_above_energy(emin=1 * u.TeV, emax=10 * u.TeV):
crab = CrabSpectrum('meyer').model
crabMAGIC = LogParabola(amplitude=3.23e-11 * u.Unit('cm-2 s-1 TeV-1'), reference=1 * u.TeV, alpha=2.47, beta=0.24)
crab_flux_above_1TeV = crabMAGIC.integral(emin=emin, emax=emax)
crab_flux_above_1TeV_model = crab.integral(emin=emin, emax=emax)
return crab_flux_above_1TeV, crab_flux_above_1TeV_model
def load_tables():
#filename_summary = 'ctadc_skymodel_gps_sources_snr_2_summary.ecsv'
#table_summary = Table.read(filename_summary, format='ascii.ecsv')
filename_orig = 'ctadc_skymodel_gps_sources_snr_1.ecsv'
table_orig = Table.read(filename_orig, format='ascii.ecsv')
return table_orig
def select_needed_parameters(table_o):
    crab_flux_above_1TeV, crab_flux_above_1TeV_model = define_flux_crab_above_energy()
crab_flux = crab_flux_above_1TeV.value
q = Quantity(0.000291, unit='arcmin**-1')
flux_1_10_cu = []
distance = []
age = []
ism_density = []
skip_known = []
galactocentric_x = []
galactocentric_y = []
galactocentric_z = []
galactocentric_r = []
size_physical = []
size_degree = []
glat = []
glon = []
flux_q = []
flux_q_cu = []
#for row in table_o:
# fl_cu = ()
# #print(fl_cu)
# flux_1_10_cu.append(fl_cu)
print(table_o.info())
for idx in range(len(table_o)):
if table_o[idx]['skip'] == 1:
#print(row['keep'])
            continue
skip_known.append(table_o[idx]['skip'])
dis = Quantity(table_o[idx]['distance'], 'kpc')
distance.append(table_o[idx]['distance'])
age.append(table_o[idx]['age'])
ism_density.append(table_o[idx]['n0'])
ang_size = Quantity(table_o[idx]['sigma'], 'arcmin')
size_degree.append(ang_size.to('degree').value)
flux_q.append(table_o[idx]['flux_1_10'])
flux_q_cu.append(table_o[idx]['flux_1_10']/crab_flux)
print(table_o[idx]['flux_1_10'], table_o[idx]['age'])
# if (idx <10):
# print(ang_size, ang_size.to('degree'), dis, dis.to('pc'), size_ph, q)
galactocentric_x.append(table_o[idx]['galactocentric_x'])
galactocentric_y.append(table_o[idx]['galactocentric_y'])
galactocentric_z.append(table_o[idx]['galactocentric_z'])
galactocentric_r.append(table_o[idx]['galactocentric_r'])
glon.append(table_o[idx]['glon'])
glat.append(table_o[idx]['glat'])
size_physical.append(table_o[idx]['size_physical'])
print(table_o[idx]['size_physical'], table_o[idx]['flux_1_10'])
    tab = Table()
    tab['distance'] = Column(distance, description='distance', unit='kpc')
    tab['GLAT'] = Column(glat, description='Galactic latitude')
    tab['GLON'] = Column(glon, description='Galactic longitude')
    tab['galactocentric_x'] = Column(galactocentric_x, description='galactocentric_x', unit='kpc')
    tab['galactocentric_y'] = Column(galactocentric_y, description='galactocentric_y', unit='kpc')
    tab['galactocentric_z'] = Column(galactocentric_z, description='galactocentric_z', unit='kpc')
    tab['galactocentric_r'] = Column(galactocentric_r, description='galactocentric_r', unit='kpc')
tab['age'] = Column(age, description='age', unit='kyr')
tab['n0'] = Column(ism_density, description='n0 column density', unit='cm-3')
tab['skip'] = Column(skip_known, description='skip because already known')
tab['size_physical'] = Column(size_physical, description='intrinsic physical size', unit='pc')
tab['sigma'] = Column(size_degree, description='angular size', unit='deg')
#tab['int_flux_above_1TeV_cu'] = Column(flux_1_10_cu, description='Integral flux between 1 and 10 TeV in crab units')
tab['flux_1_10'] = Column(flux_q, description='Integral flux between 1 and 10 TeV', unit='cm-2 s-1')
tab['int_flux_above_1TeV_cu'] = Column(flux_q_cu, description='Integral flux between 1 and 10 TeV in crab units')
print('----------------------------')
print(tab)
return tab
if __name__ == '__main__':
table_o = load_tables()
print(table_o.info())
tab = select_needed_parameters(table_o)
#print(tab.info())
# tab.rename_column('glat','GLAT')
# tab.rename_column('glon', 'GLON')
# tab.rename_column('size_ang','sigma')
#print(table_s['sigma'], table_s['size'])
#print(table_s.info())
filename = 'ctadc_skymodel_gps_sources_snr_1_summary_all.ecsv'
print('Writing {}'.format(filename))
tab.write(filename, format='ascii.ecsv', overwrite=True)
| {
"content_hash": "4c8f1e704c8df6cd3a04c0469b8da0d0",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 121,
"avg_line_length": 39.096774193548384,
"alnum_prop": 0.6363448844884488,
"repo_name": "gammasky/cta-dc",
"id": "638a83740a18ac28840c4401d923b24d2f4b705c",
"size": "4848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sky_model/snrs/new_summary.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "603813"
},
{
"name": "Python",
"bytes": "177997"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from noball_django.settings import DEBUG, STATIC_ROOT
admin.autodiscover()
urlpatterns = patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve'
,{'document_root': STATIC_ROOT}),
url(r'^mlb/', include('mlb.urls')),
)
| {
"content_hash": "994d4f0882fd45aee55c2340cc2bf671",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 61,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.6936936936936937,
"repo_name": "Shinichi-Nakagawa/no-ball",
"id": "718a17437bf71b0179945e75ea2d34d89327e4dd",
"size": "333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noball_django/noball_django/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "130679"
},
{
"name": "JavaScript",
"bytes": "40977"
},
{
"name": "Python",
"bytes": "76458"
},
{
"name": "Ruby",
"bytes": "47030"
}
],
"symlink_target": ""
} |
"""Unit tests around ``hospital.assertions``."""
| {
"content_hash": "80bee783e81aa4c280836426024349b3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 49,
"alnum_prop": 0.673469387755102,
"repo_name": "python-hospital/hospital",
"id": "ae430c5dc7079901e721b035758b8629ab4424c9",
"size": "73",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/assertions/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "51950"
}
],
"symlink_target": ""
} |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException, NoSuchWindowException
def init():
driver = webdriver.Chrome()
driver.get("http://www.instagram.com")
return driver
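# Hedged usage sketch (the teardown is illustrative, not part of this module):
#
#   driver = init()   # opens a Chrome session on instagram.com
#   ...               # interact with the page via the imported selenium helpers
#   driver.quit()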
| {
"content_hash": "344778331f400cc21d07f4aabace39d2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 116,
"avg_line_length": 41.714285714285715,
"alnum_prop": 0.8321917808219178,
"repo_name": "jshaker/igbot",
"id": "497227f0970807a8a9fcc5b80e9c3b01c52aa2fc",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4145"
}
],
"symlink_target": ""
} |
import pytest
from couchbase.exceptions import CouchbaseException
from couchbase.options import (AnalyticsOptions,
GetOptions,
InsertOptions,
QueryOptions,
RemoveOptions,
ReplaceOptions,
SearchOptions,
UpsertOptions,
ViewOptions)
from couchbase.search import TermQuery
from couchbase.tracing import CouchbaseSpan, CouchbaseTracer
from ._test_utils import KVPair, TestEnvironment
class TestSpan(CouchbaseSpan):
def __init__(self, name):
super().__init__(None)
self.finished_ = False
self.name_ = name
self.attributes_ = dict()
self.parent_ = None
self._span = None
def set_attribute(self, key, value):
self.attributes_[key] = value
def set_parent(self, parent):
self.parent_ = parent
def get_parent(self):
return self.parent_
def finish(self):
self.finished_ = True
def is_finished(self):
return self.finished_
def get_attributes(self):
return self.attributes_
def get_name(self):
return self.name_
class TestTracer(CouchbaseTracer):
def __init__(self):
self.spans_ = list()
def start_span(self, name, parent=None, **kwargs):
span = TestSpan(name)
span.set_parent(parent)
self.spans_.append(span)
return span
def reset(self):
self.spans_ = list()
def spans(self):
return self.spans_
class TracerTests:
TRACER = TestTracer()
@pytest.fixture(scope="class", name="cb_env")
def couchbase_test_environment(self, couchbase_config, request):
cb_env = TestEnvironment.get_environment(__name__,
couchbase_config,
manage_buckets=True,
tracer=self.TRACER)
cb_env.try_n_times(3, 5, cb_env.load_data)
self.TRACER.reset()
yield cb_env
cb_env.try_n_times(3, 5, cb_env.purge_data)
self.TRACER.reset()
@pytest.fixture(name="default_kvp")
def default_key_and_value(self, cb_env) -> KVPair:
key, value = cb_env.get_default_key_value()
yield KVPair(key, value)
@pytest.fixture(name="skip_if_mock")
def skip_if_mock(self, cb_env):
if cb_env.is_mock_server:
pytest.skip("Test needs real server")
@pytest.mark.parametrize("op, span_name, opts, value", [
("get", "cb.get", GetOptions, None),
("upsert", "cb.upsert", UpsertOptions, {"some": "thing"}),
("insert", "cb.insert", InsertOptions, {"some": "thing"}),
("replace", "cb.replace", ReplaceOptions, {"some": "thing"}),
("remove", "cb.remove", RemoveOptions, None),
])
@pytest.mark.parametrize("with_parent", [True, False])
def test_kv(self, cb_env, default_kvp, op, span_name, opts, value, with_parent):
# @TODO(): Pending CXXCBC-211 as recent changes do not allow for the parent_span to be passed in as an option
if with_parent is True and op in ['upsert', 'insert', 'replace', 'remove']:
pytest.skip("Pending CXXCBC-211")
# have to reset between parameterized runs
self.TRACER.reset()
cb = cb_env.collection
parent = None
if with_parent:
parent = self.TRACER.start_span(f'parent_{op}')
key = default_kvp.key
options = opts(span=parent)
operation = getattr(cb, op)
try:
if value:
operation(key, value, options)
else:
operation(key, options)
except CouchbaseException:
            pass  # insert may fail if the key already exists; irrelevant for the tracing checks
spans = self.TRACER.spans()
if with_parent:
assert len(spans) == 2
span = spans.pop(0)
assert span == parent
assert span.is_finished() is False
assert len(spans) == 1
assert spans[0].is_finished() is True
assert spans[0].get_name() == span_name
assert spans[0].get_parent() == parent
@pytest.mark.parametrize("http_op, http_span_name, http_opts, query, extra", [
("query", "cb.query", QueryOptions, "Select 1", None),
("analytics_query", "cb.analytics", AnalyticsOptions, "Select 1", None),
("search_query", "cb.search", SearchOptions, "whatever", TermQuery("foo")),
("view_query", "cb.views", ViewOptions, "whatever", "whatever_else")
])
@pytest.mark.parametrize("http_with_parent", [True, False])
@pytest.mark.usefixtures("skip_if_mock")
def test_http(self, cb_env, http_op, http_span_name, http_opts, query, extra, http_with_parent):
self.TRACER.reset()
cb = cb_env.bucket if http_op == "view_query" else cb_env.cluster
parent = None
if http_with_parent:
parent = self.TRACER.start_span(f'parent_{http_op}')
options = http_opts(span=parent)
operation = getattr(cb, http_op)
result = None
try:
if extra:
result = operation(query, extra, options).rows()
else:
result = operation(query, options).rows()
for r in result:
assert r is not None
except CouchbaseException:
pass
spans = self.TRACER.spans()
if http_with_parent:
assert len(spans) == 2
span = spans.pop(0)
assert span == parent
assert span.is_finished() is False
assert len(spans) == 1
assert spans[0].is_finished() is True
assert spans[0].get_name() == http_span_name
assert spans[0].get_parent() == parent
| {
"content_hash": "82e3db802b6f92b867984440a0f05e11",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 117,
"avg_line_length": 35.17261904761905,
"alnum_prop": 0.5564393298358437,
"repo_name": "couchbase/couchbase-python-client",
"id": "29276bf73d470eb8f743069a64342c8a713509c7",
"size": "6528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchbase/tests/tracing_t.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "779634"
},
{
"name": "CMake",
"bytes": "5320"
},
{
"name": "Python",
"bytes": "2787486"
}
],
"symlink_target": ""
} |
from setuptools import setup
__version__ = 'unknown'
with open('po_localization/version.py') as version_file:
exec(version_file.read())
with open('README.rst') as readme_file:
long_description = readme_file.read()
setup(
name='po_localization',
packages=[
'po_localization',
'po_localization.management',
'po_localization.management.commands',
'po_localization.tests',
'po_localization.tests.test_app',
'po_localization.tests.test_project'],
package_data={
'po_localization.tests': ['*.html', '*.po'],
'po_localization.tests.test_app': ['templates/*.html'],
'po_localization.tests.test_project': ['locale/fr/LC_MESSAGES/*.po'],
},
version=__version__,
description='Localize Django applications without compiling .po files',
long_description=long_description,
author='Kevin Michel',
author_email='[email protected]',
url='https://github.com/kmichel/po-localization',
download_url='https://github.com/kmichel/po-localization/archive/v{}.tar.gz'.format(__version__),
keywords=['django', 'localization'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Localization',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=['django>=1.6'],
)
| {
"content_hash": "9a74441959c275d009075ee76507285f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 101,
"avg_line_length": 38.36,
"alnum_prop": 0.6225234619395204,
"repo_name": "kmichel/po-localization",
"id": "e90f109f4c1bad147947e3749403b96a315fff95",
"size": "1934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91595"
}
],
"symlink_target": ""
} |
print("Hallo Welt")
| {
"content_hash": "d2fb080452fdac2cfc84d170ef8bc20e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 19,
"avg_line_length": 20,
"alnum_prop": 0.7,
"repo_name": "kantel/python-schulung",
"id": "62fd63ba6e10b372367a6277964956bcb394b78b",
"size": "20",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/hallowelt/hallowelttrivia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66215"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import chrome_remote_control
def Main(args):
options = chrome_remote_control.BrowserOptions()
parser = options.CreateParser('rendering_microbenchmark_test.py <sitelist>')
# TODO(nduca): Add test specific options here, if any.
options, args = parser.parse_args(args)
if len(args) != 1:
parser.print_usage()
return 255
urls = []
with open(args[0], 'r') as f:
for url in f.readlines():
url = url.strip()
if not re.match('(.+)://', url):
url = 'http://%s' % url
urls.append(url)
options.extra_browser_args.append('--enable-gpu-benchmarking')
browser_to_create = chrome_remote_control.FindBrowser(options)
if not browser_to_create:
sys.stderr.write('No browser found! Supported types: %s' %
chrome_remote_control.GetAllAvailableBrowserTypes(options))
return 255
with browser_to_create.Create() as b:
with b.ConnectToNthTab(0) as tab:
# Check browser for benchmark API. Can only be done on non-chrome URLs.
tab.page.Navigate('http://www.google.com')
      time.sleep(2)
tab.WaitForDocumentReadyStateToBeComplete()
if tab.runtime.Evaluate('window.chrome.gpuBenchmarking === undefined'):
print 'Browser does not support gpu benchmarks API.'
return 255
if tab.runtime.Evaluate(
'window.chrome.gpuBenchmarking.runRenderingBenchmarks === undefined'):
print 'Browser does not support rendering benchmarks API.'
return 255
# Run the test. :)
first_line = []
def DumpResults(url, results):
if len(first_line) == 0:
cols = ['url']
for r in results:
cols.append(r['benchmark'])
print ','.join(cols)
first_line.append(0)
cols = [url]
for r in results:
cols.append(str(r['result']))
print ','.join(cols)
for u in urls:
tab.page.Navigate(u)
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
results = tab.runtime.Evaluate(
'window.chrome.gpuBenchmarking.runRenderingBenchmarks();')
        DumpResults(u, results)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| {
"content_hash": "e232f9e115086ef80f5452f05f91b25c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 31.930555555555557,
"alnum_prop": 0.6324488908220965,
"repo_name": "junmin-zhu/chromium-rivertrail",
"id": "4dd26ed42f5ff0ae55b7ae62f3b105c04d1e9167",
"size": "2487",
"binary": false,
"copies": "1",
"ref": "refs/heads/v8-binding",
"path": "tools/chrome_remote_control/examples/rendering_microbenchmark_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75806807"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "145161929"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "1546515"
},
{
"name": "JavaScript",
"bytes": "18675242"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "6981387"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "926245"
},
{
"name": "Python",
"bytes": "8088373"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3239"
},
{
"name": "Shell",
"bytes": "1513486"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
} |
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic.base import TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth import authenticate, login, logout
from rest_framework import generics, permissions, status, views
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from .serializers import ExpertSerializer, ProfileSerializer, ExpertRegistrationSerializer, \
UserSerializer, StateSerializer
from .models import Profile, Expert, State, Page
from mongoengine import connect
from boilerplate.settings.dev import DB_NAMES
class ArtistDetail(views.APIView):
"""Create a new Page with the given title"""
def post(self, request, format=None):
title = request.data.get('title', None)
connect(DB_NAMES['test'])
Page(title=title).save()
return Response({'title': title}, status=status.HTTP_200_OK)
class ExpertListView(generics.ListAPIView):
"""
View all experts on the roster.
* Requires authentication
* Requires admin permission
"""
# TODO: use admin authentication
permission_classes = (permissions.IsAuthenticated,)
queryset = Expert.experts.all()
serializer_class = ExpertSerializer
class ExpertCreateView(generics.CreateAPIView):
"""
Register a new expert (and user account)
"""
queryset = Expert.experts.all()
serializer_class = ExpertRegistrationSerializer
class ExpertLoginView(views.APIView):
"""
Sign in an expert
"""
def post(self, request, format=None):
email = request.data.get('email', None)
password = request.data.get('password', None)
user = authenticate(username=email, password=password)
if user is not None:
if user.is_active:
login(request, user)
expert = Expert.experts.get(user__pk=user.pk)
serializedExpert = ExpertSerializer(expert)
token, created = Token.objects.get_or_create(user=user)
responseData = serializedExpert.data
responseData['token'] = token.key
return Response(responseData)
else:
return Response({
'status': 'Unauthorized',
'message': 'This authentication has been disabled.'
}, status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({}, status=status.HTTP_401_UNAUTHORIZED)
class ExpertLogoutView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request, format=None):
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT)
class ExpertActivateView(generics.CreateAPIView):
#TODO
queryset = Expert.experts.all()
serializer_class = ExpertRegistrationSerializer
class ExpertRetrieveView(generics.RetrieveAPIView):
permission_classes = (permissions.IsAdminUser, )
queryset = Expert.experts.all()
serializer_class = ExpertSerializer
class ExpertOwnView(views.APIView):
permission_classes = (permissions.IsAuthenticated, )
queryset = Expert.experts.all()
serializer_class = ExpertSerializer
def get(self, request):
user_data = UserSerializer(request.user)
profile_data = ProfileSerializer(request.user.expert.profile)
state_data = StateSerializer(request.user.expert.state)
return Response({
'user': user_data.data,
'profile': profile_data.data,
'state': state_data.data}
)
def patch(self, request):
        # TODO: doesn't work, wrong lookup_field
updated_profile = ProfileSerializer(request.user.expert.profile, data=request.data.get('profile'))
        updated_user = UserSerializer(request.user, data=request.data)
updated_state = StateSerializer(request.user.expert.state, data=request.data)
updated_profile.is_valid(raise_exception=True)
updated_state.is_valid(raise_exception=True)
updated_user.is_valid(raise_exception=True)
updated_user.save()
updated_state.save()
updated_profile.save()
response = ExpertSerializer(request.user.expert)
return Response({
'expert': response.data
}, status=status.HTTP_200_OK)
class ProfileListView(generics.ListAPIView):
permission_classes = (permissions.IsAdminUser, )
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class ProfileRetrieveUpdateView(generics.RetrieveUpdateAPIView):
permission_classes = (permissions.IsAdminUser, )
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
    def patch(self, request):
        updated = ProfileSerializer(request.user.expert.profile, data=request.data)
        updated.is_valid(raise_exception=True)
        updated.save()
        return Response(updated.data, status=status.HTTP_200_OK)
class StateRetrieveUpdateView(generics.RetrieveUpdateAPIView):
permission_classes = (permissions.IsAdminUser, )
queryset = State.objects.all()
serializer_class = StateSerializer
    def patch(self, request):
        updated = StateSerializer(request.user.expert.state, data=request.data)
        updated.is_valid(raise_exception=True)
        updated.save()
        return Response(updated.data, status=status.HTTP_200_OK)
class StateListView(generics.ListAPIView):
permission_classes = (permissions.IsAdminUser, )
queryset = State.objects.all()
serializer_class = StateSerializer
class IndexView(TemplateView):
template_name = 'index.html'
@method_decorator(ensure_csrf_cookie)
def dispatch(self, *args, **kwargs):
return super(IndexView, self).dispatch(*args, **kwargs)
class WebMasterView(TemplateView):
template_name = 'google4c82de08f55a8973.html'
#
# def dispatch(self, *args, **kwargs):
# return super(WebMasterView, self).dispatch(*args, **kwargs)
| {
"content_hash": "c6f00b1895c6600c7680341cdcd78607",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 106,
"avg_line_length": 30.98936170212766,
"alnum_prop": 0.6848609680741503,
"repo_name": "ratnim/JRR-Prototyp1",
"id": "f11e4bb9bfe68c8346a5945f73e8cad60f488c45",
"size": "5826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1303"
},
{
"name": "Python",
"bytes": "50884"
}
],
"symlink_target": ""
} |
import numpy as np
import networkx as nx
import random
from tqdm import tqdm
from itertools import product, combinations, count
random.seed(12345)
DEBUG = False
def change_sign_to_distance(g, mapping={-1: 1, 1: -1}):
for i, j in g.edges_iter():
g[i][j]['weight'] = mapping[g[i][j]['sign']]
return g
def agglomerative(g, threshold=0.0, return_dict=True, change_sign=False):
"""
return_dict: return dict if True, otherwise return label array
"""
assert isinstance(g, nx.Graph)
if g.number_of_nodes() == 0:
raise ValueError('empty graph')
if change_sign:
g = change_sign_to_distance(g.copy())
clus = {i: {n}
for i, n in enumerate(g.nodes_iter())}
    # cache the distance sum of cluster pairs
    # so there is no need to recompute them
clus_dist_cache = nx.Graph()
# at each iteration, we only need to update the distance sum
# of pairs from clus_pairs_to_consider
# initially, we consider all pairs
clus_pairs_to_consider = combinations(clus.keys(), 2)
for _ in tqdm(count()):
for c1, c2 in clus_pairs_to_consider:
cross_edges = [(n1, n2)
for n1, n2 in product(clus[c1], clus[c2])
if n1 in g.adj and n2 in g.adj[n1]]
if cross_edges:
clus_dist_cache.add_edge(
c1, c2,
weight=sum(g[n1][n2]['weight'] for n1, n2 in cross_edges) / len(cross_edges))
if clus_dist_cache.number_of_edges() > 0: # might got clusters to merge
new_clus = {}
# getting cluster pair with mimimum dist_sum
min_dist_pair = min(clus_dist_cache.edges(),
key=lambda e: clus_dist_cache[e[0]][e[1]]['weight'])
min_dist_sum = clus_dist_cache[min_dist_pair[0]][min_dist_pair[1]]['weight']
if min_dist_sum < threshold: # merge
(c1, c2) = min_dist_pair
if DEBUG:
print('merging {} and {}'.format(c1, c2))
new_c, rm_c = sorted([c1, c2])
new_clus[new_c] = clus[c1] | clus[c2]
                for c, nodes in clus.items():  # copy the rest
if c not in {c1, c2}:
new_clus[c] = clus[c]
clus = new_clus
clus_pairs_to_consider = [(new_c, c) for c in new_clus if c != new_c]
# tidy the cache
clus_dist_cache.remove_node(rm_c)
else:
if DEBUG:
print("no more clusters to merge")
break
else:
if DEBUG:
print('didn\'t find mergable cluster pair')
break
assert g.number_of_nodes() == sum(map(len, clus.values()))
if return_dict:
return renumber_clus_dict(clus)
else:
return clus_dict_to_array(g, clus)
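# Minimal usage sketch (hedged; the toy graph is illustrative and assumes the
# networkx 1.x API that nodes_iter()/edges_iter() above imply):
#
#   g = nx.Graph()
#   g.add_edge(0, 1, sign=1)    # positive edge: 0 and 1 attract
#   g.add_edge(1, 2, sign=-1)   # negative edge: 1 and 2 repel
#   clus = agglomerative(g, change_sign=True)
#   # expect 0 and 1 merged into one cluster, with 2 left on its own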
def renumber_clus_dict(clus):
new_clus = {}
for i, (_, ns) in enumerate(clus.items()):
new_clus[i] = ns
return new_clus
def clus_dict_to_array(g, clus):
labels = np.zeros(g.number_of_nodes())
for i, (_, nodes) in enumerate(clus.items()):
for n in nodes:
labels[n] = i
return labels
def sampling_wrapper(g, cluster_func, sample_size=None, samples=None, return_dict=True, **kwargs):
assert isinstance(g, nx.Graph)
g = change_sign_to_distance(g.copy())
# take samples
if samples is None:
assert sample_size > 0
samples = random.sample(g.nodes(), sample_size)
else:
sample_size = len(samples)
remaining_nodes = set(g.nodes()) - set(samples)
# if DEBUG:
print('sample_size {}'.format(sample_size))
# partition the samples using `cluster_func`
C = cluster_func(g.subgraph(samples), return_dict=True, change_sign=False,
**kwargs)
C[-1] = set()
assert sum(map(len, C.values())) == sample_size
# if DEBUG:
print('partition on the samples produces {} clusters'.format(len(C)))
print("remainign nodes to assign clusters {}".format(len(remaining_nodes)))
# assign remaining nodes to the clusters independently
for n in tqdm(remaining_nodes):
# if DEBUG:
# print('considering n {}'.format(n))
cost_by_clus = {}
connectable_to = {}
for c, cnodes in C.items():
if c == -1:
continue
cost_by_clus[c] = sum(g[n][cn]['weight']
for cn in cnodes
if g.has_edge(n, cn))
neg_weight_sum = sum(g[n][cn]['weight']
for cn in cnodes
if (g.has_edge(n, cn) and
g[n][cn]['weight'] < 0))
connectable_to[c] = (neg_weight_sum < 0)
total_cost_by_clus = sum(cost_by_clus.values())
min_cost = - total_cost_by_clus # singleton case
cand_clus = -1
if DEBUG:
print('min_cost {}'.format(min_cost))
for c, cnodes in C.items():
if c == -1:
continue
if connectable_to[c]:
cost = (2 * cost_by_clus[c] - total_cost_by_clus)
if DEBUG:
print('c {}'.format(c))
print('cost {}'.format(cost))
if cost < min_cost:
min_cost = cost
cand_clus = c
if DEBUG:
            print('assigning {} to {}'.format(n, cand_clus))
print('with {}'.format(C[cand_clus]))
C[cand_clus].add(n)
# remain_nodes_clus[n] = cand_clus
# print('remainig node clusters')
# print(remain_nodes_clus)
# for n, c in remain_nodes_clus.items():
# if c != -1:
# C[c].add(n)
# singleton_nodes = list(filter(lambda n: remain_nodes_clus[n] == -1,
# remain_nodes_clus))
singleton_nodes = C[-1]
print('singleton_nodes ({})'.format(len(singleton_nodes)))
# print(singleton_nodes)
if singleton_nodes:
C1 = cluster_func(g.subgraph(singleton_nodes), return_dict=True, **kwargs)
print(C1)
del C[-1]
# renumbering
for c, nodes in C1.items():
C[len(C) + c] = nodes
if return_dict:
return renumber_clus_dict(C)
else:
return clus_dict_to_array(g, C)
| {
"content_hash": "35e7ca708c79feda0f61921697d59174",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 98,
"avg_line_length": 31.41826923076923,
"alnum_prop": 0.5121652639632747,
"repo_name": "xiaohan2012/snpp",
"id": "1cd2e10fd13d4d4e16f84f94da97b62f6394f670",
"size": "6535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snpp/cores/correlation_clustering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "508667"
},
{
"name": "Makefile",
"bytes": "62"
},
{
"name": "Python",
"bytes": "111757"
}
],
"symlink_target": ""
} |
from setuptools import setup
import os
long_description = open("README.rst").read()
install_requires = ['numpy>=1.7.1',
'quantities>=0.9.0']
if os.environ.get('TRAVIS') == 'true' and \
os.environ.get('TRAVIS_PYTHON_VERSION').startswith('2.6'):
install_requires.append('unittest2>=0.5.1')
setup(
name = "neo",
version = '0.4.1',
packages = ['neo', 'neo.core', 'neo.io', 'neo.test', 'neo.test.iotest'],
install_requires=install_requires,
author = "Neo authors and contributors",
author_email = "sgarcia at olfac.univ-lyon1.fr",
description = "Neo is a package for representing electrophysiology data in Python, together with support for reading a wide range of neurophysiology file formats",
long_description = long_description,
license = "BSD-3-Clause",
url='http://neuralensemble.org/neo',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering']
)
| {
"content_hash": "9d9b06756d615ab1c3d2bb8492e5573d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 167,
"avg_line_length": 40.2972972972973,
"alnum_prop": 0.6237424547283702,
"repo_name": "guangxingli/python-neo",
"id": "7f0259a0f47300230bf2120b1bddc2d4e2007c95",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1369197"
}
],
"symlink_target": ""
} |
import re
from urllib.parse import urljoin
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
COOKIE_PARAMS = (
"devicetype=desktop&"
"preferred-player-odm=hlslink&"
"preferred-player-live=hlslink"
)
_id_re = re.compile(r"/(?:program|direkte|serie/[^/]+)/([^/]+)")
_url_re = re.compile(r"https?://(tv|radio).nrk.no/")
_api_baseurl_re = re.compile(r'''apiBaseUrl:\s*["'](?P<baseurl>[^"']+)["']''')
_schema = validate.Schema(
validate.transform(_api_baseurl_re.search),
validate.any(
None,
validate.all(
validate.get("baseurl"),
validate.url(
scheme="http"
)
)
)
)
_mediaelement_schema = validate.Schema({
"mediaUrl": validate.url(
scheme="http",
path=validate.endswith(".m3u8")
)
})
class NRK(Plugin):
    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)
def _get_streams(self):
# Get the stream type from the url (tv/radio).
stream_type = _url_re.match(self.url).group(1).upper()
cookie = {
"NRK_PLAYER_SETTINGS_{0}".format(stream_type): COOKIE_PARAMS
}
# Construct API URL for this program.
baseurl = self.session.http.get(self.url, cookies=cookie, schema=_schema)
program_id = _id_re.search(self.url).group(1)
# Extract media URL.
json_url = urljoin(baseurl, "mediaelement/{0}".format(program_id))
res = self.session.http.get(json_url, cookies=cookie)
media_element = self.session.http.json(res, schema=_mediaelement_schema)
media_url = media_element["mediaUrl"]
return HLSStream.parse_variant_playlist(self.session, media_url)
__plugin__ = NRK
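# Streamlink discovers this plugin through the module-level __plugin__ binding;
# can_handle_url() is then called to decide whether a given URL belongs to it.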
| {
"content_hash": "da2e8883ee7cd4844bf31876d4eac2e6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 81,
"avg_line_length": 28.21875,
"alnum_prop": 0.6140642303433002,
"repo_name": "beardypig/streamlink",
"id": "a04542158e4f8e06c8341e8fc4d8ac71a5730cdb",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/nrk.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
"""Define package tests."""
| {
"content_hash": "a59668f657b1e492b804c457b341aa70",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.6428571428571429,
"repo_name": "bachya/regenmaschine",
"id": "948444043ca110469d7042e0cfdbb8eb88f7077f",
"size": "28",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116689"
},
{
"name": "Shell",
"bytes": "1472"
}
],
"symlink_target": ""
} |
from logging import getLogger
from pygrim.decorators import template_method
log = getLogger(__file__)
class ContextIface(object):
""" testy contextu dostupneho z templaty"""
def postfork(self):
pass
@template_method('context.jinja', session=True)
def context(self, context):
return {
"data": {},
}
| {
"content_hash": "2de24e02bb19a571bf95d67ec0774c09",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 20.88235294117647,
"alnum_prop": 0.6309859154929578,
"repo_name": "ondrejkajinek/pyGrim",
"id": "3257c2e79bf198f21d7a1db9cccf08fbf01f0ea3",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/server/context_iface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "3437"
},
{
"name": "Makefile",
"bytes": "3972"
},
{
"name": "Python",
"bytes": "171412"
},
{
"name": "Shell",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""
Created on March 8th 2018
@author: rouxpn
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import numpy as np
class combine():
"""
Combines the PHISICS and RELAP output into one.
"""
def __init__(self,workingDir,relapData, phisicsData,depTimeDict,inpTimeDict):
"""
Constructor.
@ In, workingDir, string, absolute path to working directory
@ In, relapData, dict, data from relap
@ In, phisicsData, string, data from phisics
@ In, depTimeDict, dictionary, information from the xml depletion file
@ In, inpTimeDict, dictionary, information from the xml input file
@ Out, None
"""
self.timeStepSelected = []
paramDict = {}
paramDict['phisicsData'] = phisicsData
paramDict['relapData'] = relapData
paramDict['depTimeDict'] = depTimeDict
paramDict['inpTimeDict'] = inpTimeDict
selectedTs = 0
for i in paramDict['depTimeDict']['timeSteps'].split(' '):
selectedTs = selectedTs + int(i)
self.timeStepSelected.append(selectedTs)
self.response = self.joinData(paramDict)
def joinData(self, paramDict):
"""
Joins the RELAP and PHISICS data based on the time lines selected from PHISICS.
@ In, paramDict, dictionary, dictionary of parameters
@ Out, response, dict, the output to be returned
"""
phisicsVars = list(paramDict['phisicsData'].keys())
relapVars = list(paramDict['relapData'].keys())
headers = phisicsVars + relapVars
data = []
data.append([0.0] * len(phisicsVars) + np.array(list(paramDict['relapData'].values())).T[0].tolist())
thBurnStep = [float(val) for val in paramDict['inpTimeDict']['TH_between_BURN'].split(' ')]
    # if RELAP ended before the last TH_between_BURN point, clamp the last
    # burn step to the final RELAP time
    maxTime = max(thBurnStep)
    maxRelap = max(paramDict['relapData']['time'])
if maxRelap < maxTime:
thBurnStep[-1] = maxRelap
lineNumber, THbetweenBurn, mrTau = 0, 0, 0
while THbetweenBurn < len(thBurnStep):
lineNumber = lineNumber + 1
addedNow = False
# if the time on a relap line is <= than the TH_between_burn selected
if paramDict['relapData']['time'][lineNumber] <= thBurnStep[THbetweenBurn]:
# print the relap line with the phisics line corresponding to last time step of a burnstep
valuesPhisics = np.array(list(paramDict['phisicsData'].values())).T[self.timeStepSelected[mrTau]-1].tolist()
valuesRelap = np.array(list(paramDict['relapData'].values())).T[lineNumber].tolist()
data.append(valuesPhisics+valuesRelap)
addedNow = True
# if the relap time on a line is larger the TH_between_burn selected
if paramDict['relapData']['time'][lineNumber] >= thBurnStep[THbetweenBurn]:
# change the TH_between_burn selected
THbetweenBurn = THbetweenBurn + 1
# change the burn step in phisics
mrTau = mrTau + 1
# if this is the last TH_between_burn
if THbetweenBurn == len(thBurnStep) and not addedNow:
# print the last line of phisics and relap.
valuesPhisics = np.array(list(paramDict['phisicsData'].values())).T[-1].tolist()
valuesRelap = np.array(list(paramDict['relapData'].values())).T[-1].tolist()
data.append(valuesPhisics+valuesRelap)
data = np.asarray(data)
response = {var: data[: , i] for i, var in enumerate(headers)}
return response
def returnData(self):
"""
Method to return the data in a dictionary
@ In, None
@ Out, self.response, dict, the dictionary containing the data {var1:array,var2:array,etc}
"""
return self.response
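# Hedged usage sketch (the dictionary shapes are illustrative; in practice they
# come from the PHISICS and RELAP output parsers):
#
#   c = combine(workingDir,
#               relapData={'time': [...], 'power': [...]},
#               phisicsData={'keff': [...]},
#               depTimeDict={'timeSteps': '5 5'},
#               inpTimeDict={'TH_between_BURN': '100.0 200.0'})
#   merged = c.returnData()   # {var1: array, var2: array, ...}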
| {
"content_hash": "4d6cace3d81d427f32755605b30ae78c",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 116,
"avg_line_length": 40.62222222222222,
"alnum_prop": 0.6641137855579868,
"repo_name": "joshua-cogliati-inl/raven",
"id": "86bf6aa648f54d40fb23310152bc290b895c3e1f",
"size": "4245",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "ravenframework/CodeInterfaceClasses/PHISICS/combine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556080"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "6952659"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8574"
},
{
"name": "Shell",
"bytes": "124279"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
} |
"""Parallel workflow execution via OAR http://oar.imag.fr
"""
import os
import stat
from time import sleep
import subprocess
import json
from .base import (SGELikeBatchManagerBase, logger, iflogger, logging)
from nipype.interfaces.base import CommandLine
class OARPlugin(SGELikeBatchManagerBase):
"""Execute using OAR
The plugin_args input to run can be used to control the OAR execution.
Currently supported options are:
- template : template to use for batch job submission
- oarsub_args : arguments to be prepended to the job execution
script in the oarsub call
- max_jobname_len: maximum length of the job name. Default 15.
"""
    # Additional class variables
_max_jobname_len = 15
_oarsub_args = ''
def __init__(self, **kwargs):
template = """
# oarsub -J
"""
self._retry_timeout = 2
self._max_tries = 2
self._max_jobname_length = 15
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'max_jobname_len' in kwargs['plugin_args']:
self._max_jobname_len = \
kwargs['plugin_args']['max_jobname_len']
super(OARPlugin, self).__init__(template, **kwargs)
def _is_pending(self, taskid):
# subprocess.Popen requires taskid to be a string
proc = subprocess.Popen(
['oarstat', '-J', '-s',
'-j', taskid],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
o, e = proc.communicate()
parsed_result = json.loads(o)[taskid].lower()
is_pending = (
('error' not in parsed_result) and
('terminated' not in parsed_result)
)
return is_pending
def _submit_batchtask(self, scriptfile, node):
cmd = CommandLine('oarsub', environ=dict(os.environ),
terminal_output='allatonce')
path = os.path.dirname(scriptfile)
oarsubargs = ''
if self._oarsub_args:
oarsubargs = self._oarsub_args
if 'oarsub_args' in node.plugin_args:
if (
'overwrite' in node.plugin_args and
node.plugin_args['overwrite']
):
oarsubargs = node.plugin_args['oarsub_args']
else:
oarsubargs += (" " + node.plugin_args['oarsub_args'])
if node._hierarchy:
jobname = '.'.join((dict(os.environ)['LOGNAME'],
node._hierarchy,
node._id))
else:
jobname = '.'.join((dict(os.environ)['LOGNAME'],
node._id))
jobnameitems = jobname.split('.')
jobnameitems.reverse()
jobname = '.'.join(jobnameitems)
jobname = jobname[0:self._max_jobname_len]
if '-O' not in oarsubargs:
oarsubargs = '%s -O %s' % (
oarsubargs,
os.path.join(path, jobname + '.stdout')
)
if '-E' not in oarsubargs:
oarsubargs = '%s -E %s' % (
oarsubargs,
os.path.join(path, jobname + '.stderr')
)
if '-J' not in oarsubargs:
oarsubargs = '%s -J' % (oarsubargs)
os.chmod(scriptfile, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
cmd.inputs.args = '%s -n %s -S %s' % (
oarsubargs,
jobname,
scriptfile
)
oldlevel = iflogger.level
iflogger.setLevel(logging.getLevelName('CRITICAL'))
tries = 0
while True:
try:
result = cmd.run()
except Exception as e:
if tries < self._max_tries:
tries += 1
sleep(self._retry_timeout)
                    # wait for the retry timeout and try again.
else:
iflogger.setLevel(oldlevel)
raise RuntimeError('\n'.join((('Could not submit OAR task'
' for node %s') % node._id,
str(e))))
else:
break
iflogger.setLevel(oldlevel)
# retrieve OAR taskid
o = ''
add = False
for line in result.runtime.stdout.splitlines():
if line.strip().startswith('{'):
add = True
if add:
o += line + '\n'
if line.strip().startswith('}'):
break
taskid = json.loads(o)['job_id']
self._pending[taskid] = node.output_dir()
logger.debug('submitted OAR task: %s for node %s' % (taskid, node._id))
return taskid
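# Hedged usage sketch (assumes a built nipype Workflow `wf`; the oarsub
# resource string is illustrative):
#
#   wf.run(plugin='OAR',
#          plugin_args={'oarsub_args': '-l "nodes=1,walltime=01:00:00"',
#                       'max_jobname_len': 15,
#                       'retry_timeout': 2,
#                       'max_tries': 2})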
| {
"content_hash": "a120f783394696fd8492cbba1cdb6fe2",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 34.23287671232877,
"alnum_prop": 0.5074029611844738,
"repo_name": "dgellis90/nipype",
"id": "89fb42fbcc48f93ad29a00acb5b97f1212211b3d",
"size": "4998",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nipype/pipeline/plugins/oar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4857096"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""Code for assessing alignment accuracy"""
import re
import pysam
from mitty.simulation.sequencing.writefastq import ri, load_qname_sidecar, parse_qname
# Kept together deliberately: code that uses score_alignment_error is very
# likely to also need ri, load_qname_sidecar and parse_qname
from mitty.lib.cigars import cigarv2_v1
cigar_parser = re.compile(r'(\d+)(\D)')
def is_simple_case(cigar1, cigar2):
_, _, op1, _ = cigar_parser.split(cigar1, maxsplit=1)
_, _, op2, _ = cigar_parser.split(cigar2, maxsplit=1)
return op1 in ['=', 'M', 'X'] and op2 in ['M', 'X', '=']
def find_first_common_reference_matching_base_positions(r1, r2):
return next(
filter(
lambda x: x[0] is not None and x[1] is not None,
zip(r1.get_reference_positions(full_length=True),
r2.get_reference_positions(full_length=True))),
(None, None))
def score_alignment_error(r, ri, max_d=200, strict=False):
"""Algorithm:
If strict is True: Look at the distance between the simulated P and the aligned P
If strict is False:
If from inside an insertion, treat like strict
Else, find the first base in the read that is placed on the reference for both the
aligned and correct reads and compute the difference
If there is no such base, treat like strict
Look at the aligned CIGAR and compute the difference between exact P and
aligned P after accounting for any soft clip at the start of the read
:param r: aligned read
:param ri: readinfo for correct alignment
:param strict: If True, simply compute difference between simulated P and aligned P
if False, find first common reference matching base
:return: -max_d <= d_err <= max_d + 2
d_err = max_d + 1 if wrong chrom
d_err = max_d + 2 if unmapped
"""
if r.is_unmapped:
d_err = max_d + 2
elif r.reference_name != ri.chrom:
d_err = max_d + 1
else:
# This is what we score against when we are 'strict' or inside an insertion
# Or we can't find a common reference matching base in the correct and aligned reads
correct_pos, aligned_pos = ri.pos - 1, r.pos
# If we are not strict AND not in an insertion we use first common reference matching base
if not strict and ri.special_cigar is None and not is_simple_case(ri.cigar, r.cigarstring):
rc = pysam.AlignedSegment()
rc.pos, rc.cigarstring = ri.pos - 1, cigarv2_v1(ri.cigar)
_correct_pos, _aligned_pos = find_first_common_reference_matching_base_positions(rc, r)
if _correct_pos is not None:
correct_pos, aligned_pos = _correct_pos, _aligned_pos
d_err = max(min((aligned_pos - correct_pos), max_d), -max_d)
return d_err
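# Hedged usage sketch (file names are illustrative; assumes parse_qname returns
# one read-info entry per mate of the template):
#
#   long_qname_table = load_qname_sidecar('reads.fq.sidecar')
#   for r in pysam.AlignmentFile('aligned.bam'):
#       ri_r = parse_qname(r.qname, long_qname_table)[1 if r.is_read2 else 0]
#       d_err = score_alignment_error(r, ri_r, max_d=200)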
def correct_tlen(ri1, ri2, r):
"""If you give it two read_info objects it will compute the correct tlen for you. You should also pass it
a pysam.AlignedSegment object - it uses this for the tlen computations. It saves time compared to having to
create a new object every time
:param r:
:param ri1:
:param ri2:
:return:
"""
r.pos = ri1.pos
r.cigarstring = ri1.cigar
ap1 = r.get_aligned_pairs(True)
r.pos = ri2.pos
r.cigarstring = ri2.cigar
ap2 = r.get_aligned_pairs(True)
if len(ap1) > 0 and len(ap2) > 0: # No 148I like things
p10, p11, p20, p21 = ap1[0][1], ap1[-1][1], ap2[0][1], ap2[-1][1]
return p21 - p10 + 1 if p10 < p20 else p20 - p11 - 1
else:
return 0 # tlen basically undefined - one of the reads can not be aligned to a reference
#TODO: when we have graph reads we will always have a template
def tag_alignment(r, ri, max_d=200, strict=False):
"""Given correct alignment set tags on the read indicating correct alignment and other metadata
:param r:
:param ri:
:return: mutates r
"""
r.set_tags(
[('Xd', score_alignment_error(r, ri, max_d, strict), 'i'),
('XR', ri.chrom, 'Z'),
('XP', ri.pos, 'i'),
     ('XM', ri.cigar, 'Z')] + ([('XV', ri.v_list)] if ri.v_list else []))
| {
"content_hash": "bdd08db1eaf7223b21fc9590716b28bd",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 109,
"avg_line_length": 36.75925925925926,
"alnum_prop": 0.6642317380352645,
"repo_name": "sbg/Mitty",
"id": "bcf4cdab35e05c55f01f68d8dd61b517c9320062",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mitty/benchmarking/alignmentscore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "147223"
},
{
"name": "Python",
"bytes": "239645"
},
{
"name": "Shell",
"bytes": "834"
}
],
"symlink_target": ""
} |
__author__ = 'Kevin Godden'
import csv
import os
import time
import re
import fnmatch
import math
from datetime import datetime
from decimal import Decimal
# def show(i):
#     name = i.path + i.
def to_timestamp(dt):
# return str(calendar.timegm(dt.timetuple()))
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return str(Decimal(delta.days*86400+delta.seconds)+Decimal(delta.microseconds/1000000.0).quantize(Decimal('.000001')))
class PathAttribute(object):
"""
Extracts attributes from an image file's file-path using a supplied
regular expression. The named attributes are specified as named capture
groups within the expression, e.g.:
'(\./)?(?P<experiment>\w+)/(?P<camera>\w+)/(?P<channel>\w+)/(?P<dir>\d+)/image_D(?P<timestamp>.*)Z_(?P<frame>\d{1}).*'
to match a filename like this:
'sync_test/camera1/structured_light/0000/image_D2015-10-27T13-26-52-302857Z_9.jpg'
will produce the following named attributes:
experiment : sync_test
camera : camera1
channel : structured_light
dir : 0000
timestamp : 2015-10-27T13-26-52-302857
frame : 9
A dict of named transform lambdas can be provided along with the regex,
if a lambda exists in the dict for a named attribute, then the it will
be applied to the attribute before it is returned.
For example to transform the timestamp string into a proper time by calling
strptime():
attribute = ook.PathAttribute(regex2,
{'timestamp': lambda v: ook.to_timestamp(
datetime.strptime(v, '%Y-%m-%dT%H-%M-%S-%f'))}
)
"""
def __init__(self, regtxt, transforms=None):
"""
Initialises a FileAttribute
:param regtxt: The regex string to match against the file paths.
:param transforms: A dictionary of lambdas to a applied to the raw extracted attributes.
:return:
"""
# compile regex
self.reg = re.compile(regtxt)
self.transforms = transforms
# determine attribute order based on the group order
# withing the regex, this is so that we can report the
# attributes in the same order as specified in the regex.
self.groups = sorted(self.reg.groupindex.items(), key=lambda o: o[1])
def names(self):
"""
Gets an ordered list of the attribute names, these correspond
to the named capture groups in the supplied regex.
:return: A list of attribute names
"""
return [group[0] for group in self.groups]
def evaluate(self, rel_path, base_path):
"""
Evaluates the attribute based on the passed image file path.
:param rel_path: The relative path to the image in question.
:param base_path: The absolute path to the base images directory.
:return: A list of the extracted attributes.
"""
# Apply the regex to the path
m = self.reg.match(rel_path)
if not m:
            raise ValueError('No match - %s' % rel_path)
# We have a match and some captures, now let's make an ordered
# list of features, if a transform has been specified for a feature
# we apply it to the raw feature otherwise we just return the raw
# text.
values = [self.transforms[group[0]](m.group(group[0]))
if self.transforms and group[0] in self.transforms else
m.group(group[0])
for group in self.groups]
return values
class FileSizeAttribute(object):
"""
Extracts an image file's size (in bytes) as an attribute.
"""
def __init__(self):
pass
@staticmethod
def names():
"""
Gets a list of attribute names to be extracted.
:return: A single named attribute file_size
"""
return ['file_size']
@staticmethod
def evaluate(rel_path, base_path):
"""
Calculates the size of the image file indicated
by the path and returns it as an attribute.
:param rel_path: The relative path to the image in question.
:param base_path: The absolute path to the base images directory.
:return: A list containing a single element - the file size as a string.
"""
return [str(os.stat(os.path.join(base_path, rel_path)).st_size)]
class ImageAttribute(object):
"""
Uses opencv to extract some attributes from an image, currently
min, mane and max image brightness.
"""
def __init__(self):
pass
@staticmethod
def names():
"""
:return: An ordered list of attribute names extracted by
this image feature.
"""
return ['min', 'mean', 'max']
@staticmethod
def evaluate(rel_path, base_path):
"""
Evaluates this feature using opencv to extract some image
statistics.
:param rel_path: The relative path to the image in question.
:param base_path: The absolute path to the base images directory.
:return: The extracted image features.
"""
import cv2
# Read the image
img = cv2.imread(os.path.join(base_path, rel_path), 0)
# And collect the stats
vals = [img.min(), math.trunc(img.mean()), img.max()]
# return as a list of strings
return [str(v) for v in vals]
def dump(obj):
    for attr in dir(obj):
        if hasattr(obj, attr):
            print("obj.%s = %s" % (attr, getattr(obj, attr)))
class Image(object):
""" Represents an indexed image file. The index returns objects
of this type.
"""
def __init__(self):
self.microseconds = None
def near(self, other, diff):
"""
Determines if this image is 'near' another image in time.
:param other: The other image
:param diff: microsecond threshold value used to determine if this
image us near the other image in time.
:return: True / False --> Near / Not Near
"""
# If this image does not have a timestamp attribute
# then we can't say whether it's near anything!
if not hasattr(self, 'timestamp'):
return False
# Compute the microsecond delta between the two images'
# timestamps.
delta = float(self.timestamp) - float(other.timestamp)
return abs(delta) < diff
class Index(object):
""" The main image file index functionality.
If a path is supplied to its constructor then the index will
attempt to load an index file called .ook/flat, once opened
the caller can step through the images index.images() to retrieve
a generator, for example:
idx = ook.Index(images_path) # Load the index
images = idx.images() # Get a generator
image = next(images) # Get the first image
If a left index object and a predicate are supplied then this index
will chain onto the felt index and filter the images based on the
predicate.
"""
def __init__(self, path=None, left=None, predicate=None):
self.index_path = path
self.left = left
self.predicate = predicate
def images(self):
"""
Implements the images generator.
:return:
"""
# If a predicate has been specified, then we make a generator
# using the 'left' index and the predicate and implement the generator.
if self.predicate:
generator = (i for i in self.left.images() if self.predicate(i))
while True:
try:
yield next(generator)
except StopIteration:
return
# No predicate supplied, lets load an index from file instead
# and implement the generator on that line-by-line
path = (os.path.join(self.index_path, '.ook', 'flat'))
        print('opening index ' + path)
fields = []
with open(path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in csv_reader:
# Load up the attribute names from the header line
if row[0] == 'h':
fields = [field for field in row]
continue
image = Image()
# Take the attributes from the CSV line and
# add them to the image
for i in range(1, len(row)):
setattr(image, fields[i], row[i])
yield image
# No more lines!
#raise StopIteration
def filter(self, predicate):
"""
Returns a new index based on this index but filtered
using the supplied predicate. E.g.:
idx = ook.Index(images_path)
# filter for all images with _9 in their name
idx1 = idx.filter(lambda p: '_9' in p.name)
first = next(idx1)
:param predicate: A lambda to filter the images
:return: A new index which will apply the predicate to this index.
"""
return Index(left=self, predicate=predicate)
def image(self, name):
"""
Gets the first image in the index that has the passed name.
:param name: The name of the image to find
:return: The found image, if any.
"""
return next(self.filter(lambda p: p.name == name).images())
def scan(self, attributes):
"""
Scans through a directory tree of images and create an index file from the
images found including their attributes as specified by the attributes
argument.
:param attributes: A list of attributes to extract from the indexed images.
:return: Nothing at the moment
"""
print('looking in ' + self.index_path)
ook_dir = os.path.join(self.index_path, '.ook')
# make the .ook dir if it doesn't exist
if not os.path.exists(ook_dir):
os.makedirs(ook_dir)
ii = 0
start = time.time()
interval_start = time.time()
first = True
last_values = None
        # we write the index data to ./.ook/flat
with open('%s/flat' % ook_dir, 'w') as out:
# recursively search for image files *.jpg
for root, dirs, filenames in os.walk(self.index_path):
for name in fnmatch.filter(filenames, '*.jpg'):
rel_path = os.path.relpath(root, self.index_path)
#print root
#print self.index_path
#print name
rel_path = os.path.join(rel_path, name)
if '.paused' in rel_path:
continue
# figure out the relative path for this image
rel_path = '/'.join(rel_path.split('\\'))
values = []
# extract the attributes
for a in attributes:
try:
values.extend(a.evaluate(rel_path, self.index_path))
except ValueError:
print("No match - " + rel_path)
# if this is the first row, then we write out the attributes
# header first, this names all of the attributes in order
if first:
first = False
names = []
for a in attributes:
names.extend(a.names())
out.write("h,name,path,")
out.write("".join(['%s,' % val for val in names]))
out.write('\n')
print(names)
# start to write the image row which starts with:
# i,<image-name>,<rel-path>
out.write('i,%s,%s,' % (name, os.path.dirname(rel_path)))
values_copy = list(values)
                    # Experimental code, ignore this
if False and last_values:
for i in range(len(values)):
if values[i] == last_values[i]:
values[i] = '.'
last_values = values_copy
# Write out the attributes
out.write("".join(['%s,' % val for val in values]))
out.write('\n')
ii += 1
every_n = 50
# every N lines, write out a progress message
if ii % every_n == 0:
duration = time.time() - interval_start
interval_start = time.time()
try:
print("Reading %d, %d images/s" % (ii, every_n / duration))
                        except ZeroDivisionError:
pass
duration = time.time() - start
print('%d images indexed in %d seconds, %d images/s' % (ii, duration, ii / duration))
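# Illustrative usage sketch (assumed from the API above, not part of the
# original file; `photos_path` is a hypothetical directory that has already
# been scanned):
#
#   idx = Index(path=photos_path)
#   night_shots = idx.filter(lambda im: 'night' in im.name)
#   first = next(night_shots.images())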
| {
"content_hash": "65655cece0b7314248123fd5942946d3",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 126,
"avg_line_length": 32.91951219512195,
"alnum_prop": 0.5406386604430614,
"repo_name": "kgodden/ook",
"id": "f7feb30949dcec103979fef3833bae8ff8b77635",
"size": "13497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ook/ook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23754"
}
],
"symlink_target": ""
} |
from .models import Task
from .serializers import TaskSerializer
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.utils import timezone
import datetime
now = datetime.date.today()
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
queryset = Task.objects.all()
# def perform_create(self, serializer):
# serializer.save(owner = self.request.user)
@api_view(['GET'])
def news(request):
tasks = Task.objects.all()
serializer = TaskSerializer(tasks, many=True)
return Response(serializer.data)
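# Illustrative wiring (assumed, not part of this module): the viewset would
# typically be registered with a DRF router in urls.py, e.g.:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'tasks', TaskViewSet)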
| {
"content_hash": "afae0c47f27361fd93357897ec17779f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 52,
"avg_line_length": 25.8,
"alnum_prop": 0.7550387596899225,
"repo_name": "PabloSuarez/API_django",
"id": "d5c676812ed67aea8840e4b28876cba2042dce08",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorial/listTask/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24995"
}
],
"symlink_target": ""
} |
from chatterbot import ChatBot
# Create a new instance of a ChatBot
bot = ChatBot(
'Default Response Example Bot',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
{
'import_path': 'chatterbot.logic.BestMatch'
},
{
'import_path': 'chatterbot.logic.LowConfidenceAdapter',
'threshold': 0.65,
'default_response': 'I am sorry, but I do not understand.'
}
],
trainer='chatterbot.trainers.ListTrainer'
)
# Train the chat bot with a few responses
bot.train([
'How can I help you?',
'I want to create a chat bot',
'Have you read the documentation?',
'No, I have not',
'This should help get you started: http://chatterbot.rtfd.org/en/latest/quickstart.html'
])
# Get a response for some unexpected input
response = bot.get_response('How do I make an omelette?')
print(response) | {
"content_hash": "eee80ad685243cb5b51635075a54dca5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 29.59375,
"alnum_prop": 0.6240760295670539,
"repo_name": "sahararaju/dataasservices",
"id": "854fe437ba1994a561117cfe675d5422c1c42425",
"size": "972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chatterbot/default-response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16127"
},
{
"name": "Clojure",
"bytes": "3153"
},
{
"name": "Common Lisp",
"bytes": "1095"
},
{
"name": "Go",
"bytes": "4683"
},
{
"name": "HTML",
"bytes": "7955"
},
{
"name": "JavaScript",
"bytes": "16963"
},
{
"name": "Python",
"bytes": "258118"
},
{
"name": "Rust",
"bytes": "20301"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "netavg.settings.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "1a638291467ca70b8630d51397eb3c75",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.7124463519313304,
"repo_name": "grollins/netavg-django",
"id": "9b7db84ae2d71737ad21877bfbaee6930928f8ab",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netavg/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38183"
}
],
"symlink_target": ""
} |
import os
from os.path import relpath, join
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def find_package_data(data_root, package_root):
files = []
for root, dirnames, filenames in os.walk(data_root):
for fn in filenames:
files.append(relpath(join(root, fn), package_root))
return files
setup(
name = "openforcefield",
version = "0.1.0",
author = "John D. Chodera",
author_email = "[email protected]",
description = ("Open tools for Bayesian forcefield parameterization"),
license = "GNU Lesser General Public License (LGPL), Version 3",
    keywords = "Bayesian inference; forcefield parameterization; ThermoML",
url = "http://github.com/open-forcefield-group/open-forcefield-tools",
packages=['openforcefield', 'openforcefield/tests', 'openforcefield/data'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: GNU Lesser General Public License (LGPL), Version 3",
],
#entry_points={'console_scripts': ['smarty = smarty.cli:main']},
package_data={'openforcefield': find_package_data('openforcefield/data', 'openforcefield')},
)
| {
"content_hash": "0ba140e3559cb8c04f8615bf19987602",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 96,
"avg_line_length": 39.57575757575758,
"alnum_prop": 0.6730474732006125,
"repo_name": "bmanubay/open-forcefield-tools",
"id": "223ab2388d1edbe612f8c775975ee7adcba6e98b",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "81"
},
{
"name": "OpenEdge ABL",
"bytes": "244162"
},
{
"name": "Python",
"bytes": "304948"
},
{
"name": "Shell",
"bytes": "4567"
},
{
"name": "TeX",
"bytes": "44964"
}
],
"symlink_target": ""
} |
import asyncio
import e2c.async as e2c
config = (
'.run -- action',
'action.out -- print')
async def action(data, out):
await out(data)
async def run_async():
sess = e2c.Session(config)
sess.actor('action', action)
sess.actor('print', lambda data: print(data))
sess.visualize()
await sess.run('Hello, E2C')
loop = asyncio.get_event_loop()
loop.run_until_complete(run_async())
loop.close()
| {
"content_hash": "4d3e7ef3c800b24e0502924d9f60cc54",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 49,
"avg_line_length": 17.2,
"alnum_prop": 0.6441860465116279,
"repo_name": "elastic-event-components/e2c",
"id": "5f072c2d8b8b10c6aeb7065f33c93557c5eb206c",
"size": "1114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/python/e2c.examples/async/quick_start1_async/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "75533"
},
{
"name": "CSS",
"bytes": "1350"
},
{
"name": "HTML",
"bytes": "962"
},
{
"name": "Python",
"bytes": "117092"
},
{
"name": "Shell",
"bytes": "509"
}
],
"symlink_target": ""
} |
"""add account id
Revision ID: 3734300868bc
Revises: 3772e5bcb34d
Create Date: 2013-09-30 18:07:21.729288
"""
# revision identifiers, used by Alembic.
revision = '3734300868bc'
down_revision = '3772e5bcb34d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('account_profile', sa.Column('account_id', sa.Integer()))
def downgrade():
    op.drop_column('account_profile', 'account_id')
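# Standard Alembic workflow for this revision (illustrative, assumes a
# configured alembic.ini):
#   alembic upgrade 3734300868bc    # apply
#   alembic downgrade 3772e5bcb34d  # revert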
| {
"content_hash": "889730f3e0d1f8a2fd316cac3bdaa03f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 17,
"alnum_prop": 0.7186700767263428,
"repo_name": "vsilent/smarty-bot",
"id": "564245834f32541caae95d4abf3e9cf4c18a206a",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/3734300868bc_add_account_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2163"
},
{
"name": "Dockerfile",
"bytes": "741"
},
{
"name": "HTML",
"bytes": "4223"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "974421"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
from mock import Mock
from oslo.config import cfg
import testtools
from glance import db as db_api
import glance.db
from glance.openstack.common import importutils
CONF = cfg.CONF
CONF.import_opt('use_tpool', 'glance.db')
CONF.import_opt('data_api', 'glance.db')
class DbApiTest(testtools.TestCase):
def test_get_dbapi_when_db_pool_is_enabled(self):
CONF.set_override('use_tpool', True)
dbapi = db_api.get_api()
self.assertTrue(isinstance(dbapi, db_api.ThreadPoolWrapper))
def test_get_dbapi_when_db_pool_is_disabled(self):
CONF.set_override('use_tpool', False)
dbapi = db_api.get_api()
self.assertFalse(isinstance(dbapi, db_api.ThreadPoolWrapper))
self.assertEqual(importutils.import_module(CONF.data_api), dbapi)
def test_unwrap_dbapi_when_db_pool_is_enabled(self):
        CONF.set_override('use_tpool', True)
dbapi = db_api.get_api()
self.assertEqual(importutils.import_module(CONF.data_api),
glance.db.unwrap(dbapi))
def test_unwrap_dbapi_when_db_pool_is_disabled(self):
        CONF.set_override('use_tpool', False)
dbapi = db_api.get_api()
self.assertEqual(importutils.import_module(CONF.data_api),
glance.db.unwrap(dbapi))
def method_for_test_1(*args, **kwargs):
return args, kwargs
class ThreadPoolWrapper(testtools.TestCase):
def test_thread_pool(self):
CONF.set_override('use_tpool', True)
CONF.set_override('data_api', 'glance.tests.functional.db.'
'test_db_api')
dbapi = db_api.get_api()
from eventlet import tpool
tpool.execute = Mock()
dbapi.method_for_test_1(1, 2, kwarg='arg')
tpool.execute.assert_called_with(method_for_test_1, 1, 2, kwarg='arg')
def test_unwrap(self):
CONF.set_override('use_tpool', True)
CONF.set_override('data_api', 'glance.tests.functional.db.'
'test_db_api')
dbapi = db_api.get_api()
self.assertEqual(importutils.import_module(CONF.data_api),
dbapi.unwrap())
def tearDown(self):
super(ThreadPoolWrapper, self).tearDown()
CONF.set_override('use_tpool', False)
| {
"content_hash": "ed3fbea536bac1097a020310240fd230",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 33.83582089552239,
"alnum_prop": 0.6325540361711512,
"repo_name": "cloudbau/glance",
"id": "a4605db65b1f42c923100775cca9cd7a209998a1",
"size": "2948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/functional/db/test_db_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2489476"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
_have_ssl = False
try:
import _ssl
from _ssl import *
_have_ssl = True
except ImportError:
pass
import os, sys
try:
from errno import EBADF
except ImportError:
EBADF = 9
__all__ = ["getfqdn"]
__all__.extend(os._get_exports_list(_socket))
if _have_ssl:
__all__.extend(os._get_exports_list(_ssl))
_realsocket = socket
if _have_ssl:
_realssl = ssl
def ssl(sock, keyfile=None, certfile=None):
if hasattr(sock, "_sock"):
sock = sock._sock
return _realssl(sock, keyfile, certfile)
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network."  # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
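# Example (illustrative, not part of the original module; the result below is
# a hypothetical hostname):
#   >>> getfqdn('')
#   'myhost.example.com'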
#
# These classes are used by the socket() defined on Windows and BeOS
# platforms to provide a best-effort implementation of the cleanup
# semantics needed when sockets can't be dup()ed.
#
# These are not actually used on other platforms.
#
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'setblocking',
'settimeout', 'gettimeout', 'shutdown')
if sys.platform == "riscos":
_socketmethods = _socketmethods + ('sleeptaskw',)
class _closedsocket(object):
__slots__ = []
def _dummy(*args):
raise error(EBADF, 'Bad file descriptor')
send = recv = sendto = recvfrom = __getattr__ = _dummy
class _socketobject(object):
__doc__ = _realsocket.__doc__
__slots__ = ["_sock", "send", "recv", "sendto", "recvfrom",
"__weakref__"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
if _sock is None:
_sock = _realsocket(family, type, proto)
self._sock = _sock
self.send = self._sock.send
self.recv = self._sock.recv
self.sendto = self._sock.sendto
self.recvfrom = self._sock.recvfrom
def close(self):
self._sock = _closedsocket()
self.send = self.recv = self.sendto = self.recvfrom = self._sock._dummy
close.__doc__ = _realsocket.close.__doc__
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(_sock=sock), addr
accept.__doc__ = _realsocket.accept.__doc__
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource."""
return _socketobject(_sock=self._sock)
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return _fileobject(self._sock, mode, bufsize)
_s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
"%s.__doc__ = _realsocket.%s.__doc__\n")
for _m in _socketmethods:
exec _s % (_m, _m, _m, _m)
del _m, _s
socket = SocketType = _socketobject
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ["mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf"]
def __init__(self, sock, mode='rb', bufsize=-1):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
self.flush()
finally:
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
def fileno(self):
return self._sock.fileno()
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
| {
"content_hash": "e8b4865dffe292469e461165042a71ff",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 79,
"avg_line_length": 31.49135802469136,
"alnum_prop": 0.5362239297475302,
"repo_name": "MalloyPower/parsing-python",
"id": "f96a14683715eabfec8936cea14e6f9a63a381cb",
"size": "12847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/socket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import os
import re
import sys
from io import open
from setuptools import setup
name = 'pyramid-restful-jsonapi'
package = 'pyramid_restful_jsonapi'
description = 'JSONAPI support for Pyramid Restful Framework'
url = 'https://github.com/danpoland/pyramid-restful-jsonapi'
author = 'Daniel Poland'
author_email = '[email protected]'
install_requires = [
'pyramid-restful-framework',
'marshmallow-jsonapi',
]
tests_require = [
'WebTest >= 1.3.1', # py3 compat
'pytest'
]
here = os.path.abspath(os.path.dirname(__file__))
try:
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
except IOError:
README = ''
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license='BSD',
description=description,
long_description=README,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_require,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Pyramid',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
]
)
| {
"content_hash": "e791df915b3cefc83cdacc84e1f1e70b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 90,
"avg_line_length": 27.5,
"alnum_prop": 0.6260720411663808,
"repo_name": "danpoland/pyramid-restful-jsonapi",
"id": "85f3daf4faca298469d25cf01defceeeae002e3a",
"size": "2962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15273"
}
],
"symlink_target": ""
} |
__title__ = "persiantools"
__url__ = "https://github.com/mhajiloo/persiantools"
__version__ = "1.4.1"
__build__ = __version__
__author__ = "Majid Hajiloo"
__author_email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "Copyright 2016-2019 Majid Hajiloo"
import sys
PY2 = sys.version_info[0] < 3
| {
"content_hash": "aebc291c88bbc18b48b74c7537012887",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 25.76923076923077,
"alnum_prop": 0.6447761194029851,
"repo_name": "mhajiloo/persiantools",
"id": "675c5ae296fe1ada1bf5ec6ee9cb5467f6d4f192",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "persiantools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71902"
}
],
"symlink_target": ""
} |
from django.contrib.gis.db import models
from apps.common.models import AbstractFeature, AbstractObservation
class SamplingFeature(AbstractFeature):
geometry = models.PointField(
help_text="Spatial information about feature.",
srid=3857
)
class Observation(AbstractObservation):
# TODO: migrate this to general Observation model
# NOTE: see parent class for more information
feature_of_interest = models.ForeignKey(
SamplingFeature,
help_text="Weather station where the observation was taken.",
related_name='observations',
editable=False,
on_delete=models.DO_NOTHING
)
class Meta:
get_latest_by = 'phenomenon_time_range'
ordering = ['-phenomenon_time_range', 'feature_of_interest', 'procedure',
'observed_property']
# unique_together see migration 0005 and 0006, index ozp_observation_uniq
| {
"content_hash": "78ee9ca07dbb214b1efe6cc3c66e25a7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 81,
"avg_line_length": 31.93103448275862,
"alnum_prop": 0.6846652267818575,
"repo_name": "gis4dis/poster",
"id": "0f17a16afce8a558f9faa8d53c20ccd232b70723",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/processing/ozp/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "782"
},
{
"name": "Dockerfile",
"bytes": "586"
},
{
"name": "HTML",
"bytes": "12793"
},
{
"name": "Jupyter Notebook",
"bytes": "23402"
},
{
"name": "Makefile",
"bytes": "1178"
},
{
"name": "Python",
"bytes": "493523"
},
{
"name": "Shell",
"bytes": "1729"
}
],
"symlink_target": ""
} |
"""
An example to show receiving events from an Event Hub with checkpoint store doing checkpoint by batch asynchronously.
In the `receive_batch` method of `EventHubConsumerClient`:
If no partition id is specified, the checkpoint_store are used for load-balance and checkpoint.
If partition id is specified, the checkpoint_store can only be used for checkpoint without load balancing.
"""
import asyncio
import os
import logging
from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.extensions.checkpointstoreblobaio import BlobCheckpointStore
CONNECTION_STR = os.environ["EVENT_HUB_CONN_STR"]
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']
STORAGE_CONNECTION_STR = os.environ["AZURE_STORAGE_CONN_STR"]
BLOB_CONTAINER_NAME = "your-blob-container-name" # Please make sure the blob container resource exists.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
async def batch_process_events(events):
# put your code here
await asyncio.sleep(2) # simulate something I/O bound
async def on_event_batch(partition_context, event_batch):
log.info("Partition {}, Received count: {}".format(partition_context.partition_id, len(event_batch)))
await batch_process_events(event_batch)
await partition_context.update_checkpoint()
async def receive_batch():
checkpoint_store = BlobCheckpointStore.from_connection_string(STORAGE_CONNECTION_STR, BLOB_CONTAINER_NAME)
client = EventHubConsumerClient.from_connection_string(
CONNECTION_STR,
consumer_group="$Default",
eventhub_name=EVENTHUB_NAME,
checkpoint_store=checkpoint_store,
)
async with client:
await client.receive_batch(
on_event_batch=on_event_batch,
max_batch_size=100,
starting_position="-1", # "-1" is from the beginning of the partition.
)
if __name__ == '__main__':
asyncio.run(receive_batch())
| {
"content_hash": "c2b1dd9defdd77fbed3dea186f401cc0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 117,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.7339927121290994,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f9518456784d7bab419352dcdeb231374a94ea7d",
"size": "2290",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-eventhub/samples/async_samples/receive_batch_with_checkpoint_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import time
from flask import current_app, request
from flask.ext.login import current_user
from functools import wraps
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
excpetions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck, e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        print msg
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            # final attempt - if it raises, the exception propagates to the caller
            return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
def logged_request(func):
@wraps(func)
def decorated_view(*args, **kwargs):
current_app.logger.info('%s request on %s' % (request.method, request.path), extra={
'method': request.method,
'path': request.path,
'ip': request.remote_addr,
'agent_platform': request.user_agent.platform,
'agent_browser': request.user_agent.browser,
'agent_browser_version': request.user_agent.version,
'agent': request.user_agent.string,
'user': current_user.id if not current_user.is_anonymous() else '<anonymous>'
})
return func(*args, **kwargs)
return decorated_view
| {
"content_hash": "6bb051aa9e0decd16c04a44da3e425a7",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 92,
"avg_line_length": 37.484848484848484,
"alnum_prop": 0.5808407437348424,
"repo_name": "alexandreblin/tvshows",
"id": "f0cc492cebaf9e5fa6a9e193a5708477c061fffb",
"size": "2498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvshows/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "152635"
},
{
"name": "HTML",
"bytes": "18900"
},
{
"name": "JavaScript",
"bytes": "15329"
},
{
"name": "Python",
"bytes": "43381"
}
],
"symlink_target": ""
} |
from horizon import tabs
from django.utils.translation import ugettext_lazy as _
class DetailsTab(tabs.Tab):
"""Class to handle the data population of the test details views."""
name = _("Details")
slug = "details"
template_name = \
"project/connections/reachability_tests/_detail_overview.html"
def get_context_data(self, request):
reachabilitytest = self.tab_group.kwargs['reachabilitytest']
return {"reachabilitytest": reachabilitytest}
class ReachabilityTestDetailTabs(tabs.TabGroup):
slug = "reachability_test_details"
tabs = (DetailsTab,)
class QuickDetailsTab(tabs.Tab):
name = _("Quick Test Results")
slug = "quick_details"
template_name = \
"project/connections/reachability_tests/_quick_detail_overview.html"
def get_context_data(self, request):
reachabilityquicktest = self.tab_group.kwargs['reachabilityquicktest']
return {"reachabilityquicktest": reachabilityquicktest}
class QuickTestDetailTabs(tabs.TabGroup):
slug = "quick_test_details"
tabs = (QuickDetailsTab,)
| {
"content_hash": "ee47cd939cd980bdc1db6fe421fba8f4",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 30.27777777777778,
"alnum_prop": 0.7073394495412844,
"repo_name": "wolverineav/horizon-bsn",
"id": "53206b0010a79b516a8eab31c97e1413fc5742e2",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon_bsn/content/connections/reachability_tests/tabs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "49220"
},
{
"name": "HTML",
"bytes": "61232"
},
{
"name": "JavaScript",
"bytes": "586272"
},
{
"name": "Python",
"bytes": "125946"
},
{
"name": "Shell",
"bytes": "4013"
}
],
"symlink_target": ""
} |
"""
Finds and prints MSVC and Windows SDK paths.
It outputs:
Line 1: the base path of the Windows SDK.
Line 2: the most recent version of the Windows SDK.
Line 3: the path of the most recent MSVC.
Example:
C:\Program Files (x86)\Windows Kits\10
10.0.19041.0
C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.28.29333
"""
import os
import subprocess
import sys
def ver_to_tuple(ver_str):
"""Turns '10.1.2' into [10,1,2] so it can be compared using > """
parts = [int(x) for x in ver_str.split('.')]
return parts
def find_max_subdir(base_dir, filter=lambda x: True):
"""Finds the max subdirectory in base_dir by comparing semantic versions."""
max_ver = None
for ver in os.listdir(base_dir) if os.path.exists(base_dir) else []:
cur = os.path.join(base_dir, ver)
if not filter(cur):
continue
if max_ver is None or ver_to_tuple(ver) > ver_to_tuple(max_ver):
max_ver = ver
return max_ver
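# e.g. (illustrative): with subdirectories '10.0.17763.0' and '10.0.19041.0'
# both passing the filter, find_max_subdir returns '10.0.19041.0'.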
def main():
out = [
'',
'',
'',
]
winsdk_base = 'C:\\Program Files (x86)\\Windows Kits\\10'
if os.path.exists(winsdk_base):
out[0] = winsdk_base
lib_base = winsdk_base + '\\Lib'
filt = lambda x: os.path.exists(os.path.join(x, 'ucrt', 'x64', 'ucrt.lib'))
out[1] = find_max_subdir(lib_base, filt)
for year in ['2022', '2021', '2020', '2019']:
for version in [
'BuildTools', 'Community', 'Professional', 'Enterprise', 'Preview'
]:
msvc_base = ('C:\\Program Files (x86)\\Microsoft Visual Studio\\'
f'{year}\\{version}\\VC\\Tools\\MSVC')
if os.path.exists(msvc_base):
filt = lambda x: os.path.exists(
os.path.join(x, 'lib', 'x64', 'libcmt.lib'))
max_msvc = find_max_subdir(msvc_base, filt)
if max_msvc is not None:
out[2] = os.path.join(msvc_base, max_msvc)
break
# Don't error in case of failure, GN scripts are supposed to deal with
# failures and allow the user to override the dirs.
print('\n'.join(out))
return 0
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "83165dd882c8df85c0da872ac4e65614",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 88,
"avg_line_length": 28.438356164383563,
"alnum_prop": 0.6189788053949904,
"repo_name": "google/perfetto",
"id": "1628c0720ae3b1db99340e323562c047d87ce2e0",
"size": "2699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gn/standalone/toolchain/win_find_msvc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "58347"
},
{
"name": "C++",
"bytes": "10532953"
},
{
"name": "CSS",
"bytes": "6080"
},
{
"name": "Dockerfile",
"bytes": "6650"
},
{
"name": "HTML",
"bytes": "15653"
},
{
"name": "Java",
"bytes": "12441"
},
{
"name": "JavaScript",
"bytes": "115174"
},
{
"name": "Makefile",
"bytes": "10869"
},
{
"name": "Meson",
"bytes": "1635"
},
{
"name": "Python",
"bytes": "969677"
},
{
"name": "SCSS",
"bytes": "116843"
},
{
"name": "Shell",
"bytes": "79903"
},
{
"name": "Starlark",
"bytes": "222184"
},
{
"name": "TypeScript",
"bytes": "1740641"
}
],
"symlink_target": ""
} |
from time import sleep
from gppylib.commands.base import Command, ExecutionError, REMOTE, WorkerPool
from gppylib.db import dbconn
from gppylib.commands import gp
from gppylib.gparray import GpArray
from test.behave_utils.utils import *
import platform
from behave import given, when, then
# todo ONLY implemented for a mirror; change name of step?
@given('the information of a "{seg}" segment on a remote host is saved')
@when('the information of a "{seg}" segment on a remote host is saved')
@then('the information of a "{seg}" segment on a remote host is saved')
def impl(context, seg):
if seg == "mirror":
gparray = GpArray.initFromCatalog(dbconn.DbURL())
mirror_segs = [seg for seg in gparray.getDbList()
if seg.isSegmentMirror() and seg.getSegmentHostName() != platform.node()]
context.remote_mirror_segdbId = mirror_segs[0].getSegmentDbId()
context.remote_mirror_segcid = mirror_segs[0].getSegmentContentId()
context.remote_mirror_seghost = mirror_segs[0].getSegmentHostName()
context.remote_mirror_datadir = mirror_segs[0].getSegmentDataDirectory()
@given('the information of the corresponding primary segment on a remote host is saved')
@when('the information of the corresponding primary segment on a remote host is saved')
@then('the information of the corresponding primary segment on a remote host is saved')
def impl(context):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for seg in gparray.getDbList():
if seg.isSegmentPrimary() and seg.getSegmentContentId() == context.remote_mirror_segcid:
context.remote_pair_primary_segdbId = seg.getSegmentDbId()
context.remote_pair_primary_datadir = seg.getSegmentDataDirectory()
context.remote_pair_primary_port = seg.getSegmentPort()
context.remote_pair_primary_host = seg.getSegmentHostName()
@given('the saved "{seg}" segment is marked down in config')
@when('the saved "{seg}" segment is marked down in config')
@then('the saved "{seg}" segment is marked down in config')
def impl(context, seg):
if seg == "mirror":
dbid = context.remote_mirror_segdbId
seghost = context.remote_mirror_seghost
datadir = context.remote_mirror_datadir
else:
dbid = context.remote_pair_primary_segdbId
seghost = context.remote_pair_primary_host
datadir = context.remote_pair_primary_datadir
qry = """select count(*) from gp_segment_configuration where status='d' and hostname='%s' and dbid=%s""" % (seghost, dbid)
row_count = getRows('postgres', qry)[0][0]
if row_count != 1:
raise Exception('Expected %s segment %s on host %s to be down, but it is running.' % (seg, datadir, seghost))
@when('user kills a "{seg}" process with the saved information')
def impl(context, seg):
if seg == "mirror":
datadir = context.remote_mirror_datadir
seghost = context.remote_mirror_seghost
elif seg == "primary":
datadir = context.remote_pair_primary_datadir
seghost = context.remote_pair_primary_host
else:
raise Exception("Got invalid segment type: %s" % seg)
datadir_grep = '[' + datadir[0] + ']' + datadir[1:]
cmdStr = "ps ux | grep %s | awk '{print $2}' | xargs kill" % datadir_grep
subprocess.check_call(['ssh', seghost, cmdStr])
@then('the saved primary segment reports the same value for sql "{sql_cmd}" db "{dbname}" as was saved')
def impl(context, sql_cmd, dbname):
psql_cmd = "PGDATABASE=\'%s\' PGOPTIONS=\'-c gp_session_role=utility\' psql -t -h %s -p %s -c \"%s\"; " % (
dbname, context.remote_pair_primary_host, context.remote_pair_primary_port, sql_cmd)
cmd = Command(name='Running Remote command: %s' % psql_cmd, cmdStr = psql_cmd)
cmd.run(validateAfter=True)
if [cmd.get_results().stdout.strip()] not in context.stored_sql_results:
raise Exception("cmd results do not match\n expected: '%s'\n received: '%s'" % (
context.stored_sql_results, cmd.get_results().stdout.strip()))
def isSegmentUp(context, dbid):
qry = """select count(*) from gp_segment_configuration where status='d' and dbid=%s""" % dbid
row_count = getRows('template1', qry)[0][0]
if row_count == 0:
return True
else:
return False
def getPrimaryDbIdFromCid(context, cid):
dbid_from_cid_sql = "SELECT dbid FROM gp_segment_configuration WHERE content=%s and role='p';" % cid
result = getRow('template1', dbid_from_cid_sql)
return result[0]
def getMirrorDbIdFromCid(context, cid):
dbid_from_cid_sql = "SELECT dbid FROM gp_segment_configuration WHERE content=%s and role='m';" % cid
result = getRow('template1', dbid_from_cid_sql)
return result[0]
def runCommandOnRemoteSegment(context, cid, sql_cmd):
local_cmd = 'psql template1 -t -c "SELECT port,hostname FROM gp_segment_configuration WHERE content=%s and role=\'p\';"' % cid
run_command(context, local_cmd)
port, host = context.stdout_message.split("|")
port = port.strip()
host = host.strip()
psql_cmd = "PGDATABASE=\'template1\' PGOPTIONS=\'-c gp_session_role=utility\' psql -h %s -p %s -c \"%s\"; " % (host, port, sql_cmd)
Command(name='Running Remote command: %s' % psql_cmd, cmdStr = psql_cmd).run(validateAfter=True)
@then('gprecoverseg should print "{output}" to stdout for each mirror')
def impl(context, output):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for segment in segments:
if segment.isSegmentMirror():
expected = r'\(dbid {}\): {}'.format(segment.dbid, output)
check_stdout_msg(context, expected)
@then('pg_isready reports all primaries are accepting connections')
def impl(context):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
subprocess.check_call(['pg_isready', '-h', seg.getSegmentHostName(), '-p', str(seg.getSegmentPort())])
| {
"content_hash": "396bc12ebf08e270d61876abea382925",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 135,
"avg_line_length": 48.766129032258064,
"alnum_prop": 0.6816603274350918,
"repo_name": "ashwinstar/gpdb",
"id": "65d2349e22c94ef0e16593bbe303e14062494159",
"size": "6047",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "gpMgmt/test/behave/mgmt_utils/steps/recoverseg_mgmt_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3724"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "12768"
},
{
"name": "C",
"bytes": "42705726"
},
{
"name": "C++",
"bytes": "2839973"
},
{
"name": "CMake",
"bytes": "3425"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "223"
},
{
"name": "DTrace",
"bytes": "3873"
},
{
"name": "Dockerfile",
"bytes": "11990"
},
{
"name": "Emacs Lisp",
"bytes": "3488"
},
{
"name": "Fortran",
"bytes": "14863"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "342783"
},
{
"name": "HTML",
"bytes": "653351"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "229553"
},
{
"name": "M4",
"bytes": "114378"
},
{
"name": "Makefile",
"bytes": "455445"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "PLSQL",
"bytes": "160856"
},
{
"name": "PLpgSQL",
"bytes": "5722287"
},
{
"name": "Perl",
"bytes": "798287"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3267988"
},
{
"name": "Raku",
"bytes": "698"
},
{
"name": "Roff",
"bytes": "32437"
},
{
"name": "Ruby",
"bytes": "81695"
},
{
"name": "SQLPL",
"bytes": "313387"
},
{
"name": "Shell",
"bytes": "453847"
},
{
"name": "TSQL",
"bytes": "3294076"
},
{
"name": "XS",
"bytes": "6983"
},
{
"name": "Yacc",
"bytes": "672568"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
} |
from custom_exceptions.bad_plateau_init import BadPlateauInit
class Plateau:
upper_x = 0
upper_y = 0
def __init__(self, upper_x, upper_y):
if upper_x <= 0:
raise BadPlateauInit
if upper_y <= 0:
raise BadPlateauInit
self.upper_x = upper_x
self.upper_y = upper_y
    def can_move(self, current_x=None, current_y=None, x_add=None, y_add=None):
        # Decide whether a rover can move where it intends to move to.
        # Rover's moves are bounded by 0,0 - x,y.
        # Use explicit None checks so a current coordinate of 0 is still valid.
        if current_x is not None:
            try:
                return 0 <= (current_x + x_add) <= self.upper_x
            except TypeError:
                print "Cannot move from current X %s to %s" % (str(current_x), str(x_add))
                return False
        if current_y is not None:
            try:
                return 0 <= (current_y + y_add) <= self.upper_y
            except TypeError:
                print "Cannot move from current Y %s to %s" % (str(current_y), str(y_add))
                return False
        return False
def can_deploy_to(self, x, y):
# Decide whether a rover can be deployed to the initial
# coordinates (x, y).
# Returns True if both x and y are inside a square of plateau's 0,0 - x,y
return (0 <= x <= self.upper_x) and (0 <= y <= self.upper_y)
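# Illustrative usage (not part of the original module):
#   p = Plateau(5, 5)
#   p.can_deploy_to(3, 4)              # True - inside the 0,0 - 5,5 square
#   p.can_move(current_x=5, x_add=1)   # False - would move off the plateau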
| {
"content_hash": "b072cc0edc9b797937e544a8a09d084f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 90,
"avg_line_length": 31.071428571428573,
"alnum_prop": 0.5348659003831417,
"repo_name": "tkanoff/therover",
"id": "3c230a6d710ced2e23cecaee171583b24fc9e261",
"size": "1330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plateau.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7667"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('beeswax.views',
url(r'^$', 'index', name='index'),
url(r'^execute/?$', 'execute_query', name='execute_query'),
url(r'^execute/design/(?P<design_id>\d+)$', 'execute_query', name='execute_design'),
url(r'^execute/query/(?P<query_history_id>\d+)$', 'execute_query', name='watch_query_history'),
url(r'^results/(?P<id>\d+)/(?P<first_row>\d+)$', 'view_results', name='view_results'),
url(r'^download/(?P<id>\d+)/(?P<format>\w+)$', 'download', name='download'),
url(r'^my_queries$', 'my_queries', name='my_queries'),
url(r'^list_designs$', 'list_designs', name='list_designs'),
url(r'^list_trashed_designs$', 'list_trashed_designs', name='list_trashed_designs'),
url(r'^delete_designs$', 'delete_design', name='delete_design'),
url(r'^restore_designs$', 'restore_design', name='restore_design'),
url(r'^clone_design/(?P<design_id>\d+)$', 'clone_design', name='clone_design'),
url(r'^query_history$', 'list_query_history', name='list_query_history'),
url(r'^configuration$', 'configuration', name='configuration'),
url(r'^install_examples$', 'install_examples', name='install_examples'),
url(r'^query_cb/done/(?P<server_id>\S+)$', 'query_done_cb', name='query_done_cb'),
)
urlpatterns += patterns(
'beeswax.create_database',
url(r'^create/database$', 'create_database', name='create_database'),
)
urlpatterns += patterns(
'beeswax.create_table',
url(r'^create/create_table/(?P<database>\w+)$', 'create_table', name='create_table'),
url(r'^create/import_wizard/(?P<database>\w+)$', 'import_wizard', name='import_wizard'),
url(r'^create/auto_load/(?P<database>\w+)$', 'load_after_create', name='load_after_create'),
)
urlpatterns += patterns(
'beeswax.api',
url(r'^api/autocomplete/$', 'autocomplete', name='api_autocomplete_databases'),
url(r'^api/autocomplete/(?P<database>\w+)$', 'autocomplete', name='api_autocomplete_tables'),
url(r'^api/autocomplete/(?P<database>\w+)/$', 'autocomplete', name='api_autocomplete_tables'),
url(r'^api/autocomplete/(?P<database>\w+)/(?P<table>\w+)$', 'autocomplete', name='api_autocomplete_columns'),
url(r'^api/autocomplete/(?P<database>\w+)/(?P<table>\w+)/$', 'autocomplete', name='api_autocomplete_columns'),
url(r'^api/design/(?P<design_id>\d+)?$', 'save_query_design', name='api_save_design'),
url(r'^api/design/(?P<design_id>\d+)/get$', 'fetch_saved_design', name='api_fetch_saved_design'),
url(r'^api/query/(?P<query_history_id>\d+)/get$', 'fetch_query_history', name='api_fetch_query_history'),
url(r'^api/query/parameters$', 'parameters', name='api_parameters'),
url(r'^api/query/execute/(?P<design_id>\d+)?$', 'execute', name='api_execute'),
url(r'^api/query/(?P<query_history_id>\d+)/cancel$', 'cancel_query', name='api_cancel_query'),
url(r'^api/query/(?P<query_history_id>\d+)/close/?$', 'close_operation', name='api_close_operation'),
url(r'^api/query/(?P<query_history_id>\d+)/results/save/hive/table$', 'save_results_hive_table', name='api_save_results_hive_table'),
url(r'^api/query/(?P<query_history_id>\d+)/results/save/hdfs/file$', 'save_results_hdfs_file', name='api_save_results_hdfs_file'),
url(r'^api/query/(?P<query_history_id>\d+)/results/save/hdfs/directory$', 'save_results_hdfs_directory', name='api_save_results_hdfs_directory'),
url(r'^api/watch/json/(?P<id>\d+)$', 'watch_query_refresh_json', name='api_watch_query_refresh_json'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)$', 'describe_table', name='describe_table'),
)
| {
"content_hash": "4c9b12647e428f67d3169f63e3dcf15b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 147,
"avg_line_length": 58.278688524590166,
"alnum_prop": 0.6610407876230661,
"repo_name": "yongshengwang/builthue",
"id": "4bd3c502227c86f72dc7deab7cb8890701933876",
"size": "4347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/beeswax/src/beeswax/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10774013"
},
{
"name": "C++",
"bytes": "184593"
},
{
"name": "CSS",
"bytes": "655282"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2418037"
},
{
"name": "Makefile",
"bytes": "86977"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "282"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "29990389"
},
{
"name": "Shell",
"bytes": "38643"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "99710"
},
{
"name": "XSLT",
"bytes": "367778"
}
],
"symlink_target": ""
} |