text | meta
---|---|
from flask import Flask, jsonify, render_template, request
from slugify import slugify
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/slugify', methods=['POST'])
def _slugify():
if request.is_xhr:
text = request.get_json().get('text', '')
else:
text = request.form.get('text', '')
result = {
'text': text,
'slug': slugify(text)
}
return jsonify(result)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=7000)
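# Hypothetical usage sketch (not part of the original file): the /slugify
# endpoint can be exercised with Flask's built-in test client. The call below
# posts a plain form field, which takes the non-XHR branch above; an XHR/JSON
# client would send a JSON body with a 'text' key instead.
#
#     with app.test_client() as client:
#         resp = client.post('/slugify', data={'text': 'Hello World'})
#         print(resp.get_json())  # expected: {'slug': 'hello-world', 'text': 'Hello World'}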
| {
"content_hash": "dbc2df5ede4b59d012de9373aa39bc3e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 58,
"avg_line_length": 21.03846153846154,
"alnum_prop": 0.5813528336380256,
"repo_name": "mozillazg/slugify",
"id": "e97716ef8874e37f1968651b2c904eeb6c0ddd31",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1974"
},
{
"name": "HTML",
"bytes": "1070"
},
{
"name": "JavaScript",
"bytes": "6584"
},
{
"name": "Nginx",
"bytes": "1163"
},
{
"name": "Python",
"bytes": "1620"
}
],
"symlink_target": ""
} |
"""
Copy of ``django.contrib.admin.utils.get_deleted_objects`` and a subclass of
``django.contrib.admin.utils.NestedObjects`` that work with django_polymorphic querysets.
Ultimately these should go directly into django_polymorphic or, in a more generic way, into Django itself.
This code has been copied from Django 1.4.
At all locations where something has been changed, there are inline comments in the code.
"""
from __future__ import unicode_literals
from django.contrib.admin.util import NestedObjects, quote
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
from filer.utils.compatibility import get_delete_permission
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
# --- begin patch ---
collector = PolymorphicAwareNestedObjects(using=using)
# --- end patch ---
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = get_delete_permission(opts)
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe('%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class PolymorphicAwareNestedObjects(NestedObjects):
def collect(self, objs, source_attr=None, **kwargs):
if hasattr(objs, 'non_polymorphic'):
# .filter() is needed, because there may already be cached polymorphic results in the queryset
objs = objs.non_polymorphic().filter()
return super(PolymorphicAwareNestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
| {
"content_hash": "c7612c7a9067162b6228499b5afced7a",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 106,
"avg_line_length": 38.94871794871795,
"alnum_prop": 0.6270572745227123,
"repo_name": "amboycharlie/Child-Friendly-LCMS",
"id": "44b3a78f678b4449e477d2568feca2bdc1f010c3",
"size": "3061",
"binary": false,
"copies": "1",
"ref": "refs/heads/Child-Friendly-LCMS-0.5",
"path": "leonardo/module/media/admin/patched/admin_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "119867"
},
{
"name": "HTML",
"bytes": "229025"
},
{
"name": "JavaScript",
"bytes": "184465"
},
{
"name": "Python",
"bytes": "585907"
},
{
"name": "Shell",
"bytes": "4253"
}
],
"symlink_target": ""
} |
"""Home Assistant auth provider."""
import asyncio
import base64
from collections import OrderedDict
import logging
from typing import Any, Dict, List, Optional, Set, cast
import bcrypt
import voluptuous as vol
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
STORAGE_VERSION = 1
STORAGE_KEY = "auth_provider.homeassistant"
def _disallow_id(conf: Dict[str, Any]) -> Dict[str, Any]:
"""Disallow ID in config."""
if CONF_ID in conf:
raise vol.Invalid("ID is not allowed for the homeassistant auth provider.")
return conf
CONFIG_SCHEMA = vol.All(AUTH_PROVIDER_SCHEMA, _disallow_id)
class InvalidAuth(HomeAssistantError):
"""Raised when we encounter invalid authentication."""
class InvalidUser(HomeAssistantError):
"""Raised when invalid user is specified.
Will not be raised when validating authentication.
"""
class Data:
"""Hold the user data."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the user data store."""
self.hass = hass
self._store = hass.helpers.storage.Store(
STORAGE_VERSION, STORAGE_KEY, private=True
)
self._data: Optional[Dict[str, Any]] = None
# Legacy mode will allow usernames to start/end with whitespace
# and will compare usernames case-insensitively.
# Remove in 2020 or when we launch 1.0.
self.is_legacy = False
@callback
def normalize_username(self, username: str) -> str:
"""Normalize a username based on the mode."""
if self.is_legacy:
return username
return username.strip().casefold()
async def async_load(self) -> None:
"""Load stored data."""
data = await self._store.async_load()
if data is None:
data = {"users": []}
seen: Set[str] = set()
for user in data["users"]:
username = user["username"]
# check if we have duplicates
folded = username.casefold()
if folded in seen:
self.is_legacy = True
logging.getLogger(__name__).warning(
"Home Assistant auth provider is running in legacy mode "
"because we detected usernames that are case-insensitive"
"equivalent. Please change the username: '%s'.",
username,
)
break
seen.add(folded)
# check if we have unstripped usernames
if username != username.strip():
self.is_legacy = True
logging.getLogger(__name__).warning(
"Home Assistant auth provider is running in legacy mode "
"because we detected usernames that start or end in a "
"space. Please change the username: '%s'.",
username,
)
break
self._data = data
@property
def users(self) -> List[Dict[str, str]]:
"""Return users."""
return self._data["users"] # type: ignore
def validate_login(self, username: str, password: str) -> None:
"""Validate a username and password.
Raises InvalidAuth if auth invalid.
"""
username = self.normalize_username(username)
dummy = b"$2b$12$CiuFGszHx9eNHxPuQcwBWez4CwDTOcLTX5CbOpV6gef2nYuXkY7BO"
found = None
# Compare all users to avoid timing attacks.
for user in self.users:
if self.normalize_username(user["username"]) == username:
found = user
if found is None:
# check a hash to make timing the same as if user was found
bcrypt.checkpw(b"foo", dummy)
raise InvalidAuth
user_hash = base64.b64decode(found["password"])
# bcrypt.checkpw is timing-safe
if not bcrypt.checkpw(password.encode(), user_hash):
raise InvalidAuth
# pylint: disable=no-self-use
def hash_password(self, password: str, for_storage: bool = False) -> bytes:
"""Encode a password."""
hashed: bytes = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12))
if for_storage:
hashed = base64.b64encode(hashed)
return hashed
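# Illustrative round trip (added comment, not part of the original file):
# ``add_auth`` below stores ``hash_password(password, for_storage=True)``
# base64-encoded, and ``validate_login`` above reverses it, e.g.
#     stored = data.hash_password("secret", for_storage=True).decode()
#     bcrypt.checkpw(b"secret", base64.b64decode(stored))  # -> True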
def add_auth(self, username: str, password: str) -> None:
"""Add a new authenticated user/pass."""
username = self.normalize_username(username)
if any(
self.normalize_username(user["username"]) == username for user in self.users
):
raise InvalidUser
self.users.append(
{
"username": username,
"password": self.hash_password(password, True).decode(),
}
)
@callback
def async_remove_auth(self, username: str) -> None:
"""Remove authentication."""
username = self.normalize_username(username)
index = None
for i, user in enumerate(self.users):
if self.normalize_username(user["username"]) == username:
index = i
break
if index is None:
raise InvalidUser
self.users.pop(index)
def change_password(self, username: str, new_password: str) -> None:
"""Update the password.
Raises InvalidUser if user cannot be found.
"""
username = self.normalize_username(username)
for user in self.users:
if self.normalize_username(user["username"]) == username:
user["password"] = self.hash_password(new_password, True).decode()
break
else:
raise InvalidUser
async def async_save(self) -> None:
"""Save data."""
await self._store.async_save(self._data)
@AUTH_PROVIDERS.register("homeassistant")
class HassAuthProvider(AuthProvider):
"""Auth provider based on a local storage of users in HASS config dir."""
DEFAULT_TITLE = "Home Assistant Local"
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize an Home Assistant auth provider."""
super().__init__(*args, **kwargs)
self.data: Optional[Data] = None
self._init_lock = asyncio.Lock()
async def async_initialize(self) -> None:
"""Initialize the auth provider."""
async with self._init_lock:
if self.data is not None:
return
data = Data(self.hass)
await data.async_load()
self.data = data
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
"""Return a flow to login."""
return HassLoginFlow(self)
async def async_validate_login(self, username: str, password: str) -> None:
"""Validate a username and password."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
await self.hass.async_add_executor_job(
self.data.validate_login, username, password
)
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
norm_username = self.data.normalize_username
username = norm_username(flow_result["username"])
for credential in await self.async_credentials():
if norm_username(credential.data["username"]) == username:
return credential
# Create new credentials.
return self.async_create_credentials({"username": username})
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Get extra info for this credential."""
return UserMeta(name=credentials.data["username"], is_active=True)
async def async_will_remove_credentials(self, credentials: Credentials) -> None:
"""When credentials get removed, also remove the auth."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
try:
self.data.async_remove_auth(credentials.data["username"])
await self.data.async_save()
except InvalidUser:
# Can happen if somehow we didn't clean up a credential
pass
class HassLoginFlow(LoginFlow):
"""Handler for the login flow."""
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the step of the form."""
errors = {}
if user_input is not None:
try:
await cast(HassAuthProvider, self._auth_provider).async_validate_login(
user_input["username"], user_input["password"]
)
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
user_input.pop("password")
return await self.async_finish(user_input)
schema: Dict[str, type] = OrderedDict()
schema["username"] = str
schema["password"] = str
return self.async_show_form(
step_id="init", data_schema=vol.Schema(schema), errors=errors
)
| {
"content_hash": "1cdc845ecb532ffd2a9ce35d8985d26f",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 88,
"avg_line_length": 31.728476821192054,
"alnum_prop": 0.5905865163848883,
"repo_name": "leppa/home-assistant",
"id": "9ddbf4189f79b20e49df988c3d01e74b62279671",
"size": "9582",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/auth/providers/homeassistant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_disk
description:
- Persistent disks are durable storage devices that function similarly to the physical
disks in a desktop or a server. Compute Engine manages the hardware behind these
devices to ensure data redundancy and optimize performance for you. Persistent disks
are available as either standard hard disk drives (HDD) or solid-state drives (SSD).
- Persistent disks are located independently from your virtual machine instances,
so you can detach or move persistent disks to keep your data even after you delete
your instances. Persistent disk performance scales automatically with size, so you
can resize your existing persistent disks or add more persistent disks to an instance
to meet your performance and storage space requirements.
- Add a persistent disk to your instance when you need reliable and affordable storage
with consistent performance characteristics.
short_description: Creates a GCP Disk
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
labels:
description:
- Labels to apply to this disk. A list of key->value pairs.
required: false
type: dict
version_added: '2.7'
licenses:
description:
- Any applicable publicly visible licenses.
required: false
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
size_gb:
description:
- Size of the persistent disk, specified in GB. You can specify this field when
creating a persistent disk using the sourceImage or sourceSnapshot parameter,
or specify it alone to create an empty persistent disk.
- If you specify this field along with sourceImage or sourceSnapshot, the value
of sizeGb must not be less than the size of the sourceImage or the size of the
snapshot.
required: false
type: int
physical_block_size_bytes:
description:
- Physical block size of the persistent disk, in bytes. If not present in a request,
a default value is used. Currently supported sizes are 4096 and 16384, other
sizes may be added in the future.
- If an unsupported value is requested, the error message will list the supported
values for the caller's project.
required: false
type: int
version_added: '2.8'
type:
description:
- URL of the disk type resource describing which disk type to use to create the
disk. Provide this when creating the disk.
required: false
type: str
version_added: '2.7'
source_image:
description:
- The source image used to create this disk. If the source image is deleted, this
field will not be set.
- 'To create a disk with one of the public operating system images, specify the
image by its family name. For example, specify family/debian-8 to use the latest
Debian 8 image: projects/debian-cloud/global/images/family/debian-8 Alternatively,
use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD
To create a disk with a private image that you created, specify the image name
in the following format: global/images/my-private-image You can also specify
a private image by its image family, which returns the latest version of the
image in that family. Replace the image name with family/family-name: global/images/family/my-private-family
.'
required: false
type: str
zone:
description:
- A reference to the zone where the disk resides.
required: true
type: str
source_image_encryption_key:
description:
- The customer-supplied encryption key of the source image. Required if the source
image is protected by a customer-supplied encryption key.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
kms_key_name:
description:
- The name of the encryption key that is stored in Google Cloud KMS.
required: false
type: str
disk_encryption_key:
description:
- Encrypts the disk using a customer-supplied encryption key.
- After you encrypt a disk with a customer-supplied key, you must provide the
same key if you use the disk later (e.g. to create a disk snapshot or an image,
or to attach the disk to a virtual machine).
- Customer-supplied encryption keys do not protect access to metadata of the disk.
- If you do not provide an encryption key when creating the disk, then the disk
will be encrypted using an automatically generated key and you do not need to
provide a key to use the disk later.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
kms_key_name:
description:
- The name of the encryption key that is stored in Google Cloud KMS.
required: false
type: str
source_snapshot:
description:
- The source snapshot used to create this disk. You can provide this as a partial
or full URL to the resource.
- 'This field represents a link to a Snapshot resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_snapshot task and then set this source_snapshot field to "{{
name-of-resource }}"'
required: false
type: dict
source_snapshot_encryption_key:
description:
- The customer-supplied encryption key of the source snapshot. Required if the
source snapshot is protected by a customer-supplied encryption key.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
kms_key_name:
description:
- The name of the encryption key that is stored in Google Cloud KMS.
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/disks)'
- 'Adding a persistent disk: U(https://cloud.google.com/compute/docs/disks/add-persistent-disk)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variable values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: test_object
size_gb: 50
disk_encryption_key:
raw_key: SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=
zone: us-central1-a
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
lastAttachTimestamp:
description:
- Last attach timestamp in RFC3339 text format.
returned: success
type: str
lastDetachTimestamp:
description:
- Last detach timestamp in RFC3339 text format.
returned: success
type: str
labels:
description:
- Labels to apply to this disk. A list of key->value pairs.
returned: success
type: dict
licenses:
description:
- Any applicable publicly visible licenses.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sizeGb:
description:
- Size of the persistent disk, specified in GB. You can specify this field when
creating a persistent disk using the sourceImage or sourceSnapshot parameter,
or specify it alone to create an empty persistent disk.
- If you specify this field along with sourceImage or sourceSnapshot, the value
of sizeGb must not be less than the size of the sourceImage or the size of the
snapshot.
returned: success
type: int
users:
description:
- 'Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
.'
returned: success
type: list
physicalBlockSizeBytes:
description:
- Physical block size of the persistent disk, in bytes. If not present in a request,
a default value is used. Currently supported sizes are 4096 and 16384, other sizes
may be added in the future.
- If an unsupported value is requested, the error message will list the supported
values for the caller's project.
returned: success
type: int
type:
description:
- URL of the disk type resource describing which disk type to use to create the
disk. Provide this when creating the disk.
returned: success
type: str
sourceImage:
description:
- The source image used to create this disk. If the source image is deleted, this
field will not be set.
- 'To create a disk with one of the public operating system images, specify the
image by its family name. For example, specify family/debian-8 to use the latest
Debian 8 image: projects/debian-cloud/global/images/family/debian-8 Alternatively,
use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD
To create a disk with a private image that you created, specify the image name
in the following format: global/images/my-private-image You can also specify a
private image by its image family, which returns the latest version of the image
in that family. Replace the image name with family/family-name: global/images/family/my-private-family
.'
returned: success
type: str
zone:
description:
- A reference to the zone where the disk resides.
returned: success
type: str
sourceImageEncryptionKey:
description:
- The customer-supplied encryption key of the source image. Required if the source
image is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
kmsKeyName:
description:
- The name of the encryption key that is stored in Google Cloud KMS.
returned: success
type: str
sourceImageId:
description:
- The ID value of the image used to create this disk. This value identifies the
exact image that was used to create this persistent disk. For example, if you
created the persistent disk from an image that was later deleted and recreated
under the same name, the source image ID would identify the exact version of the
image that was used.
returned: success
type: str
diskEncryptionKey:
description:
- Encrypts the disk using a customer-supplied encryption key.
- After you encrypt a disk with a customer-supplied key, you must provide the same
key if you use the disk later (e.g. to create a disk snapshot or an image, or
to attach the disk to a virtual machine).
- Customer-supplied encryption keys do not protect access to metadata of the disk.
- If you do not provide an encryption key when creating the disk, then the disk
will be encrypted using an automatically generated key and you do not need to
provide a key to use the disk later.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
kmsKeyName:
description:
- The name of the encryption key that is stored in Google Cloud KMS.
returned: success
type: str
sourceSnapshot:
description:
- The source snapshot used to create this disk. You can provide this as a partial
or full URL to the resource.
returned: success
type: dict
sourceSnapshotEncryptionKey:
description:
- The customer-supplied encryption key of the source snapshot. Required if the source
snapshot is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
kmsKeyName:
description:
- The name of the encryption key that is stored in Google Cloud KMS.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceSnapshotId:
description:
- The unique ID of the snapshot used to create this disk. This value identifies
the exact snapshot that was used to create this persistent disk. For example,
if you created the persistent disk from a snapshot that was later deleted and
recreated under the same name, the source snapshot ID would identify the exact
version of the snapshot that was used.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
labels=dict(type='dict'),
licenses=dict(type='list', elements='str'),
name=dict(required=True, type='str'),
size_gb=dict(type='int'),
physical_block_size_bytes=dict(type='int'),
type=dict(type='str'),
source_image=dict(type='str'),
zone=dict(required=True, type='str'),
source_image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'))),
disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'))),
source_snapshot=dict(type='dict'),
source_snapshot_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'), kms_key_name=dict(type='str'))),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#disk'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
if response.get('labels') != request.get('labels'):
label_fingerprint_update(module, request, response)
if response.get('sizeGb') != request.get('sizeGb'):
size_gb_update(module, request, response)
def label_fingerprint_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/disks/{name}/setLabels"]).format(**module.params),
{u'labelFingerprint': response.get('labelFingerprint'), u'labels': module.params.get('labels')},
)
def size_gb_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/zones/{zone}/disks/{name}/resize"]).format(**module.params),
{u'sizeGb': module.params.get('size_gb')},
)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#disk',
u'sourceImageEncryptionKey': DiskSourceimageencryptionkey(module.params.get('source_image_encryption_key', {}), module).to_request(),
u'diskEncryptionKey': DiskDiskencryptionkey(module.params.get('disk_encryption_key', {}), module).to_request(),
u'sourceSnapshotEncryptionKey': DiskSourcesnapshotencryptionkey(module.params.get('source_snapshot_encryption_key', {}), module).to_request(),
u'description': module.params.get('description'),
u'labels': module.params.get('labels'),
u'licenses': module.params.get('licenses'),
u'name': module.params.get('name'),
u'sizeGb': module.params.get('size_gb'),
u'physicalBlockSizeBytes': module.params.get('physical_block_size_bytes'),
u'type': disk_type_selflink(module.params.get('type'), module.params),
u'sourceImage': module.params.get('source_image'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'labelFingerprint': response.get(u'labelFingerprint'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'id': response.get(u'id'),
u'lastAttachTimestamp': response.get(u'lastAttachTimestamp'),
u'lastDetachTimestamp': response.get(u'lastDetachTimestamp'),
u'labels': response.get(u'labels'),
u'licenses': response.get(u'licenses'),
u'name': module.params.get('name'),
u'sizeGb': response.get(u'sizeGb'),
u'users': response.get(u'users'),
u'physicalBlockSizeBytes': response.get(u'physicalBlockSizeBytes'),
u'type': response.get(u'type'),
u'sourceImage': module.params.get('source_image'),
}
def disk_type_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/zones/.*/diskTypes/.*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/diskTypes/%s".format(**params) % name
return name
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#disk')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class DiskSourceimageencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'kmsKeyName': self.request.get('kms_key_name')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'kmsKeyName': self.request.get(u'kmsKeyName')})
class DiskDiskencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'kmsKeyName': self.request.get('kms_key_name')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'kmsKeyName': self.request.get(u'kmsKeyName')})
class DiskSourcesnapshotencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key'), u'kmsKeyName': self.request.get('kms_key_name')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey'), u'kmsKeyName': self.request.get(u'kmsKeyName')})
if __name__ == '__main__':
main()
| {
"content_hash": "17896313be436f8c66e384afe6a90d06",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 150,
"avg_line_length": 37.59175531914894,
"alnum_prop": 0.6764653861119955,
"repo_name": "thaim/ansible",
"id": "29e259078c46db47bd4ee38f36192b738c3045b8",
"size": "29006",
"binary": false,
"copies": "3",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/google/gcp_compute_disk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""
Insertion Sort
Take each value of the list in turn, compare it to the elements
of the sorted sublist to its left, and insert it in place until
there are no unsorted elements left.
Worst Case Performance: O(n^2) comparisons and swaps
Best Case Performance: O(n) comparisons, O(1) swaps
Average Case Performance: O(n^2) comparisons and swaps
"""
def insertionSort(inlist):
inlistLength = len(inlist)
for i in range(inlistLength):
sorting_value = inlist[i]
j = i - 1
while j >= 0 and inlist[j] > sorting_value:
inlist[j+1] = inlist[j]
j -= 1
inlist[j+1] = sorting_value
return inlist
def insertionSort2(inlist):
    inlistLength = len(inlist)
    for i in range(1, inlistLength):
        sorting_value = inlist[i]
        position = i
        # Walk the sorted sublist right-to-left, shifting larger values right.
        for j in range(i - 1, -1, -1):
            if inlist[j] > sorting_value:
                inlist[j + 1] = inlist[j]
                position = j
            else:
                break
        inlist[position] = sorting_value
    return inlist
# test
mylist = [0,10,3,2,11,22,40,-1,5]
print(insertionSort(mylist)) | {
"content_hash": "a5ce9538bb4110425887094c81d28837",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 60,
"avg_line_length": 22.19047619047619,
"alnum_prop": 0.6963519313304721,
"repo_name": "eshim/Algorithms",
"id": "f5de44a074fb4d8d070b8dfde11200cf0aa9a820",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InsertionSort.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import forms
from legislative.models import LegislativeSession
from select2.widgets import SelectMultiple
class SearchBillForm(forms.Form):
search_name = forms.CharField(required=False,
label="Keywords")
# search_sessions = forms.ModelMultipleChoiceField(
# label="From Sessions",
# required=False,
# queryset= LegislativeSession.objects.all().order_by('name'),
# widget=SelectMultiple,
# initial=LegislativeSession.objects.all(),
# ) | {
"content_hash": "5343e3efa77cf919c5c76591e29a7ec6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 38.2,
"alnum_prop": 0.6771378708551483,
"repo_name": "access-missouri/am-django-project",
"id": "bfc71da216bf9fceadfe2d29a72b223d68ce1a96",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "am/search/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "208381"
},
{
"name": "HTML",
"bytes": "75543"
},
{
"name": "JavaScript",
"bytes": "68836"
},
{
"name": "Makefile",
"bytes": "803"
},
{
"name": "Python",
"bytes": "241729"
},
{
"name": "Ruby",
"bytes": "105"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Gallery',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('name', models.CharField(max_length=100)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('image', models.ImageField(upload_to=b'products_images')),
('gallery', models.ForeignKey(to='image_gallery.Gallery')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| {
"content_hash": "571ff04cb402d9589ca9ac4b5bed1c92",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 147,
"avg_line_length": 40.926829268292686,
"alnum_prop": 0.5887961859356377,
"repo_name": "gygcnc/gygcnc",
"id": "606a7a440cb2100e841cc2a83cbc0a44118c5d06",
"size": "1702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gygcnc/image_gallery/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "135198"
},
{
"name": "HTML",
"bytes": "50093"
},
{
"name": "JavaScript",
"bytes": "42590"
},
{
"name": "Python",
"bytes": "35373"
}
],
"symlink_target": ""
} |
import contextlib
import functools
import alembic
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
# Neutron milestones for upgrade aliases
LIBERTY = 'liberty'
NEUTRON_MILESTONES = [
# earlier milestones were not tagged
LIBERTY,
]
CREATION_OPERATIONS = (sa.sql.ddl.CreateIndex,
sa.sql.ddl.CreateTable,
sa.sql.ddl.CreateColumn,
)
DROP_OPERATIONS = (sa.sql.ddl.DropConstraint,
sa.sql.ddl.DropIndex,
sa.sql.ddl.DropTable,
alembic.ddl.base.DropColumn)
def skip_if_offline(func):
"""Decorator for skipping migrations in offline mode."""
@functools.wraps(func)
def decorator(*args, **kwargs):
if context.is_offline_mode():
return
return func(*args, **kwargs)
return decorator
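# Hypothetical usage sketch (not part of the original module): a migration
# that needs a live connection can be guarded with the decorator above, e.g.
#
#     @skip_if_offline
#     def upgrade():
#         drop_table_if_exists('legacy_table')  # 'legacy_table' is a placeholder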
def raise_if_offline(func):
"""Decorator for raising if a function is called in offline mode."""
@functools.wraps(func)
def decorator(*args, **kwargs):
if context.is_offline_mode():
raise RuntimeError(_("%s cannot be called while in offline mode") %
func.__name__)
return func(*args, **kwargs)
return decorator
@raise_if_offline
def schema_has_table(table_name):
"""Check whether the specified table exists in the current schema.
This method cannot be executed in offline mode.
"""
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
return table_name in insp.get_table_names()
@raise_if_offline
def schema_has_column(table_name, column_name):
"""Check whether the specified column exists in the current schema.
This method cannot be executed in offline mode.
"""
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
# first check that the table exists
if not schema_has_table(table_name):
return
# check whether column_name exists in table columns
return column_name in [column['name'] for column in
insp.get_columns(table_name)]
@raise_if_offline
def alter_column_if_exists(table_name, column_name, **kwargs):
"""Alter a column only if it exists in the schema."""
if schema_has_column(table_name, column_name):
op.alter_column(table_name, column_name, **kwargs)
@raise_if_offline
def drop_table_if_exists(table_name):
if schema_has_table(table_name):
op.drop_table(table_name)
@raise_if_offline
def rename_table_if_exists(old_table_name, new_table_name):
if schema_has_table(old_table_name):
op.rename_table(old_table_name, new_table_name)
def alter_enum(table, column, enum_type, nullable):
bind = op.get_bind()
engine = bind.engine
if engine.name == 'postgresql':
values = {'table': table,
'column': column,
'name': enum_type.name}
op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
enum_type.create(bind, checkfirst=False)
op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
"old_%(column)s" % values)
op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
op.execute("UPDATE %(table)s SET %(column)s = "
"old_%(column)s::text::%(name)s" % values)
op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
op.execute("DROP TYPE old_%(name)s" % values)
else:
op.alter_column(table, column, type_=enum_type,
existing_nullable=nullable)
def create_table_if_not_exist_psql(table_name, values):
if op.get_bind().engine.dialect.server_version_info < (9, 1, 0):
op.execute("CREATE LANGUAGE plpgsql")
op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
"BEGIN EXECUTE $1; END;"
"$$ LANGUAGE plpgsql STRICT;")
op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
"$$ SELECT exists(select 1 from pg_class where relname=$1);"
"$$ language sql STRICT;")
op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
"WHERE NOT table_exist(%(name)r);" %
{'name': table_name,
'columns': values})
def remove_foreign_keys(table, foreign_keys):
for fk in foreign_keys:
op.drop_constraint(
constraint_name=fk['name'],
table_name=table,
type_='foreignkey'
)
def create_foreign_keys(table, foreign_keys):
for fk in foreign_keys:
op.create_foreign_key(
constraint_name=fk['name'],
source_table=table,
referent_table=fk['referred_table'],
local_cols=fk['constrained_columns'],
remote_cols=fk['referred_columns'],
ondelete='CASCADE'
)
@contextlib.contextmanager
def remove_fks_from_table(table):
try:
inspector = reflection.Inspector.from_engine(op.get_bind())
foreign_keys = inspector.get_foreign_keys(table)
remove_foreign_keys(table, foreign_keys)
yield
finally:
create_foreign_keys(table, foreign_keys)
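# Hypothetical usage sketch (not part of the original module): the context
# manager above lets a migration alter a referenced table with its foreign
# keys temporarily dropped; the table and column names are placeholders.
#
#     with remove_fks_from_table('example_table'):
#         op.alter_column('example_table', 'id', type_=sa.String(36))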
| {
"content_hash": "b87182769ad84df01b339e5df3dc06d7",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 32.7962962962963,
"alnum_prop": 0.6175418784114436,
"repo_name": "dhanunjaya/neutron",
"id": "81b491083083b4fe19a39bf33ff216e65437cd35",
"size": "5940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/migration/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7686347"
},
{
"name": "Shell",
"bytes": "14690"
}
],
"symlink_target": ""
} |
'''
Class to access the Wikipedia articles' word indexes stored in the hdf5 file.
'''
import h5py
import parameters as prm
class WikiIdx():
def __init__(self, path):
if prm.load_emb_mem:
#self.f = h5py.File(path, 'r', driver='core')
# don't use driver='core'. Reading from the numpy array
# is faster for large number of indexes.
ft = h5py.File(path, 'r')
self.f = {}
self.f['idx'] = ft['idx'].value
if 'mask' in ft:
self.f['mask'] = ft['mask'].value
else:
self.f = h5py.File(path, 'r')
def get_article_idx(self, article_id):
return self.f['idx'][article_id]
def get_article_mask(self, article_id):
return self.f['mask'][article_id]
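# Hypothetical usage (added comment; the file path and article id are
# placeholders):
#     wk = WikiIdx('wiki_idx.hdf5')
#     word_idx = wk.get_article_idx(0)
#     mask = wk.get_article_mask(0)  # only if the hdf5 file contains a 'mask' dataset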
| {
"content_hash": "e506e123df3a369b0a3845ff27fa0da1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 27.448275862068964,
"alnum_prop": 0.542713567839196,
"repo_name": "nyu-dl/WebNav",
"id": "9df606dd0c6dd062e41223f4162348317130b591",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wiki_idx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "119749"
}
],
"symlink_target": ""
} |
from flask import Blueprint, render_template, request, redirect, url_for, flash
from flask_security.decorators import login_required
from flask_security import current_user
from recruit_app.user.managers import EveManager
from recruit_app.user.eve_api_manager import EveApiManager
from recruit_app.user.forms import UpdateKeyForm
from recruit_app.user.models import EveCharacter, EveAllianceInfo, EveApiKeyPair
import datetime as dt
blueprint = Blueprint("user", __name__, url_prefix='/users',
static_folder="../static")
@blueprint.route("/")
@login_required
def members():
return redirect(url_for('public.home'))
@blueprint.route("/api_add", methods=['GET', 'POST'])
@login_required
def api_add():
form = UpdateKeyForm()
if form.validate_on_submit():
characters = EveApiManager.get_characters_from_api(form.data['api_id'],
form.data['api_key'])
if EveManager.create_api_keypair(form.data['api_id'],
form.data['api_key'],
current_user.get_id()):
EveManager.create_alliances_from_list(characters)
EveManager.create_corporations_from_character_list(characters)
character_creation = EveManager.create_characters_from_list(characters, current_user, form.data['api_id'])
if character_creation:
flash(character_creation, category="message")
# else:
# flash("Character error, RIP. (contact IT)", category="message")
else:
flash("API Key already in use", category='message')
return render_template("users/api_add.html", form=form)
return redirect(url_for('user.api_manage'))
else:
return render_template("users/api_add.html", form=form)
@blueprint.route("/api_manage", methods=['GET', 'POST'])
@login_required
def api_manage():
api_key_pairs = EveManager.get_api_key_pairs(current_user)
return render_template("users/api_manage.html", api_key_pairs=api_key_pairs)
@blueprint.route("/api_delete/<api_id>", methods=['GET', 'POST'])
@login_required
def api_delete(api_id):
# Check if our user's main character ID is among the to-be-deleted characters
characters = EveManager.get_characters_by_owner(current_user)
if characters is not None:
for character in characters:
if character.api_id == api_id and character.character_id == current_user.main_character_id:
# TODO disable services and such
pass
EveManager.delete_api_key_pair(api_id, current_user)
return redirect(url_for('user.api_manage'))
@blueprint.route("/api_update/<api_id>", methods=['GET', 'POST'])
@login_required
def api_update(api_id):
# Break out application logic from the view to the manager
update = EveManager.update_user_api(api_id, current_user)
if update == "Wait":
flash(u'Please wait before refreshing your api', category='message')
elif update == "Success":
flash(u'API key Refreshed', category='message')
elif update == "Failed":
flash(u'Error updating API key! Either your key is invalid or the CCP servers are temporarily down.', category='error')
return redirect(url_for('user.api_manage'))
@blueprint.route("/eve_characters", methods=['GET', 'POST'])
@login_required
def eve_characters():
characters = EveCharacter.query.filter_by(user_id=current_user.get_id()).all()
return render_template('users/eve_characters.html', characters=characters)
@blueprint.route("/eve_main_character_change/<character_id>", methods=['GET', 'POST'])
@login_required
def eve_main_character_change(character_id):
if EveManager.check_if_character_owned_by_user(character_id, current_user.get_id()):
current_user.main_character_id = character_id
current_user.save()
return redirect(url_for('user.eve_characters'))
| {
"content_hash": "89ad1b79bd640c1fba550de57c1fc61f",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 128,
"avg_line_length": 37.5377358490566,
"alnum_prop": 0.6592108569992461,
"repo_name": "tyler274/Recruitment-App",
"id": "4e6c6e35c03b8fbdac624b32ad908d044df5f60a",
"size": "3979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recruit_app/user/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2845"
},
{
"name": "HTML",
"bytes": "55851"
},
{
"name": "JavaScript",
"bytes": "222936"
},
{
"name": "Python",
"bytes": "140234"
}
],
"symlink_target": ""
} |
import datetime
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.models import Attachment
from corehq.form_processor.utils import convert_xform_to_json, adjust_datetimes
from couchforms import XMLSyntaxError
from couchforms.exceptions import DuplicateError, MissingXMLNSError
from dimagi.utils.couch import LockManager, ReleaseOnError
class MultiLockManager(list):
def __enter__(self):
return [lock_manager.__enter__() for lock_manager in self]
def __exit__(self, exc_type, exc_val, exc_tb):
for lock_manager in self:
lock_manager.__exit__(exc_type, exc_val, exc_tb)
class FormProcessingResult(object):
def __init__(self, submitted_form, existing_duplicate=None):
self.submitted_form = submitted_form
self.existing_duplicate = existing_duplicate
if submitted_form.is_duplicate:
assert existing_duplicate is None
if existing_duplicate:
assert existing_duplicate.is_deprecated
self.interface = FormProcessorInterface(self.submitted_form.domain)
def _get_form_lock(self, form_id):
return self.interface.acquire_lock_for_xform(form_id)
def get_locked_forms(self):
if self.existing_duplicate:
# Lock docs with their original ID's (before they got switched during deprecation)
new_id = self.existing_duplicate.form_id
old_id = self.existing_duplicate.orig_id
return MultiLockManager([
LockManager(self.submitted_form, self._get_form_lock(new_id)),
LockManager(self.existing_duplicate, self._get_form_lock(old_id)),
])
else:
return MultiLockManager([
LockManager(self.submitted_form, self._get_form_lock(self.submitted_form.form_id))
])
class LockedFormProcessingResult(FormProcessingResult):
def __init__(self, submitted_form):
super(LockedFormProcessingResult, self).__init__(submitted_form)
assert submitted_form.is_normal
self.lock = self._get_form_lock(submitted_form.form_id)
def get_locked_forms(self):
return MultiLockManager([LockManager(self.submitted_form, self.lock)])
def process_xform_xml(domain, instance, attachments=None):
"""
Create a new xform ready to be saved to a database in a thread-safe manner.
Returns a LockManager containing the new XFormInstance(SQL) and its lock,
or raises an exception if anything goes wrong.
attachments is a dictionary of the request.FILES that are not the xform;
key is parameter name, value is django MemoryFile object stream
"""
attachments = attachments or {}
try:
return _create_new_xform(domain, instance, attachments=attachments)
except (MissingXMLNSError, XMLSyntaxError) as e:
return _get_submission_error(domain, instance, e)
except DuplicateError as e:
return _handle_id_conflict(instance, e.xform, domain)
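# Hypothetical caller sketch (not part of the original module): a submission
# handler would typically process the raw payload and then work on the locked
# forms inside the lock manager's context; ``xml_payload`` is a placeholder.
#
#     result = process_xform_xml(domain, xml_payload)
#     with result.get_locked_forms() as forms:
#         pass  # save/route the XFormInstance objects while the locks are held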
def _create_new_xform(domain, instance_xml, attachments=None):
"""
create but do not save an XFormInstance from an xform payload (xml_string)
optionally set the doc _id to a predefined value (_id)
return doc _id of the created doc
`process` is transformation to apply to the form right before saving
This is to avoid having to save multiple times
If xml_string is bad xml
- raise couchforms.XMLSyntaxError
:param domain:
"""
from corehq.form_processor.interfaces.processor import FormProcessorInterface
interface = FormProcessorInterface(domain)
assert attachments is not None
form_data = convert_xform_to_json(instance_xml)
if not form_data.get('@xmlns'):
raise MissingXMLNSError("Form is missing a required field: XMLNS")
adjust_datetimes(form_data)
xform = interface.new_xform(form_data)
xform.domain = domain
# Maps all attachments to uniform format and adds form.xml to list before storing
attachments = map(
lambda a: Attachment(name=a[0], raw_content=a[1], content_type=a[1].content_type),
attachments.items()
)
attachments.append(Attachment(name='form.xml', raw_content=instance_xml, content_type='text/xml'))
interface.store_attachments(xform, attachments)
result = LockedFormProcessingResult(xform)
with ReleaseOnError(result.lock):
if interface.is_duplicate(xform.form_id):
raise DuplicateError(xform)
return result
def _get_submission_error(domain, instance, error):
"""
    Handles a hard failure from posting a form to couch.
:returns: xform error instance with raw xml as attachment
"""
try:
message = unicode(error)
except UnicodeDecodeError:
message = unicode(str(error), encoding='utf-8')
xform = FormProcessorInterface(domain).submission_error_form_instance(instance, message)
return FormProcessingResult(xform)
def _handle_id_conflict(instance, xform, domain):
"""
    For id conflicts, we check if the files contain exactly the same content.
If they do, we just log this as a dupe. If they don't, we deprecate the
previous form and overwrite it with the new form's contents.
"""
assert domain
conflict_id = xform.form_id
interface = FormProcessorInterface(domain)
if interface.is_duplicate(conflict_id, domain):
# It looks like a duplicate/edit in the same domain so pursue that workflow.
return _handle_duplicate(xform, instance)
else:
# the same form was submitted to two domains, or a form was submitted with
# an ID that belonged to a different doc type. these are likely developers
# manually testing or broken API users. just resubmit with a generated ID.
xform = interface.assign_new_id(xform)
return FormProcessingResult(xform)
def _handle_duplicate(new_doc, instance):
"""
Handle duplicate xforms and xform editing ('deprecation')
existing doc *must* be validated as an XFormInstance in the right domain
and *must* include inline attachments
"""
interface = FormProcessorInterface(new_doc.domain)
conflict_id = new_doc.form_id
existing_doc = FormAccessors(new_doc.domain).get_with_attachments(conflict_id)
existing_md5 = existing_doc.xml_md5()
new_md5 = new_doc.xml_md5()
if existing_md5 != new_md5:
# if the form contents are not the same:
# - "Deprecate" the old form by making a new document with the same contents
# but a different ID and a doc_type of XFormDeprecated
# - Save the new instance to the previous document to preserve the ID
existing_doc, new_doc = apply_deprecation(existing_doc, new_doc, interface)
return FormProcessingResult(new_doc, existing_doc)
else:
# follow standard dupe handling, which simply saves a copy of the form
# but a new doc_id, and a doc_type of XFormDuplicate
duplicate = interface.deduplicate_xform(new_doc)
return FormProcessingResult(duplicate)
def apply_deprecation(existing_xform, new_xform, interface=None):
# if the form contents are not the same:
# - "Deprecate" the old form by making a new document with the same contents
# but a different ID and a doc_type of XFormDeprecated
# - Save the new instance to the previous document to preserve the ID
interface = interface or FormProcessorInterface(existing_xform.domain)
if existing_xform.persistent_blobs:
for name, meta in existing_xform.persistent_blobs.items():
with existing_xform.fetch_attachment(name, stream=True) as content:
existing_xform.deferred_put_attachment(
content,
name=name,
content_type=meta.content_type,
content_length=meta.content_length,
)
new_xform.form_id = existing_xform.form_id
existing_xform = interface.assign_new_id(existing_xform)
existing_xform.orig_id = new_xform.form_id
# and give the new doc server data of the old one and some metadata
new_xform.received_on = existing_xform.received_on
new_xform.deprecated_form_id = existing_xform.form_id
new_xform.edited_on = datetime.datetime.utcnow()
existing_xform.edited_on = new_xform.edited_on
return interface.apply_deprecation(existing_xform, new_xform)
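# Net effect of apply_deprecation (summary comment, not part of the original module):
# the previously stored form keeps its content but is moved to a fresh form_id (with
# orig_id pointing back at the old one), while the newly submitted form takes over the
# original form_id and records deprecated_form_id / edited_on metadata before being
# handed to interface.apply_deprecation().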
| {
"content_hash": "51508df1fd2543aca34b5331e48c7d73",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 102,
"avg_line_length": 38.64545454545455,
"alnum_prop": 0.6933662667607622,
"repo_name": "qedsoftware/commcare-hq",
"id": "e71a63cdc627a4c867cec90458ac6dc81a262e0d",
"size": "8502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/form_processor/parsers/form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
import ast
import operator
import pytest
from radon.complexity import *
from radon.contrib.flake8 import Flake8Checker
from radon.visitors import Class, Function
from .test_complexity_visitor import GENERAL_CASES, dedent
get_index = lambda seq: lambda index: seq[index]
def _compute_cc_rank(score):
# This is really ugly
# Luckily the rank function in radon.complexity is not like this!
if score < 0:
rank = ValueError
elif 0 <= score <= 5:
rank = 'A'
elif 6 <= score <= 10:
rank = 'B'
elif 11 <= score <= 20:
rank = 'C'
elif 21 <= score <= 30:
rank = 'D'
elif 31 <= score <= 40:
rank = 'E'
else:
rank = 'F'
return rank
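# For reference, a few concrete mappings implied by the ranges above (illustrative
# only; they mirror _compute_cc_rank, not radon's own implementation):
#   _compute_cc_rank(4)  -> 'A'
#   _compute_cc_rank(15) -> 'C'
#   _compute_cc_rank(75) -> 'F'
#   _compute_cc_rank(-1) -> ValueError (the class itself, used by test_rank below)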
RANK_CASES = [(score, _compute_cc_rank(score)) for score in range(-1, 100)]
@pytest.mark.parametrize('score,expected_rank', RANK_CASES)
def test_rank(score, expected_rank):
if hasattr(expected_rank, '__call__') and isinstance(
expected_rank(), Exception
):
with pytest.raises(expected_rank):
cc_rank(score)
else:
assert cc_rank(score) == expected_rank
fun = lambda complexity: Function(
'randomname', 1, 4, 23, False, None, [], complexity
)
cls = lambda complexity: Class('randomname_', 3, 21, 18, [], [], complexity)
# This data is shared by the next two tests
SIMPLE_BLOCKS = [
([], [], 0.0),
([fun(12), fun(14), fun(1)], [1, 0, 2], 9.0),
([fun(4), cls(5), fun(2), cls(21)], [3, 1, 0, 2], 8.0),
]
@pytest.mark.parametrize('blocks,indices,_', SIMPLE_BLOCKS)
def test_sorted_results(blocks, indices, _):
expected_result = list(map(get_index(blocks), indices))
assert sorted_results(blocks) == expected_result
@pytest.mark.parametrize('blocks,_,expected_average', SIMPLE_BLOCKS)
def test_average_complexity(blocks, _, expected_average):
assert average_complexity(blocks) == expected_average
CC_VISIT_CASES = [
(GENERAL_CASES[0][0], 1, 1, 'f.inner'),
(GENERAL_CASES[1][0], 3, 1, 'f.inner'),
(
'''
class joe1:
i = 1
def doit1(self):
pass
class joe2:
ii = 2
def doit2(self):
pass
class joe3:
iii = 3
def doit3(self):
pass
''',
2,
4,
'joe1.joe2.joe3',
),
]
@pytest.mark.parametrize('code,number_of_blocks,diff,lookfor', CC_VISIT_CASES)
def test_cc_visit(code, number_of_blocks, diff, lookfor):
code = dedent(code)
blocks = cc_visit(code)
assert isinstance(blocks, list)
assert len(blocks) == number_of_blocks
with_inner_blocks = add_inner_blocks(blocks)
names = set(map(operator.attrgetter('name'), with_inner_blocks))
assert len(with_inner_blocks) - len(blocks) == diff
assert lookfor in names
def test_flake8_checker():
c = Flake8Checker(ast.parse(dedent(GENERAL_CASES[0][0])), 'test case')
assert c.max_cc == -1
assert c.no_assert is False
assert list(c.run()) == []
c.max_cc = 3
assert list(c.run()) == [(7, 0, 'R701 \'f\' is too complex (4)', type(c))]
| {
"content_hash": "01bf3bd810633acb846b49366c937603",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 26.449152542372882,
"alnum_prop": 0.5914770906760654,
"repo_name": "rubik/radon",
"id": "95d2d1568a074895b0db5c055f9742b41ad23720",
"size": "3121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radon/tests/test_complexity_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "521"
},
{
"name": "Makefile",
"bytes": "888"
},
{
"name": "Python",
"bytes": "162813"
}
],
"symlink_target": ""
} |
from django.urls import path
from .views import simple_session_view
urlpatterns = [
path("session/", simple_session_view),
]
| {
"content_hash": "93bae0fa16f6427ecc08968c82d3b711",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 18.714285714285715,
"alnum_prop": 0.7251908396946565,
"repo_name": "modelbrouwers/django-sessionprofile",
"id": "bf301d415c5e388d6c9e8359ab81011c9e11b96c",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "testapp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14874"
}
],
"symlink_target": ""
} |
__author__ = 'chihchieh.sun'
# -*- coding: utf-8 -*-
import pickle
# Alternatively, the C implementation is more efficient:
# import cPickle as pickle
import urllib.request
# proxy setting
proxy = urllib.request.getproxies()
proxy_support = urllib.request.ProxyHandler({'sock5': proxy.get('http')})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
url = r'http://www.pythonchallenge.com/pc/def/banner.p'
req = urllib.request.Request(url)
connect = urllib.request.urlopen(req)
data = pickle.loads(connect.read())
connect.close()
for linelist in data:
line = [ch * count for ch, count in linelist]
print("".join(line))
| {
"content_hash": "25f82f27d008ee0d8e967bf4d13e8a10",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 27.5,
"alnum_prop": 0.696969696969697,
"repo_name": "z-Wind/Python_Challenge",
"id": "dbb5894e7d9351291548aed443236ca7649e759b",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "level5_pickle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30849"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import responses
from sentry import options
from sentry.models import Integration, OrganizationIntegration
from sentry.testutils import APITestCase
UNSET = object()
LINK_SHARED_EVENT = """{
"type": "link_shared",
"channel": "Cxxxxxx",
"user": "Uxxxxxxx",
"message_ts": "123456789.9875",
"links": [
{
"domain": "example.com",
"url": "http://testserver/fizz/buzz"
},
{
"domain": "example.com",
"url": "http://testserver/sentry/sentry/issues/%(group1)s/"
},
{
"domain": "example.com",
"url": "http://testserver/sentry/sentry/issues/%(group2)s/bar/"
},
{
"domain": "example.com",
"url": "http://testserver/sentry/sentry/issues/%(group1)s/bar/"
},
{
"domain": "another-example.com",
"url": "https://yet.another-example.com/v/abcde"
}
]
}"""
class BaseEventTest(APITestCase):
def setUp(self):
super(BaseEventTest, self).setUp()
self.user = self.create_user(is_superuser=False)
self.org = self.create_organization(owner=None)
self.integration = Integration.objects.create(
provider='slack',
external_id='TXXXXXXX1',
metadata={
'access_token': 'xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
'bot_access_token': 'xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx',
}
)
OrganizationIntegration.objects.create(
organization=self.org,
integration=self.integration,
)
def post_webhook(self, event_data=None, type='event_callback', data=None,
token=UNSET, team_id='TXXXXXXX1'):
if token is UNSET:
token = options.get('slack.verification-token')
payload = {
'token': token,
'team_id': team_id,
'api_app_id': 'AXXXXXXXX1',
'type': type,
'authed_users': [],
'event_id': 'Ev08MFMKH6',
'event_time': 123456789,
}
if data:
payload.update(data)
if event_data:
payload.setdefault('event', {}).update(event_data)
return self.client.post(
'/extensions/slack/event/',
payload,
)
class UrlVerificationEventTest(BaseEventTest):
challenge = '3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P'
def test_valid_token(self):
resp = self.client.post(
'/extensions/slack/event/',
{
'type': 'url_verification',
'challenge': self.challenge,
'token': options.get('slack.verification-token'),
}
)
assert resp.status_code == 200, resp.content
assert resp.data['challenge'] == self.challenge
def test_invalid_token(self):
resp = self.client.post(
'/extensions/slack/event/',
{
'type': 'url_verification',
'challenge': self.challenge,
'token': 'fizzbuzz',
}
)
assert resp.status_code == 400, resp.content
class LinkSharedEventTest(BaseEventTest):
@responses.activate
def test_valid_token(self):
responses.add(responses.POST, 'https://slack.com/api/chat.unfurl',
json={'ok': True})
org2 = self.create_organization(name='biz')
project1 = self.create_project(organization=self.org)
project2 = self.create_project(organization=org2)
group1 = self.create_group(project=project1)
group2 = self.create_group(project=project2)
resp = self.post_webhook(event_data=json.loads(LINK_SHARED_EVENT % {
'group1': group1.id,
'group2': group2.id,
}))
assert resp.status_code == 200, resp.content
| {
"content_hash": "1fce0ee95f94e03d1fbc3799889cfff3",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 77,
"avg_line_length": 31.967741935483872,
"alnum_prop": 0.5524722502522704,
"repo_name": "gencer/sentry",
"id": "93ba4a90dd5211e7fb944d18813d591b5ec7105e",
"size": "3964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/integrations/slack/test_event_endpoint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from produto.views import *
urlpatterns = [
url(r'^inserir/$', Produto.as_view(), name='produto_inserir'),
url(r'^editar/$', Produto.as_view(), name='produto_editar'),
url(r'^deletar/$', Produto.as_view(), name='produto_deletar'),
url(r'^$', Produto.as_view(), name='cliente'),
]
| {
"content_hash": "18f613fe218810b0e526e64643032148",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 66,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6424242424242425,
"repo_name": "acca90/django-tests",
"id": "19b314dacd2911f75c141168178db0dd27ecbf76",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cello/produto/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6588"
},
{
"name": "JavaScript",
"bytes": "695"
},
{
"name": "Python",
"bytes": "10168"
}
],
"symlink_target": ""
} |
import numpy as np
import cPickle as pickle
from scipy.stats import mode
from sklearn.preprocessing import StandardScaler
from missing_data_imputation import Imputer
# load raw adult test data
x = np.genfromtxt('../adult-dataset/adult-test-raw',
delimiter=', ', dtype=object)
# binarize labels
labels = (np.array(x[:,-1]) == '>50K').astype(int)
labels.dump('../adult-dataset/labels_test_bin.np')
# dump binarized labels
(np.eye(2)[labels.astype(int)]).astype(int).dump('../adult-dataset/labels_test_onehot.np')
# remove label column
x = x[:,:-1]
# remove redundant education-number feature
relevant_cols = [i for i in xrange(x.shape[1]) if i != 4]
x = x[:, relevant_cols]
# store valid information for generating data
rows, cols = np.where(x == '?')
full_obs = [i for i in xrange(x.shape[0]) if i not in rows]
labels[full_obs].dump('../adult-dataset/labels_test_drop_bin.np')
# dump binarized labels full obs
(np.eye(2)[labels[full_obs].astype(int)]).astype(int).dump('../adult-dataset/labels_test_drop_bin_onehot.np')
# enumerate parameters and instantiate Imputer
imp = Imputer()
missing_data_cond = lambda x : x == '?'
cat_cols = (1, 3, 4, 5, 6, 7, 8, 12)
n_neighbors = 5
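# Quick illustration of missing_data_cond (assumed shapes, not from the original
# script): because x is a numpy object array, the lambda is applied element-wise,
# e.g. missing_data_cond(np.array([['a', '?'], ['?', 'b']], dtype=object)) returns
# a boolean mask marking every '?' cell that the Imputer treats as missing.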
# drop missing variables, binarize and save complete observations and labels
print 'imputing with drop'
data_drop = imp.drop(x, missing_data_cond)
np.savetxt("../adult-dataset/data_test_drop.csv", data_drop, delimiter=",", fmt="%s")
data_drop_bin = imp.binarize_data(data_drop, cat_cols).astype(float)
data_drop_bin.dump('../adult-dataset/data_test_drop_bin.np')
# load previously fitted scaler
scaler = pickle.load(open('scaler_drop.pkl', 'rb'))
data_drop_bin_scaled = scaler.transform(data_drop_bin)
data_drop_bin_scaled.dump('../adult-dataset/data_test_drop_bin_scaled.np')
del data_drop
del data_drop_bin
del data_drop_bin_scaled
# replace missing values with random existing values
print 'imputing with replace'
data_replace = imp.replace(x, missing_data_cond)
np.savetxt("../adult-dataset/data_replace.csv", data_replace, delimiter=",", fmt="%s")
data_replace_bin = imp.binarize_data(data_replace, cat_cols).astype(float)
data_replace_bin.dump('../adult-dataset/data_replace_bin.np')
scaler = StandardScaler().fit(data_replace_bin)
data_replace_bin_scaled = scaler.transform(data_replace_bin)
data_replace_bin_scaled.dump('../adult-dataset/data_replace_bin_scaled.np')
del data_replace
del data_replace_bin
del data_replace_bin_scaled
# replace missing values with feature mode
print 'imputing with mode'
data_mode = imp.summarize(x, mode, missing_data_cond)
np.savetxt("../adult-dataset/data_test_mode.csv", data_mode, delimiter=",", fmt="%s")
data_mode_bin = imp.binarize_data(data_mode, cat_cols).astype(float)
data_mode_bin.dump('../adult-dataset/data_test_mode_bin.np')
scaler = pickle.load(open('scaler_mode.pkl', 'rb'))
data_mode_bin_scaled = scaler.transform(data_mode_bin)
data_mode_bin_scaled.dump('../adult-dataset/data_test_mode_bin_scaled.np')
del data_mode
del data_mode_bin
del data_mode_bin_scaled
# replace categorical features with one-hot encoding
print 'imputing with onehot'
data_onehot = imp.binarize_data(x, cat_cols).astype(float)
data_onehot.dump('../adult-dataset/data_onehot_bin.np')
scaler = StandardScaler().fit(data_onehot)
data_onehot_scaled = scaler.transform(data_onehot)
data_onehot_scaled.dump('../adult-dataset/data_onehot_bin_scaled.np')
del data_onehot
del data_onehot_scaled
# replace missing data with predictions
print 'imputing with predicted'
data_predicted = imp.predict(x, cat_cols, missing_data_cond)
np.savetxt("../adult-dataset/data_test_predicted.csv", data_predicted, delimiter=",", fmt="%s")
data_predicted_bin = imp.binarize_data(data_predicted, cat_cols).astype(float)
data_predicted_bin.dump('../adult-dataset/data_test_predicted_bin.np')
scaler = pickle.load(open('scaler_predicted.pkl', 'rb'))
data_predicted_bin_scaled = scaler.transform(data_predicted_bin)
data_predicted_bin_scaled.dump('../adult-dataset/data_test_predicted_bin_scaled.np')
del data_predicted
del data_predicted_bin
del data_predicted_bin_scaled
# replace missing data with values obtained after factor analysis
print 'imputing with factor analysis'
data_facanal = imp.factor_analysis(x, cat_cols, missing_data_cond)
np.savetxt("../adult-dataset/data_facanal.csv", data_facanal, delimiter=",", fmt="%s")
data_facanal_bin = imp.binarize_data(data_facanal, cat_cols).astype(float)
data_facanal_bin.dump('../adult-dataset/data_facanal_bin.np')
scaler = StandardScaler().fit(data_facanal_bin)
data_facanal_bin_scaled = scaler.transform(data_facanal_bin)
data_facanal_bin_scaled.dump('../adult-dataset/data_facanal_bin_scaled.np')
del data_facanal
del data_facanal_bin
del data_facanal_bin_scaled
# replace missing data with knn
#data_knn = imp.knn(x, n_neighbors, np.mean, missing_data_cond)
#data_knn_bin = imp.binarize_data(data_knn, cat_cols).astype(float)
#data_knn_bin.dump('../adult-dataset/data_knn_bin.np')
#scaler = StandardScaler().fit(data_knn_bin)
#data_knn_bin_scaled = scaler.transform(data_knn_bin)
#data_knn_bin_scaled.dump('../adult-dataset/data_knn_bin_scaled.np') | {
"content_hash": "da11bfa4446118a644e934619f83a8e0",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 109,
"avg_line_length": 41.08064516129032,
"alnum_prop": 0.7406753042795445,
"repo_name": "jvpoulos/cs289-project",
"id": "ce067a9bed0ae96f563a8ff6cf38b1418c8b8b90",
"size": "5094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/preprocess_test_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115653"
},
{
"name": "R",
"bytes": "11839"
},
{
"name": "TeX",
"bytes": "98759"
}
],
"symlink_target": ""
} |
from datetime import timedelta, datetime
import sys
from mwparserfromhell import parse as parse_markup
from bwikibot.cli import get_wiki, action
from bwikibot.api import datetime2zulu, zulu2datetime, WikiError
from bwikibot.ui import cprint
@action('check_uploads')
def main(start_from, limit):
''' Check and mark new files for licensing issues,
and send messages to uploaders.'''
wiki = get_wiki()
if start_from in ('old', 'new'):
start_from = get_bot_param(wiki, 'BunykBot', start_from + ' start')
UploadChecker(wiki, start_from, limit)
class UploadChecker:
def __init__(self, wiki, start_from, limit):
self.limit = int(limit)
self.wiki = wiki
self.marked_counter = 0
for upload in self.get_uploads(start_from):
self.check_upload(upload)
def get_uploads(self, start):
start = zulu2datetime(start) if start else None
for upload in self.wiki.logevents(
event_type='upload', start=start,
direction='newer',
):
yield upload
def check_upload(self, upload):
if upload.time + wait_before_check > datetime.utcnow():
return
print('Checking image {} uploaded at {} by {}'.format(
upload.page.title,
datetime2zulu(upload.time),
upload.user.name,
))
if not upload.page.exists():
print('already deleted')
return
redirect = upload.page.redirect()
if redirect:
print('Redirect to:', redirect.title)
return
diagnosis = self.diagnose(upload.page)
cprint('diagnosis:', diagnosis, color='red')
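        # NOTE: this early return makes the marking / warning logic below unreachable.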
return
if diagnosis:
summary = license_summaries[diagnosis]
warn(upload.user, upload.page.title, summary['summary'])
mark_image(upload.page, summary)
self.marked_counter += 1
if self.marked_counter >= self.limit:
save_and_exit(datetime2zulu(upload.time))
else:
print('ok')
def diagnose(self, page_to_check):
        '''
        False - ok
        'fair_use': 'Needs a fair use rationale',
        'author': 'An author and a source are given, but there is no confirmation that the author of the work allows licensing this file under the stated terms',
        'untagged': 'Licensing information is missing',
        'no_template': 'No license template',
        'no_source': 'No source specified',
        '''
wikicode = parse_markup(page_to_check.read())
image_template = None
fair_use_template = None
need_source = False
not_need_source = False
for template in wikicode.filter_templates(recursive=True):
t_name = str(template.name).strip()
t_page = self.wiki.page('Шаблон:' + t_name).resolve_redirect()
t_name = t_page.title.split(':')[1]
print(t_name)
if t_name == 'Зображення':
image_template = template
elif t_name == 'Обґрунтування добропорядного використання':
fair_use_template = template
else:
cprint(t_page.title, color='green')
for cat in t_page.categories():
ct = str(cat.title)
print('\t', ct)
if ct == warnings_cat:
return False
if ct == need_source_cat:
need_source = True
if ct == not_need_source_cat:
not_need_source = True
if not image_template:
return 'untagged'
else:
if need_source:
source = image_template.get('Джерело').value
source = fake.sub('', str(source)).strip()
if source:
return False
else:
return 'no_source'
else:
if not_need_source:
return False
else:
return 'no_template'
last_read_value = None
def get_bot_param(wiki, bot_name, name):
global last_read_value
last_read_value = (wiki, bot_name, name)
res = wiki.page('%s:%s/%s' % (
wiki.USER_NS_NAME, bot_name, name
)).read()
print('%s/%s = %s' % (bot_name, name, res))
return res
def set_bot_param(wiki, bot_name, name, value):
wiki.page('%s:%s/%s' % (
wiki.USER_NS_NAME, bot_name, name
)).write(value, 'Записуємо зроблене')
def save_and_exit(value):
if last_read_value:
set_bot_param(*(last_read_value + (value, )))
sys.exit(0)
wait_before_check = timedelta(hours=1, minutes=30)
user_welcome = '{{subst:welcome}}--~~~~\n\n'
problem_images_tag = '<!-- problem images list -->'
user_warning = '''
{{subst:Проблемні зображення}}
%%(images)s
%(tag)s
--~~~~
''' % {'tag': problem_images_tag}
image_issue = '* [[:%(image)s|%(image)s]]: %(summary)s\n'
usertalk_summary = 'Робот: попередження про проблеми з ліцензуванням зображень'
license_summaries= {
'untagged': {
'image': '{{subst:nld}}',
'summary': 'Відсутня правова інформація',
},
'no_license': {
'image': '{{subst:nld}}',
'summary': 'Нема шаблону ліцензії',
},
'no_source': {
'image': '{{subst:nsd}}',
'summary': 'Не зазначене джерело',
},
'prohibited': {
'image': '{{subst:nld}}',
'summary': 'Використана заборонена ліцензія',
},
'old': {
'image': '{{subst:nld}}',
'summary': 'Використана застаріла ліцензія',
},
'no_template': {
'image': '{{subst:nld}}',
'summary': 'Нема шаблону ліцензії',
},
}
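# Illustrative lookup (not part of the original): mark_image() and warn() receive one
# of the dicts above, e.g.
#     summary = license_summaries['no_source']
#     summary['image']    # wikitext appended to the file page ('{{subst:nsd}}')
#     summary['summary']  # human-readable reason used in the uploader warning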
category_by_date = "Файли з нез'ясованим статусом від %(day)d %(month)s %(year)d"
month_names = [0, 'січня', 'лютого', 'березня', 'квітня', 'травня', 'червня', 'липня', 'серпня', 'вересня', 'жовтня', 'листопада', 'грудня']
category_content = "[[Категорія:Файли з нез'ясованим статусом|%(month)d-%(day)02d]]\n"
category_summary = 'Робот: автоматичне створення категорії'
def mark_image(page, summary):
check_category(page.wiki)
try:
page.write(
(page.read() or '') + summary['image'],
summary['summary']
)
except WikiError as e:
print('Cannot mark image because of', e)
def check_category(wiki):
if check_category.created:
return
now = datetime.utcnow()
name = category_by_date % {
'day': now.day,
'month': month_names[now.month],
'year': now.year
}
cat = wiki.category(name)
if not cat.exists():
cat.write(
category_content % {
'day': now.day,
'month': now.month,
},
category_summary
)
print('Created', cat)
check_category.created = True
check_category.created = False
def warn(user, image, problem):
images = image_issue % {'image': image, 'summary': problem}
talkpage = user.talkpage()
talktext = talkpage.read() or user_welcome
# check if user was warned
pos = talktext.rfind(problem_images_tag)
if pos >= 0:
        # check if there were new topics in the talk page
pos2 = talktext.rfind('=', pos)
if pos2 >= 0:
            # if there were, add the full message to the end
talktext += user_warning % {'images': images}
else:
# add new lines to old messages
talktext = talktext[:pos] + images + talktext[pos:]
else:
# first warning
talktext += user_warning % {'images': images}
try:
talkpage.write(talktext, usertalk_summary)
print('User warned: ' + user.name)
except Exception as e:
print('User {} not warned because of {}'.format(user.name, e))
warnings_cat = 'Категорія:Шаблони повідомлень про проблеми з завантаженням'
need_source_cat = 'Категорія:Шаблони ліцензій, що потребують джерела'
not_need_source_cat = 'Категорія:Шаблони ліцензій з необов’язковим джерелом'
| {
"content_hash": "8080bddbba88c6cc59437f9467f9c4d1",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 164,
"avg_line_length": 33.71369294605809,
"alnum_prop": 0.5627076923076924,
"repo_name": "bunyk/bwikibot",
"id": "602de55dd666483c487050a023602cce5ac0d6ab",
"size": "8966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bwikibot/extensions/check_uploads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "4529"
},
{
"name": "Python",
"bytes": "77928"
},
{
"name": "Shell",
"bytes": "310"
},
{
"name": "VimL",
"bytes": "1317"
}
],
"symlink_target": ""
} |
from flask_restful import Resource
class REST(Resource):
def __init__(self, **kwargs):
self._database = kwargs['database']
self._home = kwargs['home']
| {
"content_hash": "d15fdf65a0b0acff11865a0bbf8f50c1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 37,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.6835443037974683,
"repo_name": "abelfodil/protohome",
"id": "eaa183747479bb81e5958411a6757af298f0f378",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/python/lib/rest/general.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "3818"
},
{
"name": "C",
"bytes": "613"
},
{
"name": "CSS",
"bytes": "1762"
},
{
"name": "HTML",
"bytes": "1590"
},
{
"name": "Python",
"bytes": "17265"
},
{
"name": "TypeScript",
"bytes": "35258"
}
],
"symlink_target": ""
} |
from flask_wtf import Form
from wtforms import StringField, IntegerField
from wtforms.fields.html5 import EmailField
from wtforms.validators import *
class CreateMonkeyForm(Form):
name = StringField('name', validators=[DataRequired()])
age = IntegerField('age', validators=[DataRequired()])
email = EmailField('email', validators=[DataRequired()])
class EditMonkeyForm(Form):
name = StringField('name', validators=[DataRequired()])
age = IntegerField('age', validators=[DataRequired()])
email = StringField('email', validators=[DataRequired()])
| {
"content_hash": "277ae1598bac305bd46943cda50fa58a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 61,
"avg_line_length": 35.8125,
"alnum_prop": 0.7364746945898778,
"repo_name": "Nonse/monkeys",
"id": "f74dc9ecb093044dac44b4cd0ccae489bc5f31b1",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monkeygod/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2365"
},
{
"name": "Python",
"bytes": "31531"
},
{
"name": "Shell",
"bytes": "43"
}
],
"symlink_target": ""
} |
"""Formatter for the Keychain password database file."""
from plaso.formatters import interface
class KeychainApplicationRecordFormatter(interface.ConditionalEventFormatter):
"""Formatter for keychain application record event."""
DATA_TYPE = 'mac:keychain:application'
FORMAT_STRING_PIECES = [
u'Name: {entry_name}',
u'Account: {account_name}']
FORMAT_STRING_SHORT_PIECES = [u'{entry_name}']
SOURCE_LONG = 'Keychain Application password'
SOURCE_SHORT = 'LOG'
class KeychainInternetRecordFormatter(interface.ConditionalEventFormatter):
"""Formatter for keychain internet record event."""
DATA_TYPE = 'mac:keychain:internet'
FORMAT_STRING_PIECES = [
u'Name: {entry_name}',
u'Account: {account_name}',
u'Where: {where}',
u'Protocol: {protocol}',
u'({type_protocol})']
FORMAT_STRING_SHORT_PIECES = [u'{entry_name}']
SOURCE_LONG = 'Keychain Internet password'
SOURCE_SHORT = 'LOG'
| {
"content_hash": "9b84891bc8a071924656b95e71558d8c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 26.52777777777778,
"alnum_prop": 0.6973821989528796,
"repo_name": "cvandeplas/plaso",
"id": "ee70616f0a33507aae5f06dc758766c686a42a9d",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/formatters/mac_keychain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2812257"
},
{
"name": "Shell",
"bytes": "22724"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import forum.models
class Migration(migrations.Migration):
dependencies = [
('forum', '0005_auto_20171006_1336'),
]
operations = [
migrations.AlterField(
model_name='post',
name='body',
field=forum.models.MarkdownField(help_text='We use a slightly-customized version of <a data-toggle="modal" data-target="#MarkdownHelp">Markdown</a> for formatting.'),
),
]
| {
"content_hash": "cf006735e14c96776cab99ec37ebd627",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 178,
"avg_line_length": 26.94736842105263,
"alnum_prop": 0.638671875,
"repo_name": "Kromey/akwriters",
"id": "e3d9d8168b01e7b0a8edb6c23cfb74c485064e9d",
"size": "585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "forum/migrations/0006_auto_20171007_1425.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14197"
},
{
"name": "HTML",
"bytes": "69988"
},
{
"name": "JavaScript",
"bytes": "364937"
},
{
"name": "Python",
"bytes": "160020"
},
{
"name": "SCSS",
"bytes": "9316"
},
{
"name": "Shell",
"bytes": "2239"
}
],
"symlink_target": ""
} |
"""
Fix typo
Revision ID: 2d6390eebe90
Revises: 08447ab49999
Create Date: 2018-11-12 03:05:20.555925
"""
from alembic import op
revision = "2d6390eebe90"
down_revision = "08447ab49999"
def upgrade():
op.create_index(
"journals_submitted_date_id_idx",
"journals",
["submitted_date", "id"],
unique=False,
)
op.drop_index("journakls_submitted_date_id_idx", table_name="journals")
def downgrade():
op.create_index(
"journakls_submitted_date_id_idx",
"journals",
["submitted_date", "id"],
unique=False,
)
op.drop_index("journals_submitted_date_id_idx", table_name="journals")
| {
"content_hash": "cf70a51c52cf8831567d12126881a45b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 20.8125,
"alnum_prop": 0.6291291291291291,
"repo_name": "pypa/warehouse",
"id": "4274672e6054084754cda0cd69fa51c2ef3b6d84",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "warehouse/migrations/versions/2d6390eebe90_fix_typo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "676"
},
{
"name": "Dockerfile",
"bytes": "6745"
},
{
"name": "HCL",
"bytes": "42"
},
{
"name": "HTML",
"bytes": "663799"
},
{
"name": "JavaScript",
"bytes": "128585"
},
{
"name": "Makefile",
"bytes": "5068"
},
{
"name": "Mako",
"bytes": "2040"
},
{
"name": "Procfile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "3315335"
},
{
"name": "SCSS",
"bytes": "205844"
},
{
"name": "Shell",
"bytes": "9424"
},
{
"name": "YARA",
"bytes": "9079"
}
],
"symlink_target": ""
} |
import sys
from PyQt4 import QtCore
from PyQt4 import QtGui
import functools
from manual_control_ui import Ui_MainWindow
NUM_TUNNELS = 6
NUM_GATES_PER_TUNNEL = 3
NUM_LEDS_PER_TUNNEL = 2
Open = True
Close = False
On = True
Off = False
ODOR_0 = 0
ODOR_1 = 1
ETHANOL = 2
GATE_LED_DIGITAL_TAB_INDEX = 0
GATE_LED_ANALOG_TAB_INDEX = 1
OLFACTOMETER_TAB_INDEX = 2
HIGH_CURRENT_LED_TAB_INDEX = 3
class ManualControlMainWindow(QtGui.QMainWindow,Ui_MainWindow):
def __init__(self,parent=None):
super(ManualControlMainWindow,self).__init__(parent)
self.setupUi(self)
self.makeCheckBoxArrays()
self.groupRadioButtons()
self.connectActions()
def initialize(self):
self.tabWidget.setCurrentIndex(GATE_LED_DIGITAL_TAB_INDEX)
self.gateLedDigitalControlTab.setEnabled(True)
self.closeAllGates()
self.turnOffAllLeds()
self.initializeGateCheckBoxes(Close)
self.initializeLedCheckBoxes(Off)
self.gateLedAnalogControlTab.setEnabled(True)
self.analogGateTunnel = NUM_TUNNELS
self.analogGate = NUM_GATES_PER_TUNNEL
self.analogLedTunnel = NUM_TUNNELS
self.analogLed = NUM_LEDS_PER_TUNNEL
self.updateDeviceInfoLabels()
self.olfactometersControlTab.setEnabled(True)
self.updateMfcValues()
self.highCurrentLedControlTab.setEnabled(True)
self.pwmValues = {'percent_capacity':20,
'duration_on':500,
'duration_off':500}
self.percentCapacityLabel.setNum(self.pwmValues['percent_capacity'])
self.percentCapacityHorizontalSlider.setValue(self.pwmValues['percent_capacity'])
self.durationOnLabel.setNum(self.pwmValues['duration_on'])
self.durationOnHorizontalSlider.setValue(self.pwmValues['duration_on'])
self.durationOffLabel.setNum(self.pwmValues['duration_off'])
self.durationOffHorizontalSlider.setValue(self.pwmValues['duration_off'])
def groupRadioButtons(self):
self.ethanol_valve_group = QtGui.QButtonGroup()
self.ethanol_valve_group.addButton(self.ethanolValve0RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve1RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve2RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve3RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve4RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve5RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve6RadioButton)
self.ethanol_valve_group.addButton(self.ethanolValve7RadioButton)
self.ethanol_valve_group.setExclusive(False)
self.ethanolValve0RadioButton.setChecked(False)
self.ethanolValve1RadioButton.setChecked(False)
self.ethanolValve2RadioButton.setChecked(False)
self.ethanolValve3RadioButton.setChecked(False)
self.ethanolValve4RadioButton.setChecked(False)
self.ethanolValve5RadioButton.setChecked(False)
self.ethanolValve6RadioButton.setChecked(False)
self.ethanolValve7RadioButton.setChecked(False)
self.ethanol_valve_group.setExclusive(True)
self.odor0_valve_group = QtGui.QButtonGroup()
self.odor0_valve_group.addButton(self.odor0Valve0RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve1RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve2RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve3RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve4RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve5RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve6RadioButton)
self.odor0_valve_group.addButton(self.odor0Valve7RadioButton)
self.odor0_valve_group.setExclusive(False)
self.odor0Valve0RadioButton.setChecked(False)
self.odor0Valve1RadioButton.setChecked(False)
self.odor0Valve2RadioButton.setChecked(False)
self.odor0Valve3RadioButton.setChecked(False)
self.odor0Valve4RadioButton.setChecked(False)
self.odor0Valve5RadioButton.setChecked(False)
self.odor0Valve6RadioButton.setChecked(False)
self.odor0Valve7RadioButton.setChecked(False)
self.odor0_valve_group.setExclusive(True)
self.odor1_valve_group = QtGui.QButtonGroup()
self.odor1_valve_group.addButton(self.odor1Valve0RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve1RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve2RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve3RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve4RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve5RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve6RadioButton)
self.odor1_valve_group.addButton(self.odor1Valve7RadioButton)
self.odor1_valve_group.setExclusive(False)
self.odor1Valve0RadioButton.setChecked(False)
self.odor1Valve1RadioButton.setChecked(False)
self.odor1Valve2RadioButton.setChecked(False)
self.odor1Valve3RadioButton.setChecked(False)
self.odor1Valve4RadioButton.setChecked(False)
self.odor1Valve5RadioButton.setChecked(False)
self.odor1Valve6RadioButton.setChecked(False)
self.odor1Valve7RadioButton.setChecked(False)
self.odor1_valve_group.setExclusive(True)
def makeCheckBoxArrays(self):
self.t0lxCheckBox = [self.t0l0CheckBox,self.t0l1CheckBox,self.t0laCheckBox]
self.t1lxCheckBox = [self.t1l0CheckBox,self.t1l1CheckBox,self.t1laCheckBox]
self.t2lxCheckBox = [self.t2l0CheckBox,self.t2l1CheckBox,self.t2laCheckBox]
self.t3lxCheckBox = [self.t3l0CheckBox,self.t3l1CheckBox,self.t3laCheckBox]
self.t4lxCheckBox = [self.t4l0CheckBox,self.t4l1CheckBox,self.t4laCheckBox]
self.t5lxCheckBox = [self.t5l0CheckBox,self.t5l1CheckBox,self.t5laCheckBox]
self.talxCheckBox = [self.tal0CheckBox,self.tal1CheckBox,self.talaCheckBox]
self.txlxCheckBox = [self.t0lxCheckBox,self.t1lxCheckBox,self.t2lxCheckBox,self.t3lxCheckBox,self.t4lxCheckBox,self.t5lxCheckBox,self.talxCheckBox]
self.t0gxCheckBox = [self.t0g0CheckBox,self.t0g1CheckBox,self.t0g2CheckBox,self.t0gaCheckBox]
self.t1gxCheckBox = [self.t1g0CheckBox,self.t1g1CheckBox,self.t1g2CheckBox,self.t1gaCheckBox]
self.t2gxCheckBox = [self.t2g0CheckBox,self.t2g1CheckBox,self.t2g2CheckBox,self.t2gaCheckBox]
self.t3gxCheckBox = [self.t3g0CheckBox,self.t3g1CheckBox,self.t3g2CheckBox,self.t3gaCheckBox]
self.t4gxCheckBox = [self.t4g0CheckBox,self.t4g1CheckBox,self.t4g2CheckBox,self.t4gaCheckBox]
self.t5gxCheckBox = [self.t5g0CheckBox,self.t5g1CheckBox,self.t5g2CheckBox,self.t5gaCheckBox]
self.tagxCheckBox = [self.tag0CheckBox,self.tag1CheckBox,self.tag2CheckBox,self.tagaCheckBox]
self.txgxCheckBox = [self.t0gxCheckBox,self.t1gxCheckBox,self.t2gxCheckBox,self.t3gxCheckBox,self.t4gxCheckBox,self.t5gxCheckBox,self.tagxCheckBox]
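        # Layout note (descriptive comment, not in the original): both arrays are
        # indexed as [tunnel][item], and the last index of each dimension is the
        # "all" checkbox, e.g. self.txlxCheckBox[2][NUM_LEDS_PER_TUNNEL] is the
        # "all LEDs" box of tunnel 2 and self.txgxCheckBox[NUM_TUNNELS][0] is the
        # gate-0 box that spans every tunnel.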
def connectActions(self):
self.t0l0CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=0,led=0))
self.t1l0CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=1,led=0))
self.t2l0CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=2,led=0))
self.t3l0CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=3,led=0))
self.t4l0CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=4,led=0))
self.t5l0CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=5,led=0))
self.tal0CheckBox.clicked.connect(functools.partial(self.talxClicked_Callback,led=0))
self.t0l1CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=0,led=1))
self.t1l1CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=1,led=1))
self.t2l1CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=2,led=1))
self.t3l1CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=3,led=1))
self.t4l1CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=4,led=1))
self.t5l1CheckBox.clicked.connect(functools.partial(self.txlxClicked_Callback,tunnel=5,led=1))
self.tal1CheckBox.clicked.connect(functools.partial(self.talxClicked_Callback,led=1))
self.t0laCheckBox.clicked.connect(functools.partial(self.txlaClicked_Callback,tunnel=0))
self.t1laCheckBox.clicked.connect(functools.partial(self.txlaClicked_Callback,tunnel=1))
self.t2laCheckBox.clicked.connect(functools.partial(self.txlaClicked_Callback,tunnel=2))
self.t3laCheckBox.clicked.connect(functools.partial(self.txlaClicked_Callback,tunnel=3))
self.t4laCheckBox.clicked.connect(functools.partial(self.txlaClicked_Callback,tunnel=4))
self.t5laCheckBox.clicked.connect(functools.partial(self.txlaClicked_Callback,tunnel=5))
self.talaCheckBox.clicked.connect(self.talaClicked_Callback)
self.t0g0CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=0,gate=0))
self.t1g0CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=1,gate=0))
self.t2g0CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=2,gate=0))
self.t3g0CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=3,gate=0))
self.t4g0CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=4,gate=0))
self.t5g0CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=5,gate=0))
self.tag0CheckBox.clicked.connect(functools.partial(self.tagxClicked_Callback,gate=0))
self.t0g1CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=0,gate=1))
self.t1g1CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=1,gate=1))
self.t2g1CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=2,gate=1))
self.t3g1CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=3,gate=1))
self.t4g1CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=4,gate=1))
self.t5g1CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=5,gate=1))
self.tag1CheckBox.clicked.connect(functools.partial(self.tagxClicked_Callback,gate=1))
self.t0g2CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=0,gate=2))
self.t1g2CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=1,gate=2))
self.t2g2CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=2,gate=2))
self.t3g2CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=3,gate=2))
self.t4g2CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=4,gate=2))
self.t5g2CheckBox.clicked.connect(functools.partial(self.txgxClicked_Callback,tunnel=5,gate=2))
self.tag2CheckBox.clicked.connect(functools.partial(self.tagxClicked_Callback,gate=2))
self.t0gaCheckBox.clicked.connect(functools.partial(self.txgaClicked_Callback,tunnel=0))
self.t1gaCheckBox.clicked.connect(functools.partial(self.txgaClicked_Callback,tunnel=1))
self.t2gaCheckBox.clicked.connect(functools.partial(self.txgaClicked_Callback,tunnel=2))
self.t3gaCheckBox.clicked.connect(functools.partial(self.txgaClicked_Callback,tunnel=3))
self.t4gaCheckBox.clicked.connect(functools.partial(self.txgaClicked_Callback,tunnel=4))
self.t5gaCheckBox.clicked.connect(functools.partial(self.txgaClicked_Callback,tunnel=5))
self.tagaCheckBox.clicked.connect(self.tagaClicked_Callback)
self.analogGateOpenPushButton.clicked.connect(self.analogGateOpenClicked_Callback)
self.analogGateClosePushButton.clicked.connect(self.analogGateCloseClicked_Callback)
self.analogLedOnPushButton.clicked.connect(self.analogLedOnClicked_Callback)
self.analogGateHorizontalSlider.valueChanged.connect(self.analogGateHorizontalSliderValueChanged_Callback)
self.analogGateSpinBox.editingFinished.connect(self.analogGateSpinBoxEditingFinished_Callback)
self.analogGateTunnelComboBox.currentIndexChanged.connect(self.analogGateTunnelComboBoxCurrentIndexChanged_Callback)
self.analogGateComboBox.currentIndexChanged.connect(self.analogGateComboBoxCurrentIndexChanged_Callback)
self.analogLedHorizontalSlider.valueChanged.connect(self.analogLedHorizontalSliderValueChanged_Callback)
self.analogLedSpinBox.editingFinished.connect(self.analogLedSpinBoxEditingFinished_Callback)
self.analogLedTunnelComboBox.currentIndexChanged.connect(self.analogLedTunnelComboBoxCurrentIndexChanged_Callback)
self.analogLedComboBox.currentIndexChanged.connect(self.analogLedComboBoxCurrentIndexChanged_Callback)
self.ethanolOffPushButton.clicked.connect(functools.partial(self.valveOffClicked_Callback,olfactometer=ETHANOL))
self.ethanolValve0RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=0))
self.ethanolValve1RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=1))
self.ethanolValve2RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=2))
self.ethanolValve3RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=3))
self.ethanolValve4RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=4))
self.ethanolValve5RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=5))
self.ethanolValve6RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=6))
self.ethanolValve7RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ETHANOL,valve=7))
self.ethanolMfc0HorizontalSlider.valueChanged.connect(self.ethanolMfc0ValueChanged_Callback)
self.ethanolMfc1HorizontalSlider.valueChanged.connect(self.ethanolMfc1ValueChanged_Callback)
self.odor0OffPushButton.clicked.connect(functools.partial(self.valveOffClicked_Callback,olfactometer=ODOR_0))
self.odor0Valve0RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=0))
self.odor0Valve1RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=1))
self.odor0Valve2RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=2))
self.odor0Valve3RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=3))
self.odor0Valve4RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=4))
self.odor0Valve5RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=5))
self.odor0Valve6RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=6))
self.odor0Valve7RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_0,valve=7))
self.odor0Mfc0HorizontalSlider.valueChanged.connect(self.odor0Mfc0ValueChanged_Callback)
self.odor0Mfc1HorizontalSlider.valueChanged.connect(self.odor0Mfc1ValueChanged_Callback)
self.odor1OffPushButton.clicked.connect(functools.partial(self.valveOffClicked_Callback,olfactometer=ODOR_1))
self.odor1Valve0RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=0))
self.odor1Valve1RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=1))
self.odor1Valve2RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=2))
self.odor1Valve3RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=3))
self.odor1Valve4RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=4))
self.odor1Valve5RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=5))
self.odor1Valve6RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=6))
self.odor1Valve7RadioButton.clicked.connect(functools.partial(self.valveClicked_Callback,olfactometer=ODOR_1,valve=7))
self.odor1Mfc0HorizontalSlider.valueChanged.connect(self.odor1Mfc0ValueChanged_Callback)
self.odor1Mfc1HorizontalSlider.valueChanged.connect(self.odor1Mfc1ValueChanged_Callback)
self.percentCapacityHorizontalSlider.valueChanged.connect(self.percentCapacityValueChanged_Callback)
self.durationOnHorizontalSlider.valueChanged.connect(self.durationOnValueChanged_Callback)
self.durationOffHorizontalSlider.valueChanged.connect(self.durationOffValueChanged_Callback)
self.startPwmPushButton.clicked.connect(self.startPwmClicked_Callback)
self.stopPwmPushButton.clicked.connect(self.stopPwmClicked_Callback)
def closeEvent(self, event):
pass
def updateDeviceInfoLabels(self):
dev_info = self.get_pwm_controller_info()
self.gateOpenValueLabel.setNum(dev_info.gate_open_servo_angle)
self.gateCloseValueLabel.setNum(dev_info.gate_close_servo_angle)
self.ledOnValueLabel.setNum(dev_info.led_on_duty_cycle)
self.ledOffValueLabel.setNum(0)
self.gateOpenValueLabel_2.setNum(dev_info.gate_open_servo_angle)
self.gateCloseValueLabel_2.setNum(dev_info.gate_close_servo_angle)
self.ledOnValueLabel_2.setNum(dev_info.led_on_duty_cycle)
def analogGateOpenClicked_Callback(self):
angle = self.analogGateSpinBox.value()
self.setGateOpenServoAngle(angle)
self.updateDeviceInfoLabels()
def analogGateCloseClicked_Callback(self):
angle = self.analogGateSpinBox.value()
self.setGateCloseServoAngle(angle)
self.updateDeviceInfoLabels()
def analogLedOnClicked_Callback(self):
duty_cycle = self.analogLedSpinBox.value()
self.setLedOnDutyCycle(duty_cycle)
self.updateDeviceInfoLabels()
def main(self):
self.show()
def isCheckBoxOn(self,checkBox):
return checkBox.isChecked() == On
def initializeLedCheckBoxes(self,state):
for tunnel in range(0,NUM_TUNNELS+1):
for led in range(0,NUM_LEDS_PER_TUNNEL+1):
self.txlxCheckBox[tunnel][led].setChecked(state)
def talaClicked_Callback(self):
if self.isCheckBoxOn(self.talaCheckBox):
self.turnOnAllLeds()
for tunnel in range(0,NUM_TUNNELS+1):
for led in range(0,NUM_LEDS_PER_TUNNEL+1):
self.txlxCheckBox[tunnel][led].setChecked(On)
else:
self.turnOffAllLeds()
for tunnel in range(0,NUM_TUNNELS+1):
for led in range(0,NUM_LEDS_PER_TUNNEL+1):
self.txlxCheckBox[tunnel][led].setChecked(Off)
def talxClicked_Callback(self,led):
checkBox = self.txlxCheckBox[NUM_TUNNELS][led]
if self.isCheckBoxOn(checkBox):
self.turnOnAllLedsByLed(led)
for tunnel in range(0,NUM_TUNNELS):
self.txlxCheckBox[tunnel][led].setChecked(On)
else:
self.turnOffAllLedsByLed(led)
for tunnel in range(0,NUM_TUNNELS):
self.txlxCheckBox[tunnel][led].setChecked(Off)
for tunnel in range(0,NUM_TUNNELS):
self.txlaUpdate(tunnel)
def txlaClicked_Callback(self,tunnel):
checkBox = self.txlxCheckBox[tunnel][NUM_LEDS_PER_TUNNEL]
if self.isCheckBoxOn(checkBox):
self.turnOnAllLedsByTunnel(tunnel)
for led in range(0,NUM_LEDS_PER_TUNNEL):
self.txlxCheckBox[tunnel][led].setChecked(On)
else:
self.turnOffAllLedsByTunnel(tunnel)
for led in range(0,NUM_LEDS_PER_TUNNEL):
self.txlxCheckBox[tunnel][led].setChecked(Off)
for led in range(0,NUM_LEDS_PER_TUNNEL):
self.talxUpdate(led)
def talaUpdate(self):
flag = True
for tunnel in range(0,NUM_TUNNELS):
flag = flag and self.txlxCheckBox[tunnel][NUM_LEDS_PER_TUNNEL].isChecked()
for led in range(0,NUM_LEDS_PER_TUNNEL):
            flag = flag and self.txlxCheckBox[NUM_TUNNELS][led].isChecked()
self.txlxCheckBox[NUM_TUNNELS][NUM_LEDS_PER_TUNNEL].setChecked(flag)
def txlaUpdate(self,tunnel):
flag = True
for led in range(0,NUM_LEDS_PER_TUNNEL):
flag = flag and self.txlxCheckBox[tunnel][led].isChecked()
self.txlxCheckBox[tunnel][NUM_LEDS_PER_TUNNEL].setChecked(flag)
self.talaUpdate()
def talxUpdate(self,led):
flag = True
for tunnel in range(0,NUM_TUNNELS):
flag = flag and self.txlxCheckBox[tunnel][led].isChecked()
self.txlxCheckBox[NUM_TUNNELS][led].setChecked(flag)
self.talaUpdate()
def txlxClicked_Callback(self,tunnel,led):
checkBox = self.txlxCheckBox[tunnel][led]
if self.isCheckBoxOn(checkBox):
self.turnOnLed(tunnel,led)
else:
self.turnOffLed(tunnel,led)
        self.txlaUpdate(tunnel)
self.talxUpdate(led)
def isCheckBoxOpen(self,checkBox):
return checkBox.isChecked() == Open
def initializeGateCheckBoxes(self,state):
for tunnel in range(0,NUM_TUNNELS+1):
for gate in range(0,NUM_GATES_PER_TUNNEL+1):
self.txgxCheckBox[tunnel][gate].setChecked(state)
def tagaClicked_Callback(self):
if self.isCheckBoxOpen(self.tagaCheckBox):
self.openAllGates()
for tunnel in range(0,NUM_TUNNELS+1):
for gate in range(0,NUM_GATES_PER_TUNNEL+1):
self.txgxCheckBox[tunnel][gate].setChecked(Open)
else:
self.closeAllGates()
for tunnel in range(0,NUM_TUNNELS+1):
for gate in range(0,NUM_GATES_PER_TUNNEL+1):
self.txgxCheckBox[tunnel][gate].setChecked(Close)
def tagaUpdate(self):
flag = True
for tunnel in range(0,NUM_TUNNELS):
flag = flag and self.txgxCheckBox[tunnel][NUM_GATES_PER_TUNNEL].isChecked()
for gate in range(0,NUM_GATES_PER_TUNNEL):
            flag = flag and self.txgxCheckBox[NUM_TUNNELS][gate].isChecked()
self.txgxCheckBox[NUM_TUNNELS][NUM_GATES_PER_TUNNEL].setChecked(flag)
def tagxUpdate(self,gate):
flag = True
for tunnel in range(0,NUM_TUNNELS):
flag = flag and self.txgxCheckBox[tunnel][gate].isChecked()
self.txgxCheckBox[NUM_TUNNELS][gate].setChecked(flag)
self.tagaUpdate()
def txgaUpdate(self,tunnel):
flag = True
for gate in range(0,NUM_GATES_PER_TUNNEL):
flag = flag and self.txgxCheckBox[tunnel][gate].isChecked()
self.txgxCheckBox[tunnel][NUM_GATES_PER_TUNNEL].setChecked(flag)
self.tagaUpdate()
def tagxClicked_Callback(self,gate):
checkBox = self.txgxCheckBox[NUM_TUNNELS][gate]
if self.isCheckBoxOpen(checkBox):
self.openAllGatesByGate(gate)
for tunnel in range(0,NUM_TUNNELS):
self.txgxCheckBox[tunnel][gate].setChecked(Open)
else:
self.closeAllGatesByGate(gate)
for tunnel in range(0,NUM_TUNNELS):
self.txgxCheckBox[tunnel][gate].setChecked(Close)
for tunnel in range(0,NUM_TUNNELS):
self.txgaUpdate(tunnel)
def txgaClicked_Callback(self,tunnel):
checkBox = self.txgxCheckBox[tunnel][NUM_GATES_PER_TUNNEL]
if self.isCheckBoxOpen(checkBox):
self.openAllGatesByTunnel(tunnel)
for gate in range(0,NUM_GATES_PER_TUNNEL):
self.txgxCheckBox[tunnel][gate].setChecked(Open)
else:
self.closeAllGatesByTunnel(tunnel)
for gate in range(0,NUM_GATES_PER_TUNNEL):
self.txgxCheckBox[tunnel][gate].setChecked(Close)
for gate in range(0,NUM_GATES_PER_TUNNEL):
self.tagxUpdate(gate)
def txgxClicked_Callback(self,tunnel,gate):
checkBox = self.txgxCheckBox[tunnel][gate]
if self.isCheckBoxOpen(checkBox):
self.openGate(tunnel,gate)
else:
self.closeGate(tunnel,gate)
self.txgaUpdate(tunnel)
self.tagxUpdate(gate)
def updateGateServoAngle(self,angle):
if self.tabWidget.currentIndex() == GATE_LED_ANALOG_TAB_INDEX:
if (self.analogGateTunnel == NUM_TUNNELS) and (self.analogGate == NUM_GATES_PER_TUNNEL):
self.setAllGatesServoAngle(angle)
elif self.analogGateTunnel == NUM_TUNNELS:
self.setAllGatesServoAngleByGate(self.analogGate,angle)
elif self.analogGate == NUM_GATES_PER_TUNNEL:
self.setAllGatesServoAngleByTunnel(self.analogGateTunnel,angle)
else:
self.setGateServoAngle(self.analogGateTunnel,self.analogGate,angle)
def analogGateHorizontalSliderValueChanged_Callback(self,angle):
self.analogGateSpinBox.setValue(angle)
self.updateGateServoAngle(angle)
def analogGateSpinBoxEditingFinished_Callback(self):
angle = self.analogGateSpinBox.value()
self.analogGateHorizontalSlider.setValue(angle)
self.updateGateServoAngle(angle)
def analogGateTunnelComboBoxCurrentIndexChanged_Callback(self,index):
text = self.analogGateTunnelComboBox.currentText()
if text == 'All':
self.analogGateTunnel = NUM_TUNNELS
else:
self.analogGateTunnel = int(text)-1
def analogGateComboBoxCurrentIndexChanged_Callback(self,index):
text = self.analogGateComboBox.currentText()
if text == 'All':
self.analogGate = NUM_GATES_PER_TUNNEL
else:
self.analogGate = int(text)-1
def updateLedDutyCycle(self,duty_cycle):
if self.tabWidget.currentIndex() == GATE_LED_ANALOG_TAB_INDEX:
if (self.analogLedTunnel == NUM_TUNNELS) and (self.analogLed == NUM_LEDS_PER_TUNNEL):
self.setAllLedsDutyCycle(duty_cycle)
elif self.analogLedTunnel == NUM_TUNNELS:
self.setAllLedsDutyCycleByLed(self.analogLed,duty_cycle)
elif self.analogLed == NUM_LEDS_PER_TUNNEL:
self.setAllLedsDutyCycleByTunnel(self.analogLedTunnel,duty_cycle)
else:
self.setLedDutyCycle(self.analogLedTunnel,self.analogLed,duty_cycle)
def analogLedHorizontalSliderValueChanged_Callback(self,duty_cycle):
self.analogLedSpinBox.setValue(duty_cycle)
self.updateLedDutyCycle(duty_cycle)
def analogLedSpinBoxEditingFinished_Callback(self):
duty_cycle = self.analogLedSpinBox.value()
self.analogLedHorizontalSlider.setValue(duty_cycle)
self.updateLedDutyCycle(duty_cycle)
def analogLedTunnelComboBoxCurrentIndexChanged_Callback(self,index):
text = self.analogLedTunnelComboBox.currentText()
if text == 'All':
self.analogLedTunnel = NUM_TUNNELS
else:
self.analogLedTunnel = int(text)-1
def analogLedComboBoxCurrentIndexChanged_Callback(self,index):
text = self.analogLedComboBox.currentText()
if text == 'All':
self.analogLed = NUM_LEDS_PER_TUNNEL
else:
self.analogLed = int(text)-1
def valveOffClicked_Callback(self,olfactometer):
self.set_odor_valves_off(olfactometer)
if olfactometer == ETHANOL:
self.ethanol_valve_group.setExclusive(False)
self.ethanolValve0RadioButton.setChecked(False)
self.ethanolValve1RadioButton.setChecked(False)
self.ethanolValve2RadioButton.setChecked(False)
self.ethanolValve3RadioButton.setChecked(False)
self.ethanolValve4RadioButton.setChecked(False)
self.ethanolValve5RadioButton.setChecked(False)
self.ethanolValve6RadioButton.setChecked(False)
self.ethanolValve7RadioButton.setChecked(False)
self.ethanol_valve_group.setExclusive(True)
elif olfactometer == ODOR_1:
self.odor1_valve_group.setExclusive(False)
self.odor1Valve0RadioButton.setChecked(False)
self.odor1Valve1RadioButton.setChecked(False)
self.odor1Valve2RadioButton.setChecked(False)
self.odor1Valve3RadioButton.setChecked(False)
self.odor1Valve4RadioButton.setChecked(False)
self.odor1Valve5RadioButton.setChecked(False)
self.odor1Valve6RadioButton.setChecked(False)
self.odor1Valve7RadioButton.setChecked(False)
self.odor1_valve_group.setExclusive(True)
elif olfactometer == ODOR_0:
self.odor0_valve_group.setExclusive(False)
self.odor0Valve0RadioButton.setChecked(False)
self.odor0Valve1RadioButton.setChecked(False)
self.odor0Valve2RadioButton.setChecked(False)
self.odor0Valve3RadioButton.setChecked(False)
self.odor0Valve4RadioButton.setChecked(False)
self.odor0Valve5RadioButton.setChecked(False)
self.odor0Valve6RadioButton.setChecked(False)
self.odor0Valve7RadioButton.setChecked(False)
self.odor0_valve_group.setExclusive(True)
def valveClicked_Callback(self,olfactometer,valve):
self.set_odor_valve_on(olfactometer,valve)
def ethanolMfc0ValueChanged_Callback(self,value):
self.set_mfc_flow_rate(ETHANOL,0,value)
self.ethanolMfc0ValueLabel.setNum(value)
def ethanolMfc1ValueChanged_Callback(self,value):
self.set_mfc_flow_rate(ETHANOL,1,value)
self.ethanolMfc1ValueLabel.setNum(value)
def odor0Mfc0ValueChanged_Callback(self,value):
self.set_mfc_flow_rate(ODOR_0,0,value)
self.odor0Mfc0ValueLabel.setNum(value)
def odor0Mfc1ValueChanged_Callback(self,value):
self.set_mfc_flow_rate(ODOR_0,1,value)
self.odor0Mfc1ValueLabel.setNum(value)
def odor1Mfc0ValueChanged_Callback(self,value):
self.set_mfc_flow_rate(ODOR_1,0,value)
self.odor1Mfc0ValueLabel.setNum(value)
def odor1Mfc1ValueChanged_Callback(self,value):
self.set_mfc_flow_rate(ODOR_1,1,value)
self.odor1Mfc1ValueLabel.setNum(value)
def updateMfcValues(self):
olfactometer = ETHANOL
mfc = 0
value = self.get_mfc_flow_rate_setting(device=olfactometer,mfc=mfc).percent_capacity
self.ethanolMfc0ValueLabel.setNum(value)
self.ethanolMfc0HorizontalSlider.setValue(value)
mfc = 1
value = self.get_mfc_flow_rate_setting(device=olfactometer,mfc=mfc).percent_capacity
self.ethanolMfc1ValueLabel.setNum(value)
self.ethanolMfc1HorizontalSlider.setValue(value)
olfactometer = ODOR_1
mfc = 0
value = self.get_mfc_flow_rate_setting(device=olfactometer,mfc=mfc).percent_capacity
self.odor1Mfc0ValueLabel.setNum(value)
self.odor1Mfc0HorizontalSlider.setValue(value)
mfc = 1
value = self.get_mfc_flow_rate_setting(device=olfactometer,mfc=mfc).percent_capacity
self.odor1Mfc1ValueLabel.setNum(value)
self.odor1Mfc1HorizontalSlider.setValue(value)
olfactometer = ODOR_0
mfc = 0
value = self.get_mfc_flow_rate_setting(device=olfactometer,mfc=mfc).percent_capacity
self.odor0Mfc0ValueLabel.setNum(value)
self.odor0Mfc0HorizontalSlider.setValue(value)
mfc = 1
value = self.get_mfc_flow_rate_setting(device=olfactometer,mfc=mfc).percent_capacity
self.odor0Mfc1ValueLabel.setNum(value)
self.odor0Mfc1HorizontalSlider.setValue(value)
def percentCapacityValueChanged_Callback(self,value):
self.pwmValues['percent_capacity'] = value
self.percentCapacityLabel.setNum(value)
def durationOnValueChanged_Callback(self,value):
self.pwmValues['duration_on'] = value
self.durationOnLabel.setNum(value)
def durationOffValueChanged_Callback(self,value):
self.pwmValues['duration_off'] = value
self.durationOffLabel.setNum(value)
def startPwmClicked_Callback(self):
self.start_current_controller_pwm(self.pwmValues['percent_capacity'],
self.pwmValues['duration_on'],
self.pwmValues['duration_off'])
def stopPwmClicked_Callback(self):
self.stop_current_controller_pwm()
def manualControlGui():
app = QtGui.QApplication(sys.argv)
mainWindow = ManualControlMainWindow()
mainWindow.main()
app.exec_()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
manualControlGui()
| {
"content_hash": "3d38fdd1fdb595895a851aeb4222ebc9",
"timestamp": "",
"source": "github",
"line_count": 632,
"max_line_length": 155,
"avg_line_length": 53.31487341772152,
"alnum_prop": 0.720848790621754,
"repo_name": "JaneliaSciComp/fly-alcohol-assay",
"id": "a1e7562a457fc37d6cd03830e1a4f49c2de6f8d3",
"size": "33695",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "faa_manual_control/src/faa_manual_control/manual_control_gui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "392"
},
{
"name": "C++",
"bytes": "36131"
},
{
"name": "CMake",
"bytes": "26184"
},
{
"name": "CSS",
"bytes": "3706"
},
{
"name": "HTML",
"bytes": "68111"
},
{
"name": "JavaScript",
"bytes": "173781"
},
{
"name": "Makefile",
"bytes": "908"
},
{
"name": "Python",
"bytes": "444569"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
} |
"""Abstraction for quantizing neural networks implemented in jax."""
import contextlib
import dataclasses
import enum
import functools
import logging
import typing
from typing import Iterable, Optional, Tuple, Union
from absl import flags
from aqt.jax_legacy.jax import compute_cost_utils
from aqt.jax_legacy.jax import fp_cast
from aqt.jax_legacy.jax import get_bounds
from aqt.jax_legacy.jax import primitives
from aqt.jax_legacy.jax import shape_utils
from aqt.jax_legacy.jax import utils
from aqt.jax_legacy.jax.flax import struct as flax_struct
from flax import linen as nn
import jax
from jax import lax
import jax.numpy as jnp
# Global bool to control the use of epsilon in the denominator of the scaling
# methods signed_int_scale and unsigned_int_scale. Epsilon is added to avoid
# division by 0. For testing, one may choose to disable the epsilon by setting
# this global to True.
# As this is a global variable, please modify it only before calling any
# functions that use it.
DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING = False
# Dtype for quantization computations: scaling; floor and clip; rescaling. This
# is chosen to optimize performance for the given hardware, i.e. for TPU we set
# it to float32. It should match the native dtype of the hardware's
# 'vector unit'.
SCALE_DTYPE = jnp.float32
dataclass = flax_struct.dataclass if not typing.TYPE_CHECKING else dataclasses.dataclass
# ActsBoundT can be a Jax array of floats with a shape that is broadcastable to
# the shape of activation tensors.
ActsBoundT = Union[float, jnp.ndarray, get_bounds.GetBounds.Hyper,
get_bounds.DynamicBounds.Hyper, None]
@dataclass
class _FloatQuant:
"""Parameters for floating-point quantization.
  Floating-point quantization refers to degrading floating-point precision
  below that natively supported, e.g., bfloat16. This quantization scheme
  can work either with or without scaling (controlled by `is_scaled`).
With scaling, these quantization steps follow,
1. Use the maximum representable floating-point value to determine a scale.
2. This scale is used to "upscale" the argument to the range of the target
floating-point format.
3. The precision of the argument is then degraded through a downcast
operation.
4. Finally the degraded-precision result is "downscaled" by the inverse
scale.
Without scaling, these quantization steps follow,
1. The argument is downcast to the target fp-format with degraded precision.
Of importance in this downcast is the saturating behavior, which is
logically equivalent to clipping by the maximum representable target
value.
"""
@dataclass
class FloatPrec:
"""Parameters for specifying a custom precision floating-point type."""
# The minimum exponent value of the quantized floating-point format.
exp_min: int
# The maximum exponent value of the quantized floating-point format.
exp_max: int
# The number of significand bits (excluding hidden bit) of the quantized
# floating-point format.
sig_bits: int
# Whether or not floating-point fake-quant makes use of scaling.
is_scaled: bool
# Precision specification for floating-point quantization.
fp_spec: FloatPrec
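# Illustrative sketch (not part of the library API): the scaled floating-point
# fake-quant steps from the _FloatQuant docstring, written out with plain jnp
# ops and the same fp_cast helper used by QuantOps below. The exponent range
# and significand bits are made-up example values, not a recommended format.
def _example_scaled_fp_quant(x: jnp.ndarray) -> jnp.ndarray:
  exp_min, exp_max, sig_bits = -11, 4, 3  # assumed example format
  # Steps 1-2: derive a power-of-two scale from the clipping bound (here max|x|).
  bounds = jnp.max(jnp.abs(x)) + jnp.finfo(SCALE_DTYPE).eps
  scale = jnp.exp2(-jnp.floor(jnp.log2(bounds)))
  x_scaled = x * scale
  # Step 3: degrade precision with a saturating, flush-to-zero downcast.
  x_degraded = fp_cast.downcast_sat_ftz(x_scaled, exp_min, exp_max, sig_bits)
  # Step 4: undo the scaling.
  return x_degraded / scale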
_PrecT = Union[None, int, _FloatQuant] # pylint: disable=invalid-name
class QuantType(str, enum.Enum):
"""Quantization strategy dataclass."""
# fake_quant strategy ensures that quantized values form an arithmetic
# sequence e.g. 0*s ... 255*s for 8-bit positive quantization, for some s.
# it can be implemented as a local op: upscale, floor, clip, downscale.
FAKE_QUANT = 'fake_quant'
# fake_quant strategy with quantized inputs/weights type-casted to int.
FAKE_QUANT_WITH_INT = 'fake_quant_with_int'
# aqt ensures that MatMul/Conv are in actual integer domain.
# It can't be implemented as a single op.
# Before matmul we have upscale, floor and clip, and after matmul we have
# downscale.
AQT = 'aqt'
def to_jax_type(self) -> primitives.jnp_dtype:
"""Returns quantized dtype for the corresponding quantization strategy."""
# Currently, this function is used to decide the return type for
# 'QuantOps.to_quantized.' The AQT implementation works by having a
# conversion to an int dtype and then back to a fp dtype happen *within*
# to_quantized, so that Jax backprop works correctly. Thus
# counter-intuitively, we need this to return a fp dtype for 'aqt' since the
# return type for 'to_quantized' overall is fp. TODO(malmaud): As part of
# the refactor of this module, clean this up to eliminate the
# counter-intuitive behavior.
if self.value in ['aqt', 'fake_quant']: # pylint: disable=comparison-with-callable
return SCALE_DTYPE
elif self.value == 'fake_quant_with_int': # pylint: disable=comparison-with-callable
return jnp.int8
else:
raise RuntimeError(f'QuantType {self.value} is unknown.')
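# A tiny illustration of the counter-intuitive to_jax_type() behavior described
# above: both 'fake_quant' and 'aqt' report a floating-point dtype, and only
# 'fake_quant_with_int' reports an integer dtype. Not part of the library API.
def _example_quant_type_dtypes():
  assert QuantType.FAKE_QUANT.to_jax_type() == SCALE_DTYPE
  assert QuantType.AQT.to_jax_type() == SCALE_DTYPE
  assert QuantType.FAKE_QUANT_WITH_INT.to_jax_type() == jnp.int8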
class QuantOps:
"""Class for quantizing and dequantizing weights and activations."""
# Redefined here as nested class attributes to avoid forward-references.
FloatQuant = _FloatQuant # pylint: disable=invalid-name
PrecT = _PrecT # pylint: disable=invalid-name
@dataclass
class WeightParams:
"""Parameters for weight quantization."""
prec: _PrecT # expected precision for weight quantization.
# enable all available values during quantization
half_shift: bool
# Axis along which to quantize weights (the non-feature axis).
axis: Optional[Iterable[int]]
# expected scale shape for weights quantization. Defaults to None.
expected_scale_shape: Union[None, int, Tuple[int, ...]] = None
@dataclass
class ActHParams:
"""Parameters for activation quantization."""
# Inheriting from 'str' and making the enums have string values lets us
# conveniently serialize this class to JSON without a custom JSON encoder.
class InputDistribution(str, enum.Enum):
SYMMETRIC = 'symmetric'
POSITIVE = 'positive'
input_distribution: InputDistribution
# float means fixed bound. '-1' means no quantization.
bounds: ActsBoundT
prec: _PrecT
half_shift: bool
def __init__(
self, #
*,
prec: Union[int, _FloatQuant],
scale: Optional[jnp.ndarray],
symmetric: bool,
bounds: Optional[jnp.ndarray],
half_shift: bool):
"""Default constructor, use of named constructors is strongly encoraged.
Args:
prec: precision for the QuantOps
scale: scaling factor to scale the input to quantized precision range
symmetric: whether the input to quantize is symmetric
bounds: Optional. The clipping bounds used for calculating scale factors.
half_shift: Symmetric quantization with all available values enabled
"""
self._prec = prec
self._half_shift = half_shift
if scale is None:
self._scale = None
else:
self._scale = scale.astype(SCALE_DTYPE)
self._symmetric = symmetric
    # Storing bounds is useful for two reasons: one is debugging, since it
    # makes it easy to see how a QuantOps instance came up with its scale factor.
    # Two is that right now, we use a bound of '-1' as a special value meaning
    # 'do not quantize'. See the comment on the 'should_quantize' method for more
    # details.
self._bounds = bounds
@classmethod
def create_symmetric_fp(
cls,
*,
bounds: Optional[primitives.BoundsT],
fp_quant: _FloatQuant,
) -> 'QuantOps':
"""Create QuantOps for symmetric clipping to floating-point bounds.
Args:
bounds: The upper (and absolute lower) bound to clip the inputs.
fp_quant: quantization floating-point specification of the target format.
Returns:
QuantOps for quantizing/dequantizing signed activations.
"""
if bounds is None:
if fp_quant.is_scaled:
raise ValueError(
'bounds can only be None if fp_quant.is_scaled is False.')
return cls(
prec=fp_quant,
scale=None,
symmetric=True,
bounds=None,
half_shift=False) # disable half_shift for fp quantization
else:
initial_bounds = bounds
# We set bounds = -1 to indicate no quantization.
# TODO(shivaniagrawal): Move away from the hack of setting bound as -1.
bounds = jnp.asarray(bounds, SCALE_DTYPE)
if not DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING:
# to avoid log2(0)
bounds = jnp.abs(bounds) + jnp.finfo(SCALE_DTYPE).eps
scale = jnp.exp2(-jnp.floor(jnp.log2(bounds))) # Scale to unit binade.
# NOTE: stop_gradient is needed here to prevent gradient flow through
# scale when scale is not a constant, but computed as a function of
# activations or weights.
scale = lax.stop_gradient(scale)
return cls(
prec=fp_quant,
scale=scale,
symmetric=True,
bounds=initial_bounds,
half_shift=False) # disable half_shift for fp quantization
@classmethod
def create_symmetric(cls, *, bounds: primitives.BoundsT, prec: int,
half_shift: bool) -> 'QuantOps':
"""Create QuantOps for symmetric activations clipped to [-bounds, bounds].
Args:
bounds: The upper (and absolute lower) bound to clip the inputs.
prec: Signed int precision for the QuantOps.
half_shift: Symmetric quantization with all available values enabled
Returns:
QuantOps for quantizing/dequantizing signed activations.
"""
initial_bounds = bounds
bounds = jnp.asarray(bounds, SCALE_DTYPE)
if not DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING:
bounds += jnp.finfo(SCALE_DTYPE).eps # to avoid div by 0
scale = primitives.signed_int_bound(
prec=prec, half_shift=half_shift) / bounds
# NOTE: stop_gradient is needed here to prevent gradient flow through scale
# when scale is not a constant, but computed as a function of activations or
# weights.
scale = lax.stop_gradient(scale)
return cls(
prec=prec,
scale=scale,
symmetric=True,
bounds=initial_bounds,
half_shift=half_shift)
@classmethod
def create_positive(cls, *, bounds: primitives.BoundsT,
prec: int) -> 'QuantOps':
"""Create QuantOps for positive activations clipped to [0, bounds].
Args:
bounds: The upper bound to clip the activations.
prec: Unsigned int precision for the QuantOps.
Returns:
QuantOps for quantizing/dequantizing unsigned activations.
"""
initial_bounds = bounds
bounds = jnp.asarray(bounds, SCALE_DTYPE)
if not DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING:
bounds += jnp.finfo(SCALE_DTYPE).eps # to avoid div by 0
scale = primitives.unsigned_int_bound(prec=prec) / bounds
# NOTE: stop_gradient is needed here to prevent gradient flow through scale
# when scale is not a constant, but computed as a function of activations.
scale = lax.stop_gradient(scale)
return cls(
prec=prec,
scale=scale,
symmetric=False,
bounds=initial_bounds,
half_shift=False) # disable half_shift for positive distribution
def assert_scale_shape_is(self, *, shape: Union[int, Tuple[int, ...]]):
# TODO(shivaniagrawal): add option for float scale for fixed bound acts
# quantization.
assert self._scale.shape == shape, (
'scale shape is unexpected, should be %s but got %s' %
(shape, self._scale.shape))
def to_quantized(self, x: jnp.ndarray, *,
dtype: primitives.jnp_dtype) -> jnp.ndarray:
"""Quantizes the argument to the target format.
integer: "upscales", rounds or floors and clips.
floating-point: optionally upscales, then downcasts to target precision.
Args:
x: Argument to be quantized.
dtype: Type of returned quantized value of x. If quantized x is an input
        to a matmul, we might want to set it to jnp.int8. If quantized x is
weights stored in memory, same applies. In fake_quant style we might
prefer to set dtype=SCALE_DTYPE, since quantized x might get constant
folded with rescale op (`from_quantized`). Please take a look at the
comment on SCALE_DTYPE.
Returns:
Quantized value of x.
"""
if isinstance(self._prec, _FloatQuant):
if self._prec.is_scaled:
x = jnp.multiply(x, self._scale).astype(x.dtype)
fp_spec = self._prec.fp_spec
return fp_cast.downcast_sat_ftz(
x,
fp_spec.exp_min,
fp_spec.exp_max,
fp_spec.sig_bits,
)
else:
if self._symmetric:
quantize = primitives.round_and_clip_to_signed_int
else:
quantize = primitives.floor_and_clip_to_unsigned_int
scaled_x = jnp.multiply(x, self._scale)
return quantize(
scaled_x, prec=self._prec, dtype=dtype, half_shift=self._half_shift)
# Same as to_quantized but it just "downscales" using the same scale.
def from_quantized(self, x: jnp.ndarray, *,
dtype: primitives.jnp_dtype) -> jnp.ndarray:
"""'Rescales' the quantized value.
Args:
x: quantized.
dtype: return type for rescaled x
Returns:
Rescaled x cast to type dtype
"""
if (isinstance(self._prec, _FloatQuant) and not self._prec.is_scaled):
return x
rescaled_x = jnp.divide(x, self._scale)
return rescaled_x.astype(dtype)
# Helper fake quantization
def fake_quant(self,
x: jnp.ndarray,
*,
quantized_type: primitives.jnp_dtype,
fake_dependency: Optional[jnp.ndarray] = None) -> jnp.ndarray:
x_dtype = x.dtype
quantized_x = self.to_quantized(x, dtype=quantized_type)
if fake_dependency is not None:
quantized_x = lax.tie_in(fake_dependency, quantized_x)
return self.from_quantized(quantized_x, dtype=x_dtype)
# Assumes weights are unsigned int of precision prec.
@classmethod
def create_weights_ops(
cls,
w: jnp.ndarray,
*,
weight_params: WeightParams,
) -> 'QuantOps':
"""Create a QuantOps that can quantize and dequantize a weight tensor.
Args:
w: The weights to quantize.
weight_params: WeightParams Parameters required for weight quantization.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
weight_bounds = primitives.max_abs_weights(w, axis=weight_params.axis)
prec = weight_params.prec
half_shift = weight_params.half_shift
if isinstance(prec, _FloatQuant):
ops = cls.create_symmetric_fp(bounds=weight_bounds, fp_quant=prec)
else:
ops = cls.create_symmetric(
bounds=weight_bounds, prec=prec, half_shift=half_shift)
if weight_params.expected_scale_shape is not None:
# NOTE: We set keepdim to True when computing weights scale, as a result
# the axes which are reduced are left in the result as dimensions with
# size one. User should correctly pass the shape with reduced dimensions
# set to 1.
ops.assert_scale_shape_is(shape=weight_params.expected_scale_shape)
return ops
# Assumes weights are unsigned int of precision prec.
@classmethod
def create_weights_fake_quant(
cls,
w: jnp.ndarray,
*,
weight_params: WeightParams,
quantized_type: primitives.jnp_dtype = SCALE_DTYPE,
fake_dependency: Optional[jnp.ndarray] = None,
quantize_weights: bool = True,
) -> jnp.ndarray:
"""Quantize weights with fake quant approach.
Args:
w: The weights to quantize.
weight_params: WeightParams Parameters required for weight quantization.
quantized_type: type of intermediate quantized value of weights. Defaults
to SCALE_DTYPE.
fake_dependency: dynamic array, quantized weights will have fake
dependency on. lax.tie_in for more details. This is used in order to
prevent constant folding of rescale op with quantized weights. Defaults
to None, in this case quantized weights would not have a fake
dependency.
quantize_weights: whether weights should be quantized or not
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
# TODO(yichi): if weight_params.prec is None or weight_binarize flag True:
if weight_params.prec is None or not quantize_weights:
return w
weight_shape = w.shape
ops = cls.create_weights_ops(w, weight_params=weight_params)
weight_rescaled = ops.fake_quant(
w, quantized_type=quantized_type, fake_dependency=fake_dependency)
return weight_rescaled
# TODO(malmaud): rename 'input' to activation here and elsewhere in this file.
@classmethod
def create_input_ops(
cls, inputs: jnp.ndarray, *, hparams: ActHParams,
bounds_params: Union[get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params, None]
) -> 'QuantOps':
"""Create a QuantOps that can quantize and dequantize an activation tensor.
Args:
inputs: The inputs to quantize.
hparams: Input hyperparameter (ActHParams).
bounds_params: BoundsParams. Parameters for GetBounds or DynamicBounds.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
# TODO(shivaniagrawal): investigate why pytype allows types other than
# ActsBoundT.
if isinstance(hparams.bounds, int):
hparams.bounds = float(hparams.bounds)
# NOTE: if flax module name is None, default name is used.
# If we want to train with no quantization at first and then turn on
# GetBounds quantization, we still have to call GetBounds even before
# quantization is enabled since GetBounds calculates and stores the running
# statistics that we will use once quantization is enabled. But before
# quantization is enabled, we want to ignore the returned bounds and just
# return the original unquantized input. To do so, we take advantage of the
# fact that GetBounds returns a constant fixed bound for an initial time
# period and set that initial bound to a special value (-1) to indicate we
    # want to store activation statistics without applying quantization. That
    # will cause clip_bounds to be a tensor of all '-1', which we will check
# for in a lax.cond call below.
# TODO(malmaud): Refactor code to separate bounds calculation from tracking
# activation statistics to avoid the need to rely on special bounds values
# when disabling quantization.
if isinstance(hparams.bounds, get_bounds.GetBounds.Hyper):
if not bounds_params and not isinstance(bounds_params,
get_bounds.GetBounds.Params):
raise ValueError(
'act_hparams.bounds is of type GetBounds.Hyper, user must '
'provide bounds_params, parameters for GetBounds.')
clip_bounds = get_bounds.GetBounds(
hyper=hparams.bounds, name=bounds_params.module_name)(
inputs,
bounds_params=bounds_params,
)
elif isinstance(hparams.bounds, get_bounds.DynamicBounds.Hyper):
if not bounds_params and not isinstance(bounds_params,
get_bounds.DynamicBounds.Params):
raise ValueError(
'act_hparams.bounds is of type DynamicBounds.Hyper, user must '
'provide bounds_params, parameters for DynamicBounds.')
clip_bounds = get_bounds.DynamicBounds(
hyper=hparams.bounds, name=bounds_params.module_name)(
inputs,
bounds_params=bounds_params,
)
elif isinstance(hparams.bounds, (float, jnp.ndarray)):
clip_bounds = hparams.bounds
else:
assert False, (
'%s is not a valid type for hparams.bounds, should be float, a list '
'of floats, DynamicBounds.Hyper or GetBounds.Hyper.' %
(type(hparams.bounds)))
if isinstance(hparams.prec, _FloatQuant):
ops = cls.create_symmetric_fp(bounds=clip_bounds, fp_quant=hparams.prec)
elif hparams.input_distribution == cls.ActHParams.InputDistribution.SYMMETRIC:
ops = cls.create_symmetric(
bounds=clip_bounds, prec=hparams.prec, half_shift=hparams.half_shift)
elif hparams.input_distribution == cls.ActHParams.InputDistribution.POSITIVE:
ops = cls.create_positive(bounds=clip_bounds, prec=hparams.prec)
else:
assert False, "can't happen."
if bounds_params and bounds_params.expected_bounds_shape is not None:
if isinstance(hparams.bounds, get_bounds.GetBounds.Hyper):
ops.assert_scale_shape_is(shape=bounds_params.expected_bounds_shape)
else:
logging.info(
'Ignoring value of argument expected_scale_shape. Scale for fixed '
'bounds would be scalar.')
return ops
@classmethod
def create_inputs_fake_quant(
cls, inputs: jnp.ndarray, *, hparams: ActHParams,
bounds_params: Union[None, get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params]
) -> jnp.ndarray:
"""Quantize input with fake quant approach.
Args:
inputs: The inputs to quantize.
hparams: Input hyperparameter (ActHParams).
bounds_params: GetBounds or DynamicBounds Params.
Returns:
Quantized and rescaled inputs using fake quant approach.
"""
if hparams.bounds is None or hparams.prec is None:
# TODO(lew): support bound-clipping without quantization
return inputs
ops = cls.create_input_ops(
inputs, hparams=hparams, bounds_params=bounds_params)
quantized_inputs = ops.fake_quant(inputs, quantized_type=SCALE_DTYPE)
return lax.cond(ops.should_quantize(), lambda _: quantized_inputs,
lambda _: inputs, None)
# When using GetBounds quantization (if hparams.bounds is an instance of
# GetBounds.Hyper), if we want to disable quantization but continue to
  # collect activation statistics, we have GetBounds return a clip_bounds
  # tensor of all '-1' values as a signal that quantization shouldn't be
# applied. See comment on the call to 'GetBounds' above.
# TODO(malmaud): Find a less hacky way to do this.
def should_quantize(self) -> jnp.ndarray:
"""Return whether QuantOps should quantize."""
# We return a scalar jnp.ndarray of dtype bool instead of a Python bool
# because during the Jax JIT compilation, self._bounds will be a tracer
# instead of a concrete tensor, which can't be coerced to a Python bool.
# Since the type of jnp.all is an ndarray, we invert it with '~' instead of
# 'not'
return ~jnp.all(self._bounds == -1)
def get_scale_for_aqt(self, *, allow_per_channel_scales: bool) -> jnp.ndarray:
"""Returns the scale in a shape appropriate for AQT.
    An error is raised if the granularity of the scale factors is incompatible
with the current AQT implementation and the setting of
'allow_per_channel_scales'.
Args:
allow_per_channel_scales: A boolean indicating whether a separate scale
factor is allowed for each output channel (True) or if only a scalar
(ie, per-layer) scale factor is allowed (False).
Returns:
      Either a scalar array that corresponds to a per-layer scale factor, or an
      array of shape (1, num_channels) that corresponds to per-channel scale
      factors.
"""
scale = self._scale
# If 'scale' is a 1x1x...x1 matrix (ie, only has one element), we
# canonicalize it to a scalar to simplify the shape-handling code in the AQT
# implementation.
if scale.size == 1:
return scale.reshape(())
    # If the caller requested a single per-layer scaling factor but the scale
# factor is non-scalar, raise an error.
del allow_per_channel_scales
return scale
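# Illustrative sketch (not part of the library API): how the QuantOps pieces
# compose for symmetric signed-int fake-quant. The clipping bound and precision
# below are arbitrary example values.
def _example_symmetric_fake_quant(x: jnp.ndarray) -> jnp.ndarray:
  ops = QuantOps.create_symmetric(bounds=6.0, prec=8, half_shift=False)
  # fake_quant == from_quantized(to_quantized(x)): x is clipped to roughly
  # [-6, 6], snapped onto a signed 8-bit grid, and rescaled back to x's dtype.
  return ops.fake_quant(x, quantized_type=SCALE_DTYPE)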
PrecisionType = typing.Any
def quantized_dot(*,
w: jnp.ndarray,
act: jnp.ndarray,
quant_type: QuantType,
weight_params: QuantOps.WeightParams,
act_hparams: Optional[QuantOps.ActHParams],
bounds_params: Union[get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params, None],
prefer_int8_to_int32_dot: bool,
dot_precision: Optional[PrecisionType] = None) -> jnp.ndarray:
"""LAX dot with optionally quantized weights and activations.
Wraps LAX's `Dot
  <https://github.com/google/jax/blob/f65a327c764406db45e95048dfe09209d8ef6d37/jax/_src/lax/lax.py#L632>`_
operator.
Args:
w: an array representing weights
act: an array representing activations
quant_type: quantization strategy
    weight_params: QuantOps.WeightParams instance for describing weights
      quantization.
    act_hparams: Optional activation quantization hyperparameters; instance of
QuantOps.ActHParams. None would mean no activation quantization.
bounds_params: Optional bounds params for auto activation
quantization; instance of GetBounds.Params or DynamicBounds.Params.
prefer_int8_to_int32_dot: Whether to feed lax.dot inputs with an int8 dtype
and accumulate to int32 dtype if quantizing to 8bits or 4bits. If False,
      inputs are always floating-point.
dot_precision: Optional. Either ``None``, which means the default precision
for the backend, or a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
Returns:
An array containing the result with the same dtype as 'w' and 'act'.
Raises:
RuntimeError: 'quant_type' had an unrecognized value.
    TypeError: 'act' and 'w' have different input types.
ValueError: Shapes of 'act' and 'w' not compatible with quant_type.
"""
# This code was initially expanded from
# https://github.com/google/jax/blob/f65a327c764406db45e95048dfe09209d8ef6d37/jax/_src/lax/lax.py#L632
# We keep the original return-value semantics of lax.dot, which this wraps. In
# particular, the type of the return value of quantized_dot is the same as the
# type of the inputs. That means that if the inputs are bfloat16, then the
# return type of this function will also be bfloat16 even though on current
# TPUs the underlying bf16*bf16 matrix-multiplication accumulates results to
# float32. This is potentially undesirable since the user might want the raw
# float32 result, but it ultimately stems from a limitation of the HLO 'dot'
# instruction. If that instruction updates to support user-specified output
# types, we could update quantized_dot accordingly to take a dtype argument to
# control the return value type. This applies equally to
# quantized_dynamic_dot_general.
if not (1 <= act.ndim <= 2 and 1 <= w.ndim <= 2 and
act.shape[-1] == w.shape[0]):
raise ValueError('Incompatible shapes for dot: got {} and {}.'.format(
act.shape, w.shape))
dot_dimension_numbers = (((act.ndim - 1,), (0,)), ((), ()))
return quantized_dot_general(
w=w,
act=act,
quant_type=quant_type,
weight_params=weight_params,
act_hparams=act_hparams,
bounds_params=bounds_params,
prefer_int8_to_int32_dot=prefer_int8_to_int32_dot,
dimension_numbers=dot_dimension_numbers,
dot_precision=dot_precision)
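# Illustrative sketch (not part of the library API): a minimal call into
# quantized_dot with fixed (static) clipping bounds and 8-bit weights and
# activations. All hyperparameter values here are arbitrary examples; in real
# use they come from the experiment's hparams configuration.
def _example_quantized_dot(w: jnp.ndarray, act: jnp.ndarray) -> jnp.ndarray:
  weight_params = QuantOps.WeightParams(prec=8, half_shift=False, axis=None)
  act_hparams = QuantOps.ActHParams(
      input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,
      bounds=6.0,
      prec=8,
      half_shift=False)
  return quantized_dot(
      w=w,
      act=act,
      quant_type=QuantType.AQT,
      weight_params=weight_params,
      act_hparams=act_hparams,
      bounds_params=None,  # not needed when 'bounds' is a fixed float
      prefer_int8_to_int32_dot=False)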
def _canonicalize_feature_axes(axis: Union[int, Tuple[int, ...]],
ndims: int) -> Tuple[int, ...]:
"""Returns feature axes from reduction axes."""
if not isinstance(axis, tuple):
axis = (axis,)
if not all([dim < ndims and dim >= -ndims for dim in axis]):
raise ValueError(f'Axis {axis} should correspond to dimensions '
f'of the weight.')
axis = tuple(sorted([(ndims + dim) % ndims for dim in axis]))
  # The given axes must be the first len(axis) consecutive dimensions of the
  # weight.
is_first_consecutive_dims = (axis == tuple(range(len(axis))))
if not is_first_consecutive_dims:
raise ValueError(
f'Reduction axis {axis} should be consecutive first dimensions '
f'of the weight.')
return axis
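# Small illustration of _canonicalize_feature_axes: negative axes are
# normalized and must form the leading consecutive dimensions of the weight.
def _example_canonicalize_feature_axes():
  assert _canonicalize_feature_axes(0, ndims=3) == (0,)
  assert _canonicalize_feature_axes((-3, 1), ndims=3) == (0, 1)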
@dataclass
class QuantW:
quantized_w: jnp.ndarray
scale: jnp.ndarray
def flaxformer_dot_general(*,
act: jnp.ndarray,
w: Optional[jnp.ndarray],
dimension_numbers: lax.DotDimensionNumbers,
weight_params: QuantOps.WeightParams,
act_hparams: Optional[QuantOps.ActHParams],
bounds_params: Union[None,
get_bounds.DynamicBounds.Params,
get_bounds.GetBounds.Params],
dot_precision: Optional[PrecisionType] = None,
prefer_int8_to_int32_dot: bool = True,
quant_w: Optional[QuantW] = None) -> jnp.ndarray:
"""Flaxformer dot general with optionally quantized weights and activations."""
input_dtype = act.dtype
is_weight_quantized = False
is_act_quantized = False
(axis, _), (_, _) = dimension_numbers
act_scale_shape = tuple(
[act.shape[dim] if dim not in axis else 1 for dim in range(0, act.ndim)])
if w is not None:
weight_scale_shape = (1,) * len(axis) + tuple(
[w.shape[dim] for dim in range(len(axis), w.ndim)])
elif quant_w is not None:
weight_scale_shape = (1,) * len(axis) + tuple([
quant_w.quantized_w.shape[dim]
for dim in range(len(axis), quant_w.quantized_w.ndim)
])
else:
raise ValueError('Either of w or quant_w must be not None')
if act_hparams is not None and act_hparams.prec is not None:
is_act_quantized = True
# Calculate 's', the per-column scale factor on activations.
act_op = QuantOps.create_input_ops(
act, hparams=act_hparams, bounds_params=bounds_params)
act_quantized = act_op.to_quantized(act, dtype=input_dtype)
# Now calculate s^-1. First we extract s, the activation scale factor,
# into a variable called 'act_scale'. We extract it from 'act_op', the
# QuantOps instance that calculated the scale factors for the activation
# matrix.
act_scale = act_op._scale.astype(input_dtype) # pylint: disable=protected-access
if act_scale.ndim == 0:
act_scale = act_scale * jnp.ones(act_scale_shape, act_scale.dtype)
else:
    # In this case, activations are not being quantized; only weights. The
    # activations are passed through unchanged and the activation scale is set
    # to all ones, so it contributes nothing to the rescaling below.
act_quantized = act
act_scale = jnp.ones(act_scale_shape, act.dtype)
if weight_params is not None and weight_params.prec is not None:
is_weight_quantized = True
if quant_w is not None:
weight_quantized = quant_w.quantized_w.astype(input_dtype)
weight_scale = quant_w.scale
else:
# Calculate 'r' from (s^-1) * w
weight_op = QuantOps.create_weights_ops(w, weight_params=weight_params)
weight_scale = weight_op._scale.astype(input_dtype) # pylint: disable=protected-access
if weight_params.expected_scale_shape:
shape_utils.assert_shapes_equal(weight_scale.shape,
weight_params.expected_scale_shape)
# Quantize weight matrix by calculating RoundAndClip(s^-1 * w * t)
# TODO(malmaud): See comment on 'act_op.to_quantized' above, which
# applies here as well.
weight_quantized = weight_op.to_quantized(w, dtype=input_dtype)
else:
assert w is not None, ('w can not be None if weight quantization is not '
'specified.')
weight_quantized = w
weight_scale = jnp.ones(weight_scale_shape, w.dtype)
weight_prec = None if weight_params is None else weight_params.prec
metadata_context = contextlib.suppress()
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=None, rhs_prec=weight_prec, rhs_is_weight=True)
# Use metadata context to annotate op metadata with quantization info
# TODO(shivaniagrawal): this is duplicated code with quantized_dot that can
# be de-duplicated.
act_prec = None if act_hparams is None else act_hparams.prec
act_has_symm_distribution = act_hparams is not None and (
act_hparams.input_distribution
== QuantOps.ActHParams.InputDistribution.SYMMETRIC)
weight_prec = None if weight_params is None else weight_params.prec
# To decide whether to use an integer-domain dot operation, we first check
# if the static quantization parameters are compatible with it by seeing if
# they request that both inputs be quantized 8bits or less. Then check if
# the dynamic parameters are compatible with it. ie, in a training run with
# quantization enabled, are we past the activation start step yet.
# We also do not use int8_to_int32_dot if activation has positive
# distribution and prec=8, since we would not be able to fit uint8 range in
# int8.
# TODO(shivaniagrawal): A proper solution for this would be to have mixed
# dot(uint8, int8) -> int32 in XLA.
weight_fits_in_int8 = is_weight_quantized and (weight_prec is not None and
weight_prec <= 8)
# is_act_quantized might be an instance of a Jax tracer instead of a
# Python boolean since it is generally computed from a dynamic input to a
# JITted Jax function. Thus we use '&' instead of 'and'.
act_prec_fits_int8 = act_prec is not None and (
(act_prec == 8 and act_has_symm_distribution) or (act_prec < 8))
act_fits_in_int8 = is_act_quantized & act_prec_fits_int8
use_int8_to_int32_dot = prefer_int8_to_int32_dot & weight_fits_in_int8 & act_fits_in_int8
with metadata_context:
    # Calculate matmul(...); 'use_int8_to_int32_dot' selects whether the dot
    # runs in the integer domain.
out_quantized = dot_general_aqt(
act_quantized,
weight_quantized,
dimension_numbers=dimension_numbers,
dot_precision=dot_precision,
use_int8_to_int32_dot=use_int8_to_int32_dot)
if is_weight_quantized or is_act_quantized:
inv_scale = lax.dot_general(
(1 / act_scale),
(1 / weight_scale),
dimension_numbers=dimension_numbers,
)
return out_quantized * inv_scale
else:
return out_quantized
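# Illustrative sketch (not part of the library API): the scale shapes
# flaxformer_dot_general works with. For an activation of shape
# (batch, length, features) contracted with a weight of shape
# (features, heads, head_dim) over the 'features' axis, per-channel scales
# broadcast as below. The concrete sizes are arbitrary example values.
def _example_flaxformer_scale_shapes():
  act = jnp.zeros((4, 16, 32))
  w = jnp.zeros((32, 8, 64))
  axis = (2,)  # contracting dimension of the activation
  act_scale_shape = tuple(
      act.shape[dim] if dim not in axis else 1 for dim in range(act.ndim))
  weight_scale_shape = (1,) * len(axis) + tuple(
      w.shape[dim] for dim in range(len(axis), w.ndim))
  assert act_scale_shape == (4, 16, 1)
  assert weight_scale_shape == (1, 8, 64)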
# TODO(shivaniagrawal): extend it for generic dot_dimenson_numbers
def quantized_dot_general(*,
w: Optional[jnp.ndarray],
act: jnp.ndarray,
quant_type: QuantType,
weight_params: QuantOps.WeightParams,
act_hparams: Optional[QuantOps.ActHParams],
bounds_params: Union[None,
get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params],
prefer_int8_to_int32_dot: bool,
dimension_numbers: lax.DotDimensionNumbers,
dot_precision: Optional[PrecisionType] = None,
quant_w: Optional[QuantW] = None) -> jnp.ndarray:
"""LAX dot general with optionally quantized weights and activations.
Wraps LAX's `Dot General
  <https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dot_general.html>`_.
Args:
w: an array representing weights
act: an array representing activations
quant_type: quantization strategy
    weight_params: QuantOps.WeightParams instance for describing weights
      quantization.
    act_hparams: Optional activation quantization hyperparameters; instance of
QuantOps.ActHParams. None would mean no activation quantization.
bounds_params: Optional bounds params for auto activation
quantization; instance of GetBounds.Params or DynamicBounds.Params.
prefer_int8_to_int32_dot: Whether to feed lax.dot inputs with an int8 dtype
and accumulate to int32 dtype if quantizing to 8bits or 4bits. If False,
      inputs are always floating-point.
dimension_numbers: a tuple of tuples of the form `((lhs_contracting_dims,
rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))`.
dot_precision: Optional. Either ``None``, which means the default precision
for the backend, or a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
quant_w: Quantization weights and scales, provided as class QuantW. If
provided, quantization variables are used directly. Defaults to None.
Returns:
An array containing the result with the same dtype as 'w' and 'act'.
Raises:
RuntimeError: 'quant_type' had an unrecognized value.
    TypeError: 'act' and 'w' have different input types.
ValueError: Shapes of 'act' and 'w' not compatible with quant_type.
"""
supported_contracting_dims = ((act.ndim - 1,), (0,))
# (lhs_contracting_dims, rhs_contracting_dims)
if dimension_numbers != (supported_contracting_dims,
((), ())): # (lhs_batch_dims, rhs_batch_dims)):
raise ValueError(
'Quantization is only supported for contracting dimension to be last '
'dimension of lhs and first dimension of rhs.')
if quant_type == QuantType.AQT:
# Let 's' be activation scales and 't' be weight scales. We implement
# matmul(RoundAndClip(act*s), RoundAndClip(s^-1 * w * t)) *t^-1. In the
# comments below, we refer to this terminology.
# lax.dot accepts any combination of 1d and 2d arguments for its lhs and rhs
# input. To simplify the AQT implementation, we only accept 2d arguments for
# now.
if w is not None:
if act.shape[-1] != w.shape[0]:
raise ValueError(
'AQT is currently only implemented for matrix*matrix operations')
num_input_channels = act.shape[-1]
if weight_params.axis is None:
out_channel_shape = ()
else:
axes = _canonicalize_feature_axes(weight_params.axis, w.ndim)
out_channel_shape = tuple(
[w.shape[i] for i in range(w.ndim) if i not in axes])
# The ValueError raised in the guard at the beginning of this function
# should have already checked that the weight matrix has a number of rows
# equal to the number of channels in the activation.
assert w.shape[0] == num_input_channels
# We carry out all intermediate calculations using the same dtype as the
# inputs. We want to be careful to not take a model configured to be
# trained in bf16 and accidentally train it in fp32 by virtue of the scale
# dtype being fp32.
if act.dtype != w.dtype:
raise TypeError(
f'Activations and weight must have the same dtype, but got {act.dtype} and {w.dtype}'
)
else:
assert quant_w is not None
input_dtype = act.dtype
is_act_quantized = False
# In this case, activations will be quantized at some point during training
# (either now or later) and so we need to gather activation statistics by
# calling 'QuantOps.create_input_ops', even if activations are not being
# quantized on this particular training step (see b/174516400).
if act_hparams is not None and act_hparams.prec is not None:
# Calculate 's', the per-column scale factor on activations.
act_op = QuantOps.create_input_ops(
act, hparams=act_hparams, bounds_params=bounds_params)
is_act_quantized = act_op.should_quantize()
# Quantize activation matrix by computing RoundAndClip(w*s)
# TODO(malmaud): We have to cast quantized activations to an fp format
# instead of int8 since int8 matmul with int32 accumulation is not yet
# supported in XLA (and therefore in Jax). See b/170293520. We keep
    # 'act_quantized' in whatever its original fp format was, typically bf16
# or fp32, to follow what Fakequant does (see the type cast at the end of
# QuantOpts.fake_quant).
act_quantized = act_op.to_quantized(act, dtype=input_dtype)
# Now calculate s^-1. First we extract s, the activation scale factor,
# into a variable called 'act_scale'. We extract it from 'act_op', the
# QuantOps instance that calculated the scale factors for the activation
# matrix.
act_scale = act_op.get_scale_for_aqt(allow_per_channel_scales=True)
# act_scale should either be a scalar, corresponding to per-layer
# quantization, or a matrix with shape (1,.., 1, num_input_channels),
# corresponding to per-activation-channel scale factors.
if act_scale.ndim != 0:
shape_utils.assert_shapes_equal(act_scale.shape,
(1,) * (act_scale.ndim - 1) +
(num_input_channels,))
# 'w' has one row per column of 'act_scale'. To scale each row of 'w' by
# the inverse of the corresponding column in 'act_scale', we first have
# to reshape 'act_scale' from (1, ..., num_input_channels) to
# (num_input_channels, 1) so the scale factors will broadcast
# appropriately across the columns of 'w'.
act_scale = act_scale.reshape(num_input_channels, 1)
# Now we calculate s^-1 * w.
# TODO(shivaniagrawal): This section repeats code from the 'else' block.
# The code is repeated twice because quantization can either be disabled
# dynamically by setting the clipping bound to -1 (see comments on
# 'should_quantize'), or statically by setting the 'prec' hyperparameter
# to None. This block deals with the dynamic case (hence necessitating the
# use of the dynamic 'lax.cond') while the 'else' block handles the static
# case. Ultimately, we should unify them.
act_quantized, act_scale = lax.cond(
is_act_quantized,
lambda _: (act_quantized, act_scale),
lambda _: # pylint: disable=g-long-lambda
(act, jnp.ones_like(act_scale)),
None)
else:
# In this case, activations are not being quantized; only weights. There
# is no need to absorb activation scales into the rows of the weight
# matrix so 'w_scaled_rows' can just be set to the original weight matrix.
act_quantized = act
act_scale = jnp.array(1.0, dtype=SCALE_DTYPE)
is_weight_quantized = False
if weight_params is not None and weight_params.prec is not None:
is_weight_quantized = True
if quant_w is not None:
weight_quantized = quant_w.quantized_w.astype(input_dtype)
weight_scale = quant_w.scale
else:
# Calculate 'r' from (s^-1) * w
w_scaled_rows = ((1 / act_scale) * w).astype(input_dtype)
weight_op = QuantOps.create_weights_ops(
w_scaled_rows, weight_params=weight_params)
weight_scale = weight_op.get_scale_for_aqt(
allow_per_channel_scales=True)
# Similar to 'act_scale' above, the weight_scale can either be a single
# scalar or be a matrix with shape (1, out_channel_shape), corresponding
# to a per-channel scale factor for the weight matrix. We verify it
# here.
if weight_scale.ndim != 0:
shape_utils.assert_shapes_equal(weight_scale.shape,
(1,) + out_channel_shape)
if act.ndim != 0:
weight_scale_shape = (1,) * (act.ndim - 1) + out_channel_shape
weight_scale = weight_scale.reshape(weight_scale_shape)
# Quantize weight matrix by calculating RoundAndClip(s^-1 * w * t)
# TODO(malmaud): See comment on 'act_op.to_quantized' above, which
# applies here as well.
weight_quantized = weight_op.to_quantized(
w_scaled_rows, dtype=input_dtype)
else:
assert w is not None, ('w can not be None if weight quantization is not '
'specified.')
weight_quantized = ((1 / act_scale) * w).astype(input_dtype)
weight_scale = jnp.array(1.0, dtype=SCALE_DTYPE)
# Use metadata context to annotate op metadata with quantization info
act_prec = None if act_hparams is None else act_hparams.prec
act_has_symm_distribution = act_hparams is not None and (
act_hparams.input_distribution
== QuantOps.ActHParams.InputDistribution.SYMMETRIC)
weight_prec = None if weight_params is None else weight_params.prec
# To decide whether to use an integer-domain dot operation, we first check
# if the static quantization parameters are compatible with it by seeing if
# they request that both inputs be quantized 8bits or less. Then check if
# the dynamic parameters are compatible with it. ie, in a training run with
# quantization enabled, are we past the activation start step yet.
# We also do not use int8_to_int32_dot if activation has positive
# distribution and prec=8, since we would not be able to fit uint8 range in
# int8.
# TODO(shivaniagrawal): A proper solution for this would be to have mixed
# dot(uint8, int8) -> int32 in XLA.
weight_fits_in_int8 = is_weight_quantized and (weight_prec is not None and
weight_prec <= 8)
# is_act_quantized might be an instance of a Jax tracer instead of a
# Python boolean since it is generally computed from a dynamic input to a
# JITted Jax function. Thus we use '&' instead of 'and'.
act_prec_fits_int8 = act_prec is not None and (
(act_prec == 8 and act_has_symm_distribution) or (act_prec < 8))
act_fits_in_int8 = is_act_quantized & act_prec_fits_int8
use_int8_to_int32_dot = prefer_int8_to_int32_dot & weight_fits_in_int8 & act_fits_in_int8
metadata_context = contextlib.suppress()
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=act_prec, rhs_prec=weight_prec, rhs_is_weight=True)
with metadata_context:
# Calculate matmul(...)
out_quantized = dot_general_aqt(
act_quantized,
weight_quantized,
dimension_numbers=dimension_numbers,
dot_precision=dot_precision,
use_int8_to_int32_dot=use_int8_to_int32_dot)
# Scale the columns of the matmul output by computing `matmul(...) * t^-1`
# TODO(malmaud): Make it possible to return an unquantized matmul to support
# disabling quantization during initial phase of training.
#
# We convert the return value back to input_dtype to ensure the output
# tensor of quantized_dot has the same dtype as the input tensors to
# quantized_dot. This explicit cast is necessary since if the inputs are
    # bf16, 'weight_scale' will still be fp32 and so multiplying out_quantized by
# (1/weight_scale) will result in a fp32 tensor. We want to convert that
# back to bf16.
return (out_quantized * (1 / weight_scale)).astype(input_dtype)
elif quant_type in (QuantType.FAKE_QUANT, QuantType.FAKE_QUANT_WITH_INT):
if quant_w is not None:
raise ValueError(
'quantized vars are not supported in `fake_quant` style quantization.'
)
if quant_type == QuantType.FAKE_QUANT_WITH_INT:
fake_dependency = act
# create a dependency on fake input to control constant folding
else:
fake_dependency = None
quantized_type = quant_type.to_jax_type()
w = QuantOps.create_weights_fake_quant(
w,
weight_params=weight_params,
quantized_type=quantized_type,
fake_dependency=fake_dependency)
    # TODO(shivaniagrawal): HParams currently allows act_hparams to be None.
    # Going forward we can change act_hparams to be a required field, where
    # setting either `prec` or `bounds` to None results in no activation
    # quantization.
if act_hparams:
act = QuantOps.create_inputs_fake_quant(
act, hparams=act_hparams, bounds_params=bounds_params)
metadata_context = contextlib.suppress()
# Use metadata context to annotate op metadata with quantization info
act_prec = None if act_hparams is None else act_hparams.prec
weight_prec = None if weight_params is None else weight_params.prec
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=act_prec, rhs_prec=weight_prec, rhs_is_weight=True)
with metadata_context:
out_quantized = lax.dot_general(
act, w, dimension_numbers=dimension_numbers, precision=dot_precision)
return out_quantized
else:
raise RuntimeError(f'Unsupported quant_type {quant_type}')
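# Illustrative sketch (not part of the library API): the AQT algebra used
# above, written with plain jnp ops and per-layer scalar scales s (activations)
# and t (weights):
#   matmul(RoundAndClip(act * s), RoundAndClip(s^-1 * w * t)) * t^-1 ~= act @ w
# Round/clip here are plain jnp ops rather than the library's primitives, and
# the bound is an arbitrary signed-8-bit-style example value.
def _example_aqt_algebra(act: jnp.ndarray, w: jnp.ndarray) -> jnp.ndarray:
  prec_bound = 127.0
  s = prec_bound / (jnp.max(jnp.abs(act)) + jnp.finfo(SCALE_DTYPE).eps)
  t = prec_bound / (jnp.max(jnp.abs(w / s)) + jnp.finfo(SCALE_DTYPE).eps)
  act_q = jnp.clip(jnp.round(act * s), -prec_bound, prec_bound)
  w_q = jnp.clip(jnp.round((w / s) * t), -prec_bound, prec_bound)
  return (act_q @ w_q) / t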
class QuantizedDot(nn.Module):
"""Flax module that calculates a quantized 'dot' operation."""
quant_type: QuantType
weight_params: QuantOps.WeightParams
act_hparams: Optional[QuantOps.ActHParams]
prefer_int8_to_int32_dot: bool
dot_precision: Optional[PrecisionType] = None
# TODO(malmaud): Remove the 'padding_mask' field from 'GetBounds.Params'
# so that 'bounds_params' can be a hyperparameter of this class and
  # only the padding mask will be passed as an argument to '__call__'.
@nn.compact
def __call__(
self, w: jnp.ndarray, act: jnp.ndarray,
bounds_params: Union[None, get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params]
) -> jnp.ndarray:
return quantized_dot(
w=w,
act=act,
bounds_params=bounds_params,
quant_type=self.quant_type,
weight_params=self.weight_params,
act_hparams=self.act_hparams,
dot_precision=self.dot_precision,
prefer_int8_to_int32_dot=self.prefer_int8_to_int32_dot)
def quantized_dynamic_dot_general(
*,
lhs_act: jnp.ndarray,
rhs_act: jnp.ndarray,
quant_type: QuantType,
lhs_act_hparams: Optional[QuantOps.ActHParams],
lhs_bounds_params: Union[None, get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params],
rhs_act_hparams: Optional[QuantOps.ActHParams],
rhs_bounds_params: Union[None, get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params],
dot_dimension_numbers: lax.DotDimensionNumbers,
dot_precision: Optional[PrecisionType] = None) -> jnp.ndarray:
"""LAX dot general with optionally quantized dynamic inputs.
Wraps LAX's `DotGeneral
  <https://github.com/google/jax/blob/f65a327c764406db45e95048dfe09209d8ef6d37/jax/_src/lax/lax.py#L667>`_
operator.
Args:
    lhs_act: an array representing the left-hand-side activations
    rhs_act: an array representing the right-hand-side activations
quant_type: quantization strategy
    lhs_act_hparams: Optional activation quantization hyperparameters for lhs act;
instance of QuantOps.ActHParams. None means no quantization.
lhs_bounds_params: Optional get bounds params for lhs act auto
quantization; instance of GetBounds.Params.
    rhs_act_hparams: Optional activation quantization hyperparameters for rhs act;
instance of QuantOps.ActHParams. None means no quantization.
rhs_bounds_params: Optional get bounds params for rhs act auto
quantization; instance of GetBounds.Params.
dot_dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims,
      rhs_batch_dims))`.
dot_precision: Optional. Either ``None``, which means the default precision
for the backend, or a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
Returns:
An array containing the result.
Raises:
RuntimeError: 'quant_type' had an unrecognized value.
TypeError: Dtypes of lhs_act and rhs_act differed.
"""
# See comment at the beginning of quantized_dot regarding its return type,
# which also applies to this function.
if quant_type == QuantType.AQT:
# Let 's1' be the scale of 'lhs_act' and 's2' be the scale of 'rhs_act'. We
# calculate dot_general(RoundAndClip(s1*lhs_act),
# RoundAndClip(s2*rhs_act))/(s1*s2). Note that unlike in
# quantized_dot_general, the scale factors must be scalar (ie, per-tensor
# quantization) since activations always have static scale factors and so
# there is no way to absorb per-column scale factor from lhs_act into the
# rows of rhs_act.
# See comment on 'input_dtype' in 'quantized_dot'.
if lhs_act.dtype != rhs_act.dtype:
raise TypeError('Both activations must have the same dtypes, but got '
f'{lhs_act.dtype} and {rhs_act.dtype}')
input_dtype = lhs_act.dtype
def get_tensor_and_scale_for_act(
act: jnp.ndarray, hparams: Optional[QuantOps.ActHParams],
bounds_params: Union[None, get_bounds.GetBounds.Params,
get_bounds.DynamicBounds.Params]
) -> Tuple[jnp.ndarray, jnp.ndarray]:
# We check whether activations should be quantized based on 'hparams'. If
# so, we quantize it. If not, we return it unchanged. In either case, we
# return a scale factor appropriate for unscaling the result of the
# lax.dot_general.
if hparams is not None and hparams.prec is not None:
quant_op = QuantOps.create_input_ops(
act, hparams=hparams, bounds_params=bounds_params)
scale = quant_op.get_scale_for_aqt(allow_per_channel_scales=False)
# Since only per-layer scale factors are supported, we assert that the
# scale factors are scalars.
shape_utils.assert_shapes_compatible(scale.shape, ())
# TODO(malmaud): See comment on 'act_op.to_quantized' earlier in this
# file, which applies here as well.
act_quantized = quant_op.to_quantized(act, dtype=input_dtype)
# TODO(shivaniagrawal): See comment in 'dot_general' above on why this
# logic is duplicated here and in the 'else' block below.
return lax.cond(
quant_op.should_quantize(), #
lambda _: (act_quantized, scale), #
lambda _: (act, jnp.array(1.0, dtype=SCALE_DTYPE)), #
None)
else:
# To avoid having a separate code path for every possibility of which of
      # the two input tensors are quantized, we implement not quantizing an
# activation tensor by simply setting its corresponding scale factor to
# 1.0.
return act, jnp.array(1.0, dtype=SCALE_DTYPE)
lhs_quantized, lhs_scale = get_tensor_and_scale_for_act(
lhs_act, lhs_act_hparams, lhs_bounds_params)
rhs_quantized, rhs_scale = get_tensor_and_scale_for_act(
rhs_act, rhs_act_hparams, rhs_bounds_params)
metadata_context = contextlib.suppress()
# Use metadata context to annotate op metadata with quantization info
lhs_prec = None if lhs_act_hparams is None else lhs_act_hparams.prec
rhs_prec = None if rhs_act_hparams is None else rhs_act_hparams.prec
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=lhs_prec, rhs_prec=rhs_prec, rhs_is_weight=False)
with metadata_context:
out_quantized = lax.dot_general(
lhs_quantized,
rhs_quantized,
dimension_numbers=dot_dimension_numbers,
precision=dot_precision)
# TODO(malmaud): There is an asymmetry here: when we scale the activations
# to quantize them, the scaling happens in QuantOps.to_quantized. But here,
# when we dequantize the matrix multiplication of the activations by
# dividing by the product of the scale factors, we don't use QuantOps. It
# would be cleaner to do both operations at the same level of abstraction.
out = (out_quantized / (lhs_scale * rhs_scale)).astype(input_dtype)
elif quant_type in (QuantType.FAKE_QUANT, QuantType.FAKE_QUANT_WITH_INT):
  # TODO(shivaniagrawal): HParams currently allows act_hparams to be None.
  # Going forward we can change act_hparams to be a required field, where
  # setting either `prec` or `bounds` to None results in no activation
  # quantization.
if lhs_act_hparams:
lhs_act = QuantOps.create_inputs_fake_quant(
lhs_act, hparams=lhs_act_hparams, bounds_params=lhs_bounds_params)
if rhs_act_hparams:
rhs_act = QuantOps.create_inputs_fake_quant(
rhs_act, hparams=rhs_act_hparams, bounds_params=rhs_bounds_params)
metadata_context = contextlib.suppress()
# Use metadata context to annotate op metadata with quantization info
lhs_prec = None if lhs_act_hparams is None else lhs_act_hparams.prec
rhs_prec = None if rhs_act_hparams is None else rhs_act_hparams.prec
if flags.FLAGS.metadata_enabled:
metadata_context = compute_cost_utils.DotMetadataMonkeyPatch(
lhs_prec=lhs_prec, rhs_prec=rhs_prec, rhs_is_weight=False)
with metadata_context:
out = lax.dot_general(
lhs_act,
rhs_act,
dimension_numbers=dot_dimension_numbers,
precision=dot_precision)
else:
raise RuntimeError(f'Unknown quant_type {quant_type}')
return out
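# Illustrative sketch (not part of the library API): minimal usage of
# quantized_dynamic_dot_general with fixed clipping bounds on both operands,
# contracting the last dimension of each (as in a query*key product). All
# hyperparameter values here are arbitrary examples.
def _example_quantized_dynamic_dot_general(lhs: jnp.ndarray,
                                           rhs: jnp.ndarray) -> jnp.ndarray:
  act_hparams = QuantOps.ActHParams(
      input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,
      bounds=2.0,
      prec=8,
      half_shift=False)
  dims = (((lhs.ndim - 1,), (rhs.ndim - 1,)), ((), ()))
  return quantized_dynamic_dot_general(
      lhs_act=lhs,
      rhs_act=rhs,
      quant_type=QuantType.AQT,
      lhs_act_hparams=act_hparams,
      lhs_bounds_params=None,
      rhs_act_hparams=act_hparams,
      rhs_bounds_params=None,
      dot_dimension_numbers=dims)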
@functools.partial(jax.custom_jvp, nondiff_argnums=(1, 2, 3))
def quantized_sum(
x: jnp.ndarray, #
axis: Union[int, Tuple[int, ...]],
keepdims: bool,
prec: Optional[QuantOps.FloatQuant.FloatPrec]) -> jnp.ndarray:
"""Sums a tensor while quantizing intermediate accumulations.
  This is almost a drop-in replacement for jnp.sum. It only differs in that it
  takes in a 'prec' parameter that controls the quantization of
  intermediate accumulations during the reduction.
Arguments:
x: Input, a Jax array
axis: Which axes to reduce over (see jnp.sum docs)
    keepdims: Whether to keep or drop axes that are reduced (see jnp.sum docs)
    prec: Precision to quantize intermediate accumulations to. Currently it can
      only be an instance of QuantOps.FloatQuant.FloatPrec, corresponding to an
      unscaled floating-point format, or it can be None to indicate that no
      quantization should be applied.
Returns:
A Jax array with the quantized sum of 'x'.
"""
# Don't quantize. In this case, this function just wraps jnp.sum.
if prec is None:
return jnp.sum(x, axis=axis, keepdims=keepdims)
# We bypass QuantOps.create_input_ops and directly call
# QuantOps.create_symmetric_fp because the former creates an instance of
# GetBounds, which in turn creates state variables to store activation
# statistics. We do not want to compute statistics for each individual
# addition within the sum reduction.
fp_quant = QuantOps.FloatQuant(is_scaled=False, fp_spec=prec)
quant_ops = QuantOps.create_symmetric_fp(fp_quant=fp_quant, bounds=None)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = utils.normalize_axes(axis, x.ndim)
dtype = x.dtype
zero = jnp.zeros((), dtype=dtype)
x_quantized_sum = lax.reduce(
x,
init_values=zero,
computation=lambda a, b: quant_ops.to_quantized(a + b, dtype=dtype),
dimensions=axis)
if keepdims:
x_quantized_sum = jnp.expand_dims(x_quantized_sum, axis)
return x_quantized_sum
@quantized_sum.defjvp
def _quantized_sum_jvp(axis, keepdims, prec, primals, tangents):
(x,), (x_dot,) = primals, tangents
y = quantized_sum(x, axis=axis, keepdims=keepdims, prec=prec)
# We calculate the JVP based on the JVP of the original jnp.sum function. That
# corresponds to using a straight-through-estimator for the quantization
# operators in 'quantized_sum'.
_, y_dot = jax.jvp(lambda x: jnp.sum(x, keepdims=keepdims, axis=axis), (x,),
(x_dot,))
return y, y_dot
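# --- Added usage sketch (not part of the original library): exercises the
# prec=None path of `quantized_sum`, which simply falls back to jnp.sum. The
# quantized path needs a QuantOps.FloatQuant.FloatPrec value, whose exact
# construction is not shown in this file, so it is intentionally left out.
def _example_quantized_sum_unquantized():
  x = jnp.arange(12.0).reshape(3, 4)
  result = quantized_sum(x, axis=1, keepdims=False, prec=None)
  assert jnp.allclose(result, jnp.sum(x, axis=1))
  return result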
@functools.partial(jax.custom_jvp, nondiff_argnums=(2, 3, 4))
def dot_general_aqt(lhs, rhs, dimension_numbers, dot_precision,
use_int8_to_int32_dot):
"""Wrapper around lax.dot_general, but with option to use integer dot.
This function comes equipped with a custom gradient that defines the
gradient of this function to be the same as the equivalent call to
lax.dot_general, ignoring casts to and from integer types so that
quantization-aware-training will work correctly.
See docstring of lax.dot_general.
Args:
lhs: same as in lax.dot_general
rhs: same as in lax.dot_general
dimension_numbers: same as in lax.dot_general
dot_precision: same as in lax.dot_general
use_int8_to_int32_dot: boolean. If true, inputs to lax.dot_general will be
cast to int8 and results accumulated to int32, then converted back to the
original input type.
Returns:
Same as lax.dot_general.
"""
# We define two versions of a dot operation. The first feeds lax.dot_general
# the original inputs, which are typically bfloat16 or float32. The second
# converts the inputs to int8 tensors and accumulates results to an int32
# output.
def dot_general_fp(ops):
lhs_, rhs_ = ops
return lax.dot_general(
lhs_,
rhs_,
dimension_numbers=dimension_numbers,
precision=dot_precision)
def dot_general_int(ops):
lhs_, rhs_ = ops
input_dtype = lhs_.dtype
lhs_int = lhs_.astype(jnp.int8)
rhs_int = rhs_.astype(jnp.int8)
return lax.dot_general(
lhs_int,
rhs_int,
dimension_numbers=dimension_numbers,
precision=dot_precision,
preferred_element_type=jnp.int32).astype(input_dtype)
return lax.cond(use_int8_to_int32_dot, dot_general_int, dot_general_fp,
(lhs, rhs))
@dot_general_aqt.defjvp
def _dot_general_aqt_jvp(dimension_numbers, dot_precision,
use_int8_to_int32_dot, primals, tangents):
"""Custom gradient for dot_general_aqt that ignores integer casts."""
lhs, rhs = primals
lhs_dot, rhs_dot = tangents
y = dot_general_aqt(
lhs,
rhs,
dimension_numbers=dimension_numbers,
dot_precision=dot_precision,
use_int8_to_int32_dot=use_int8_to_int32_dot)
def differentiable_dot_general(lhs_, rhs_):
return lax.dot_general(
lhs_,
rhs_,
dimension_numbers=dimension_numbers,
precision=dot_precision)
_, y_tangent = jax.jvp(differentiable_dot_general, (lhs, rhs),
(lhs_dot, rhs_dot))
return y, y_tangent
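# --- Added usage sketch (not part of the original library): a plain matrix
# multiplication driven through `dot_general_aqt`. The dimension_numbers value
# follows the lax.dot_general convention, and passing True as the last
# positional argument selects the int8-input/int32-accumulation path.
def _example_dot_general_aqt():
  lhs = jnp.ones((2, 3), dtype=jnp.float32)
  rhs = jnp.ones((3, 4), dtype=jnp.float32)
  dims = (((1,), (0,)), ((), ()))  # contract lhs axis 1 with rhs axis 0
  out = dot_general_aqt(lhs, rhs, dims, None, True)
  assert out.shape == (2, 4)
  return out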
| {
"content_hash": "8a1ac73df2e2780515871adc76873748",
"timestamp": "",
"source": "github",
"line_count": 1469,
"max_line_length": 105,
"avg_line_length": 42.89720898570456,
"alnum_prop": 0.6749873048114764,
"repo_name": "google/aqt",
"id": "983f575b7815ffd2cc5b4e274c2369216b8b117e",
"size": "63592",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "aqt/jax_legacy/jax/quantization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "131927"
},
{
"name": "Python",
"bytes": "1450310"
},
{
"name": "Shell",
"bytes": "2862"
}
],
"symlink_target": ""
} |
from generate import Generator
from gensim import models
from plan import train_planner
from paths import save_dir
import argparse
import os
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Chinese poem generation.')
parser.add_argument('-p', dest = 'planner', default = False,
action = 'store_true', help = 'train planning model')
parser.add_argument('-g', dest = 'generator', default = False,
action = 'store_true', help = 'train generation model')
parser.add_argument('-a', dest = 'all', default = False,
action = 'store_true', help = 'train both models')
parser.add_argument('--clean', dest = 'clean', default = False,
action = 'store_true', help = 'delete all models')
args = parser.parse_args()
if args.clean:
for f in os.listdir(save_dir):
os.remove(os.path.join(save_dir, f))
else:
if args.all or args.planner:
train_planner()
if args.all or args.generator:
generator = Generator()
generator.train(n_epochs = 1000)
print("All training is done!")
| {
"content_hash": "4ce31915268003ee6d05f6954a43a1ad",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 37.29032258064516,
"alnum_prop": 0.6150519031141869,
"repo_name": "DevinZ1993/Chinese-Poetry-Generation",
"id": "b58f0e15533faf70fb925811a0406397246f9606",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39589"
}
],
"symlink_target": ""
} |
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QStackedLayout
app = QApplication(sys.argv)
# Make widgets
window = QWidget()
btn1 = QPushButton("One")
btn2 = QPushButton("Two")
btn3 = QPushButton("Three")
# Set the layout
stack = QStackedLayout()
stack.addWidget(btn1)
stack.addWidget(btn2)
stack.addWidget(btn3)
stack.setCurrentIndex(1)
window.setLayout(stack)
# Show
window.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| {
"content_hash": "bbaf2753ea86a318f46a288a23b94d75",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 119,
"avg_line_length": 21.52777777777778,
"alnum_prop": 0.7496774193548387,
"repo_name": "jeremiedecock/snippets",
"id": "b128a107077ef8ee2b91133b3a62bba47b54c3c4",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyqt/pyqt5/layout_QStackedLayout.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import os
import os.path
import click
from .geocoder import (BingGeocoder, get_addresses_from_file,
pretty_print_statuses, write_addresses_to_file)
try:
from .service import download_jobs, check_pending_jobs
except ImportError:
download_jobs = None
check_pending_jobs = None
# Set up logging
logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s - %(levelname)s - line: %(lineno)d - %(message)s'
}
},
'handlers': {
'default': {
'level': 'INFO',
'class': 'logging.FileHandler',
            'filename': os.path.join(os.path.expanduser('~'), 'logs', 'bing_geocoder.log'),
'formatter': 'standard'
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
}
}
}
class ConfigurationError(Exception):
pass
def get_geocoder(api_key=None):
if api_key is None:
try:
env_var = 'BING_MAPS_API_KEY'
api_key = os.environ[env_var]
except KeyError:
raise ConfigurationError("Need to provide Bing Maps API key")
return BingGeocoder(api_key)
@click.group()
def cli():
pass
@click.command()
@click.argument('path', type=click.Path(exists=True, dir_okay=False))
@click.option('--api-key', type=str, default=None, help="Bing Maps API key")
def upload(path, api_key=None):
logging.config.dictConfig(logging_config)
try:
geocoder = get_geocoder(api_key)
except ConfigurationError:
print('Error: Need to provide Bing Maps API key.')
return
batch = geocoder.batch_addresses(get_addresses_from_file(path))
job_id = geocoder.upload_address_batch(batch)
if job_id:
print('Successful upload. Job id is {}'.format(job_id))
cli.add_command(upload)
@click.command()
@click.option('--api-key', type=str, default=None, help="Bing Maps API key")
def status(api_key=None):
logging.config.dictConfig(logging_config)
try:
geocoder = get_geocoder(api_key)
except ConfigurationError:
print('Error: Need to provide Bing Maps API key.')
return
pretty_print_statuses(geocoder.get_job_statuses())
cli.add_command(status)
@click.command()
@click.argument('job_id', type=str)
@click.argument('path', type=click.Path(dir_okay=False))
@click.option('--api-key', type=str, default=None, help="Bing Maps API key")
def download(job_id, path, api_key=None):
logging.config.dictConfig(logging_config)
try:
geocoder = get_geocoder(api_key)
except ConfigurationError:
print('Error: Need to provide Bing Maps API key.')
return
results = geocoder.get_job_results(job_id)
if len(results):
write_addresses_to_file(path, results)
cli.add_command(download)
@click.command()
@click.argument('task')
@click.option('--api-key', type=str, default=None, help="Bing Maps API key")
def service(task, api_key=None):
    if download_jobs is None or check_pending_jobs is None:
        # These tasks are None when the optional service module could not be
        # imported, which usually means boto isn't installed.
        print("To run the service command, you need to install the boto package")
        return
    logfile = os.path.join(os.path.expanduser('~'), 'logs', 'geocode_service.log')
    logging_config['handlers']['default']['filename'] = logfile
    logging.config.dictConfig(logging_config)
    try:
        geocoder = get_geocoder(api_key)
except ConfigurationError as e:
logging.error(e)
return
commands = {
'download': download_jobs,
'statuses': check_pending_jobs,
}
try:
task_fn = commands[task]
except KeyError:
logging.error('{} is an unsupported task'.format(task))
return
task_fn(geocoder)
cli.add_command(service)
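# Added entry point (not in the original module; an assumption made for direct
# execution). An installed distribution would more likely expose `cli` through
# a setuptools console_scripts entry point instead.
if __name__ == '__main__':
    cli()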
| {
"content_hash": "1886d0c3ffe8b1d3545e22589452a368",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 93,
"avg_line_length": 28.58041958041958,
"alnum_prop": 0.6266209933936873,
"repo_name": "newsapps/bing-bulk-geocoder",
"id": "d39cd18ebb1bd9c01823e0e4c9571dd252fc4a91",
"size": "4087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bing_geocoder/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2597"
},
{
"name": "HTML",
"bytes": "3700"
},
{
"name": "Python",
"bytes": "19924"
},
{
"name": "Shell",
"bytes": "452"
}
],
"symlink_target": ""
} |
"""
Author: robalar <[email protected]>
URL: github.com/Streams
This file is part of streams
Streams is free software, and is distributed under the MIT licence.
See LICENCE or opensource.org/licenses/MIT
"""
from streams.search import providers
import logging
logger = logging.getLogger(__name__)
def do_search(term):
"""Gets movies matching term from all providers.
Args:
term (string): the search term to submit to providers
Returns:
A list of Movie objects fetched from all providers
"""
results = []
for provider in providers.get_provider_list():
provider_results = []
try:
logger.info('Searching {0} for \'{1}\''.format(provider.name, term))
provider_results = provider.do_search(term)
except Exception as exc:
logger.warning('Could not get results from {0}: {1}'.format(provider.name, exc))
continue
results += provider_results
return results
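# Added usage sketch (not part of the original module): how a caller might
# drive do_search. It assumes at least one provider module is registered in
# streams.search.providers and that the providers can reach their sources.
if __name__ == '__main__':
    for movie in do_search('the matrix'):
        print(movie)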
| {
"content_hash": "922ec69b41f98194246fde3eaf921247",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 92,
"avg_line_length": 24.75,
"alnum_prop": 0.6515151515151515,
"repo_name": "robalar/Streams",
"id": "670bf98f4d755c4929a8b6db09a2fd44bb0ff2ca",
"size": "990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streams/search/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35842"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, session, redirect, url_for, flash
from flask_wtf import FlaskForm
from flask_bootstrap import Bootstrap
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
Bootstrap(app)
class NameForm(FlaskForm):
name = StringField('What is your name? ', validators=[DataRequired()])
submit = SubmitField('Submit :233')
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
            flash('Your name has been changed!')
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name', None))
if __name__ == "__main__":
app.run(debug=True)
| {
"content_hash": "78c95847b3622f33965c6ab312981067",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 83,
"avg_line_length": 29.151515151515152,
"alnum_prop": 0.6621621621621622,
"repo_name": "sharkspeed/dororis",
"id": "600c6d9aefcbb980d4e7a79dfb0c7f4bc9a786cc",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "packages/python/flask/flask-dog-book/4-chapter/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Agda",
"bytes": "152"
},
{
"name": "AppleScript",
"bytes": "4936"
},
{
"name": "Assembly",
"bytes": "6654"
},
{
"name": "C",
"bytes": "568507"
},
{
"name": "C#",
"bytes": "2446"
},
{
"name": "C++",
"bytes": "15567"
},
{
"name": "CSS",
"bytes": "74090"
},
{
"name": "Clojure",
"bytes": "986"
},
{
"name": "CoffeeScript",
"bytes": "1055"
},
{
"name": "Crystal",
"bytes": "13171"
},
{
"name": "Dart",
"bytes": "22343"
},
{
"name": "Elixir",
"bytes": "27938"
},
{
"name": "Fortran",
"bytes": "400"
},
{
"name": "Go",
"bytes": "117383"
},
{
"name": "HTML",
"bytes": "780346"
},
{
"name": "Haskell",
"bytes": "33977"
},
{
"name": "Idris",
"bytes": "167"
},
{
"name": "Java",
"bytes": "105613"
},
{
"name": "JavaScript",
"bytes": "1453348"
},
{
"name": "Kotlin",
"bytes": "24078"
},
{
"name": "Lex",
"bytes": "1156"
},
{
"name": "Makefile",
"bytes": "22596"
},
{
"name": "Mako",
"bytes": "1976"
},
{
"name": "Objective-C",
"bytes": "1500"
},
{
"name": "PHP",
"bytes": "868941"
},
{
"name": "Python",
"bytes": "553417"
},
{
"name": "Racket",
"bytes": "11698"
},
{
"name": "Roff",
"bytes": "3741"
},
{
"name": "Ruby",
"bytes": "129923"
},
{
"name": "Rust",
"bytes": "27692"
},
{
"name": "Scala",
"bytes": "791"
},
{
"name": "Shell",
"bytes": "17297"
},
{
"name": "Smarty",
"bytes": "421"
},
{
"name": "Swift",
"bytes": "197600"
},
{
"name": "TeX",
"bytes": "3875"
},
{
"name": "TypeScript",
"bytes": "24815"
},
{
"name": "Vim script",
"bytes": "6936"
},
{
"name": "Vue",
"bytes": "32921"
},
{
"name": "Zig",
"bytes": "634"
}
],
"symlink_target": ""
} |
class Solution(object):
def inorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
        result = []
        return self.helper(root, result)
    def helper(self, root, result):
        if root is None:
            return []
        self.helper(root.left, result)
        result.append(root.val)
        self.helper(root.right, result)
        return result
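# Added usage sketch (not part of the original solution): LeetCode normally
# supplies TreeNode, so a minimal stand-in is defined here only to let the
# traversal be exercised locally.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
    print(Solution().inorderTraversal(root))  # expected output: [1, 3, 2]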
| {
"content_hash": "3797b07561e4fc7a121d046641067170",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 38,
"avg_line_length": 23.88235294117647,
"alnum_prop": 0.5197044334975369,
"repo_name": "Jspsun/LEETCodePractice",
"id": "426bdec1dd0db261e03771a3ddf712db2fe2dd22",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/InOrderTraversal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "15259"
},
{
"name": "JavaScript",
"bytes": "747"
},
{
"name": "Python",
"bytes": "138441"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
class TestGeneralUsage(object):
def test_config_error(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
import pytest
raise pytest.UsageError("hello")
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
result.stderr.fnmatch_lines([
'*ERROR: hello'
])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest("""
def pytest_sessionstart():
0 / 0
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines([
'*INTERNALERROR*File*conftest.py*line 2*',
'*0 / 0*',
])
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines([
'*INTERNALERROR*def pytest_sessionstart():*',
'*INTERNALERROR*0 / 0*',
])
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest("""
def pytest_configure():
0 / 0
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines([
'*INTERNALERROR*File*conftest.py*line 2*',
'*0 / 0*',
])
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest("""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
""")
result = testdir.runpytest("-s", "asd")
assert result.ret == 4 # EXIT_USAGEERROR
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
result.stdout.fnmatch_lines([
"*---configure",
"*---unconfigure",
])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
""")
testdir.makepyfile(test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
""")
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines([
'*1 passed*',
])
def test_assertion_magic(self, testdir):
p = testdir.makepyfile("""
def test_this():
x = 0
assert x
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"> assert x",
"E assert 0",
])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile("""
import import_fails
def test_this():
assert import_fails.a == 1
""")
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
# XXX on jython this fails: "> import import_fails",
"ImportError while importing test module*",
"*No module named *does_not_work*",
])
assert result.ret == 2
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret
result.stderr.fnmatch_lines([
"*ERROR: not found:*%s" % (p2.basename,)
])
def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):
testdir.makepyfile("")
testdir.makeconftest("import qwerty")
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines("""
*--version*
*warning*conftest.py*
""")
result = testdir.runpytest()
result.stderr.fnmatch_lines("""
*ERROR*could not load*conftest.py*
""")
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest("""
import pytest
def pytest_collect_directory():
pytest.skip("early")
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"*1 skip*"
])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.makeconftest("""
import pytest
class MyFile(pytest.File):
def collect(self):
return [MyItem("hello", parent=self)]
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyItem(pytest.Item):
pass
""")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines([
"*MyFile*test_issue88*",
"*Module*test_issue88*",
])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest("""
import sys
print ("should not be seen")
sys.stderr.write("stder42\\n")
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "should not be seen" not in result.stdout.str()
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest("""
print ("should be seen")
assert 0
""")
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
reason="symlink not available on this platform")
def test_chdir(self, testdir):
testdir.tmpdir.join("py").mksymlinkto(py._pydir)
p = testdir.tmpdir.join("main.py")
p.write(_pytest._code.Source("""
import sys, os
sys.path.insert(0, '')
import py
print (py.__file__)
print (py.__path__)
os.chdir(os.path.dirname(os.getcwd()))
print (py.log)
"""))
result = testdir.runpython(p)
assert not result.ret
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.tmpdir.mkdir("sub1")
sub2 = testdir.tmpdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == EXIT_NOTESTSCOLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest(sub1)
assert result.ret == EXIT_USAGEERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
""")
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest("""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector(path, parent)
""")
result = testdir.runpytest(c.basename + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 pass*",
])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_generate_tests(metafunc):
metafunc.addcall({'x': 3}, id='hello-123')
def pytest_runtest_setup(item):
print (item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
""")
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile("""
def pytest_generate_tests(metafunc):
metafunc.addcall({'i': 1}, id="1")
metafunc.addcall({'i': 2}, id="2")
def test_func(i):
pass
""")
res = testdir.runpytest(p.basename + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile("""
def test_func():
pass
""")
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest("""
def pytest_configure():
x
""")
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines([
"INTERNAL*pytest_configure*",
"INTERNAL*x*",
])
assert 'sessionstarttime' not in result.stderr.str()
@pytest.mark.parametrize('lookfor', ['test_fun.py::test_a'])
def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(test_fun="""
def test_a():
pass
def""")
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(['*SyntaxError*'])
if '::' in lookfor:
result.stderr.fnmatch_lines([
'*ERROR*',
])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines([
"*ERROR*test_a.py::a*",
"*ERROR*test_b.py::b*",
])
@pytest.mark.usefixtures('recwarn')
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
"""
Ref #383. Python 3.3's namespace package messed with our import hooks
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
Use recwarn here to silence this warning in Python 2.6 and 2.7:
ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
"""
testdir.mkdir('not_a_package')
p = testdir.makepyfile("""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
""")
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines("""
*unrecognized*
""")
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile("""
def raise_error(obj):
raise IOError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
""")
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
"*source code not available*",
"E*fixture 'invalid_fixture' not found",
])
def test_plugins_given_as_strings(self, tmpdir, monkeypatch):
"""test that str values passed to main() as `plugins` arg
are interpreted as module names to be imported and registered.
#855.
"""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=['invalid.module'])
assert 'invalid' in str(excinfo.value)
p = tmpdir.join('test_test_plugins_given_as_strings.py')
p.write('def test_foo(): pass')
mod = py.std.types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, 'myplugin', mod)
assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0
def test_parametrized_with_bytes_regex(self, testdir):
p = testdir.makepyfile("""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
'*1 passed*'
])
def test_parametrized_with_null_bytes(self, testdir):
"""Test parametrization with values that contain null bytes and unicode characters (#2644)"""
p = testdir.makepyfile(u"""
# encoding: UTF-8
import pytest
@pytest.mark.parametrize("data", ["\\x00", u'ação'])
def test_foo(data):
assert data
""")
res = testdir.runpytest(p)
res.assert_outcomes(passed=2)
class TestInvocationVariants(object):
def test_earlyinit(self, testdir):
p = testdir.makepyfile("""
import pytest
assert hasattr(pytest, 'mark')
""")
result = testdir.runpython(p)
assert result.ret == 0
@pytest.mark.xfail("sys.platform.startswith('java')")
def test_pydoc(self, testdir):
for name in ('py.test', 'pytest'):
result = testdir.runpython_c("import %s;help(%s)" % (name, name))
assert result.ret == 0
s = result.stdout.str()
assert 'MarkGenerator' in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile("""
from py.test import *
#collect
#cmdline
#Item
# assert collect.Item is Item
# assert collect.Collector is Collector
main
skip
xfail
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile("""
from pytest import *
#Item
#File
main
skip
xfail
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(run="""
import pytest
pytest.main()
pytest.main()
""")
testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpython(p)
result.stdout.fnmatch_lines([
"*1 passed*",
"*1 passed*",
])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pytest(self):
assert pytest.main == py.test.cmdline.main
def test_invoke_with_string(self, capsys):
retcode = pytest.main("-h")
assert not retcode
out, err = capsys.readouterr()
assert "--help" in out
pytest.raises(ValueError, lambda: pytest.main(0))
def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
assert retcode == EXIT_NOTESTSCOLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, testdir, capsys):
class MyPlugin(object):
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write('raise ImportError')
result = testdir.runpytest_subprocess("--pyargs", "tpkg.test_hello")
assert result.ret != 0
result.stdout.fnmatch_lines([
"collected*0*items*/*1*errors"
])
def test_cmdline_python_package(self, testdir, monkeypatch):
import warnings
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*"
])
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 passed*"
])
def join_pythonpath(what):
cur = py.std.os.environ.get('PYTHONPATH')
if cur:
return str(what) + os.pathsep + cur
return what
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
# the path which is not a package raises a warning on pypy;
# no idea why only pypy and not normal python warn about it here
with warnings.catch_warnings():
warnings.simplefilter('ignore', ImportWarning)
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*"
])
monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
result = testdir.runpytest("--pyargs", "tpkg.test_missing")
assert result.ret != 0
result.stderr.fnmatch_lines([
"*not*found*test_missing*",
])
def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
"""
test --pyargs option with namespace packages (#1567)
"""
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', raising=False)
search_path = []
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
search_path.append(d)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)")
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{0}.py".format(dirname)). \
write("def test_{0}(): pass\n"
"def test_other():pass".format(dirname))
# The structure of the test directory is now:
# .
# ├── hello
# │ └── ns_pkg
# │ ├── __init__.py
# │ └── hello
# │ ├── __init__.py
# │ └── test_hello.py
# └── world
# └── ns_pkg
# ├── __init__.py
# └── world
# ├── __init__.py
# └── test_world.py
def join_pythonpath(*dirs):
cur = py.std.os.environ.get('PYTHONPATH')
if cur:
dirs += (cur,)
return os.pathsep.join(str(p) for p in dirs)
monkeypatch.setenv('PYTHONPATH', join_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# mixed module and filenames:
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "world/ns_pkg")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*test_hello.py::test_hello*PASSED",
"*test_hello.py::test_other*PASSED",
"*test_world.py::test_world*PASSED",
"*test_world.py::test_other*PASSED",
"*4 passed*"
])
# specify tests within a module
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.world.test_world::test_other")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*test_world.py::test_other*PASSED",
"*1 passed*"
])
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines([
"ERROR*file*or*package*not*found*",
])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile("""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile('.txt', """
>>> x=3
>>> x
4
""")
result = testdir.runpytest("-rf")
lines = result.stdout.str().splitlines()
for line in lines:
if line.startswith("FAIL "):
testid = line[5:].strip()
break
result = testdir.runpytest(testid, '-rf')
result.stdout.fnmatch_lines([
line,
"*1 failed*",
])
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin('python')
class TestDurations(object):
source = """
import time
frag = 0.002
def test_something():
pass
def test_2():
time.sleep(frag*5)
def test_1():
time.sleep(frag)
def test_3():
time.sleep(frag*10)
"""
def test_calls(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random([
"*durations*",
"*call*test_3*",
"*call*test_2*",
"*call*test_1*",
])
def test_calls_show_2(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0")
assert result.ret == 0
for x in "123":
for y in 'call', : # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found %s %s" % (x, y))
def test_with_deselected(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*durations*",
"*call*test_1*",
])
def test_with_failing_collection(self, testdir):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines([
"*Interrupted: 1 errors during collection*",
])
# Collection errors abort test execution, therefore no duration is
# output
assert "duration" not in result.stdout.str()
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("-k not 1")
assert result.ret == 0
class TestDurationWithFixture(object):
source = """
import time
frag = 0.001
def setup_function(func):
time.sleep(frag * 3)
def test_1():
time.sleep(frag*2)
def test_2():
time.sleep(frag)
"""
def test_setup_function(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random("""
*durations*
* setup *test_1*
* call *test_1*
""")
def test_zipimport_hook(testdir, tmpdir):
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip('zipapp')
testdir.tmpdir.join('app').ensure(dir=1)
testdir.makepyfile(**{
'app/foo.py': """
import pytest
def main():
pytest.main(['--pyarg', 'foo'])
""",
})
target = tmpdir.join('foo.zip')
zipapp.create_archive(str(testdir.tmpdir.join('app')), str(target), main='foo:main')
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(['*not found*foo*'])
assert 'INTERNALERROR>' not in result.stdout.str()
def test_import_plugin_unicode_name(testdir):
testdir.makepyfile(
myplugin='',
)
testdir.makepyfile("""
def test(): pass
""")
testdir.makeconftest("""
pytest_plugins = [u'myplugin']
""")
r = testdir.runpytest()
assert r.ret == 0
def test_deferred_hook_checking(testdir):
"""
Check hooks as late as possible (#1821).
"""
testdir.syspathinsert()
testdir.makepyfile(**{
'plugin.py': """
class Hooks:
def pytest_my_hook(self, config):
pass
def pytest_configure(config):
config.pluginmanager.add_hookspecs(Hooks)
""",
'conftest.py': """
pytest_plugins = ['plugin']
def pytest_my_hook(config):
return 40
""",
'test_foo.py': """
def test(request):
assert request.config.hook.pytest_my_hook(config=request.config) == [40]
"""
})
result = testdir.runpytest()
result.stdout.fnmatch_lines(['* 1 passed *'])
| {
"content_hash": "57564e435b0207331e3215ca71498d0c",
"timestamp": "",
"source": "github",
"line_count": 847,
"max_line_length": 101,
"avg_line_length": 34.226682408500594,
"alnum_prop": 0.5460158675405312,
"repo_name": "MichaelAquilina/pytest",
"id": "71277690641f16763fd0581d7ec5e78f3e28fdeb",
"size": "29108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/acceptance_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1466946"
}
],
"symlink_target": ""
} |
from testbase import TestBase
from corepro.externalaccount import ExternalAccount
class TestAcExternalAccountPrepaid(TestBase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create(self):
ea = ExternalAccount()
ea.customerId = TestBase.prepaidCustomerId
ea.nickName = "Ext acct " + TestBase.timestamp
ea.tag = "tag python " + TestBase.timestamp
ea.accountNumber = '00001234'
ea.firstName = 'Jimmy'
ea.lastName = 'Jameson'
ea.type = 'Client'
TestBase.prepaidExternalAccountId = ea.create(TestBase.prepaidConn, TestBase.loggingObject)
self.assertTrue(TestBase.prepaidExternalAccountId > 0)
def test_get(self):
ea = ExternalAccount.getItem(TestBase.prepaidCustomerId, TestBase.prepaidExternalAccountId, TestBase.prepaidConn, TestBase.loggingObject)
self.assertTrue(ea.externalAccountId == TestBase.prepaidExternalAccountId)
def test_getbytag(self):
ea = ExternalAccount.getItemByTag(TestBase.prepaidCustomerId, "tag python " + TestBase.timestamp, TestBase.prepaidConn, TestBase.loggingObject)
self.assertTrue(ea.externalAccountId == TestBase.prepaidExternalAccountId)
def test_list(self):
eas = ExternalAccount.listItems(TestBase.prepaidCustomerId, TestBase.prepaidConn, TestBase.loggingObject)
self.assertTrue(len(eas) > 0)
def test_update(self):
ea = ExternalAccount()
ea.customerId = TestBase.prepaidCustomerId
ea.externalAccountId = TestBase.prepaidExternalAccountId
ea.nickName = "Updated ext act " + TestBase.timestamp
externalAccountId = ea.update(TestBase.prepaidConn, TestBase.loggingObject)
self.assertTrue(TestBase.prepaidExternalAccountId == externalAccountId)
def test_zzz_deactivate(self):
pass
| {
"content_hash": "9765e9b846a66d0480e3dce702a51f0a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 151,
"avg_line_length": 41.266666666666666,
"alnum_prop": 0.7162089391491653,
"repo_name": "socialmoney/corepro-sdk-python",
"id": "c072edf331b6d0ee524ce2fa8b2eb97b0a1e7345",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_acexternalaccount_prepaid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55423"
}
],
"symlink_target": ""
} |
from nussl.separation.base.separation_base import SeparationBase
import pytest
from nussl.separation import (
primitive,
factorization,
composite,
SeparationException
)
import numpy as np
import os
import nussl
import copy
import random
REGRESSION_PATH = 'tests/separation/regression/composite/'
os.makedirs(REGRESSION_PATH, exist_ok=True)
def test_ensemble_clustering(
music_mix_and_sources,
check_against_regression_data
):
mix, sources = music_mix_and_sources
mix = copy.deepcopy(mix)
vox = copy.deepcopy(sources['vocals'])
acc = copy.deepcopy(sources['drums+bass+other'])
separators = [
primitive.FT2D(mix),
factorization.RPCA(mix),
primitive.HPSS(mix),
]
weights = [3, 3, 1]
returns = [[1], [1], [0]]
fixed_centers = np.array([
[0 for i in range(sum(weights))],
[1 for i in range(sum(weights))],
])
config = [
({}, 'defaults'),
({
'init': fixed_centers,
'fit_clusterer': False,
'weights': weights,
'returns': returns
}, 'fixed_means'),
({
'extracted_feature': 'estimates',
'weights': weights,
'returns': returns
}, 'use_estimates'),
]
for kwargs, name in config:
nussl.utils.seed(0)
ensemble = composite.EnsembleClustering(
mix, 2, separators, **kwargs)
estimates = ensemble()
evaluator = nussl.evaluation.BSSEvalScale(
[acc, vox], estimates,
source_labels=['acc', 'vocals'],
compute_permutation=True
)
scores = evaluator.evaluate()
reg_path = os.path.join(
REGRESSION_PATH, f'ensemble_clustering_{name}.json')
check_against_regression_data(scores, reg_path, atol=1e-2)
pytest.raises(SeparationException, composite.EnsembleClustering,
mix, 2, separators, extracted_feature='none of the above')
pytest.raises(SeparationException, composite.EnsembleClustering,
mix, 2, separators, weights=[1, 1])
pytest.raises(SeparationException, composite.EnsembleClustering,
mix, 2, separators, returns=[[1], [1]])
def test_overlap_add(random_noise):
# Check the static methods
nussl.utils.seed(0)
mix = random_noise(10, 2, 'random')
windows, shape = composite.OverlapAdd.collect_windows(mix, 2, 1)
recombined = composite.OverlapAdd.overlap_and_add(
windows, shape, mix.sample_rate, 2, 1)
assert np.allclose(recombined.audio_data, mix.audio_data)
class DoNothing(SeparationBase):
def __init__(self, input_audio_signal):
super().__init__(input_audio_signal)
def run(self):
return
def make_audio_signals(self):
sig = self.audio_signal.make_copy_with_audio_data(self.audio_signal.audio_data)
return [sig]
mix = random_noise(1, 2, 'random')
do_nothing = DoNothing(mix)
overlap_add = composite.OverlapAdd(do_nothing)
estimates = overlap_add()
assert np.allclose(estimates[0].audio_data, mix.audio_data)
for k in ['ones', 'random']:
for dur in [1.5, 10, 30, 95, 101]:
for ch in range(1, 3):
mix = random_noise(dur, ch, k)
before_mix = copy.deepcopy(mix)
do_nothing = DoNothing(mix)
overlap_add = composite.OverlapAdd(do_nothing, window_duration=1)
estimates = overlap_add()
assert before_mix == mix
assert np.allclose(estimates[0].audio_data, mix.audio_data)
class RandomReorder(SeparationBase):
def __init__(self, input_audio_signal, shuffle=True):
super().__init__(input_audio_signal)
self.shuffle = shuffle
def run(self):
L = 2
self.even = copy.deepcopy(self.audio_signal)
self.even.audio_data[..., ::L] = 0
self.odd = copy.deepcopy(self.audio_signal)
self.odd.audio_data[..., 1::L] = 0
return
def make_audio_signals(self):
sigs = [self.even, self.odd]
indices = [0, 1]
if self.shuffle:
random.shuffle(indices)
return [sigs[i] for i in indices]
def test_permutations_allclose(estimates, overlap_estimates):
close_enough = []
for i in range(len(estimates)):
for j in range(len(overlap_estimates)):
est0 = estimates[i]
est1 = overlap_estimates[j]
error = np.allclose(est0.audio_data, est1.audio_data)
close_enough.append(error)
assert sum(close_enough) == 2
mix = random_noise(10, 2, 'random')
random_reorder = RandomReorder(mix, shuffle=False)
estimates = random_reorder()
# Reordering estimates, with find_permutation=False should fail.
random_reorder = RandomReorder(mix, shuffle=True)
overlap_add = composite.OverlapAdd(
random_reorder,
find_permutation=False,
window_duration=1
)
overlap_estimates = overlap_add()
pytest.raises(AssertionError, test_permutations_allclose, estimates, overlap_estimates)
# Not reordering estimates, with find_permutation=True should succeed.
random_reorder = RandomReorder(mix, shuffle=False)
overlap_add = composite.OverlapAdd(
random_reorder,
find_permutation=True,
verbose=True,
window_duration=1
)
overlap_estimates = overlap_add()
test_permutations_allclose(estimates, overlap_estimates)
# Reordering estimates, with find_permutation=True should succeed.
random_reorder = RandomReorder(mix, shuffle=True)
overlap_add = composite.OverlapAdd(
random_reorder,
find_permutation=True,
window_duration=1
)
overlap_estimates = overlap_add()
test_permutations_allclose(estimates, overlap_estimates)
| {
"content_hash": "0fa28c71c8e349c5ab2cec17235eed82",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 91,
"avg_line_length": 31.117948717948718,
"alnum_prop": 0.5993737640079103,
"repo_name": "interactiveaudiolab/nussl",
"id": "0a15c69258caec171821e543117e9e250eeddf4a",
"size": "6068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/separation/test_composite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
import pexpect, os, sys, tty, select, time, errno  # time and errno are used inside Funnel.__select
lispCmd = '../arc.sh' # or, 'sbcl', or whatever should probably work
pipeLoc = os.path.expanduser("~/.vimarc-pipe") # this path has to be the same as in vimarc.vim
if not os.path.exists(pipeLoc):
os.system("mkfifo -m go-rwx " + pipeLoc)
class Funnel(pexpect.spawn):
""" hacky monkey patch of pexpect to merge `interact' and input from a pipe. spawn the lisp using
this command, and then vim connects to the pipe and you can still see/use the lisp repl in your
shell window."""
def mergePipeAndInteract(self, pipe):
self.stdout.write (self.buffer)
self.stdout.flush()
self.buffer = ''
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__merge_copy(pipe)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
while data != '' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
return os.read(fd, 1000)
def __select (self, iwtd, owtd, ewtd, timeout=None):
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error, e:
if e[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise
def __merge_copy(self, pipe):
while self.isalive():
r,w,e = self.__select([self.child_fd, self.STDIN_FILENO, pipe], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
self.__interact_writen(self.child_fd, data)
if pipe in r:
data = self.__interact_read(pipe)
self.__interact_writen(self.child_fd, data)
f = Funnel(lispCmd, logfile=sys.stdout)
pipe = open(pipeLoc, "r+")
pipefn = pipe.fileno()
try:
f.mergePipeAndInteract(pipefn)
except OSError:
pass
| {
"content_hash": "97366ba89297b945780049a0792231ef",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 39.73529411764706,
"alnum_prop": 0.5636565507031829,
"repo_name": "LaxWorks/news",
"id": "ff3aa818a4d24b9b38055f33a63d0e43d1c6afe8",
"size": "2820",
"binary": false,
"copies": "2",
"ref": "refs/heads/news.academical.io",
"path": "extras/vim/vimarc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arc",
"bytes": "466299"
},
{
"name": "Batchfile",
"bytes": "809"
},
{
"name": "Emacs Lisp",
"bytes": "31350"
},
{
"name": "Perl",
"bytes": "25009"
},
{
"name": "Perl 6",
"bytes": "28038"
},
{
"name": "Python",
"bytes": "2820"
},
{
"name": "Racket",
"bytes": "1172"
},
{
"name": "Scheme",
"bytes": "64063"
},
{
"name": "Shell",
"bytes": "3290"
},
{
"name": "Vim script",
"bytes": "20459"
},
{
"name": "XSLT",
"bytes": "2862"
}
],
"symlink_target": ""
} |
"""Tests of skipped field log message filtering"""
import logging
import os
import fiona
from fiona.logutils import LogFiltering, FieldSkipLogFilter
def test_filtering(caplog):
"""Test that ordinary log messages pass"""
logger = logging.getLogger()
with LogFiltering(logger, FieldSkipLogFilter()):
logger.warning("Attention!")
logger.warning("Skipping field 1")
logger.warning("Skipping field 2")
logger.warning("Danger!")
logger.warning("Skipping field 1")
assert len(caplog.records) == 4
assert caplog.records[0].getMessage() == "Attention!"
assert caplog.records[1].getMessage() == "Skipping field 1"
assert caplog.records[2].getMessage() == "Skipping field 2"
assert caplog.records[3].getMessage() == "Danger!"
def test_skipping_slice(caplog, data_dir):
"""Collection filters out all but one warning message"""
with fiona.open(os.path.join(data_dir, "issue627.geojson")) as src:
results = list(src)
assert len(results) == 3
assert not any(['skip_me' in f['properties'] for f in results])
assert len([rec for rec in caplog.records if rec.getMessage().startswith('Skipping')]) == 1
def test_skipping_list(caplog, data_dir):
"""Collection filters out all but one warning message"""
with fiona.open(os.path.join(data_dir, "issue627.geojson")) as src:
results = list(src)
assert len(results) == 3
assert not any(['skip_me' in f['properties'] for f in results])
assert len([rec for rec in caplog.records if rec.getMessage().startswith('Skipping')]) == 1
def test_log_filter_exception(caplog):
"""FieldSkipLogFilter handles exceptions from log.exception()."""
logger = logging.getLogger()
with LogFiltering(logger, FieldSkipLogFilter()):
logger.exception(ValueError("Oh no"))
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == "Oh no"
| {
"content_hash": "4c2c747df4ce75ce8b87055ff65575ba",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 95,
"avg_line_length": 36.96153846153846,
"alnum_prop": 0.68210197710718,
"repo_name": "Toblerity/Fiona",
"id": "baa9bcf18b1a3f750f23c3062772e01c017f2b91",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_logutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Cython",
"bytes": "215771"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "456515"
},
{
"name": "Shell",
"bytes": "4572"
}
],
"symlink_target": ""
} |
import json
import hashlib
import hmac
import pytest
from slackeventsapi import SlackEventAdapter
def create_signature(secret, timestamp, data):
req = str.encode('v0:' + str(timestamp) + ':') + str.encode(data)
request_signature= 'v0='+hmac.new(
str.encode(secret),
req, hashlib.sha256
).hexdigest()
return request_signature
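# Added usage sketch (not part of the original conftest): building the headers
# a test could attach when posting a signed body to the adapter's endpoint.
# The header names follow Slack's v0 request-signing convention.
def example_signed_headers(secret, body):
    import time
    timestamp = int(time.time())
    return {
        'X-Slack-Request-Timestamp': str(timestamp),
        'X-Slack-Signature': create_signature(secret, timestamp, body),
    }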
def load_event_fixture(event, as_string=True):
filename = "tests/data/{}.json".format(event)
with open(filename) as json_data:
event_data = json.load(json_data)
if not as_string:
return event_data
else:
return json.dumps(event_data)
def event_with_bad_token():
event_data = load_event_fixture('reaction_added', as_string=False)
event_data['token'] = "bad_token"
return json.dumps(event_data)
def pytest_configure():
pytest.reaction_event_fixture = load_event_fixture('reaction_added')
pytest.url_challenge_fixture = load_event_fixture('url_challenge')
pytest.bad_token_fixture = event_with_bad_token()
pytest.create_signature = create_signature
@pytest.fixture
def adapter():
return SlackEventAdapter("SIGNING_SECRET")
@pytest.fixture
def app(adapter):
app = adapter.server
app.testing = True
return app
| {
"content_hash": "5cddb11c3a7973b26c249f22595e0456",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 27.04255319148936,
"alnum_prop": 0.6805664830841857,
"repo_name": "slackapi/python-slack-events-api",
"id": "052341564dfa5074214fff4ed1d2b8da2d86797b",
"size": "1271",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16295"
}
],
"symlink_target": ""
} |
"""
We will start by initializing the database from wurfl stream.
It should return a tuple (db, index)
>>> from mobi.devices.index.radixtree import NOTSET
>>> from mobi.devices.wurfl.db import initialize_db
>>> db, index = initialize_db(config)
>>> db is not None
True
>>> index #doctest: +ELLIPSIS
<mobi.devices.index.radixtree.RadixTree ...>
Now we'll have a look at what's inside the index.
>>> user_agent = 'Mozilla/5.0 (iPhone; ...'
>>> node, string, pos = index.search(user_agent)
>>> node.value
<class 'mobi.devices.index.radixtree.NOTSET'>
>>> string
u'Mozilla/5.0 (iPhone; '
>>> pos
21
>>> dev_id = node.values().next()
Let's look that up into the database.
>>> from mobi.devices.wurfl.db import Device
>>> device = Device.deserialize(db[dev_id])
>>> device #doctest: +ELLIPSIS
<mobi.devices.wurfl.parser.Device user_agent="Mozilla/5.0 (iPhone; ...
>>> int(device.get_capability('xhtml_support_level'))
4
>>> device.parent_id
u'apple_iphone_ver2'
>>> device.type
<InterfaceClass mobi.interfaces.devices.IAdvancedDeviceType>
>>> device.platform
u'iphone'
"""
import shutil
import os
from mobi.devices.wurfl.parser import Device
data_dir = os.path.join(os.path.dirname(__file__), 'var')
config = {
'var': data_dir
}
def setup(test):
teardown(test)
try:
os.mkdir(data_dir)
except OSError:
pass
def teardown(test):
try:
if Device.db:
Device.db.close()
shutil.rmtree(data_dir)
except:
pass
def test_suite():
import unittest
import doctest
suite = unittest.TestSuite()
suite.addTest(
doctest.DocTestSuite(__name__, setUp=setup, tearDown=teardown))
return suite
| {
"content_hash": "e642619e721e7bfddee5ef4c11933d36",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 24.54794520547945,
"alnum_prop": 0.6311383928571429,
"repo_name": "infrae/mobi.devices",
"id": "6a990231532ca3ec1a4fe6cab7d851d5de957a01",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mobi/devices/tests/test_doctest_wurfl_parsing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "47277"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from kombu import Connection
from kombu.tests.case import Case, mock, patch
class test_get_manager(Case):
@mock.mask_modules('pyrabbit')
def test_without_pyrabbit(self):
with self.assertRaises(ImportError):
Connection('amqp://').get_manager()
@mock.module_exists('pyrabbit')
def test_with_pyrabbit(self):
with patch('pyrabbit.Client', create=True) as Client:
manager = Connection('amqp://').get_manager()
self.assertIsNotNone(manager)
Client.assert_called_with(
'localhost:15672', 'guest', 'guest',
)
@mock.module_exists('pyrabbit')
def test_transport_options(self):
with patch('pyrabbit.Client', create=True) as Client:
manager = Connection('amqp://', transport_options={
'manager_hostname': 'admin.mq.vandelay.com',
'manager_port': 808,
'manager_userid': 'george',
'manager_password': 'bosco',
}).get_manager()
self.assertIsNotNone(manager)
Client.assert_called_with(
'admin.mq.vandelay.com:808', 'george', 'bosco',
)
| {
"content_hash": "273e30c375236fa1120989814bfad94d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5849358974358975,
"repo_name": "Elastica/kombu",
"id": "03eb634e5bbda631932057a11157831612894ab1",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kombu/tests/utils/test_amq_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "Makefile",
"bytes": "1514"
},
{
"name": "PowerShell",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "1017395"
},
{
"name": "Shell",
"bytes": "1955"
}
],
"symlink_target": ""
} |
"""
Library for retrieving information about catkin packages.
"""
__version__ = '0.3.9' # same version as in setup.py
| {
"content_hash": "9177dea7105c7742e11fe7ae13efb8a6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 57,
"avg_line_length": 24,
"alnum_prop": 0.6833333333333333,
"repo_name": "harmishhk/catkin_pkg",
"id": "54ef71c53b2233b6b2fa5797ed4ee45fd138c6c4",
"size": "1725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/catkin_pkg/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "524"
},
{
"name": "Python",
"bytes": "218885"
}
],
"symlink_target": ""
} |
import os
import unittest
from exprail import loader
from exprail.node import NodeType
class LoaderTest(unittest.TestCase):
"""Unittest for grammar loading"""
def test_missing_grammar_file(self):
with self.assertRaises(ValueError):
_ = loader.load_expressions('grammars/missing.grammar')
def test_empty_grammar_file(self):
expressions = loader.load_expressions('grammars/empty.grammar')
self.assertEqual(len(expressions), 0)
def test_single_expression(self):
expressions = loader.load_expressions('grammars/single.grammar')
self.assertEqual(len(expressions), 1)
self.assertIn('sample', expressions)
expression = expressions['sample']
self.assertTrue(expression.is_entry_expression())
nodes = expression.nodes
self.assertEqual(len(nodes), 4)
self.assertIn(1, nodes)
self.assertEqual(nodes[1].type, NodeType.START)
self.assertEqual(nodes[1].value, '')
self.assertIn(2, nodes)
self.assertEqual(nodes[2].type, NodeType.INFO)
self.assertEqual(nodes[2].value, 'Nothing new')
self.assertIn(3, nodes)
self.assertEqual(nodes[3].type, NodeType.EXPRESSION)
self.assertEqual(nodes[3].value, 'sample')
self.assertIn(4, nodes)
self.assertEqual(nodes[4].type, NodeType.FINISH)
self.assertEqual(nodes[4].value, '')
targets = {
1: {2},
2: {3},
3: {4},
4: set()
}
for node_id, reference_ids in targets.items():
target_ids = expression.get_target_node_ids(node_id)
self.assertEqual(target_ids, reference_ids)
def test_escaped_characters(self):
expressions = loader.load_expressions('grammars/escaped_names.grammar')
self.assertEqual(len(expressions), 1)
self.assertIn('escaped', expressions)
expression = expressions['escaped']
self.assertEqual(len(expression.nodes), 7)
self.assertEqual(expression.nodes[2].value, 'a \" character')
self.assertEqual(expression.nodes[3].value, 'a \\ character')
self.assertEqual(expression.nodes[4].value, 'both \" and \\ characters')
self.assertEqual(expression.nodes[5].value, 'multiple \"\\\" and \\\\\" in value')
self.assertEqual(expression.nodes[6].value, 'three two one zero')
def test_empty_expressions(self):
expressions = loader.load_expressions('grammars/empties.grammar')
self.assertEqual(len(expressions), 6)
expression_names = [
'first',
'the second',
'some more words in expression name',
'name with \" character',
'name with \\ character',
'both \\\" and \"\\ character combinations'
]
for name, expression in expressions.items():
self.assertIn(name, expression_names)
self.assertEqual(len(expression.nodes), 0)
def test_multiple_expressions(self):
expressions = loader.load_expressions('grammars/multiple.grammar')
self.assertEqual(len(expressions), 2)
self.assertIn('first', expressions)
expression = expressions['first']
self.assertEqual(len(expression.nodes), 6)
self.assertIn('second', expressions)
targets = {
1: {4, 5},
2: set(),
3: {2},
4: {3, 6},
5: {6},
6: {2}
}
for node_id, reference_ids in targets.items():
target_ids = expression.get_target_node_ids(node_id)
self.assertEqual(target_ids, reference_ids)
expression = expressions['second']
self.assertEqual(len(expression.nodes), 5)
targets = {
1: {3, 4, 5},
2: set(),
3: {2},
4: {2},
5: {2}
}
for node_id, reference_ids in targets.items():
target_ids = expression.get_target_node_ids(node_id)
self.assertEqual(target_ids, reference_ids)
| {
"content_hash": "6c53043ebbcedf8c654aacac1600260e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 90,
"avg_line_length": 38.68571428571428,
"alnum_prop": 0.5960118168389956,
"repo_name": "piller-imre/exprail-python",
"id": "f52c6ce170958a64180905fd083e4dfaf59a0d1f",
"size": "4062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92137"
}
],
"symlink_target": ""
} |
"""
Created on Sat Jan 25 22:34:05 2020
@author: mostafamousavi
"""
from EQTransformer.core.tester import tester
import pytest
import glob
import os
def test_predictor():
tester(input_hdf5='../sampleData&Model/100samples.hdf5',
input_testset='test_trainer_outputs/test.npy',
input_model='test_trainer_outputs/models/test_trainer_001.h5',
output_name='test_tester',
detection_threshold=0.20,
P_threshold=0.1,
S_threshold=0.1,
number_of_plots=3,
estimate_uncertainty=True,
number_of_sampling=2,
input_dimention=(6000, 3),
normalization_mode='std',
mode='generator',
batch_size=10,
gpuid=None,
gpu_limit=None)
dir_list = [ev for ev in os.listdir('.') if ev.split('_')[-1] == 'outputs']
if 'test_tester_outputs' in dir_list:
successful = True
else:
successful = False
assert successful == True
def test_report():
report = glob.glob("test_tester_outputs/X_report.txt")
assert len(report) == 1
def test_results():
results = glob.glob("test_tester_outputs/X_test_results.csv")
assert len(results) == 1
def test_plots():
plots = glob.glob("test_tester_outputs/figures/*.png")
    assert len(plots) == 3
| {
"content_hash": "bea568bf0a0ae36e90e1ea6b0662186d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 81,
"avg_line_length": 26.5,
"alnum_prop": 0.6067924528301887,
"repo_name": "smousavi05/EQTransformer",
"id": "731d1ce3e2a8a41d67af3ffcb21dd9a473dcf006",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "101813"
},
{
"name": "Python",
"bytes": "394475"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
import uuid
from contextlib import contextmanager
import pytest
def exec_cmd(cmd, env=None, stdin=None, timeout=None):
"""Execute CLI command
:param cmd: Program and arguments
:type cmd: [str]
:param env: Environment variables
:type env: dict | None
:param stdin: File to use for stdin
:type stdin: file
:param timeout: The timeout for the process to terminate.
:type timeout: int
:raises: subprocess.TimeoutExpired when the timeout is reached
before the process finished.
:returns: A tuple with the returncode, stdout and stderr
:rtype: (int, bytes, bytes)
"""
print('CMD: {!r}'.format(cmd))
process = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
try:
streams = process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
# The child process is not killed if the timeout expires, so in order
# to cleanup properly a well-behaved application should kill the child
# process and finish communication.
# https://docs.python.org/3.5/library/subprocess.html#subprocess.Popen.communicate
process.kill()
stdout, stderr = process.communicate()
print('STDOUT: {}'.format(stdout.decode('utf-8')))
print('STDERR: {}'.format(stderr.decode('utf-8')))
raise
# This is needed to get rid of '\r' from Windows's lines endings.
stdout, stderr = [stream.replace(b'\r', b'').decode('utf-8') for stream in streams]
# We should always print the stdout and stderr
print('STDOUT: {}'.format(stdout))
print('STDERR: {}'.format(stderr))
return (process.returncode, stdout, stderr)
@pytest.fixture()
def default_cluster():
cluster = _setup_cluster()
yield cluster
code, _, _ = exec_cmd(['dcos', 'cluster', 'remove', cluster['cluster_id']])
assert code == 0
@contextmanager
def setup_cluster(**kwargs):
try:
cluster = _setup_cluster(**kwargs)
yield cluster
finally:
code, _, _ = exec_cmd(['dcos', 'cluster', 'remove', cluster['cluster_id']])
assert code == 0
def _setup_cluster(name='DEFAULT', scheme='http', insecure=False, env={}):
env = {**os.environ.copy(), **env}
cluster = {
'variant': os.environ.get('DCOS_TEST_' + name + '_CLUSTER_VARIANT'),
'username': os.environ.get('DCOS_TEST_' + name + '_CLUSTER_USERNAME'),
'password': os.environ.get('DCOS_TEST_' + name + '_CLUSTER_PASSWORD'),
'name': 'test_cluster_' + str(uuid.uuid4()),
}
cmd = 'dcos cluster setup --name={} --username={} --password={} {}://{}'.format(
cluster['name'],
cluster['username'],
cluster['password'],
scheme,
os.environ.get('DCOS_TEST_' + name + '_CLUSTER_HOST'))
if scheme == 'https':
cmd += ' --no-check'
if insecure:
cmd += ' --insecure'
code, _, _ = exec_cmd(cmd.split(' '), env=env)
assert code == 0
code, out, _ = exec_cmd(['dcos', 'cluster', 'list', '--json', '--attached'])
clusters = json.loads(out)
assert len(clusters) == 1
assert clusters[0]['name'] == cluster['name']
cluster['dcos_url'] = clusters[0]['url']
cluster['version'] = clusters[0]['version']
cluster['cluster_id'] = clusters[0]['cluster_id']
code, out, _ = exec_cmd(['dcos', 'config', 'show', 'core.dcos_acs_token'])
assert code == 0
cluster['acs_token'] = out.rstrip()
return cluster
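# Illustrative sketch only (not part of the original test helpers): run a
# harmless, arbitrarily chosen command through exec_cmd and show the captured
# return code.
if __name__ == "__main__":
    returncode, _, _ = exec_cmd(["python", "--version"])
    print("exit code:", returncode)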
| {
"content_hash": "d5b2935d9ecb9b231f161c51d3ebd385",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 90,
"avg_line_length": 30.347457627118644,
"alnum_prop": 0.607092990784697,
"repo_name": "kensipe/dcos-cli",
"id": "e12f120251770645c3af9c8f56ade010f6f8a001",
"size": "3581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468"
},
{
"name": "Dockerfile",
"bytes": "148"
},
{
"name": "Go",
"bytes": "194122"
},
{
"name": "Groovy",
"bytes": "9712"
},
{
"name": "HTML",
"bytes": "2583"
},
{
"name": "JavaScript",
"bytes": "18776"
},
{
"name": "Makefile",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "28329"
},
{
"name": "Shell",
"bytes": "16472"
}
],
"symlink_target": ""
} |
class Axis(object):
"""
:param label: Name of the Axis
:param format: d3.js axis format
:param dateFormat: Modify values to JS Date objects and set d3.time.format
refer to https://github.com/mbostock/d3/wiki/Time-Formatting
"""
def __init__(self, label=None, format=None, dateFormat=None):
self.label = label
self.format = format
self.dateFormat = dateFormat
class Chart(object):
template_partial = ''
height = 320
title = ''
class MultiBarChart(Chart):
"""
:param title: The chart title
"param x_axis: Instance of corehq.apps.reports.graph_models.Axis
"param y_axis: Instance of corehq.apps.reports.graph_models.Axis
Class fields:
data: see add_dataset function
marginTop: Top Margin in pixels
marginLeft: Left Margin in pixels
marginRight: Right Margin in pixels
marginBottom: Bottom Margin in pixels
showControls: True to show controls
showLegend: True to show legend
reduceXTicks: True to reduce the number of X ticks
rotateLabels: Degrees to rotate X-Axis labels e.g. -45
tooltips: True to show tooltips
        tooltipFormat: Separator text between x,y values in tooltipContent e.g. " in ", " on "
stacked: True to make default view stacked, False for grouped
staggerLabels: True to stagger the X-Axis labels.
groupSpacing: Used to adjust amount of space between X-Axis groups. Value between 0 and 1.
forceY: Used to force values into the Y scale domain. Useful to ensure max / min scales. Must be list of numbers
translateLabelsX: Pixels to move X-Axis labels in X direction
translateLabelsY: Pixels to move X-Axis labels in Y direction
"""
template_partial = 'reports/partials/graphs/multibar_chart.html'
def __init__(self, title, x_axis, y_axis):
self.title = title
self.x_axis = x_axis
self.y_axis = y_axis
self.data = []
self.marginTop = 30
self.marginRight = 20
self.marginBottom = 50
self.marginLeft = 100
self.showControls = True
self.showLegend = True
self.reduceXTicks = False
self.rotateLabels = 0
self.tooltips = True
self.tooltipFormat = ""
self.stacked = False
self.translateLabelsX = 0
self.translateLabelsY = 0
self.staggerLabels = False
self.groupSpacing = 0.3
self.forceY = [0, 1]
def add_dataset(self, key, values, color=None):
"""
:param key: dataset name
:param values: List of dictionaries containing x and y values i.e. [{x=1, y=2}, ...]
:param color: HTML color value
"""
d = dict(key=key, values=values)
if color:
d['color'] = color
self.data.append(d)
def config_dict(self):
if self.rotateLabels and not self.translateLabelsX:
self.translateLabelsX = -10
return dict(margin={'top': self.marginTop,
'right': self.marginRight,
'bottom': self.marginBottom,
'left': self.marginLeft},
showControls=self.showControls,
showLegend=self.showLegend,
reduceXTicks=self.reduceXTicks,
rotateLabels=self.rotateLabels,
tooltips=self.tooltips,
stacked=self.stacked,
translateLabelsX=self.translateLabelsX,
translateLabelsY=self.translateLabelsY,
staggerLabels=self.staggerLabels,
forceY=self.forceY,
groupSpacing=self.groupSpacing)
class LineChart(Chart):
"""
:param title: The chart title
"param x_axis: Instance of corehq.apps.reports.graph_models.Axis
"param y_axis: Instance of corehq.apps.reports.graph_models.Axis
Class fields:
data: see add_dataset function
marginTop: Top Margin in pixels
marginLeft: Left Margin in pixels
marginRight: Right Margin in pixels
marginBottom: Bottom Margin in pixels
"""
template_partial = 'reports/partials/graphs/line_chart.html'
def __init__(self, title, x_axis, y_axis):
self.title = title
self.x_axis = x_axis
self.y_axis = y_axis
self.data = []
self.marginTop = 30
self.marginRight = 20
self.marginBottom = 50
self.marginLeft = 100
self.tooltips = True
self.showLegend = True
self.data_needs_formatting = False # this determines whether or not the data should get formatted client side
# using the data formatting helpers in nvd3_charts_helper.js
self.x_axis_uses_dates = False # determines whether or not we should use a date format for the xaxis
def add_dataset(self, key, values, color=None):
"""
:param key: dataset name
:param values: List of dictionaries containing x and y values i.e. [{x=1, y=2}, ...]
:param color: HTML color value
"""
d = dict(key=key, values=values)
if color:
d['color'] = color
self.data.append(d)
def config_dict(self):
return dict(margin={'top': self.marginTop,
'right': self.marginRight,
'bottom': self.marginBottom,
'left': self.marginLeft},
showLegend=self.showLegend,
tooltips=self.tooltips)
class PieChart(Chart):
"""
:param title: The chart title
"param key: The name of the dataset
"param values: List of dicts each with 'label' and 'value' keys e.g. [{'label': 'One', 'value': 1}, ...]
Class fields:
marginTop: Top Margin in pixels
marginLeft: Left Margin in pixels
marginRight: Right Margin in pixels
marginBottom: Bottom Margin in pixels
showLabels: True to show labels
donut: Draw chart as a donut.
tooltips: True to show tooltips
"""
template_partial = 'reports/partials/graphs/pie_chart.html'
def __init__(self, title, key, values, color=None):
if not color: color = []
self.title = title
self.data = [dict(key=key, values=values)]
self.marginTop = 30
self.marginRight = 20
self.marginBottom = 50
self.marginLeft = 80
self.showLabels = True
self.donut = False
self.tooltips = True
self.color = color
def config_dict(self):
return dict(margin={'top': self.marginTop,
'right': self.marginRight,
'bottom': self.marginBottom,
'left': self.marginLeft},
showLabels=self.showLabels,
tooltips=self.tooltips,
donut=self.donut,
color=self.color)
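# Illustrative sketch only (not part of the reporting framework): it shows how
# a report might assemble one of the chart models above. The title, axis labels
# and data points are made up for demonstration purposes.
if __name__ == '__main__':
    chart = MultiBarChart("Cases per month", Axis(label="Month"), Axis(label="Cases"))
    chart.add_dataset("opened", [{'x': 1, 'y': 10}, {'x': 2, 'y': 14}], color="#004abf")
    print(chart.config_dict())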
| {
"content_hash": "52a6d81a678b9e6d378c22b27aa61479",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 120,
"avg_line_length": 36.402061855670105,
"alnum_prop": 0.5845369583687341,
"repo_name": "qedsoftware/commcare-hq",
"id": "87c6841e6f560c7562ae90996f68b9761ecfd02b",
"size": "7062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/reports/graph_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20160929_1254'),
]
operations = [
migrations.AlterField(
model_name='attempt',
name='date',
field=models.DateTimeField(default=datetime.datetime(2016, 9, 30, 3, 2, 1, 852066, tzinfo=utc)),
),
migrations.AlterField(
model_name='concept',
name='level',
field=models.TextField(default='innocent'),
),
]
| {
"content_hash": "7d28978216e67e001c8c9db90acca26c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 108,
"avg_line_length": 25.76,
"alnum_prop": 0.5993788819875776,
"repo_name": "maxwallasaurus/arboretum",
"id": "de634cc8816665748513fd6952251d4f6f43ffbd",
"size": "717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/migrations/0004_auto_20160930_0302.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "198584"
},
{
"name": "HTML",
"bytes": "64847"
},
{
"name": "JavaScript",
"bytes": "5178638"
},
{
"name": "Python",
"bytes": "149963"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
from loguru import logger as log
from astropy.time import Time, TimeDelta
from nicer.plotutils import *
def sci_plots(etable, gtitable, args):
# GRID SET UP
figure2 = plt.figure(figsize=(11, 8.5), facecolor="white")
sci_grid = gridspec.GridSpec(5, 7)
# Build PHA Fast/Slow ratio plot before filtering by ratio
# Only do this if powerspectrum not requested
if not args.powspec:
log.info("Building fast/slow subplot")
plt.subplot(sci_grid[1:3, 2:5])
plot_slowfast(etable, args)
# Now, filter out the points above the ratio cut, if requested
if args.filtratio:
log.info("Applying ratio filter using trumpet")
etable = filt_ratio_trumpet(etable)
# Light Curve
log.info("Building light curve")
plt.subplot(sci_grid[3:5, :7])
meanrate, a = plot_light_curve(etable, args.lclog, gtitable, binsize=args.lcbinsize)
plot.title("Light Curve")
plot.xlabel("Time Elapsed (s)")
# Energy Spectrum
log.info("Building energy spectrum")
plt.subplot(sci_grid[1:3, :2])
plot_energy_spec(etable)
# Power Spectrum
if args.powspec:
log.info("Looking at power spectrum")
plt.subplot(sci_grid[1:3, 2:5])
# plot_fft_of_power(etable, args.nyquist, args.pslog, args.writeps)
# PULSE PROFILE
log.info("Building pulse profile")
axprofile = plt.subplot(sci_grid[1:3, 5:7])
if (args.orb is not None) and (args.par is not None):
log.info("Calling pulse profile using PINT")
pulse_profile(axprofile, etable, args)
elif args.foldfreq > 0.0:
log.info("Calling pulse profile with fixed frequency")
pulse_profile_fixed(etable, args.foldfreq)
else:
pass
# Making the plot all nice and stuff
plt.subplots_adjust(
left=0.07, right=0.99, bottom=0.05, top=0.9, wspace=0.8, hspace=0.8
)
figure2.suptitle(
"ObsID {0}: {1} on {2}".format(
etable.meta["OBS_ID"],
etable.meta["OBJECT"],
etable.meta["DATE-OBS"].replace("T", " at "),
),
fontsize=14,
)
# tstart, tstop, exposure
exposure = float(etable.meta["EXPOSURE"])
# tstart = etable.meta['DATE-OBS'].replace('T',' at ')
# tend = etable.meta['DATE-END'].replace('T', ' at ')
tstart = TimeDelta(etable.meta["TSTART"], format="sec", scale="tt") + Time(
etable.meta["MJDREFI"] + etable.meta["MJDREFF"], format="mjd", scale="tt"
)
tend = TimeDelta(etable.meta["TSTOP"], format="sec", scale="tt") + Time(
etable.meta["MJDREFI"] + etable.meta["MJDREFF"], format="mjd", scale="tt"
)
fraction = exposure / (float(etable.meta["TSTOP"]) - float(etable.meta["TSTART"]))
# Add text info here:
plt.figtext(
0.07,
0.93,
"Start time: {0} - End time: {1}".format(tstart.iso, tend.iso),
fontsize=10,
)
plt.figtext(
0.07,
0.90,
"Total clock time between start and stop: {0:.1f} s".format(
float(etable.meta["TSTOP"]) - float(etable.meta["TSTART"])
),
fontsize=10,
)
plt.figtext(
0.07,
0.87,
"Exposure: {0:.1f} s --> Coverage fraction is {1:.3f}".format(
exposure, fraction
),
fontsize=10,
)
plt.figtext(0.07, 0.84, "Mean count rate: {0:.3f} c/s".format(meanrate), fontsize=10)
# plt.figtext(.07, .84, etable.meta['FILT_STR'], fontsize=10)
if args.mask:
plt.figtext(0.07, 0.81, "IDS {0} are masked".format(args.mask), fontsize=10)
stringtable_start, _ = format_gti_longstring(gtitable["START"], nCut=3)
stringtable_stop, _ = format_gti_longstring(gtitable["STOP"], nCut=3)
stringtable_duration, nb_omitted_gtis = format_gti_longstring(gtitable["DURATION"], nCut=3)
plt.figtext(0.5, 0.77, stringtable_start, fontsize=10, fontname='Courier')
plt.figtext(0.58, 0.77, stringtable_stop, fontsize=10, fontname='Courier')
plt.figtext(0.66, 0.77, stringtable_duration, fontsize=10, fontname='Courier')
if nb_omitted_gtis>0:
plt.figtext(0.55, 0.75, "with {} omitted GTIS".format(nb_omitted_gtis), fontsize=10, fontname='Courier')
return figure2
| {
"content_hash": "9d28f5c487d4830d32ea9d4dd3faa9ed",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 112,
"avg_line_length": 35.00813008130081,
"alnum_prop": 0.6179749187180679,
"repo_name": "paulray/NICERsoft",
"id": "d0ce75029bee86adbbb5a54042ba300ff4b4a22c",
"size": "4306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nicer/sci_plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "837767"
},
{
"name": "Python",
"bytes": "416808"
}
],
"symlink_target": ""
} |
"""
GIS: GIS related utilities.
"""
###############################################################################
## Imports
###############################################################################
import math
###############################################################################
## GIS Format Conversions
###############################################################################
def GPRMC2DegDec(lat, latDirn, lng, lngDirn):
"""Converts GPRMC formats (Decimal Minutes) to Degrees Decimal
Eg.
"""
x = float(lat[0:2]) + float(lat[2:]) / 60
y = float(lng[0:3]) + float(lng[3:]) / 60
if latDirn == 'S':
x = -x
if lngDirn == 'W':
y = -y
return x, y
def TinyGPS2DegDec(lat, lng):
"""Converts TinyGPS formats (Decimal Degrees to e-5) to Degrees Decimal
Eg.
"""
x = float(lat[:-5] + '.' + lat[-5:])
y = float(lng[:-5] + '.' + lng[-5:])
return x, y
###############################################################################
## Functions to convert miles to change in lat, long (approx)
###############################################################################
# Distances are measured in miles.
# Longitudes and latitudes are measured in degrees.
# Earth is assumed to be perfectly spherical.
earth_radius = 3960.0
degrees_to_radians = math.pi / 180.0
radians_to_degrees = 180.0 / math.pi
def ChangeInLatitude(miles):
"""Given a distance north, return the change in latitude."""
return (miles / earth_radius) * radians_to_degrees
def ChangeInLongitude(lat, miles):
"""Given a latitude and a distance west, return the change in longitude."""
# Find the radius of a circle around the earth at given latitude.
r = earth_radius * math.cos(lat * degrees_to_radians)
return (miles / r) * radians_to_degrees
def CalculateBoundingBox(lng, lat, miles):
"""
Given a latitude, longitude and a distance in miles, calculate
the co-ordinates of the bounding box 2*miles on long each side with the
given co-ordinates at the center.
"""
latChange = ChangeInLatitude(miles)
latSouth = lat - latChange
latNorth = lat + latChange
lngChange = ChangeInLongitude(lat, miles)
lngWest = lng + lngChange
lngEast = lng - lngChange
return (lngWest, latSouth, lngEast, latNorth)
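# Illustrative sketch only; the coordinates below (roughly Vancouver, BC) and
# the 5-mile radius are arbitrary example values.
if __name__ == '__main__':
    print(CalculateBoundingBox(-123.1207, 49.2827, 5))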
| {
"content_hash": "ad974598f3aeb61071e6b1f2d20d5df1",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 28.962962962962962,
"alnum_prop": 0.5166240409207161,
"repo_name": "scdoshi/djutils",
"id": "ead5fa33dec7171d3a19058af867a48ad9c74f86",
"size": "2346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djutils/gis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15582"
}
],
"symlink_target": ""
} |
'''
Written by Lijun An and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import argparse
import pandas as pd
import numpy as np
import xgboost as xgb
from scipy.stats import sem
from config import global_config
from utils.misc import txt2list, list2txt, create_folder
from utils.normalization import ICV_norm
from evaluation.XGBoost.model import site_pred_model
from utils.metrics import site_prediction_metric
def train_XGBoost_args_parser():
"""
Parameters for training XGBoost site prediction model
"""
parser = argparse.ArgumentParser(prog='TrainXGBoostPredArgs')
parser.add_argument('--data_path', type=str, default='/')
parser.add_argument('--checkpoint_path', type=str, default='/')
parser.add_argument('--output_path', type=str, default='/')
parser.add_argument('--train_name', type=str, default='train')
parser.add_argument('--val_name', type=str, default='val')
parser.add_argument('--test_name', type=str, default='test')
parser.add_argument('--save_suffix', type=str, default='/')
parser.add_argument('--nb_folds', type=int, default=10)
parser.add_argument('--norm', action='store_false', default=True)
# model releated hyper-parameters
parser.add_argument('--num_boost_rounds', type=int, default=100)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--threshold', type=float, default=0.01)
# xgboost parameters
params = {
'booster': 'gbtree',
'objective': 'binary:logistic',
'eval_metric': 'error',
'nthread': 1,
'max_depth': 6,
'subsample': 1.0
}
parser.add_argument('--params', type=dict, default=params)
args, _ = parser.parse_known_args()
return args
def grid_search(args, train_df, val_df, ROIs):
"""
Perform grid search on validation set to get optimal hyper-parameters
Args:
args (tuple): Parameters
train_df (class DataFrame): Dataframe for training
val_df (class DataFrame): Dataframe for validation
ROIs (list): Features
"""
best_score, best_threshold, best_model = \
site_pred_model(args, train_df, val_df, ROIs)
    # also track the optimal hyper-parameters (here: max_depth & subsample)
best_max_depth = 6
best_subsample = 1.0
for max_depth in (3, 4, 5, 6, 7):
for subsample in (0.5, 0.6, 0.7, 0.8, 0.9, 1):
args.params['max_depth'] = max_depth
args.params['subsample'] = subsample
score, threshold, model = \
site_pred_model(args, train_df, val_df, ROIs)
if score > best_score:
best_score = score
best_threshold = threshold
best_model = model
best_max_depth = max_depth
best_subsample = subsample
return best_model, best_threshold, best_max_depth, best_subsample
def train(args):
"""
Train XGBoost model for 10 folds and save model & performance
Args:
args (tuple): Parameters
"""
ROIs = txt2list(global_config.ROI_features_path)
logger = []
for fold in range(args.nb_folds):
fold_input_path = os.path.join(args.data_path, str(fold))
fold_checkpoint_path = os.path.join(args.checkpoint_path, str(fold))
fold_out_path = os.path.join(args.output_path, str(fold))
create_folder(fold_checkpoint_path)
create_folder(fold_out_path)
# read data
train_df = pd.read_csv(
os.path.join(fold_input_path, args.train_name + '.csv'))
val_df = pd.read_csv(
os.path.join(fold_input_path, args.val_name + '.csv'))
test_df = pd.read_csv(
os.path.join(fold_input_path, args.test_name + '.csv'))
if args.norm:
# run ICV normalization
norm_train_df = ICV_norm(train_df, ROIs)
norm_val_df = ICV_norm(val_df, ROIs)
norm_test_df = ICV_norm(test_df, ROIs)
else:
norm_train_df, norm_val_df, norm_test_df =\
train_df, val_df, test_df
# run grid search
        model, threshold, max_depth, subsample = \
grid_search(args, norm_train_df, norm_val_df, ROIs)
# save model and threshold
model.save_model(
os.path.join(fold_checkpoint_path,
'site_pred_model_' + args.save_suffix + '.json'))
list2txt([threshold],
os.path.join(
fold_checkpoint_path,
'site_pred_threshold_' + args.save_suffix + '.txt'))
list2txt([max_depth],
os.path.join(
fold_checkpoint_path,
'site_pred_max-depth_' + args.save_suffix + '.txt'))
        list2txt([subsample],
os.path.join(
fold_checkpoint_path,
'site_pred_subsample_' + args.save_suffix + '.txt'))
# making prediction on testset and save prediction
x_testset = norm_test_df[ROIs]
y_testset = norm_test_df['SITE']
xg_testset = xgb.DMatrix(x_testset, y_testset, feature_names=ROIs)
pred = model.predict(xg_testset)
acc_vec, acc = site_prediction_metric(norm_test_df['RID'].values, pred,
y_testset.values, threshold)
# save prediction
np.save(
os.path.join(fold_out_path,
'site_pred_test_' + args.save_suffix + '.npy'),
acc_vec)
logger.append(acc)
acc_mean = np.mean(logger)
acc_std = sem(logger)
logger.append(str(acc_mean) + '_' + str(acc_std))
# save logger
list2txt(
logger,
os.path.join(args.output_path,
'site_pred_' + args.save_suffix + '.txt'))
if __name__ == '__main__':
train(train_XGBoost_args_parser())
| {
"content_hash": "a12fe5dca1cdc70906cc124c79947431",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 38.712418300653596,
"alnum_prop": 0.5880465980077664,
"repo_name": "ThomasYeoLab/CBIG",
"id": "e81efd4d257f9580a1e51a1f042e0630ba163031",
"size": "5972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stable_projects/predict_phenotypes/An2022_gcVAE/evaluation/XGBoost/train_XGBoost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35378"
},
{
"name": "C",
"bytes": "2076236"
},
{
"name": "C++",
"bytes": "1461097"
},
{
"name": "CSS",
"bytes": "6852"
},
{
"name": "Fortran",
"bytes": "598090"
},
{
"name": "HTML",
"bytes": "287918"
},
{
"name": "Jupyter Notebook",
"bytes": "569200"
},
{
"name": "MATLAB",
"bytes": "10013692"
},
{
"name": "Makefile",
"bytes": "7902"
},
{
"name": "Objective-C",
"bytes": "77"
},
{
"name": "PostScript",
"bytes": "8416"
},
{
"name": "Python",
"bytes": "2499129"
},
{
"name": "R",
"bytes": "33929"
},
{
"name": "Shell",
"bytes": "1923688"
},
{
"name": "TeX",
"bytes": "8993"
},
{
"name": "Vim Script",
"bytes": "2859"
},
{
"name": "XSLT",
"bytes": "19506"
}
],
"symlink_target": ""
} |
from django import template
from django.core.cache import cache
from docutils import nodes
from docutils.core import publish_parts
from docutils.parsers.rst import directives, Directive
from docutils.core import publish_cmdline, default_description
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
from pygments.formatters import HtmlFormatter
from django.utils.safestring import mark_safe
register = template.Library()
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
# Add syntax highlighting to code blocks
directives.register_directive('sourcecode', Pygments)
directives.register_directive('code', Pygments)
# This is our restructuredtextify tag to use in templates.
# The tag accepts an object which MUST have get_cache_key
# as a callable function!
@register.filter(name='restructuredtextify', needs_autoescape=True)
def restructuredtextify(content, slug, autoescape=None):
key = 'ss.lib.tag.%s' % slug.get_cache_key()
parts = cache.get(key)
if not parts:
parts = publish_parts(
source=content,
writer_name="html4css1",
settings_overrides={
'cloak_email_addresses': True,
'initial_header_level': 2,
},
)
# XXX: Hacky!!
# Because docutils adds its own paragraph tags into shit, this
# mess below attempts to correct new lines and <p> tags.
# Docutils does not fix newlines of entered text in paragraph tags either
# and therefore running this through the linebreaksbr tag in the template
# causes spacing fuckups. This ugly and awful mess fixes those.
parts['fragment'] = parts['fragment'].replace('\n', '<br />')
parts['fragment'] = parts['fragment'].replace('<p></p>', '')
parts['fragment'] = parts['fragment'].replace('<p>\n</p>', '')
parts['fragment'] = parts['fragment'].replace('</p><br /><p>', '</p><p>')
parts['fragment'] = parts['fragment'].replace('</p>\n<br /><p>', '</p><p>')
parts['fragment'] = parts['fragment'].replace('</p><br />\n<p>', '</p><p>')
parts['fragment'] = parts['fragment'].replace('</p>\n<br />\n<p>', '</p><p>')
parts['fragment'] = parts['fragment'].replace('</p><br />', '</p>')
parts['fragment'] = parts['fragment'].replace('<p><br />', '</p>')
parts['fragment'] = parts['fragment'].replace('<br /><li>', '<li>')
parts['fragment'] = parts['fragment'].replace('</li><br />', '</li>')
parts['fragment'] = parts['fragment'].replace('</ol><br />', '</ol>')
parts['fragment'] = parts['fragment'].replace('<br /></pre></div><br /><p>', '</pre></div><p>')
cache.set(key, parts)
return mark_safe(parts['fragment'])
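# Hypothetical template usage sketch (the object and field names are
# illustrative); the filter takes the cached object as its argument so that its
# get_cache_key() result can namespace the cache entry:
#     {% load restructuredtext_filter %}
#     {{ post.body|restructuredtextify:post }}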
| {
"content_hash": "53cd6d327f628a7f6911648fc411e7f4",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 103,
"avg_line_length": 41.989010989010985,
"alnum_prop": 0.6370060193666579,
"repo_name": "Justasic/StackSmash",
"id": "4f03f114e11f943f32ffde8aa2b6c0ef86568917",
"size": "3821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StackSmash/apps/lib/templatetags/restructuredtext_filter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8920"
},
{
"name": "JavaScript",
"bytes": "1722"
},
{
"name": "Python",
"bytes": "38929"
}
],
"symlink_target": ""
} |
import logging
from pprint import pformat
import re
from string import Formatter
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.local_yaml import CustomLoader
logger = logging.getLogger(__name__)
def deep_format(obj, paramdict, allow_empty=False):
"""Apply the paramdict via str.format() to all string objects found within
the supplied obj. Lists and dicts are traversed recursively."""
# YAML serialisation was originally used to achieve this, but that places
# limitations on the values in paramdict - the post-format result must
# still be valid YAML (so substituting-in a string containing quotes, for
# example, is problematic).
if hasattr(obj, 'format'):
try:
ret = CustomFormatter(allow_empty).format(obj, **paramdict)
except KeyError as exc:
missing_key = exc.args[0]
desc = "%s parameter missing to format %s\nGiven:\n%s" % (
missing_key, obj, pformat(paramdict))
raise JenkinsJobsException(desc)
except Exception:
logging.error("Problem formatting with args:\nallow_empty:"
"%s\nobj: %s\nparamdict: %s" %
(allow_empty, obj, paramdict))
raise
elif isinstance(obj, list):
ret = type(obj)()
for item in obj:
ret.append(deep_format(item, paramdict, allow_empty))
elif isinstance(obj, dict):
ret = type(obj)()
for item in obj:
try:
ret[CustomFormatter(allow_empty).format(item, **paramdict)] = \
deep_format(obj[item], paramdict, allow_empty)
except KeyError as exc:
missing_key = exc.args[0]
desc = "%s parameter missing to format %s\nGiven:\n%s" % (
missing_key, obj, pformat(paramdict))
raise JenkinsJobsException(desc)
except Exception:
logging.error("Problem formatting with args:\nallow_empty:"
"%s\nobj: %s\nparamdict: %s" %
(allow_empty, obj, paramdict))
raise
else:
ret = obj
if isinstance(ret, CustomLoader):
# If we have a CustomLoader here, we've lazily-loaded a template;
# attempt to format it.
ret = deep_format(ret, paramdict, allow_empty=allow_empty)
return ret
class CustomFormatter(Formatter):
"""
Custom formatter to allow non-existing key references when formatting a
string
"""
_expr = '{({{)*(?:obj:)?(?P<key>\w+)(?:\|(?P<default>[\w\s]*))?}(}})*'
def __init__(self, allow_empty=False):
super(CustomFormatter, self).__init__()
self.allow_empty = allow_empty
def vformat(self, format_string, args, kwargs):
matcher = re.compile(self._expr)
# special case of returning the object if the entire string
# matches a single parameter
try:
result = re.match('^%s$' % self._expr, format_string)
except TypeError:
return format_string.format(**kwargs)
if result is not None:
try:
return kwargs[result.group("key")]
except KeyError:
pass
# handle multiple fields within string via a callback to re.sub()
def re_replace(match):
key = match.group("key")
default = match.group("default")
if default is not None:
if key not in kwargs:
return default
else:
return "{%s}" % key
return match.group(0)
format_string = matcher.sub(re_replace, format_string)
return Formatter.vformat(self, format_string, args, kwargs)
def get_value(self, key, args, kwargs):
try:
return Formatter.get_value(self, key, args, kwargs)
except KeyError:
if self.allow_empty:
logger.debug(
'Found uninitialized key %s, replaced with empty string',
key
)
return ''
raise
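# Illustrative sketch only (hypothetical parameter names): '{num|1}' falls back
# to its inline default because "num" is missing from the parameter dict, while
# '{name}' is substituted normally.
if __name__ == '__main__':
    print(deep_format({'job': '{name}-{num|1}'}, {'name': 'build'}))  # {'job': 'build-1'}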
| {
"content_hash": "4ffb128d66c75223693941295a500632",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 36.719298245614034,
"alnum_prop": 0.5652173913043478,
"repo_name": "rsig/jenkins-job-builder",
"id": "008d66e74d8cf1e6494108f0839347c580c5b346",
"size": "4852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jenkins_jobs/formatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "246"
},
{
"name": "C++",
"bytes": "260"
},
{
"name": "PHP",
"bytes": "1186"
},
{
"name": "Python",
"bytes": "1109647"
},
{
"name": "Shell",
"bytes": "1344"
},
{
"name": "SourcePawn",
"bytes": "26"
}
],
"symlink_target": ""
} |
'''
This module provides utility functions to format data
'''
def prettify_seconds(seconds):
"""
Prettifies seconds.
Takes number of seconds (int) as input and returns a prettified string.
Example:
>>> prettify_seconds(342543)
'3 days, 23 hours, 9 minutes and 3 seconds'
"""
if seconds < 0:
raise ValueError("negative input not allowed")
signs = {"s": {"singular": "second", "plural": "seconds", },
"h": {"singular": "hour", "plural": "hours"},
"min": {"singular": "minute", "plural": "minutes"},
"d": {"singular": "day", "plural": "days"}
}
seperator = ", "
last_seperator = " and "
def get_sign(unit, value):
if value == 1 or value == -1:
return signs[unit]["singular"]
else:
return signs[unit]["plural"]
days, remainder = divmod(seconds, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
daystext = "{} {}".format(days, get_sign("d", days)) if days else ""
hourstext = "{} {}".format(hours, get_sign("h", hours)) if hours else ""
minutestext = "{} {}".format(minutes, get_sign("min", minutes)) if minutes else ""
if (not seconds) and (days or hours or minutes):
secondstext = ""
else:
secondstext = "{} {}".format(seconds, get_sign("s", seconds))
output_list = [daystext, hourstext, minutestext, secondstext]
filtered = [item for item in output_list if item]
if len(filtered) <= 2:
        output = last_separator.join(filtered)
    else:
        output = separator.join(filtered[:-1]) + last_separator + filtered[-1]
return output
def format_max_len(string_to_format, max_len=15, replace="[...]"):
"""
Formats a string so len(format_max_length(string_to_format)) <= max_len
If the string_to_format is longer than max_len, it replaces characters in the middle with [...]
Example:
>>> util.format_max_len('abcdefghijklmnopqrstuvwxyz')
'abcde[...]vwxyz'
"""
    real_max_len = max_len - len(replace)  # needed to count the [...] in the output length
if len(string_to_format) <= max_len:
return string_to_format
first = real_max_len // 2 # Insert the [...] in the (floored) middle
last = real_max_len - first
return string_to_format[0:first] + replace + string_to_format[len(string_to_format) - last:]
| {
"content_hash": "e237482f899b60d31657a501a57fb421",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 99,
"avg_line_length": 38.87096774193548,
"alnum_prop": 0.6016597510373444,
"repo_name": "Stefan-Code/gglsbl3",
"id": "b07ecf333dc0ef599503df7aef43ffcd1a132b40",
"size": "2410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gglsbl3/util/format_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3632"
},
{
"name": "Protocol Buffer",
"bytes": "1030"
},
{
"name": "Python",
"bytes": "70075"
},
{
"name": "Shell",
"bytes": "340"
}
],
"symlink_target": ""
} |
import scrapy
import re
from locations.items import GeojsonPointItem
class LouGreySpider(scrapy.Spider):
name = "lou_grey"
allowed_domains = ["stores.louandgrey.com"]
download_delay = 0.5
start_urls = (
'https://stores.louandgrey.com/',
)
def parse_stores(self, response):
ref = re.findall(r"[^(\/)]+$", response.url)
if (len(ref) > 0):
ref = ref[0].split('.')[0]
properties = {
'addr_full': response.xpath('normalize-space(//meta[@itemprop="streetAddress"]/@content)').extract_first(),
'phone': response.xpath(
'normalize-space(//span[@itemprop="telephone"]/text())').extract_first(),
'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(),
'state': response.xpath('normalize-space(//span[@itemprop="addressRegion"]/text())').extract_first(),
'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
'ref': ref,
'website': response.url,
'lat': response.xpath('normalize-space(//meta[@itemprop="latitude"]/@content)').extract_first(),
'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
}
hours = response.xpath(
'//div[@class="row"]/div[@class="nap-row-left-col-row-right-hours-of-operation"]/div[@class="c-location-hours-details-wrapper js-location-hours"]/table/tbody/tr/@content').extract()
if hours != []:
hours = " ; ".join(hours)
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
def parse_city_stores(self, response):
stores = response.xpath('//h3[@class="Teaser-title Link Link--teaser Heading--h5"]/a/@href').extract()
for store in stores:
yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)
def parse_state(self, response):
urls = response.xpath('//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href').extract()
for path in urls:
pattern = re.compile("^[a-z]{2}\/[^()]+\/[^()]+.html$")
if (pattern.match(path.strip())):
yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
else:
yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores)
def parse(self, response):
urls = response.xpath('//div[@class="c-directory-list-content-wrapper"]/ul/li/a/@href').extract()
for path in urls:
pattern = re.compile("^[a-z]{2}.html$")
pattern1 = re.compile("^[a-z]{2}\/[^()]+\/[^()]+.html$")
if (pattern.match(path.strip())):
yield scrapy.Request(response.urljoin(path), callback=self.parse_state)
elif (pattern1.match(path.strip())):
yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
else:
            yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores)
| {
"content_hash": "aa3b98d56a62c8be726ba76a5ca51dc5",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 193,
"avg_line_length": 48.276923076923076,
"alnum_prop": 0.5959209687699172,
"repo_name": "iandees/all-the-places",
"id": "b1fca5a5dd85c2a86d8a89382454462f22c07fba",
"size": "3138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locations/spiders/lou_grey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2134"
},
{
"name": "Python",
"bytes": "116132"
},
{
"name": "Shell",
"bytes": "4477"
}
],
"symlink_target": ""
} |
from setuptools import setup
import re
try:
import multiprocessing
except ImportError:
pass
# harddeal, hardeal, onus
pledge_py = open('pledge/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", pledge_py))
# Metadata fields extracted from pledge/__init__.py
AUTHOR_EMAIL = metadata['author']
VERSION = metadata['version']
WEBSITE = metadata['website']
LICENSE = metadata['license']
# Extract name and e-mail ("Firstname Lastname <[email protected]>")
AUTHOR, EMAIL = re.match(r'(.*) <(.*)>', AUTHOR_EMAIL).groups()
setup(name='pledge',
version=VERSION,
description='Lambda based design by contract (dbc)',
keywords='dbc contract lambda testing',
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
url=WEBSITE,
packages=['pledge'],
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose'],
)
| {
"content_hash": "ee8fefe6834992d3cf39abd44ee68350",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.670814479638009,
"repo_name": "jhorman/pledge",
"id": "b727a0bef801592b0d0fd19536fc01c74a317b8b",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11704"
}
],
"symlink_target": ""
} |
import copy
import json
import logging
from datetime import datetime
from cryptography.fernet import Fernet
from sqlitedict import SqliteDict
from mirror.event_bus import Event
from mirror.paths import ROOTDIR
_logger = logging.getLogger(__name__)
class PluginContext:
"""Services provided to plugins.
Plugins are provided with an instance of this class and do not create one
themselves.
"""
def __init__(self, plugin_name, event_bus) -> None:
self.plugin_name = plugin_name
self._event_bus = event_bus
self.db = PluginDatabase(plugin_name)
async def post_event(self, name: str, data: dict) -> None:
"""Post an event that is available to client-side JavaScript.
:param name: Name of the event (see notes below).
:param data: Data sent with the event. Send {} if there is no data.
The plugin name is automatically used to namespace all events as they appear on
the client side. For example:
`myplugin.myeventname`
Some metadata keys are also automatically added to the event data:
_source: The name of the plugin
_time: When the event was raised
"""
if data is None:
return
data = copy.deepcopy(data) # Copy data to avoid caller side-effects.
full_name = f"{self.plugin_name}.{name}"
data["_source"] = self.plugin_name
data["_time"] = datetime.now().isoformat()
await self._event_bus.post(Event(name=full_name, data=data))
_connectivity_score = 0
@property
def is_connected(self):
"""Whether the network is connected."""
return PluginContext._connectivity_score >= 0
def vote_connected(self):
"""Allows a plugin to vote that the network is connected."""
score = min(PluginContext._connectivity_score + 1, 10)
PluginContext._connectivity_score = score
_logger.info(
"%s votes connected; score: %s",
self.plugin_name,
score,
)
def vote_disconnected(self, cause: Exception):
"""Allows a plugin to vote that the network is disconnected."""
score = max(PluginContext._connectivity_score - 1, -10)
PluginContext._connectivity_score = score
_logger.info(
"%s votes disconnected because of %s; score: %s",
self.plugin_name,
cause,
score,
)
class PluginDatabase(SqliteDict): # pylint: disable=too-many-ancestors
"""Database for persistent plug-in data.
The database exposes a dict-like interface, and can be used in either async or sync
code without blocking I/O (since the underlying SqliteDict queues database
operations to be handled on a separate thread).
"""
_data_dir = ROOTDIR / "instance"
_key = None
def __init__(self, plugin_name, filename=None):
self._init_key()
self._fernet = Fernet(self._key)
super().__init__(
filename=filename or self._data_dir / "mirror.db",
tablename=plugin_name,
autocommit=True,
encode=self._encrypted_json_encoder,
decode=self._encrypted_json_decoder,
)
@staticmethod
def _init_key():
if PluginDatabase._key:
return
key_path = PluginDatabase._data_dir / "mirror.key"
if not key_path.exists():
key_path.parent.mkdir(parents=True, exist_ok=True)
key = Fernet.generate_key()
key_path.write_bytes(key)
PluginDatabase._key = key
_logger.debug("New database key created at %s", key_path.absolute())
else:
PluginDatabase._key = key_path.read_bytes()
_logger.debug("Existing database key used at %s", key_path.absolute())
def _encrypted_json_encoder(self, obj: object):
return self._fernet.encrypt(json.dumps(obj, cls=_ExtendedEncoder).encode())
def _encrypted_json_decoder(self, data: bytes) -> object:
return json.loads(self._fernet.decrypt(data), cls=_ExtendedDecoder)
class _ExtendedEncoder(json.JSONEncoder):
"""JSON encoder that handles additional object types."""
def default(self, o):
if hasattr(o, "isoformat"):
return {"_dt_": o.isoformat()}
return json.JSONEncoder.default(self, o)
class _ExtendedDecoder(json.JSONDecoder):
"""JSON decoder that handles additional object types."""
def __init__(self, *args, **kwargs) -> None:
kwargs["object_hook"] = self._object_hook
super().__init__(*args, **kwargs)
@staticmethod
def _object_hook(obj):
if "_dt_" in obj:
try:
return datetime.fromisoformat(obj["_dt_"])
except ValueError:
pass
return obj
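# Illustrative round-trip sketch: datetimes survive JSON serialization via the
# {"_dt_": ...} convention used by _ExtendedEncoder and _ExtendedDecoder.
if __name__ == "__main__":
    payload = {"when": datetime(2023, 1, 2, 3, 4, 5)}
    blob = json.dumps(payload, cls=_ExtendedEncoder)
    print(json.loads(blob, cls=_ExtendedDecoder) == payload)  # True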
| {
"content_hash": "e64e11127d665093f10ebe7e24f7a921",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 87,
"avg_line_length": 32.554054054054056,
"alnum_prop": 0.6168534661685346,
"repo_name": "genericmoniker/mirror",
"id": "24d8469626e65737defc8c1e1ee47cd4cdac12c2",
"size": "4818",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "backend/src/mirror/plugin_context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31880"
},
{
"name": "Dockerfile",
"bytes": "3441"
},
{
"name": "HTML",
"bytes": "15567"
},
{
"name": "JavaScript",
"bytes": "8782"
},
{
"name": "Python",
"bytes": "66404"
},
{
"name": "Riot",
"bytes": "179"
},
{
"name": "Shell",
"bytes": "3851"
},
{
"name": "Svelte",
"bytes": "33588"
}
],
"symlink_target": ""
} |
"""Opinionated basic logging setup."""
import logging
import sys
LOGGERS = {}
def get_handler():
"""Return a stdout stream handler"""
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
"[%(asctime)s][%(name)10s][%(levelname)7s] %(message)s"
)
handler.setFormatter(formatter)
return handler
def get_logger(name):
"""Return an opinionated basic logger named `name` that logs to
stdout."""
if LOGGERS.get(name):
return LOGGERS.get(name)
else:
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
if not (
len(logger.handlers) > 0
and type(logger.handlers[0]) == logging.StreamHandler
):
logger.addHandler(get_handler())
        logger.propagate = False
        LOGGERS[name] = logger  # cache so subsequent calls reuse the configured logger
        return logger
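# Tiny illustration: repeated get_logger() calls with the same (arbitrary) name
# reuse the already-configured logger instead of attaching duplicate handlers.
if __name__ == "__main__":
    get_logger("demo").info("first message")
    get_logger("demo").info("second message")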
| {
"content_hash": "4143227b86b7fe478f0aa1cfcf246b0c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.618421052631579,
"repo_name": "uc-cdis/cdis-python-utils",
"id": "f21cc1a58da18b64e89ab228744bbb8e21934065",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdispyutils/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81760"
}
],
"symlink_target": ""
} |
from ...Helpers.environment import Environment
def while_statement(env, node):
"""
'While' statement def for AST.
interpret - runtime function for Evaluator (body interpret while condition).
"""
while node.condition.interpret(env):
node.body.interpret(env)
def for_statement(env, node):
"""
'For' statement def for AST.
interpret - runtime function for Evaluator ('for' loop).
"""
node.stmt1.interpret(env)
while node.stmt2.interpret(env):
iteration_env = Environment(env).create()
node.body.interpret(iteration_env)
node.stmt3.interpret(env)
return
def repeat_statement(env, node):
"""
'Repeat' statement def for AST.
interpret - runtime function for Evaluator (body interpret while condition).
"""
while True:
node.body.interpret(env)
condition_value = node.condition.interpret(env)
if condition_value:
break
| {
"content_hash": "39acff34432539ffea07163c0f28c8d5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 27.228571428571428,
"alnum_prop": 0.6516264428121721,
"repo_name": "PetukhovVictor/compiler",
"id": "22ae1abead9672da3fd0260a0ec97cbb1165a3ad",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Interpreter/Eval/statements/loop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "16410"
},
{
"name": "Python",
"bytes": "239647"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class StepsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="steps", parent_name="layout.slider", **kwargs):
super(StepsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Step"),
data_docs=kwargs.pop(
"data_docs",
"""
args
Sets the arguments values to be passed to the
Plotly method set in `method` on slide.
execute
When true, the API method is executed. When
false, all other behaviors are the same and
command execution is skipped. This may be
useful when hooking into, for example, the
`plotly_sliderchange` method and executing the
API command manually without losing the benefit
of the slider automatically binding to the
state of the plot through the specification of
`method` and `args`.
label
Sets the text label to appear on the slider
method
Sets the Plotly method to be called when the
slider value is changed. If the `skip` method
is used, the API slider will function as normal
but will perform no API calls and will not bind
automatically to state updates. This may be
used to create a component interface and attach
to slider events manually via JavaScript.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
Sets the value of the slider step, used to
            refer to the step programmatically. Defaults to
the slider label if not provided.
visible
Determines whether or not this step is included
in the slider.
""",
),
**kwargs,
)
| {
"content_hash": "19ac47ecc07917f60de19516ee13fe89",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 83,
"avg_line_length": 47.940298507462686,
"alnum_prop": 0.563200498132005,
"repo_name": "plotly/plotly.py",
"id": "c2f6b1f0c7d250fa5dbae75e4b2a67da8f5ec1e2",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/slider/_steps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
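The validator above only describes the layout.slider.steps schema; a short sketch of a figure that exercises it, with purely illustrative labels and data values:
# Sketch of a figure using layout.slider steps (values are illustrative only)
import plotly.graph_objects as go

fig = go.Figure(go.Scatter(y=[1, 3, 2]))
fig.update_layout(
    sliders=[dict(
        active=0,
        steps=[
            dict(label="rising",  method="restyle", args=[{"y": [[1, 2, 3]]}]),
            dict(label="falling", method="restyle", args=[{"y": [[3, 2, 1]]}]),
            dict(label="no-op",   method="skip"),  # performs no API call, as documented above
        ],
    )]
)
fig.show()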
"""
Code to load an expert policy and generate roll-out data for behavioral cloning.
Example usage:
python run_expert.py experts/Humanoid-v1.pkl Humanoid-v1 --render \
--num_rollouts 20
Author of this script and included expert policies: Jonathan Ho ([email protected])
"""
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('expert_policy_file', type=str)
parser.add_argument('envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument("--max_timesteps", type=int)
parser.add_argument('--num_rollouts', type=int, default=20,
help='Number of expert roll outs')
args = parser.parse_args()
print('loading and building expert policy')
policy_fn = load_policy.load_policy(args.expert_policy_file)
print('loaded and built')
with tf.Session():
tf_util.initialize()
import gym
env = gym.make(args.envname)
max_steps = args.max_timesteps or env.spec.timestep_limit
returns = []
observations = []
actions = []
        steps = []
for i in range(args.num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
            totalsteps = 0
            while not done:
                action = policy_fn(obs[None, :])
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
                totalsteps += 1
                if args.render:
                    env.render()
                if totalsteps % 100 == 0: print("%i/%i"%(totalsteps, max_steps))
                if totalsteps >= max_steps:
                    break
            returns.append(totalr)
            steps.append(totalsteps)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
        print('steps', steps)
        expert_data = {'observations': np.array(observations),
                       'actions': np.array(actions),
                       'returns': np.array(returns),
                       'steps': np.array(steps)}
        #pickle.dump(expert_data, open('imitation/original/{}.pkl'.format(args.envname), 'wb+'))
if __name__ == '__main__':
main()
| {
"content_hash": "6fe2dd67d13d3bdad38be44c57ea2504",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 96,
"avg_line_length": 30.990654205607477,
"alnum_prop": 0.5464414957780458,
"repo_name": "pashadude/TensorflowReinforcementLearning",
"id": "a4ceea40a3644eb477b84dbaba23506e70618d40",
"size": "3339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw1/run_expert.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "455074"
},
{
"name": "Python",
"bytes": "95376"
},
{
"name": "Shell",
"bytes": "174"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import utils.validators
class Migration(migrations.Migration):
dependencies = [
('events', '0014_auto_20151012_1419'),
]
operations = [
migrations.AlterField(
model_name='event',
name='admin_group',
field=models.ForeignKey(null=True, verbose_name='grupp som kan administrera eventet.',
help_text='Utöver den användare som nu skapar eventet.', to='auth.Group',
blank=True),
),
migrations.AlterField(
model_name='event',
name='deregister_delta',
field=models.PositiveIntegerField(verbose_name='Senaste avanmälan, dagar.',
help_text='Är dagar innan eventet börjar. 1 betyder att en användare kan'
' avanmäla sig senast en dag innan eventet börjar. ',
default=1),
),
migrations.AlterField(
model_name='event',
name='lead',
field=models.TextField(verbose_name='ingress', help_text='Max 160 characters',
validators=[utils.validators.less_than_160_characters_validator]),
),
migrations.AlterField(
model_name='event',
name='location',
field=models.CharField(verbose_name='plats', max_length=30),
),
migrations.AlterField(
model_name='event',
name='visible_from',
field=models.DateTimeField(verbose_name='evenemanget är synligt ifrån'),
),
]
| {
"content_hash": "b96001f9ff41bf50ad14173670f699f9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 119,
"avg_line_length": 40.86046511627907,
"alnum_prop": 0.5304496300512237,
"repo_name": "I-sektionen/i-portalen",
"id": "6c935cb64ee9416bc5928c898edde1420c3a07f1",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi/iportalen_django/events/migrations/0015_auto_20151020_2223.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18420"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "355692"
},
{
"name": "JavaScript",
"bytes": "415020"
},
{
"name": "Python",
"bytes": "660556"
},
{
"name": "SCSS",
"bytes": "72077"
},
{
"name": "Sass",
"bytes": "23813"
},
{
"name": "Shell",
"bytes": "1190"
}
],
"symlink_target": ""
} |
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
import os
import time
class TestDebugFifos(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
extra_args = ['--proxy-threads=1']
def setUp(self):
self.add_server(Memcached())
self.mcrouter = self.add_mcrouter(self.config,
extra_args=self.extra_args)
def get_fifo(self, substr):
fifos = os.listdir(self.mcrouter.debug_fifo_root)
self.assertEqual(2, len(fifos))
fifos = [f for f in fifos if substr in f]
self.assertEqual(1, len(fifos))
return os.path.join(self.mcrouter.debug_fifo_root, fifos[0])
def test_mcpiper_fifo(self):
key = 'test.abc'
value = 'abc123'
self.assertTrue(self.mcrouter.set(key, value))
self.assertEqual('abc123', self.mcrouter.get(key))
        # Wait for mcrouter to create the fifos.
time.sleep(2)
# Connects to the client and server fifos
cfd = os.open(self.get_fifo('client'), os.O_RDONLY | os.O_NONBLOCK)
sfd = os.open(self.get_fifo('server'), os.O_RDONLY | os.O_NONBLOCK)
        # Wait for mcrouter to detect the new fifo connection.
time.sleep(2)
# Send requests
self.mcrouter.get(key)
# Reads client fifo
buf = os.read(cfd, 4096)
self.assertTrue(len(buf) > 0)
self.assertTrue(value in buf.decode('ascii', errors='ignore'))
# Read server fifo
buf = os.read(sfd, 4096)
self.assertTrue(len(buf) > 0)
self.assertTrue(value in buf.decode('ascii', errors='ignore'))
os.close(cfd)
os.close(sfd)
| {
"content_hash": "f0bc44ae49000c034b86e54cc1ffcea1",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 32.01851851851852,
"alnum_prop": 0.6124927703875073,
"repo_name": "facebook/mcrouter",
"id": "c334d162bc1320b110e0422ba9758d3b2da399bd",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mcrouter/test/test_debug_fifos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103743"
},
{
"name": "C++",
"bytes": "6438246"
},
{
"name": "Dockerfile",
"bytes": "1885"
},
{
"name": "M4",
"bytes": "79438"
},
{
"name": "Makefile",
"bytes": "29378"
},
{
"name": "Python",
"bytes": "284816"
},
{
"name": "Ragel",
"bytes": "30888"
},
{
"name": "Shell",
"bytes": "35466"
},
{
"name": "Thrift",
"bytes": "57019"
}
],
"symlink_target": ""
} |
"""
Command Interface for interacting with controllers.
"""
from pycopia import CLI
from pycopia import IO
from pycopia import UI
class ConfiguratorShellCLI(CLI.GenericCLI):
def _setup(self, obj, name):
# Obtain host name directly from device.
# this also asserts the configurator is working.
self._obj = obj
hostname = obj.hostname()
self._environ["hostname"] = hostname
self._environ["PS1"] = "Configurator(%%I%s%%N)> " % (hostname,)
self._namespace = {"ctor":self._obj, "environ":self._environ}
self._reset_scopes()
def tail(self, argv):
"""tail <fname> [<filter>]
tail a file, using the optional filter regular expression to filter lines."""
fname = argv[1]
if len(argv) > 2:
filt = argv[2]
else:
filt = None
s = self._obj
s.tail(fname, filt)
try:
while 1:
l = s.readline()
self._print(l)
except KeyboardInterrupt:
s.interrupt()
def exit(self, argv):
"""exit
Exit from root if root. If not root, exit shell configurator."""
if self._obj.is_root():
self._obj.exit()
return
else:
self._obj.exit()
            raise CLI.CommandQuit
class ConfiguratorTheme(UI.DefaultTheme):
pass
def controller_cli(argv):
"""controller_cli [-s <script>] [-g] <device>
Interact with a DUT configurator. If no device is specified use the testbed DUT.
Options:
-g Use paged output (like 'more')
-s <script> Run a CLI script from the given file instead of entering
interactive mode.
"""
import os
from pycopia import getopt
from pycopia.QA import controller
from pycopia.QA import config
paged = False
script = None
try:
optlist, longopts, args = getopt.getopt(argv[1:], "s:?g")
    except getopt.GetoptError:
print((controller_cli.__doc__))
return
for opt, val in optlist:
if opt == "-?":
print((controller_cli.__doc__))
return
elif opt == "-g":
paged = True
elif opt == "-s":
script = val
if not args:
print((controller_cli.__doc__))
return
if paged:
from pycopia import tty
io = tty.PagedIO()
else:
io = IO.ConsoleIO()
# do runtime setup
cf = config.get_config(initdict=longopts)
cf.reportfile = "controller_cli"
cf.logbasename = "controller_cli.log"
cf.arguments = argv
dev = cf.devices[args[0]]
cont = controller.get_configurator(dev, logfile=cf.logfile)
# construct the CLI
theme = ConfiguratorTheme("Controller> ")
ui = UI.UserInterface(io, cf, theme)
cmd = CLI.get_generic_cmd(cont, ui, ConfiguratorShellCLI)
cmd.device = dev # stash actual device for future reference
parser = CLI.CommandParser(cmd, historyfile=os.path.expandvars("$HOME/.hist_controller"))
if script:
try:
parser.parse(script)
except KeyboardInterrupt:
pass
else:
parser.interact()
try:
cont.close()
except:
pass
if __name__ == "__main__":
import sys
controller_cli(sys.argv)
| {
"content_hash": "0b4a0081f42584282765db70ea5b790a",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 93,
"avg_line_length": 25.84375,
"alnum_prop": 0.5722490931076178,
"repo_name": "kdart/pycopia3",
"id": "8fcbe036225a504978c1b61f440472d0bdf48aa6",
"size": "3376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QA/pycopia/QA/controller_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82876"
},
{
"name": "CSS",
"bytes": "22279"
},
{
"name": "HTML",
"bytes": "11125"
},
{
"name": "JavaScript",
"bytes": "70505"
},
{
"name": "Makefile",
"bytes": "5417"
},
{
"name": "Python",
"bytes": "1557130"
},
{
"name": "Roff",
"bytes": "7289"
},
{
"name": "Shell",
"bytes": "11157"
}
],
"symlink_target": ""
} |
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, transaction, router
from django.utils.encoding import force_unicode
from django.utils import timezone
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
def load(self):
try:
s = Session.objects.get(
session_key = self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(force_unicode(s.session_data))
except (Session.DoesNotExist, SuspiciousOperation):
self.create()
return {}
def exists(self, session_key):
try:
Session.objects.get(session_key=session_key)
except Session.DoesNotExist:
return False
return True
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
obj = Session(
session_key=self._get_or_create_session_key(),
session_data=self.encode(self._get_session(no_load=must_create)),
expire_date=self.get_expiry_date()
)
using = router.db_for_write(Session, instance=obj)
sid = transaction.savepoint(using=using)
try:
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
transaction.savepoint_rollback(sid, using=using)
raise CreateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
Session.objects.get(session_key=session_key).delete()
except Session.DoesNotExist:
pass
# At bottom to avoid circular import
from django.contrib.sessions.models import Session
| {
"content_hash": "bb9cee00b0789f76e7d7f1692e1f4231",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 33.851851851851855,
"alnum_prop": 0.5929978118161926,
"repo_name": "mixman/djangodev",
"id": "219d97d36841a6ab4a163e80f1d9069a11516f65",
"size": "2742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/contrib/sessions/backends/db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
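A rough sketch of how this backend is exercised, assuming a configured Django project with the sessions app installed and its table migrated; keys and values below are arbitrary.
# Sketch: exercising the database session store from a Django shell
from django.contrib.sessions.backends.db import SessionStore

store = SessionStore()          # no key yet; one is generated on first save
store["cart_id"] = 42
store.save()                    # writes a Session row with an expiry date
key = store.session_key

restored = SessionStore(session_key=key)
print(restored["cart_id"])      # -> 42, loaded lazily from the database

restored.delete()               # removes the row again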
"""This example updates the CPC bid and status for a given ad group.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CPC_BID_MICRO_AMOUNT = 'INSERT_CPC_BID_MICRO_AMOUNT_HERE'
def main(client, ad_group_id, bid_micro_amount=None):
# Initialize appropriate service.
ad_group_service = client.GetService('AdGroupService', version='v201806')
# Construct operations and update an ad group.
operations = [{
'operator': 'SET',
'operand': {
'id': ad_group_id,
'status': 'PAUSED'
}
}]
if bid_micro_amount:
operations[0]['operand']['biddingStrategyConfiguration'] = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'microAmount': bid_micro_amount,
}
}]
}
ad_groups = ad_group_service.mutate(operations)
# Display results.
for ad_group in ad_groups['value']:
bidding_strategy_configuration = ad_group['biddingStrategyConfiguration']
# Find the CpcBid in the bidding strategy configuration's bids collection.
cpc_bid_micros = None
if bidding_strategy_configuration:
bids = bidding_strategy_configuration['bids']
if bids:
for bid in bids:
if bid['Bids.Type'] == 'CpcBid':
cpc_bid_micros = bid['bid']['microAmount']
break
print ('Ad group with name "%s", and id "%s" was updated to have status '
'"%s" and CPC bid %d.'
% (ad_group['name'], ad_group['id'], ad_group['status'],
cpc_bid_micros))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID, CPC_BID_MICRO_AMOUNT)
| {
"content_hash": "596d2f48328c27490ce80000c9b6642e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 29.231884057971016,
"alnum_prop": 0.6351016360932077,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "879c7334f6eef0a5322cb3d3466aca019ca4f033",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201806/basic_operations/update_ad_group.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
} |
from django.core import formfields, validators
from django.core.extensions import DjangoContext, render_to_response
from django.core.template import loader
from django.models.auth import users
from django.views.decorators.auth import login_required
from django.utils.httpwrappers import HttpResponseRedirect
class PasswordResetForm(formfields.Manipulator):
"A form that lets a user request a password reset"
def __init__(self):
self.fields = (
formfields.EmailField(field_name="email", length=40, is_required=True,
validator_list=[self.isValidUserEmail]),
)
def isValidUserEmail(self, new_data, all_data):
"Validates that a user exists with the given e-mail address"
try:
self.user_cache = users.get_object(email__iexact=new_data)
except users.UserDoesNotExist:
            raise validators.ValidationError, "That e-mail address doesn't have an associated user account. Are you sure you've registered?"
def save(self, domain_override=None):
"Calculates a new password randomly and sends it to the user"
from django.core.mail import send_mail
from django.models.core import sites
new_pass = users.make_random_password()
self.user_cache.set_password(new_pass)
self.user_cache.save()
if not domain_override:
current_site = sites.get_current()
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
t = loader.get_template('registration/password_reset_email')
c = {
'new_password': new_pass,
'email': self.user_cache.email,
'domain': domain,
'site_name': site_name,
'user': self.user_cache,
}
send_mail('Password reset on %s' % site_name, t.render(c), None, [self.user_cache.email])
class PasswordChangeForm(formfields.Manipulator):
"A form that lets a user change his password."
def __init__(self, user):
self.user = user
self.fields = (
formfields.PasswordField(field_name="old_password", length=30, maxlength=30, is_required=True,
validator_list=[self.isValidOldPassword]),
formfields.PasswordField(field_name="new_password1", length=30, maxlength=30, is_required=True,
validator_list=[validators.AlwaysMatchesOtherField('new_password2', "The two 'new password' fields didn't match.")]),
formfields.PasswordField(field_name="new_password2", length=30, maxlength=30, is_required=True),
)
def isValidOldPassword(self, new_data, all_data):
"Validates that the old_password field is correct."
if not self.user.check_password(new_data):
raise validators.ValidationError, "Your old password was entered incorrectly. Please enter it again."
def save(self, new_data):
"Saves the new password."
self.user.set_password(new_data['new_password1'])
self.user.save()
def password_reset(request, is_admin_site=False):
new_data, errors = {}, {}
form = PasswordResetForm()
if request.POST:
new_data = request.POST.copy()
errors = form.get_validation_errors(new_data)
if not errors:
if is_admin_site:
form.save(request.META['HTTP_HOST'])
else:
form.save()
return HttpResponseRedirect('%sdone/' % request.path)
return render_to_response('registration/password_reset_form', {'form': formfields.FormWrapper(form, new_data, errors)},
context_instance=DjangoContext(request))
def password_reset_done(request):
return render_to_response('registration/password_reset_done', context_instance=DjangoContext(request))
def password_change(request):
new_data, errors = {}, {}
form = PasswordChangeForm(request.user)
if request.POST:
new_data = request.POST.copy()
errors = form.get_validation_errors(new_data)
if not errors:
form.save(new_data)
return HttpResponseRedirect('%sdone/' % request.path)
return render_to_response('registration/password_change_form', {'form': formfields.FormWrapper(form, new_data, errors)},
context_instance=DjangoContext(request))
password_change = login_required(password_change)
def password_change_done(request):
return render_to_response('registration/password_change_done', context_instance=DjangoContext(request))
| {
"content_hash": "2ce634bfbcee6bb9d625eb3461888f44",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 139,
"avg_line_length": 45.31,
"alnum_prop": 0.6618847936437873,
"repo_name": "tungvx/deploy",
"id": "09d3037560b2f4c8e16ef2f97b56d367c50946e2",
"size": "4531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Django-0.90/django/views/registration/passwords.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400492"
},
{
"name": "JavaScript",
"bytes": "477245"
},
{
"name": "Python",
"bytes": "16861113"
},
{
"name": "Shell",
"bytes": "8221"
}
],
"symlink_target": ""
} |
import math
import random
import pickle
ALPHA = 1
def sigmoid(x):
return 1/(1 + math.pow(math.e, -x))
def linear(x):
return x
"""NEURONS"""
class Neuron(object):
def __init__(self, fn):
self.sum = 0
self.fn = fn
def add(self, value):
pass
def clear(self):
self.sum = 0
def output(self):
return self.fn(self.sum)
class InputNeuron(Neuron):
def __init__(self):
super(InputNeuron, self).__init__(linear)
def set(self, _input):
self.input = _input
def output(self):
return self.input
class HiddenNeuron(Neuron):
def __init__(self, fn = sigmoid):
super(HiddenNeuron, self).__init__(fn)
def add(self, value):
self.sum += value
class OutputNeuron(HiddenNeuron):
def __init__(self, fn = sigmoid):
super(OutputNeuron, self).__init__(fn)
def result(self):
return self.output()
class BiasNeuron(InputNeuron):
def __init__(self, val = 1):
super(BiasNeuron, self).__init__()
self.set(val)
class Weights(object):
def __init__(self, parent, child):
self.parent = parent
self.child = child
self.weights = {}
for n1 in parent.neurons:
for n2 in child.neurons:
self.weights[(n1, n2)] = 0
def randomize(self):
for key, val in self.weights.items():
self.weights[key] = random.randint(-2, 2)
def get(self, key):
return self.weights[key]
def set(self, key, val):
self.weights[key] = val
def set_matrix(self, matrix):
for i in range(0, len(self.parent.neurons)):
n1 = self.parent.neurons[i]
for j in range(0, len(self.child.neurons)):
n2 = self.child.neurons[j]
if not isinstance(n2, BiasNeuron):
self.weights[(n1, n2)] = matrix[i][j]
def items(self):
return self.weights.items()
"""LAYERS"""
class Layer(object):
def __init__(self, size):
self.child = None
self.parent = None
def connect(self, layer):
self._set_child(layer)
layer._set_parent(self)
self.weights = Weights(self, layer)
def clean(self):
for n in self.neurons:
n.clear()
self.errors = {}
def forward_prop(self):
self.child.clean()
for n1 in self.neurons:
for n2 in self.child.neurons:
if not isinstance(n2, BiasNeuron):
n2.add(n1.output() * self.weights.get((n1, n2)))
def back_prop(self, output, expected):
pass
def _set_parent(self, layer):
self.parent = layer
def _set_child(self, layer):
self.child = layer
def weight_update(self):
weights = self.parent.weights
for n1 in self.parent.neurons:
for n2 in self.neurons:
weights.set((n1, n2), weights.get((n1, n2)) + ALPHA * self.errors[n2] * n1.output())
class HiddenLayer(Layer):
def __init__(self, size):
self.child = None
self.parent = None
self.neurons = [ HiddenNeuron() for i in range(0, size)]
self.neurons.append(BiasNeuron())
self.errors = {}
def forward_prop(self):
super().forward_prop()
self.child.forward_prop()
def back_prop(self):
for n1 in self.neurons:
_sum = sum([ self.child.errors[n2] * self.weights.get((n1, n2)) for n2 in self.child.neurons])
self.errors[n1] = n1.output() * (1 - n1.output()) * _sum
self.parent.back_prop()
def weight_update(self):
super().weight_update()
self.parent.weight_update()
class InputLayer(Layer):
def __init__(self, size):
self.child = None
self.parent = None
self.neurons = [ InputNeuron() for i in range(0, size)]
self.neurons.append(BiasNeuron())
def forward_prop(self, inputs):
#Set inputs
for i in range(0, len(inputs)):
self.neurons[i].set(inputs[i])
super().forward_prop()
self.child.forward_prop()
def _set_parent(self, layer):
raise NotImplementedError("You cannot use this method")
def back_prop(self):
pass
def weight_update(self):
pass
class OutputLayer(Layer):
def __init__(self, size):
self.child = None
self.parent = None
self.neurons = [ OutputNeuron() for i in range(0, size)]
self.errors = {}
def _set_child(self, layer):
raise NotImplementedError("You cannot use this method")
def forward_prop(self):
pass
def result(self):
return [ neuron.result() for neuron in self.neurons ]
def back_prop(self, output, expected):
for i in range(0, len(self.neurons)):
neuron = self.neurons[i]
self.errors[neuron] = output[i] * (expected[i] - output[i]) * (1 - output[i])
self.parent.back_prop()
def weight_update(self):
super().weight_update()
self.parent.weight_update()
class Network(object):
def __init__(self, _in, _out, hidden):
self.layers = []
#create layers
parentLayer = InputLayer(_in)
self.inputLayer = parentLayer
self.layers.append(parentLayer)
for h in hidden:
newLayer = HiddenLayer(h)
parentLayer.connect(newLayer)
parentLayer.weights.randomize()
self.layers.append(newLayer)
parentLayer = newLayer
newLayer = OutputLayer(_out)
parentLayer.connect(newLayer)
parentLayer.weights.randomize()
self.layers.append(newLayer)
self.outputLayer = newLayer
def train(self, inputs, expected):
self.inputLayer.forward_prop(inputs)
result = self.outputLayer.result()
error = self.calc_error(result, expected)
self.outputLayer.back_prop(result, expected)
self.outputLayer.weight_update()
return error
def calc_error(self, result, expected):
return sum([ math.pow(result[i] - expected[i], 2) for i in range(0, len(result))])
def predict(self, inputs):
self.inputLayer.forward_prop(inputs)
return self.outputLayer.result()
def save(self, fileName = "network.bin"):
with open(fileName, "wb") as f:
            pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, fileName = "network.bin"):
with open(fileName, "rb") as f:
return pickle.load(f)
| {
"content_hash": "a8eff397e6ffc58d385f572ac3f4ac08",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 97,
"avg_line_length": 22.518072289156628,
"alnum_prop": 0.6695202425539504,
"repo_name": "zaibacu/pynn",
"id": "797ccd19d148f88205dd7efa50228aa5bea32309",
"size": "5607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9320"
}
],
"symlink_target": ""
} |
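A quick training sketch against the classic XOR problem, assuming the file above is saved as nn.py; with integer random weights in [-2, 2] and ALPHA = 1 convergence is not guaranteed, so a rerun may be needed.
# XOR training sketch (assumes the module above is saved as nn.py)
from nn import Network

net = Network(2, 1, [4])   # 2 inputs, 1 output neuron, one hidden layer of 4
samples = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]

for epoch in range(5000):
    for inputs, expected in samples:
        net.train(inputs, expected)

for inputs, expected in samples:
    print(inputs, expected, [round(v, 3) for v in net.predict(inputs)])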
from django.conf.urls import url
from tracker.views import *
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
app_name = 'tracker'
urlpatterns = [
#url(r'^$', lambda r: HttpResponseRedirect('issues')),
url(r'^issues/$', issue_view, name='issues'),
url(r'^notifications/$', login_required(NotificationListView.as_view()), name='notifications'),
url(r'^maintenance/$', maintenance, name='maintenance'),
url(r'^documents/$', ListDocument.as_view(), name='documents'),
url(r'^documents/department/(?P<dept_name>[A-Z a-z]+)/$', ListDepartmentDocument.as_view(), name='department_documents'),
url(r'^documents/myuploads/$', ListMyDocument.as_view(), name='my_documents'),
url(r'^documents/upload/$', CreateDocument.as_view(), name='upload_document'),
url(r'^search/$', search, name='search_document'),
url(r'^hashids/$', try_hashids, name='try_hashids'),
] | {
"content_hash": "d50c65b658eaa50f5060a612d5669dfd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 122,
"avg_line_length": 50.833333333333336,
"alnum_prop": 0.7191256830601093,
"repo_name": "giantas/elibrary",
"id": "5866033e00848bf77cd0bdd9b3a5224827964135",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1252319"
},
{
"name": "HTML",
"bytes": "76721"
},
{
"name": "JavaScript",
"bytes": "799802"
},
{
"name": "Python",
"bytes": "85992"
}
],
"symlink_target": ""
} |
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = zip(training_inputs, training_results)
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
| {
"content_hash": "0751d96a395e7ed93586359c49ec348a",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 81,
"avg_line_length": 44.16883116883117,
"alnum_prop": 0.6833284328138782,
"repo_name": "quoniammm/happy-machine-learning",
"id": "c768b30212490bf96274cab7b9d6dc91795de14e",
"size": "3401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mine_DL_ML/mnist_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3908116"
},
{
"name": "Jupyter Notebook",
"bytes": "7157646"
},
{
"name": "Matlab",
"bytes": "280414"
},
{
"name": "Python",
"bytes": "54572"
}
],
"symlink_target": ""
} |
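A small sketch of consuming the wrapper, assuming mnist.pkl.gz is present in the working directory; note that in Python 3 the returned zip objects are one-shot iterators, so materialize them if you need repeated passes.
# Usage sketch (assumes mnist.pkl.gz sits next to the script)
import numpy as np
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)   # zip() is a one-shot iterator in Python 3

x, y = training_data[0]
print(x.shape)            # (784, 1) column of pixel intensities in [0, 1]
print(int(np.argmax(y)))  # training label recovered from the one-hot vector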
from json import loads
from manager_rest.test import base_test
class UserTestCase(base_test.BaseServerTestCase):
def test_get_user(self):
response = self.get('/user')
result = loads(response.data)
self._assert_response(result)
def _assert_response(self, result):
self.assertEqual('admin', result['username'])
self.assertEqual('sys_admin', result['role'])
self.assertEqual(0, result['groups'])
self.assertEqual(1, result['tenants'])
self.assertEqual(True, result['active'])
self.assertEqual(False, result['is_locked'])
self.assertEqual(True, result['show_getting_started'])
| {
"content_hash": "de2c3439218ac66f56c25e530fabf64b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 33.3,
"alnum_prop": 0.6576576576576577,
"repo_name": "cloudify-cosmo/cloudify-manager",
"id": "27a511bd74a0cd89482a2193c0d014127d2ccdb2",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest-service/manager_rest/test/endpoints/test_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Dockerfile",
"bytes": "3843"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "119062"
},
{
"name": "Python",
"bytes": "3825971"
},
{
"name": "Shell",
"bytes": "49121"
}
],
"symlink_target": ""
} |
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.GetRosParam import GetRosParam
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.log_state import LogState
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.moveit_move import MoveitMove
from sara_flexbe_states.set_gripper_state import SetGripperState
from sara_flexbe_states.SetRosParam import SetRosParam
from sara_flexbe_states.sara_follow import SaraFollow
from sara_flexbe_states.torque_reader import ReadTorque
from sara_flexbe_states.sara_say import SaraSay
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.KeepLookingAt import KeepLookingAt
from flexbe_states.calculation_state import CalculationState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu May 10 2018
@author: Philippe La Madeleine
'''
class Action_GiveSM(Behavior):
'''
give the content of the gripper to a person.
'''
def __init__(self):
super(Action_GiveSM, self).__init__()
self.name = 'Action_Give'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:1195 y:433, x:132 y:431, x:750 y:42, x:991 y:471
_state_machine = OperatableStateMachine(outcomes=['Given', 'Person_not_found', 'No_object_in_hand', 'fail'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:130 y:365
_sm_lookat_0 = OperatableStateMachine(outcomes=['failed'], input_keys=['ID'])
with _sm_lookat_0:
# x:114 y:127
OperatableStateMachine.add('look',
KeepLookingAt(),
transitions={'failed': 'look'},
autonomy={'failed': Autonomy.Off},
remapping={'ID': 'ID'})
# x:299 y:300, x:263 y:535
_sm_give_1 = OperatableStateMachine(outcomes=['failed', 'given'], input_keys=['Object'])
with _sm_give_1:
# x:67 y:27
OperatableStateMachine.add('SetPose',
SetKey(Value="ShowGripper"),
transitions={'done': 'say_give'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'target'})
# x:53 y:413
OperatableStateMachine.add('read torque',
ReadTorque(watchdog=5, Joint="right_elbow_pitch_joint", Threshold=2, min_time=1),
transitions={'threshold': 'open gripper', 'watchdog': 'read torque', 'fail': 'failed'},
autonomy={'threshold': Autonomy.Off, 'watchdog': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'torque': 'torque'})
# x:52 y:500
OperatableStateMachine.add('open gripper',
SetGripperState(width=0.15, effort=1),
transitions={'object': 'given', 'no_object': 'given'},
autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
remapping={'object_size': 'object_size'})
# x:64 y:248
OperatableStateMachine.add('say pull',
SaraSay(sentence="You can pull on it", input_keys=[], emotion=1, block=False),
transitions={'done': 'wait 1'},
autonomy={'done': Autonomy.Off})
# x:64 y:325
OperatableStateMachine.add('wait 1',
WaitState(wait_time=1),
transitions={'done': 'read torque'},
autonomy={'done': Autonomy.Off})
# x:57 y:175
OperatableStateMachine.add('moveArm',
MoveitMove(move=True, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'say pull', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'target'})
# x:60 y:88
OperatableStateMachine.add('say_give',
SaraSay(sentence=lambda x: "Hi. I'm giving you this "+str(x), input_keys=[], emotion=0, block=True),
transitions={'done': 'moveArm'},
autonomy={'done': Autonomy.Off})
# x:596 y:480
_sm_follow_2 = OperatableStateMachine(outcomes=['finished'], input_keys=['ID'])
with _sm_follow_2:
# x:180 y:123
OperatableStateMachine.add('follow',
SaraFollow(distance=1.5, ReplanPeriod=0.5),
transitions={'failed': 'follow'},
autonomy={'failed': Autonomy.Off},
remapping={'ID': 'ID'})
# x:313 y:247, x:301 y:177, x:103 y:293, x:343 y:113, x:397 y:411, x:311 y:27, x:630 y:365
_sm_give_3 = ConcurrencyContainer(outcomes=['failed', 'given', 'continue'], input_keys=['ID', 'Object'], conditions=[
('failed', [('Give', 'failed')]),
('given', [('Give', 'given')]),
('given', [('Follow', 'finished')]),
('failed', [('LookAt', 'failed')])
])
with _sm_give_3:
# x:91 y:50
OperatableStateMachine.add('Follow',
_sm_follow_2,
transitions={'finished': 'given'},
autonomy={'finished': Autonomy.Inherit},
remapping={'ID': 'ID'})
# x:84 y:164
OperatableStateMachine.add('Give',
_sm_give_1,
transitions={'failed': 'failed', 'given': 'given'},
autonomy={'failed': Autonomy.Inherit, 'given': Autonomy.Inherit},
remapping={'Object': 'Object'})
# x:175 y:371
OperatableStateMachine.add('LookAt',
_sm_lookat_0,
transitions={'failed': 'failed'},
autonomy={'failed': Autonomy.Inherit},
remapping={'ID': 'ID'})
with _state_machine:
# x:77 y:29
OperatableStateMachine.add('Get hand content',
GetRosParam(ParamName="behavior/GripperContent"),
transitions={'done': 'is object in hand?', 'failed': 'fail'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'Value': 'Object'})
# x:58 y:108
OperatableStateMachine.add('is object in hand?',
CheckConditionState(predicate=lambda x: x),
transitions={'true': 'name', 'false': 'log empty hand'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'Object'})
# x:70 y:277
OperatableStateMachine.add('list persons',
list_entities_by_name(frontality_level=0.5, distance_max=10),
transitions={'found': 'get id', 'none_found': 'Person_not_found'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'name', 'entity_list': 'People_list', 'number': 'number'})
# x:414 y:37
OperatableStateMachine.add('log empty hand',
LogState(text="The hand is empty. Set the GripperContent rosParam", severity=Logger.REPORT_HINT),
transitions={'done': 'No_object_in_hand'},
autonomy={'done': Autonomy.Off})
# x:754 y:223
OperatableStateMachine.add('log moveitfail',
LogState(text="moveit failed", severity=Logger.REPORT_HINT),
transitions={'done': 'fail'},
autonomy={'done': Autonomy.Off})
# x:606 y:371
OperatableStateMachine.add('log movebase fail',
LogState(text="giving Failed", severity=Logger.REPORT_HINT),
transitions={'done': 'fail'},
autonomy={'done': Autonomy.Off})
# x:402 y:133
OperatableStateMachine.add('set idle pose',
SetKey(Value="IdlePose"),
transitions={'done': 'say_good'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'target'})
# x:751 y:108
OperatableStateMachine.add('moveArm2',
MoveitMove(move=True, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'set none', 'failed': 'log moveitfail'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'target'})
# x:920 y:278
OperatableStateMachine.add('close gripper',
SetGripperState(width=0, effort=1),
transitions={'object': 'Given', 'no_object': 'Given'},
autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
remapping={'object_size': 'object_size'})
# x:1048 y:236
OperatableStateMachine.add('remove gripper content',
SetRosParam(ParamName="GripperContent"),
transitions={'done': 'close gripper'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'none'})
# x:910 y:182
OperatableStateMachine.add('set none',
SetKey(Value=None),
transitions={'done': 'close gripper'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'none'})
# x:408 y:333
OperatableStateMachine.add('give',
_sm_give_3,
transitions={'failed': 'log movebase fail', 'given': 'set idle pose', 'continue': 'give'},
autonomy={'failed': Autonomy.Inherit, 'given': Autonomy.Inherit, 'continue': Autonomy.Inherit},
remapping={'ID': 'ID', 'Object': 'Object'})
# x:256 y:278
OperatableStateMachine.add('get id',
CalculationState(calculation=lambda x: x[0].ID),
transitions={'done': 'give'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'People_list', 'output_value': 'ID'})
# x:68 y:192
OperatableStateMachine.add('name',
SetKey(Value="person"),
transitions={'done': 'list persons'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'name'})
# x:591 y:138
OperatableStateMachine.add('say_good',
SaraSay(sentence=lambda x: "Good, enjoy your "+str(x), input_keys=[], emotion=0, block=True),
transitions={'done': 'moveArm2'},
autonomy={'done': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| {
"content_hash": "15e8bfac84d7a3f0b33d2945ea389e4f",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 119,
"avg_line_length": 35.872262773722625,
"alnum_prop": 0.6241733645335232,
"repo_name": "WalkingMachine/sara_behaviors",
"id": "cbe28f27bf07d835654278475b19593f281497e2",
"size": "10236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sara_flexbe_behaviors/src/sara_flexbe_behaviors/action_give_sm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "6456"
},
{
"name": "CMake",
"bytes": "2065"
},
{
"name": "Python",
"bytes": "905600"
},
{
"name": "Shell",
"bytes": "2661"
}
],
"symlink_target": ""
} |
"""Low level testing function (draw a cube)
This module provides a simple function for drawing
a cube. It is used by various modules for low level
testing purposes (i.e. in module-level rather than
system level tests).
This version was taken from the NeHe tutorials,
to replace the original which did not include
texture coordinate information.
"""
from OpenGL.GL import *
from OpenGL.arrays import vbo
from OpenGLContext.arrays import array
from OpenGLContext.scenegraph import box
VBO = None
def drawCube():
"""Draw a cube 2,2,2 units centered around the origin"""
# draw six faces of a cube
global VBO
if VBO is None:
if vbo.get_implementation():
data = vbo.VBO( array( list(box.yieldVertices( (2,2,2) )), 'f') )
def draw():
data.bind()
try:
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS)
try:
                        glEnableClientState( GL_VERTEX_ARRAY )
                        glEnableClientState( GL_NORMAL_ARRAY )
                        glEnableClientState( GL_TEXTURE_COORD_ARRAY )
glVertexPointer( 3, GL_FLOAT, 32, data+20 )
glNormalPointer( GL_FLOAT, 32, data+8 )
glTexCoordPointer( 2, GL_FLOAT, 32, data )
glDrawArrays( GL_TRIANGLES, 0, 36 )
finally:
glPopClientAttrib()
finally:
data.unbind()
VBO = draw
else:
            data = array( list(box.yieldVertices( (2,2,2) )), 'f')
            def draw():
                # plain numpy array path: no VBO to bind or unbind
                glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS)
                try:
                    # interleaved arrays is not 3.1 compatible,
                    # but this is the old-code path...
                    glInterleavedArrays( GL_T2F_N3F_V3F, 0, data )
                    glDrawArrays( GL_TRIANGLES, 0, 36 )
                finally:
                    glPopClientAttrib()
            VBO = draw
return VBO() | {
"content_hash": "15a2cab4298195f9bca87cce46052f42",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 36.94827586206897,
"alnum_prop": 0.5142323845076995,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "4f555ce3ada735e80060766e05f3a4ba67d2659a",
"size": "2143",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/drawcube.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
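The byte offsets hard-coded above (data+20 for vertices, data+8 for normals, stride 32) follow directly from the T2F_N3F_V3F interleaving; a small arithmetic check, not part of the library:
# Layout arithmetic behind the hard-coded offsets above (illustrative only)
FLOAT = 4                              # bytes per GLfloat
texcoord_offset = 0                    # 2 floats: bytes 0..7
normal_offset   = 2 * FLOAT            # 8  -> matches glNormalPointer(..., data+8)
vertex_offset   = (2 + 3) * FLOAT      # 20 -> matches glVertexPointer(..., data+20)
stride          = (2 + 3 + 3) * FLOAT  # 32 bytes per interleaved vertex record
print(texcoord_offset, normal_offset, vertex_offset, stride)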
import os, sys, re
import numpy as np
import pyfits, pylab
from .dataset import Dataset
class DECaLSData(Dataset):
def __init__(self, decalsfilename=None):
"""DECaLSData(decalsfilename=None)
Read in DECaLS catalog data from FITS file.
"""
Dataset.__init__(self, decalsfilename, "DECaLSData", '')
self.readin()
@classmethod
def flux_to_magnitude(cls, flux, ivar):
"""flux_to_magnitude(cls, flux)
Convert flux(es) into magnitude(s).
Examples:
>>> DECaLSData.flux_to_magnitude(20, 0.1)
19.247425010840047
>>> DECaLSData.flux_to_magnitude(-20, 0.1)
21.25
>>> DECaLSData.flux_to_magnitude([20, -20], [0.1, 0.1])
array([ 19.24742501, 21.25 ])
"""
# Version 1: convert abs(flux) to magnitude,
# and flip the sign of anything negative.
#s = np.sign(flux)
#magnitude = s * (-2.5 * np.log10(np.abs(flux)) + 22.5)
# Version 2: convert abs(flux) to magnitude,
# and replace any fluxes that are < 1 sigma with the 1-sigma upper limit.
rms = np.atleast_1d(1 / np.sqrt(ivar))
# Convert flux to an np.array so this works whether input
# is scalar or array
flux = np.atleast_1d(flux)
flux2 = np.atleast_1d(rms) # initialize with rms
flux2[flux > rms] = flux[flux > rms] # override if flux is valid
magnitude = (-2.5 * np.log10(flux2)) + 22.5
# Return an array if more than one result; else return a scalar
if len(magnitude) > 1:
return magnitude
else:
return magnitude[0]
def readin(self):
"""readin()
Read in DECaLS table data from FITS file.
"""
datafile = pyfits.open(self.filename)
# Read in the desired columns.
'''
# First the easy ones: we'll index them with [1,2,4] to get G,R,Z bands
columns = ['DECAM_FLUX',
'DECAM_ANYMASK',
'DECAM_FRACFLUX',
'DECAM_FRACMASKED',
'DECAM_RCHI2']
# Use the G,R,Z bands for several features
self.data = datafile[1].data[:].field(columns[0])[:,[1,2,4]]
self.features = ['%s %s' % (columns[0], b) for b in ['G','R','Z']]
for c in columns[1:]:
self.data = np.hstack([self.data,
datafile[1].data[:].field(c)[:,[1,2,4]]])
self.features += ['%s %s' % (c, b) for b in ['G','R','Z']]
'''
# Compute the color ratios
model_type = datafile[1].data[:].field('TYPE')
#model_type_to_use = 'PSF'
#model_type_to_use = 'SIMP'
#model_type_to_use = 'EXP'
model_type_to_use = 'DEV'
use_data = datafile[1].data[model_type == model_type_to_use]
G = use_data.field('DECAM_FLUX')[:,1]
R = use_data.field('DECAM_FLUX')[:,2]
Z = use_data.field('DECAM_FLUX')[:,4]
G_ivar = use_data.field('DECAM_FLUX_IVAR')[:,1]
R_ivar = use_data.field('DECAM_FLUX_IVAR')[:,2]
Z_ivar = use_data.field('DECAM_FLUX_IVAR')[:,4]
# Z magnitude
self.data = DECaLSData.flux_to_magnitude(Z, Z_ivar)
self.features = ['Z']
# difference G-R
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(G, G_ivar) -
DECaLSData.flux_to_magnitude(R, R_ivar)])
self.features += ['G-R']
'''
# difference G-Z
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(G, G_ivar) -
DECaLSData.flux_to_magnitude(Z, Z_ivar)])
self.features += ['G-Z']
'''
# difference R-Z
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(R, R_ivar) -
DECaLSData.flux_to_magnitude(Z, Z_ivar)])
self.features += ['R-Z']
# WISE features
W1 = use_data.field('WISE_FLUX')[:,0]
W2 = use_data.field('WISE_FLUX')[:,1]
W1_ivar = use_data.field('WISE_FLUX_IVAR')[:,0]
W2_ivar = use_data.field('WISE_FLUX_IVAR')[:,1]
# WISE difference Z - W1
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude( Z, Z_ivar) -
DECaLSData.flux_to_magnitude(W1, W1_ivar)])
self.features += ['Z - W1']
# WISE difference W1 - W2
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(W1, W1_ivar) -
DECaLSData.flux_to_magnitude(W2, W2_ivar)])
self.features += ['W1 - W2']
'''
G = use_data.field('DECAM_APFLUX')[:,1,2]
R = use_data.field('DECAM_APFLUX')[:,2,2]
Z = use_data.field('DECAM_APFLUX')[:,4,2]
G_ivar = use_data.field('DECAM_APFLUX_IVAR')[:,1,2]
R_ivar = use_data.field('DECAM_APFLUX_IVAR')[:,2,2]
Z_ivar = use_data.field('DECAM_APFLUX_IVAR')[:,4,2]
# aperture difference G-R
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(G, G_ivar) -
DECaLSData.flux_to_magnitude(R, R_ivar)])
self.features += ['AP G-R']
# aperture difference G-Z
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(G, G_ivar) -
DECaLSData.flux_to_magnitude(Z, Z_ivar)])
self.features += ['AP G-Z']
# aperture difference R-Z
self.data = np.vstack([self.data,
DECaLSData.flux_to_magnitude(R, R_ivar) -
DECaLSData.flux_to_magnitude(Z, Z_ivar)])
self.features += ['AP R-Z']
'''
#self.data = self.data.T # features x samples
self.labels = ['%s_%d_%.6f_%.6f' % (b,id,ra,dec) for (b,id,ra,dec) in \
zip(use_data.field('BRICKNAME'),
use_data.field('OBJID'),
use_data.field('RA'),
use_data.field('DEC'))]
datafile.close()
self.xvals = np.arange(self.data.shape[0]).reshape(-1,1)
self.features = np.array(self.features)
def plot_item(self, m, ind, x, r, k, label, U, rerr, feature_weights):
"""plot_item(self, m, ind, x, r, k, label, U, rerr, feature_weights)
Borrowed from UCIDataset.
Plot selection m (index ind, data in x) and its reconstruction r,
with k and label to annotate of the plot.
U and rerr are here ignored. Could use them to plot a projection
into the first two PCs' space (see dataset_libs.py).
If feature_weights are specified, omit any 0-weighted features
from the plot.
"""
if len(x) == 0 or len(r) == 0:
print("Error: No data in x and/or r.")
return
# Select the features to plot
if len(feature_weights) > 0:
goodfeat = [f for f in range(len(feature_weights)) \
if feature_weights[f] > 0]
else:
goodfeat = list(range(len(self.xvals)))
# Make a dual bar graph of the original and reconstructed features
width = 0.35
        offset = (1 - 2*width) / 2  # center the pair of bars within each unit slot
fig = pylab.figure()
ax = fig.add_subplot(1, 1, 1)
x = np.array(x)
xvals = [self.xvals[z][0] for z in range(self.xvals.shape[0])]
x = [x[z] for z in range(x.shape[0])]
bars1 = ax.bar([xvals[i] + offset for i in goodfeat],
x, width, color='b', label='Observations')
bars2 = ax.bar([xvals[i] + width + offset for i in goodfeat],
r, width, color='r', label='Expected')
# dashed line to show 0
pylab.plot([0, len(self.features)], [0, 0], '--')
pylab.xlabel(self.xlabel)
pylab.ylabel(self.ylabel)
pylab.title('DEMUD selection %d (%s),\n item %d, using K=%d' % \
(m, label, ind, k))
pylab.legend(fontsize=10)
padding = 1.19
pylab.ylim([min(0, float(min(min(x), min(r))) * padding),
max(0, float(max(max(x), max(r))) * padding)])
if len(self.features) == 0:
pylab.xticks(pylab.arange(len(x)) + width + offset, list(range(len(x))))
else:
pylab.xticks(pylab.arange(len(x)) + width + offset, self.features,
rotation=-30, ha='left')
pylab.tight_layout()
if not os.path.exists('results'):
os.mkdir('results')
outdir = os.path.join('results', self.name)
if not os.path.exists(outdir):
os.mkdir(outdir)
figfile = os.path.join(outdir, 'sel-%d-k-%d-(%s).png' % (m, k, label))
pylab.savefig(figfile)
print('Wrote plot to %s' % figfile)
pylab.close()
# Write a list of the selections in CSV format
def write_selections_csv(self, i, k, orig_ind, label, ind, scores):
outdir = os.path.join('results', self.name)
selfile = os.path.join(outdir, 'selections-k%d.csv' % k)
(brickname, objid, RA, DEC) = label.split('_')
# If this is the first selection, open for write
# to clear out previous run.
if i == 0:
fid = open(selfile, 'w')
# Output a header. For some data sets, the label is a class;
# for others it is an object identifier. To be generic,
# here we call this 'Name'.
fid.write('# Selection, Index, Name, RA, DEC, Score\n')
# If scores is empty, the (first) selection was pre-specified,
# so there are no scores. Output 0 for this item.
if len(scores) == 0:
fid.write('%d,%d,%s_%s,%s,%s,0.0\n' % (i, orig_ind, brickname, objid,
RA, DEC))
else:
fid.write('%d,%d,%s_%s,%s,%s,%g\n' % (i, orig_ind, brickname, objid,
RA, DEC, scores[ind]))
else:
# Append to the CSV file
fid = open(selfile, 'a')
fid.write('%d,%d,%s_%s,%s,%s,%g\n' % (i, orig_ind, brickname, objid,
RA, DEC, scores[ind]))
# Close the file
fid.close()
# Also, append selections to a growing .html file
self.write_selections_html(10, i, k, ind, label, scores)
# Write a list of n selections that are similar to selection i (index ind)
# using scores (with respect to selection i).
def write_selections_html(self, n, i, k, ind, label, scores):
outdir = os.path.join('results', self.name)
selfile = os.path.join(outdir, 'selections-k%d.html' % k)
(brickname, objid, RA, DEC) = label.split('_')
# If this is the first selection, open for write
# to clear out previous run.
if i == 0:
# Start up the HTML file
fid = open(selfile, 'w')
fid.write('<html><head><title>DEMUD: %s, k=%d</title></head>\n' % (self.name, k))
fid.write('<body>\n')
fid.write('<h1>DEMUD experiments on %s with k=%d</h1>\n' % (self.name, k))
fid.write('<ul>\n')
fid.write('<li>Selections are presented in decreasing order of novelty.</li>\n')
fid.write('<li>The bar plot shows the <font color="blue">observed</font> values compared to the <font color="red">expected (modeled)</font> values. Discrepancies explain why the chosen object is considered novel. Click to enlarge.</li>\n')
fid.write('<li>Clicking the object image will take you to the DECaLS sky survey.</li>\n')
fid.write('<li>Scores close to 0 (for items other than the first one) indicate an arbitrary choice; novelty has been exhausted.</li>\n')
fid.write('</ul>\n\n')
# If scores is empty, the (first) selection was pre-specified,
# so there are no scores. Output -1 for this item.
if len(scores) == 0:
score = 'N/A'
else:
score = '%f' % scores[ind]
else:
# Append to the HTML file
fid = open(selfile, 'a')
score = scores[ind]
fid.write('<h2>Selection %d: RA %s, DEC %s, score %s</h2>\n' % (i, RA, DEC, score))
fid.write('<a href="http://legacysurvey.org/viewer?ra=%s&dec=%s&zoom=13&layer=decals-dr3" id="[%d] %s %s">\n<img title="[%d] %s %s" src="http://legacysurvey.org/viewer/jpeg-cutout/?ra=%s&dec=%s&pixscale=0.27&size=256"></a>\n' %
(RA, DEC,
i, brickname, objid,
i, brickname, objid, RA, DEC))
figfile = 'sel-%d-k-%d-(%s).png' % (i, k, label)
fid.write('<a href="%s"><img height=270 src="%s"></a>\n\n' %
(figfile, figfile))
# Close the file
fid.close()
if __name__ == "__main__":
# Run inline tests
import doctest
(num_failed, num_tests) = doctest.testmod()
filename = os.path.basename(__file__)
if num_failed == 0:
print("%-20s All %3d tests passed!" % (filename, num_tests))
else:
sys.exit(1)
| {
"content_hash": "02541e87bd34f87d4b6f1e9761d06892",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 247,
"avg_line_length": 36.65395894428153,
"alnum_prop": 0.5562044963597088,
"repo_name": "wkiri/DEMUD",
"id": "dd53069e184d5d7799bf23f596ca12d6842c7e6b",
"size": "13373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demud/dataset/dataset_decals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "384831"
}
],
"symlink_target": ""
} |
"""Some utility functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from collections import OrderedDict
from copy import deepcopy
import logging
import json
import numpy as np
from .check import _check_pandas_installed, _check_preload, _validate_type
from ._logging import warn, verbose
from .numerics import object_size, object_hash
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
class SizeMixin(object):
"""Estimate MNE object sizes."""
def __eq__(self, other):
"""Compare self to other.
Parameters
----------
other : object
The object to compare to.
Returns
-------
eq : bool
True if the two objects are equal.
"""
return isinstance(other, type(self)) and hash(self) == hash(other)
@property
def _size(self):
"""Estimate the object size."""
try:
size = object_size(self.info)
except Exception:
warn('Could not get size for self.info')
return -1
if hasattr(self, 'data'):
size += object_size(self.data)
elif hasattr(self, '_data'):
size += object_size(self._data)
return size
def __hash__(self):
"""Hash the object.
Returns
-------
hash : int
The hash
"""
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..io.base import BaseRaw
if isinstance(self, Evoked):
return object_hash(dict(info=self.info, data=self.data))
elif isinstance(self, (BaseEpochs, BaseRaw)):
_check_preload(self, "Hashing ")
return object_hash(dict(info=self.info, data=self._data))
else:
raise RuntimeError('Hashing unknown object type: %s' % type(self))
class GetEpochsMixin(object):
"""Class to add epoch selection and metadata to certain classes."""
def __getitem__(self, item):
"""Return an Epochs object with a copied subset of epochs.
Parameters
----------
item : slice, array-like, str, or list
See below for use cases.
Returns
-------
epochs : instance of Epochs
See below for use cases.
Notes
-----
Epochs can be accessed as ``epochs[...]`` in several ways:
1. **Integer or slice:** ``epochs[idx]`` will return an `~mne.Epochs`
object with a subset of epochs chosen by index (supports single
index and Python-style slicing).
2. **String:** ``epochs['name']`` will return an `~mne.Epochs` object
comprising only the epochs labeled ``'name'`` (i.e., epochs created
around events with the label ``'name'``).
If there are no epochs labeled ``'name'`` but there are epochs
labeled with /-separated tags (e.g. ``'name/left'``,
``'name/right'``), then ``epochs['name']`` will select the epochs
with labels that contain that tag (e.g., ``epochs['left']`` selects
epochs labeled ``'audio/left'`` and ``'visual/left'``, but not
``'audio_left'``).
If multiple tags are provided *as a single string* (e.g.,
``epochs['name_1/name_2']``), this selects epochs containing *all*
provided tags. For example, ``epochs['audio/left']`` selects
``'audio/left'`` and ``'audio/quiet/left'``, but not
``'audio/right'``. Note that tag-based selection is insensitive to
order: tags like ``'audio/left'`` and ``'left/audio'`` will be
treated the same way when selecting via tag.
3. **List of strings:** ``epochs[['name_1', 'name_2', ... ]]`` will
return an `~mne.Epochs` object comprising epochs that match *any* of
the provided names (i.e., the list of names is treated as an
inclusive-or condition). If *none* of the provided names match any
epoch labels, a ``KeyError`` will be raised.
If epoch labels are /-separated tags, then providing multiple tags
*as separate list entries* will likewise act as an inclusive-or
filter. For example, ``epochs[['audio', 'left']]`` would select
``'audio/left'``, ``'audio/right'``, and ``'visual/left'``, but not
``'visual/right'``.
4. **Pandas query:** ``epochs['pandas query']`` will return an
`~mne.Epochs` object with a subset of epochs (and matching
metadata) selected by the query called with
``self.metadata.eval``, e.g.::
epochs["col_a > 2 and col_b == 'foo'"]
would return all epochs whose associated ``col_a`` metadata was
greater than two, and whose ``col_b`` metadata was the string 'foo'.
Query-based indexing only works if Pandas is installed and
``self.metadata`` is a :class:`pandas.DataFrame`.
.. versionadded:: 0.16
"""
return self._getitem(item)
def _item_to_select(self, item):
if isinstance(item, str):
item = [item]
# Convert string to indices
if isinstance(item, (list, tuple)) and len(item) > 0 and \
isinstance(item[0], str):
select = self._keys_to_idx(item)
elif isinstance(item, slice):
select = item
else:
select = np.atleast_1d(item)
if len(select) == 0:
select = np.array([], int)
return select
def _getitem(self, item, reason='IGNORED', copy=True, drop_event_id=True,
select_data=True, return_indices=False):
"""
Select epochs from current object.
Parameters
----------
        item : slice, array-like, str, or list
            See `__getitem__` for details.
        reason : str
            Entry in `drop_log` for unselected epochs.
        copy : bool
            Return a copy of the current object.
        drop_event_id : bool
            Remove non-existing event-ids after selection.
        select_data : bool
            Apply selection to data
            (use `select_data=False` if subclasses do not have a
            valid `_data` field, or data has already been subselected).
        return_indices : bool
            Return the indices of selected epochs from the original object
            in addition to the new `Epochs` object.
Returns
-------
`Epochs` or tuple(Epochs, np.ndarray) if `return_indices` is True
subset of epochs (and optionally array with kept epoch indices)
"""
data = self._data
del self._data
inst = self.copy() if copy else self
self._data = inst._data = data
del self
select = inst._item_to_select(item)
has_selection = hasattr(inst, 'selection')
if has_selection:
key_selection = inst.selection[select]
drop_log = list(inst.drop_log)
if reason is not None:
for k in np.setdiff1d(inst.selection, key_selection):
drop_log[k] = (reason,)
inst.drop_log = tuple(drop_log)
inst.selection = key_selection
del drop_log
inst.events = np.atleast_2d(inst.events[select])
if inst.metadata is not None:
pd = _check_pandas_installed(strict=False)
if pd:
metadata = inst.metadata.iloc[select]
if has_selection:
metadata.index = inst.selection
else:
metadata = np.array(inst.metadata, 'object')[select].tolist()
# will reset the index for us
GetEpochsMixin.metadata.fset(inst, metadata, verbose=False)
if inst.preload and select_data:
# ensure that each Epochs instance owns its own data so we can
# resize later if necessary
inst._data = np.require(inst._data[select], requirements=['O'])
if drop_event_id:
# update event id to reflect new content of inst
inst.event_id = {k: v for k, v in inst.event_id.items()
if v in inst.events[:, 2]}
if return_indices:
return inst, select
else:
return inst
def _keys_to_idx(self, keys):
"""Find entries in event dict."""
keys = keys if isinstance(keys, (list, tuple)) else [keys]
try:
# Assume it's a condition name
return np.where(np.any(
np.array([self.events[:, 2] == self.event_id[k]
for k in _hid_match(self.event_id, keys)]),
axis=0))[0]
except KeyError as err:
# Could we in principle use metadata with these Epochs and keys?
if (len(keys) != 1 or self.metadata is None):
# If not, raise original error
raise
msg = str(err.args[0]) # message for KeyError
pd = _check_pandas_installed(strict=False)
# See if the query can be done
if pd:
md = self.metadata if hasattr(self, '_metadata') else None
self._check_metadata(metadata=md)
try:
# Try metadata
mask = self.metadata.eval(keys[0], engine='python').values
except Exception as exp:
msg += (' The epochs.metadata Pandas query did not '
'yield any results: %s' % (exp.args[0],))
else:
return np.where(mask)[0]
else:
# If not, warn this might be a problem
msg += (' The epochs.metadata Pandas query could not '
'be performed, consider installing Pandas.')
raise KeyError(msg)
def __len__(self):
"""Return the number of epochs.
Returns
-------
n_epochs : int
The number of remaining epochs.
Notes
-----
This function only works if bad epochs have been dropped.
Examples
--------
This can be used as::
>>> epochs.drop_bad() # doctest: +SKIP
>>> len(epochs) # doctest: +SKIP
43
>>> len(epochs.events) # doctest: +SKIP
43
"""
from ..epochs import BaseEpochs
if isinstance(self, BaseEpochs) and not self._bad_dropped:
raise RuntimeError('Since bad epochs have not been dropped, the '
'length of the Epochs is not known. Load the '
'Epochs with preload=True, or call '
'Epochs.drop_bad(). To find the number '
'of events in the Epochs, use '
'len(Epochs.events).')
return len(self.events)
def __iter__(self):
"""Facilitate iteration over epochs.
This method resets the object iteration state to the first epoch.
Notes
-----
This enables the use of this Python pattern::
>>> for epoch in epochs: # doctest: +SKIP
>>> print(epoch) # doctest: +SKIP
Where ``epoch`` is given by successive outputs of
:meth:`mne.Epochs.next`.
"""
self._current = 0
self._current_detrend_picks = self._detrend_picks
return self
def __next__(self, return_event_id=False):
"""Iterate over epoch data.
Parameters
----------
return_event_id : bool
If True, return both the epoch data and an event_id.
Returns
-------
epoch : array of shape (n_channels, n_times)
The epoch data.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
"""
if not hasattr(self, '_current_detrend_picks'):
self.__iter__() # ensure we're ready to iterate
if self.preload:
if self._current >= len(self._data):
self._stop_iter()
epoch = self._data[self._current]
self._current += 1
else:
is_good = False
while not is_good:
if self._current >= len(self.events):
self._stop_iter()
epoch_noproj = self._get_epoch_from_raw(self._current)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, self._current_detrend_picks)
epoch = self._project_epoch(epoch_noproj)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._do_delayed_proj:
epoch = epoch_noproj
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
def _stop_iter(self):
del self._current
del self._current_detrend_picks
raise StopIteration # signal the end
next = __next__ # originally for Python2, now b/c public
def _check_metadata(self, metadata=None, reset_index=False):
"""Check metadata consistency."""
# reset_index=False will not copy!
if metadata is None:
return
else:
pd = _check_pandas_installed(strict=False)
if pd:
_validate_type(metadata, types=pd.DataFrame,
item_name='metadata')
if len(metadata) != len(self.events):
raise ValueError('metadata must have the same number of '
'rows (%d) as events (%d)'
% (len(metadata), len(self.events)))
if reset_index:
if hasattr(self, 'selection'):
# makes a copy
metadata = metadata.reset_index(drop=True)
metadata.index = self.selection
else:
metadata = deepcopy(metadata)
else:
_validate_type(metadata, types=list,
item_name='metadata')
if reset_index:
metadata = deepcopy(metadata)
return metadata
@property
def metadata(self):
"""Get the metadata."""
return self._metadata
@metadata.setter
@verbose
def metadata(self, metadata, verbose=None):
metadata = self._check_metadata(metadata, reset_index=True)
if metadata is not None:
if _check_pandas_installed(strict=False):
n_col = metadata.shape[1]
else:
n_col = len(metadata[0])
n_col = ' with %d columns' % n_col
else:
n_col = ''
if hasattr(self, '_metadata') and self._metadata is not None:
action = 'Removing' if metadata is None else 'Replacing'
action += ' existing'
else:
action = 'Not setting' if metadata is None else 'Adding'
logger.info('%s metadata%s' % (action, n_col))
self._metadata = metadata
def _prepare_write_metadata(metadata):
"""Convert metadata to JSON for saving."""
if metadata is not None:
        if not isinstance(metadata, list):  # pandas DataFrame
            metadata = metadata.to_json(orient='records')
        else:  # list of dicts
            metadata = json.dumps(metadata)
assert isinstance(metadata, str)
return metadata
def _prepare_read_metadata(metadata):
"""Convert saved metadata back from JSON."""
if metadata is not None:
pd = _check_pandas_installed(strict=False)
# use json.loads because this preserves ordering
# (which is necessary for round-trip equivalence)
metadata = json.loads(metadata, object_pairs_hook=OrderedDict)
assert isinstance(metadata, list)
if pd:
metadata = pd.DataFrame.from_records(metadata)
assert isinstance(metadata, pd.DataFrame)
return metadata
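# Round-trip sketch: a DataFrame written with _prepare_write_metadata
# (to_json(orient='records')) and read back with _prepare_read_metadata keeps
# its row and column order, which is why json.loads above uses an OrderedDict
# hook before rebuilding the DataFrame.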
def _hid_match(event_id, keys):
"""Match event IDs using HID selection.
Parameters
----------
event_id : dict
The event ID dictionary.
keys : list | str
The event ID or subset (for HID), or list of such items.
Returns
-------
use_keys : list
The full keys that fit the selection criteria.
"""
# form the hierarchical event ID mapping
use_keys = []
for key in keys:
if not isinstance(key, str):
raise KeyError('keys must be strings, got %s (%s)'
% (type(key), key))
use_keys.extend(k for k in event_id.keys()
if set(key.split('/')).issubset(k.split('/')))
if len(use_keys) == 0:
raise KeyError('Event "{}" is not in Epochs. Event_ids must be one of '
'"{}"'.format(key, ', '.join(event_id.keys())))
use_keys = list(set(use_keys)) # deduplicate if necessary
return use_keys
class _FakeNoPandas(object): # noqa: D101
def __enter__(self): # noqa: D105
def _check(strict=True):
if strict:
raise RuntimeError('Pandas not installed')
else:
return False
import mne
self._old_check = _check_pandas_installed
mne.epochs._check_pandas_installed = _check
mne.utils.mixin._check_pandas_installed = _check
def __exit__(self, *args): # noqa: D105
import mne
mne.epochs._check_pandas_installed = self._old_check
mne.utils.mixin._check_pandas_installed = self._old_check
class ShiftTimeMixin(object):
"""Class for shift_time method (Epochs, Evoked, and DipoleFixed)."""
def shift_time(self, tshift, relative=True):
"""Shift time scale in epoched or evoked data.
Parameters
----------
tshift : float
The (absolute or relative) time shift in seconds. If ``relative``
is True, positive tshift increases the time value associated with
each sample, while negative tshift decreases it.
relative : bool
If True, increase or decrease time values by ``tshift`` seconds.
Otherwise, shift the time values such that the time of the first
sample equals ``tshift``.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
Notes
-----
This method allows you to shift the *time* values associated with each
data sample by an arbitrary amount. It does *not* resample the signal
or change the *data* values in any way.
"""
from ..epochs import BaseEpochs
_check_preload(self, 'shift_time')
start = tshift + (self.times[0] if relative else 0.)
new_times = start + np.arange(len(self.times)) / self.info['sfreq']
if isinstance(self, BaseEpochs):
self._set_times(new_times)
else:
self.times = new_times
self._update_first_last()
return self
def _update_first_last(self):
"""Update self.first and self.last (sample indices)."""
self.first = int(round(self.times[0] * self.info['sfreq']))
self.last = len(self.times) + self.first - 1
| {
"content_hash": "48f11157258414f5187fed6442606d65",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 79,
"avg_line_length": 36.625698324022345,
"alnum_prop": 0.5458612975391499,
"repo_name": "rkmaddox/mne-python",
"id": "79cd45f6c9ed7b981cf6150cce8a94216af7772c",
"size": "19692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/utils/mixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3114"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4400215"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import tornado.ioloop
import tornado.web
import tornado.options
import logging
import json
from feedgen.feed import FeedGenerator
import os
logger = logging
# logger = logging.getLogger('web-codereview')
# logger.setLevel(logging.DEBUG)
# # create console handler and set level to debug
# ch = logging.StreamHandler()
# ch.setLevel(logging.DEBUG)
# # create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# # add formatter to ch
# ch.setFormatter(formatter)
# # add ch to logger
# logger.addHandler(ch)
fg = FeedGenerator()
fg.id('http://codereview.education.scholastic.com')
fg.title('Feed for updates on Hosting/Ops Code Reviews')
fg.description('Feed for updates on Hosting/Ops Code Reviews')
fg.author({'name': 'Hosting and Operations - SETG - Scholastic Inc',
'email': '[email protected]',
'uri': 'http://education.scholastic.com'})
fg.link({
'href': 'https://github.com/ScholasticInc/sch-hosting-chef',
'rel': 'related',
'title': 'Hosting and Ops Github Repository'
})
fg.logo('https://confluence.education.scholastic.com/download/attachments/327682/global.logo?version=2&modificationDate=1332873595000')
fg.language('en')
RSS_PATH = 'rss'
def create_rss():
    feed_path = os.path.join(RSS_PATH, 'feed.xml')
fg.rss_file(feed_path)
def get_json_item(data, path):
"""
`data` is a json dict object
`path` is a list of strings, indicating the path to the get_json_item
"""
_data = data
for fragment in path:
_data = _data.get(fragment,{})
return _data
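# Illustrative sketch: get_json_item(payload, ['pull_request', 'user', 'login'])
# walks payload['pull_request']['user']['login'], returning {} if any key along
# the path is missing.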
def get_author(data):
name = get_json_item(data,['pull_request','user','login'])
email = "%[email protected]" % get_json_item(data,['pull_request','user','login']) #github doesn't provide emails so we make one up
uri = get_json_item(data,['pull_request','user','html_url'])
return {
"name": name,
"email": email,
"uri": uri
}
def make_link(url):
return {
'href': url,
'rel': 'alternate',
'title': url
}
class MainHandler(tornado.web.RequestHandler):
def deal_create_pull_request(self, data):
"""
deals with a pull request that was created
data is a json object
"""
html_url = get_json_item(data,['pull_request','_links','html','href'])
author_name = get_json_item(data,['pull_request','user','login'])
branch = get_json_item(data,['pull_request','head','ref'])
repo = get_json_item(data,['pull_request','base','repo','name'])
pub_date = get_json_item(data, ['pull_request', 'created_at'])
body = '<p><h2>In %s:%s</h2></p>' % (repo, branch)
body += get_json_item(data,['pull_request','body'])
body += '<br />'
title = get_json_item(data,['pull_request','title'])
title_line_break = '<p />'
content = '%s\n%s\n%s\n' % (title, title_line_break, body)
feed_entry = fg.add_entry()
feed_entry.title('A pull request was opened by %s: %s' % (author_name,title))
feed_entry.id(html_url)
feed_entry.author(get_author(data))
feed_entry.content(body)
feed_entry.link(make_link(html_url))
feed_entry.published(pub_date)
create_rss()
def deal_comment_pull_request(self, data):
"""
        Deals with a comment created on an issue or pull request.
        `data` is the parsed JSON webhook payload.
"""
html_url = get_json_item(data,['comment','url'])
author_name = get_json_item(data,['comment','user','login'])
repo = get_json_item(data,['repository','name'])
title = get_json_item(data,['issue','title'])
pub_date = get_json_item(data, ['comment', 'updated_at'])
body = '<p><h1>Issue: %s</h1></p>' % (title)
body += get_json_item(data,['comment','body'])
body += '<br />'
content = body
feed_entry = fg.add_entry()
feed_entry.title('A comment was made by %s, repo: %s' % (author_name, repo))
feed_entry.id(html_url)
feed_entry.author(get_author(data))
feed_entry.content(body)
feed_entry.link(make_link(html_url))
feed_entry.published(pub_date)
create_rss()
def deal_comment_on_filepull_request(self, data):
"""
        Deals with a comment created on a file in a pull request.
        `data` is the parsed JSON webhook payload.
"""
        html_url = get_json_item(data,['comment','_links','html','href'])
author_name = get_json_item(data,['comment','user','login'])
repo = get_json_item(data,['repository','name'])
filename_path = get_json_item(data,['comment','path'])
pub_date = get_json_item(data, ['comment', 'updated_at'])
body = '<p><h1>File: %s</h1></p>' % (filename_path)
body += get_json_item(data,['comment','body'])
body += '<br />'
content = body
feed_entry = fg.add_entry()
        feed_entry.title('A comment was made on a file by %s, repo: %s' % (author_name, repo))
feed_entry.id(html_url)
feed_entry.author(get_author(data))
feed_entry.content(body)
feed_entry.link(make_link(html_url))
feed_entry.published(pub_date)
create_rss()
def get(self):
self.write('Hello world')
def post(self):
logger.info('Received HTTP Post: %s' % self.request)
logger.debug('============ REQUEST ===========')
logger.debug(self.request)
logger.debug('================================')
logger.debug('============ REQUEST.BODY ===========')
logger.debug(self.request.body)
logger.debug('=====================================')
body = json.loads(self.request.body)
action = get_json_item(body, ['action'])
if action == "opened" and get_json_item(body, ['pull_request']):
self.deal_create_pull_request(body)
elif action == "created" and get_json_item(body, ['comment']):
self.deal_comment_pull_request(body)
logger.debug('=========== RSS STR =================')
logger.debug(fg.rss_str(pretty=True))
logger.debug('=====================================')
application = tornado.web.Application([
(r'/rss/(.*)', tornado.web.StaticFileHandler, {'path': RSS_PATH}),
(r"/", MainHandler),
])
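# With this routing, GitHub webhooks POST to "/" and the generated feed is
# served at /rss/feed.xml (the file written by create_rss above).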
if __name__ == "__main__":
# logger
tornado.options.parse_command_line()
logger.debug('Starting Web service...')
application.listen(8888)
logger.debug('Starting Tornado IOLoop...')
tornado.ioloop.IOLoop.instance().start()
| {
"content_hash": "aca6d75bc59d2fefa6a322475b882d6f",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 139,
"avg_line_length": 29.366336633663366,
"alnum_prop": 0.6481793661496965,
"repo_name": "adewinter/hosting-codereview",
"id": "cbc72ded37bf07337d0d84cc4adf4def88de5217",
"size": "5932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5769"
}
],
"symlink_target": ""
} |
import httplib as http
import os
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from website import util
from website import settings
from website import language
from website.util import sanitize
from website import views as website_views
from website.assets import env as assets_env
from website.search import views as search_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
return {
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._primary_key if user else '',
'user_url': user.url if user else '',
'user_api_url': user.api_url if user else '',
'display_name': get_display_name(user.fullname) if user else '',
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'piwik_host': settings.PIWIK_HOST,
'piwik_site_id': settings.PIWIK_SITE_ID,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'status': status.pop_status_messages(),
'css_all': assets_env['css'].urls(),
'domain': settings.DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'sanitize': sanitize,
'js_str': lambda x: x.replace("'", r"\'").replace('"', r'\"')
}
class OsfWebRenderer(WebRenderer):
def __init__(self, *args, **kwargs):
kwargs['data'] = get_globals
super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Use if a view only redirects or raises error
notemplate = OsfWebRenderer('', render_mako_string)
# Static files (robots.txt, etc.)
def favicon():
return send_from_directory(
settings.STATIC_FOLDER,
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def robots():
"""Serves the robots.txt file."""
# Allow local robots.txt
if os.path.exists(os.path.join(settings.STATIC_FOLDER,
'robots.local.txt')):
robots_file = 'robots.local.txt'
else:
robots_file = 'robots.txt'
return send_from_directory(
settings.STATIC_FOLDER,
robots_file,
mimetype='text/plain'
)
def goodbye(**kwargs):
# Redirect to dashboard if logged in
if _get_current_user():
return redirect(util.web_url_for('dashboard'))
status.push_status_message(language.LOGOUT, 'info')
return {}
def make_url_map(app):
'''Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
'''
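    # Each Rule below bundles one or more URL patterns, the HTTP method(s),
    # a view callable, and a renderer (json_renderer or an OsfWebRenderer
    # template); process_rules registers them on the app, optionally under a
    # URL prefix such as '/api/v1'.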
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule('/<path:_>', ['get', 'post'], HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string)),
Rule('/api/v1/<path:_>', ['get', 'post'],
HTTPError(http.NOT_FOUND), json_renderer),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
OsfWebRenderer('', render_mako_string),
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule('/dashboard/', 'get', website_views.dashboard, OsfWebRenderer('dashboard.mako')),
Rule('/reproducibility/', 'get',
website_views.reproducibility, OsfWebRenderer('', render_mako_string)),
Rule('/about/', 'get', {}, OsfWebRenderer('public/pages/about.mako')),
Rule('/howosfworks/', 'get', {}, OsfWebRenderer('public/pages/howosfworks.mako')),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako')),
Rule('/getting-started/', 'get', {}, OsfWebRenderer('public/pages/getting_started.mako')),
Rule('/explore/', 'get', {}, OsfWebRenderer('public/explore.mako')),
Rule(['/messages/', '/help/'], 'get', {}, OsfWebRenderer('public/comingsoon.mako')),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako'),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako'),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako'),
),
Rule('/news/', 'get', {}, OsfWebRenderer('public/pages/news.mako')),
])
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
# API route for getting summary information for dashboard nodes.
Rule('/dashboard/get_nodes/', 'get', website_views.get_dashboard_nodes, json_renderer),
# API route for getting serialized HGrid data, e.g. for the project
# organizer
# TODO: Perhaps this should be namespaced to so that the above route
# can use the /dashboard/ URL. e.g.
# /dashboard/<nid> -> Return info about dashboard nodes
# /dashboard/grid/<nid>/ -> Return hgrid-serialized data for dashboard nodes
Rule(
[
'/dashboard/<nid>',
'/dashboard/',
],
'get', website_views.get_dashboard, json_renderer),
], prefix='/api/v1')
### Meta-data ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/',
'/project/<pid>/node/<nid>/comments/',
],
'get',
project_views.comment.list_comments,
json_renderer,
),
Rule(
[
'/project/<pid>/comments/discussion/',
'/project/<pid>/node/<nid>/comments/discussion/',
],
'get',
project_views.comment.comment_discussion,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/',
'/project/<pid>/node/<nid>/comment/',
],
'post',
project_views.comment.add_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/',
'/project/<pid>/node/<nid>/comment/<cid>/',
],
'put',
project_views.comment.edit_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/',
'/project/<pid>/node/<nid>/comment/<cid>/',
],
'delete',
project_views.comment.delete_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/undelete/',
'/project/<pid>/node/<nid>/comment/<cid>/undelete/',
],
'put',
project_views.comment.undelete_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/report/',
'/project/<pid>/node/<nid>/comment/<cid>/report/',
],
'post',
project_views.comment.report_abuse,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/unreport/',
'/project/<pid>/node/<nid>/comment/<cid>/unreport/',
],
'post',
project_views.comment.unreport_abuse,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/registration/', 'get', website_views.registration_form, json_renderer),
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule('/explore/activity/', 'get', discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako')),
])
### Auth ###
# Web
process_rules(app, [
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
# View will either redirect or display error message
OsfWebRenderer('error.mako', render_mako_string)
),
Rule(
'/resend/',
['get', 'post'],
auth_views.resend_confirmation,
OsfWebRenderer('resend.mako', render_mako_string)
),
Rule(
'/resetpassword/<verification_key>/',
['get', 'post'],
auth_views.reset_password,
OsfWebRenderer('public/resetpassword.mako', render_mako_string)
),
# TODO: Remove `auth_register_post`
Rule('/register/', 'post', auth_views.auth_register_post,
OsfWebRenderer('public/login.mako')),
Rule('/api/v1/register/', 'post', auth_views.register_user, json_renderer),
Rule(['/login/', '/account/'], 'get',
auth_views.auth_login, OsfWebRenderer('public/login.mako')),
Rule('/login/', 'post', auth_views.auth_login,
OsfWebRenderer('public/login.mako'), endpoint_suffix='__post'),
Rule('/login/first/', 'get', auth_views.auth_login,
OsfWebRenderer('public/login.mako'),
endpoint_suffix='__first', view_kwargs={'first': True}),
Rule('/logout/', 'get', auth_views.auth_logout, notemplate),
Rule('/forgotpassword/', 'post', auth_views.forgot_password,
OsfWebRenderer('public/login.mako')),
Rule([
'/midas/', '/summit/', '/accountbeta/', '/decline/'
], 'get', auth_views.auth_registerbeta, OsfWebRenderer('', render_mako_string)),
])
### Profile ###
# Web
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, OsfWebRenderer('profile.mako')),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id,
OsfWebRenderer('profile.mako')),
Rule('/settings/key_history/<kid>/', 'get', profile_views.user_key_history,
OsfWebRenderer('profile/key_history.mako')),
Rule('/addons/', 'get', profile_views.profile_addons,
OsfWebRenderer('profile/addons.mako')),
Rule(["/user/merge/"], 'get', auth_views.merge_user_get,
OsfWebRenderer("merge_accounts.mako")),
Rule(["/user/merge/"], 'post', auth_views.merge_user_post,
OsfWebRenderer("merge_accounts.mako")),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(['/user/<uid>/<pid>/claim/'], ['get', 'post'],
project_views.contributor.claim_user_form, OsfWebRenderer('claim_account.mako')),
Rule(['/user/<uid>/<pid>/claim/verify/<token>/'], ['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako')),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako'),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako'),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako'),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako'),
),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/settings/keys/', 'get', profile_views.get_keys, json_renderer),
Rule('/settings/create_key/', 'post', profile_views.create_user_key, json_renderer),
Rule('/settings/revoke_key/', 'post', profile_views.revoke_user_key, json_renderer),
Rule('/settings/key_history/<kid>/', 'get', profile_views.user_key_history, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule('/search/', 'get', {}, OsfWebRenderer('search.mako')),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
], prefix='/api/v1')
# Project
# Web
process_rules(app, [
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako')),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('index.mako')),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, OsfWebRenderer('project/project.mako')),
# Create a new subproject/component
Rule('/project/<pid>/newnode/', 'post', project_views.node.project_new_node,
OsfWebRenderer('', render_mako_string)),
Rule([
'/project/<pid>/key_history/<kid>/',
'/project/<pid>/node/<nid>/key_history/<kid>/',
], 'get', project_views.key.node_key_history, OsfWebRenderer('project/key_history.mako')),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako')),
Rule('/folder/<nid>', 'get', project_views.node.folder_new,
OsfWebRenderer('project/new_folder.mako')),
Rule('/api/v1/folder/<nid>', 'post', project_views.node.folder_new_post, json_renderer),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako'),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako')
),
# Permissions
Rule(
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako')
),
### Logs ###
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, OsfWebRenderer('project/forks.mako')),
# Registrations
Rule([
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
], 'get', project_views.register.node_register_page,
OsfWebRenderer('project/register.mako')),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako')),
Rule([
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
], 'get', project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako')),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako')),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako'),
view_kwargs={'mode': 'page'},
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
# Create project, used by projectCreator.js
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule([
'/project/<pid>/expand/',
'/project/<pid>/node/<nid>/expand/',
], 'post', project_views.node.expand, json_renderer),
Rule([
'/project/<pid>/collapse/',
'/project/<pid>/node/<nid>/collapse/',
], 'post', project_views.node.collapse, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointers/',
],
'delete',
project_views.node.remove_pointers_from_folder,
json_renderer,
),
Rule(
[
'/folder/<pid>',
],
'delete',
project_views.node.delete_folder,
json_renderer,
),
Rule('/folder/', 'put', project_views.node.add_folder, json_renderer),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
], 'get', project_views.node.get_children, json_renderer),
Rule([
'/project/<pid>/get_folder_pointers/'
], 'get', project_views.node.get_folder_pointers, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
Rule('/log/<log_id>/', 'get', project_views.log.get_log, json_renderer),
Rule([
'/project/<pid>/log/',
'/project/<pid>/node/<nid>/log/',
], 'get', project_views.log.get_logs, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
            'post',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule([
'/project/<pid>/get_most_in_common_contributors/',
'/project/<pid>/node/<nid>/get_most_in_common_contributors/',
], 'get', project_views.contributor.get_most_in_common_contributors, json_renderer),
Rule([
'/project/<pid>/get_recently_added_contributors/',
'/project/<pid>/node/<nid>/get_recently_added_contributors/',
], 'get', project_views.contributor.get_recently_added_contributors, json_renderer),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# API keys
Rule([
'/project/<pid>/create_key/',
'/project/<pid>/node/<nid>/create_key/',
], 'post', project_views.key.create_node_key, json_renderer),
Rule([
'/project/<pid>/revoke_key/',
'/project/<pid>/node/<nid>/revoke_key/'
], 'post', project_views.key.revoke_node_key, json_renderer),
Rule([
'/project/<pid>/keys/',
'/project/<pid>/node/<nid>/keys/',
], 'get', project_views.key.get_node_keys, json_renderer),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Tags
Rule([
'/project/<pid>/addtag/<tag>/',
'/project/<pid>/node/<nid>/addtag/<tag>/',
], 'post', project_views.tag.project_addtag, json_renderer),
Rule([
'/project/<pid>/removetag/<tag>/',
'/project/<pid>/node/<nid>/removetag/<tag>/',
], 'post', project_views.tag.project_removetag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
Rule([
'/project/<pid>/beforeremovecontributors/',
'/project/<pid>/node/<nid>/beforeremovecontributors/',
], 'post', project_views.contributor.project_before_remove_contributor, json_renderer),
# TODO(sloria): should be a delete request to /contributors/
Rule([
'/project/<pid>/removecontributors/',
'/project/<pid>/node/<nid>/removecontributors/',
], 'post', project_views.contributor.project_removecontributor, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
            '/project/<pid>/node/<nid>/beforeregister/',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'post', project_views.register.node_register_template_page_post, json_renderer),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Wiki ###
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
Rule([
'/watched/logs/'
], 'get', website_views.watched_logs_get, json_renderer),
### Accounts ###
Rule([
'/user/merge/'
], 'post', auth_views.merge_user_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
),
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('website/addons')
if settings.DEV_MODE:
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
| {
"content_hash": "3829dcd947512e550431dc7caf6ab991",
"timestamp": "",
"source": "github",
"line_count": 1097,
"max_line_length": 106,
"avg_line_length": 32.27894257064722,
"alnum_prop": 0.5204462016379554,
"repo_name": "AndrewSallans/osf.io",
"id": "1ec502771bfcc3edabff7e6fa7ce26d6a34a4905",
"size": "35434",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70439"
},
{
"name": "JavaScript",
"bytes": "2555546"
},
{
"name": "Python",
"bytes": "2159449"
}
],
"symlink_target": ""
} |
import sys
import unittest
import itertools
from shenv import core
context = core.Context()
universe = context.universe
category = context.category
version = context.version
class HandlesCallbacks:
def __append(self,name,univ,cat,ver,state):
t = lambda x: type(x) if x is not None else None
selft,univ,cat,ver = map(t,(self,univ,cat,ver))
state.append((selft,name,univ,cat,ver))
def install(self,univ,cat,ver,state):
self.__append('install',univ,cat,ver,state)
def remove(self,univ,cat,ver,state):
self.__append('remove',univ,cat,ver,state)
def clean(self,univ,cat,state):
self.__append('clean',univ,cat,None,state)
@universe()
class Universe(HandlesCallbacks):
description = 'The Test Universe'
@category()
class Python(HandlesCallbacks):
name = 'python', 'py'
@version(Python)
class Python27(HandlesCallbacks):
name = '27'
@version(Python)
class Python31:
name = '31'
@category()
class Java(HandlesCallbacks):
name = 'java'
autoenable = True
@version(Java)
class Java14Sun(object):
name = '14','4','sun14','sun4'
@version(Java)
class Java5Sun(object):
name = '15','5','sun15','sun5'
@version(Java)
class Java6Sun(object):
name = '16','6','sun16','sun6'
default = True
@category()
class Ruby:
name = 'ruby'
@version(Ruby)
class Ruby19(object):
name = '19'
@category()
class Common:
name = 'common'
autoenable = True
@version(Common)
class CommonStd(HandlesCallbacks):
name = 'std'
default = True
expected_cleans = (
(Python,'clean',Universe,Python,None),
(Java,'clean',Universe,Java,None),
(Universe,'clean',Universe,Ruby,None),
(Universe,'clean',Universe,Common,None),
)
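# Each recorded callback is a tuple of
# (handler type, callback name, universe type, category type, version type or None);
# Ruby and Common define no callbacks of their own, so the Universe is expected
# to handle their clean().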
class ContextTestCase(unittest.TestCase):
def assertSameTarget(self,ctx1,ctx2,msg=None):
self.assertIs(ctx1.target,ctx2.target,msg)
class TestContext(ContextTestCase):
longMessage = True
empty_context = core.Context()
class SomeUnrelatedClass: pass
def setUp(self):
pass
def tearDown(self):
pass
def testuniverse(self):
pass
def testcategory(self):
pass
def testversion(self):
pass
def testtarget(self):
self.assertTrue(isinstance(context.target, Universe))
self.assertIsNot(None, self.empty_context.target)
def testtarget_factory(self):
self.assertIs(Universe, context.target_factory)
self.assertIsNot(None, self.empty_context.target)
def testdescription(self):
        self.assertEqual(Universe.description, context.description)
        self.assertEqual('context', self.empty_context.description)
def testprocess_env_changes__defaults(self):
callbacks = []
result = context.process_env_changes('', '', callbacks)
exp = list(expected_cleans)
exp.append((Java,'install',Universe,Java,Java6Sun))
exp.append((CommonStd,'install',Universe,Common,CommonStd))
self.assertSequenceEqual(exp,callbacks)
exp_res = 'python:~,java:~16,ruby:~,common:~std'
self.assertEqual(exp_res,result)
def testprocess_env_changes__empty(self):
result = self.empty_context.process_env_changes('', '', [])
self.assertEqual('',result)
def testprocess_env_changes__callbacks_not_found(self):
c2 = core.Context()
@c2.category()
class Cat:
name='cat'
@c2.version(Cat)
class Ver:
name ='ver'
result = c2.process_env_changes('', 'cat:ver', [])
self.assertEqual('cat:ver',result)
def testprocess_env_changes__explicits(self):
callbacks = []
result = context.process_env_changes('', 'ruby:*,java:sun4', callbacks)
exp = list(expected_cleans)
exp.append((Java,'install',Universe,Java,Java14Sun))
exp.append((Universe,'install',Universe,Ruby,Ruby19))
exp.append((CommonStd,'install',Universe,Common,CommonStd))
self.assertSequenceEqual(exp,callbacks)
exp_res = 'python:~,java:14,ruby:*19,common:~std'
self.assertEqual(exp_res,result)
def testprocess_env_changes__force_off(self):
callbacks = []
result = context.process_env_changes('', 'java:', callbacks)
exp = list(expected_cleans)
exp.append((CommonStd,'install',Universe,Common,CommonStd))
self.assertSequenceEqual(exp,callbacks)
exp_res = 'python:~,java:,ruby:~,common:~std'
self.assertEqual(exp_res,result)
def testprocess_env_changes__change(self):
callbacks = []
result = context.process_env_changes('python:27,java:14,ruby:19', 'python:31,java:~,ruby:~', callbacks)
exp = []
exp.append((Python27,'remove',Universe,Python,Python27))
exp.append((Java,'remove',Universe,Java,Java14Sun))
exp.append((Universe,'remove',Universe,Ruby,Ruby19))
exp.extend(list(expected_cleans))
exp.append((Python,'install',Universe,Python,Python31))
exp.append((Java,'install',Universe,Java,Java6Sun))
exp.append((CommonStd,'install',Universe,Common,CommonStd))
self.assertSequenceEqual(exp,callbacks)
exp_res = 'python:31,java:~16,ruby:~,common:~std'
self.assertEqual(exp_res,result)
def testgetcategory_context(self):
cc = context.getcategory_context('java')
self.assertTrue(isinstance(cc.target,Java))
for c in ('java',Java,cc.target):
self.assertSameTarget(cc,
context.getcategory_context(c),str(c))
cc = context.getcategory_context('python')
self.assertTrue(isinstance(cc.target,Python))
for c in('python','py',Python,cc.target):
self.assertSameTarget(cc,
context.getcategory_context(c),str(c))
with self.assertRaises(LookupError):
context.getcategory_context(None)
with self.assertRaises(LookupError):
context.getcategory_context('missing')
with self.assertRaises(LookupError):
context.getcategory_context(self.SomeUnrelatedClass)
with self.assertRaises(LookupError):
context.getcategory_context(self.SomeUnrelatedClass())
with self.assertRaises(LookupError):
context.getcategory_context(Java()) #Wrong instance
def testgetversion_context(self):
cc = context.getcategory_context('java')
vc = context.getversion_context('java','6')
self.assertTrue(isinstance(cc.target,Java))
self.assertTrue(isinstance(vc.target,Java6Sun))
for c,v in itertools.product(('java',Java,cc.target),
('6','16','sun6','sun16',Java6Sun,vc.target)):
self.assertSameTarget(cc,
context.getcategory_context(c),str((c,v)))
self.assertSameTarget(vc,
context.getversion_context(c,v),str((c,v)))
cc = context.getcategory_context('python')
vc = context.getversion_context('python','27')
self.assertTrue(isinstance(cc.target,Python))
self.assertTrue(isinstance(vc.target,Python27))
for c,v in itertools.product(('python','py',Python,cc.target),
('27',Python27,vc.target)):
self.assertSameTarget(cc,
context.getcategory_context(c),str((c,v)))
self.assertSameTarget(vc,
context.getversion_context(c,v),str((c,v)))
with self.assertRaises(LookupError):
context.getversion_context(None,Java6Sun)
with self.assertRaises(LookupError):
context.getversion_context('missing',Java6Sun)
with self.assertRaises(LookupError):
context.getversion_context(self.SomeUnrelatedClass,Java6Sun)
with self.assertRaises(LookupError):
context.getversion_context(self.SomeUnrelatedClass(),Java6Sun)
with self.assertRaises(LookupError):
context.getversion_context(Java,None)
with self.assertRaises(LookupError):
context.getversion_context(Java,'missing')
with self.assertRaises(LookupError):
context.getversion_context(Java,self.SomeUnrelatedClass)
with self.assertRaises(LookupError):
context.getversion_context(Java,self.SomeUnrelatedClass())
with self.assertRaises(LookupError):
context.getversion_context(Java,Java6Sun()) #Wrong instance
def testgetcategory_contexts(self):
pass
def testgetversion_contexts(self):
pass
def test__str__(self):
pass
class TestCategoryContext(ContextTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testgetversion_context(self):
pass
def testgetversion_contexts(self):
pass
def testautoenable(self):
pass
def testprimary_name(self):
pass
def testnames(self):
pass
def testtarget(self):
pass
def testtarget_factory(self):
pass
def testdescription(self):
pass
def test__str__(self):
pass
class TestVersionContext(ContextTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testdefault(self):
pass
def testprimary_name(self):
pass
def testnames(self):
pass
def testtarget(self):
pass
def testtarget_factory(self):
pass
def testdescription(self):
pass
def test__str__(self):
pass
def runtests(argv=None):
unittest.main(module='shenv.test.core_test',argv=argv)
def main():
#print(context)
runtests(sys.argv+['-v'])
if __name__ == '__main__':
main()
| {
"content_hash": "6a204b906f51ca3a24994b4b14e46b90",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 111,
"avg_line_length": 29.665689149560116,
"alnum_prop": 0.6099248714907078,
"repo_name": "josharnold52/shenv",
"id": "234b3108c6214c5e87ac49c02e7e2bc1fc7a2311",
"size": "10460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shenv/test/core_test.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "95757"
},
{
"name": "Shell",
"bytes": "1738"
}
],
"symlink_target": ""
} |
"""
Django settings for ccbc_library project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
#from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_PATH = os.path.join(BASE_DIR, 'ccbc_library/templates')
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_PATH,
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '76zi637_w493mr-0&^(_70m@0$m$1yj@22y+l4qf$ou$#-&#(-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
"autocomplete_light",
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ccbclib',
'registration',
'django_tables2',
"django_cron",
"post_office",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
)
ROOT_URLCONF = 'ccbc_library.urls'
WSGI_APPLICATION = 'ccbc_library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Melbourne'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
#STATIC_ROOT = ''
STATIC_PATH = os.path.join(BASE_DIR,'ccbc_library/static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
STATIC_PATH,
)
# django_cron
CRON_CLASSES = [
"ccbc_library.cron.MyCronJob",
]
"""
#post-office
EMAIL_BACKEND = 'post_office.EmailBackend'
"""
#django.core.mail
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('MY_GMAIL_ADDRESS','')
EMAIL_HOST_PASSWORD = os.environ.get("MY_GMAIL_PW",'')
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
#Admins
ADMINS = (('Long', os.environ.get('MY_GMAIL_ADDRESS', '')),)  # trailing comma: ADMINS must be a tuple of (name, email) tuples
#django-registration-redux
REGISTRATION_OPEN = True # If True, users can register
ACCOUNT_ACTIVATION_DAYS = 9 # Nine-day activation window; you may, of course, use a different value.
REGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.
LOGIN_REDIRECT_URL = '/ccbclib/home/' # The page you want users to arrive at after they successfully log in
LOGIN_URL = '/accounts/login/' # The page users are directed to if they are not logged in,
# and are trying to access pages requiring authentication
#bcrypt
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
)
### Deploy
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {
'default': dj_database_url.config()
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
# Turn off DEBUG mode
DEBUG = False
TEMPLATE_DEBUG = False
# Import all of local settings if the file exists
try:
from .local_settings import *
except ImportError:
pass
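# Example local_settings.py for development (illustrative only; the file is
# optional and, when present, overrides the deployment values above):
#
#     DEBUG = True
#     TEMPLATE_DEBUG = True
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.sqlite3',
#             'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#         }
#     }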
### End Deploy | {
"content_hash": "59015dce5f7fa200ca9e9d3a51f8d0cc",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 106,
"avg_line_length": 30.47486033519553,
"alnum_prop": 0.6823098075160403,
"repo_name": "comsaint/ccbc",
"id": "1722b215e3137778b99cb34a7ab2aef496fc08c5",
"size": "5455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CCBC_Library/ccbc_library/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8756"
},
{
"name": "HTML",
"bytes": "14622"
},
{
"name": "JavaScript",
"bytes": "50392"
},
{
"name": "Python",
"bytes": "64623"
}
],
"symlink_target": ""
} |
"""
stringimporter
=================
dumb loading of arbitrary strings into simulated Python modules
"""
import importlib.abc
import sys
import types
class DummyModuleLoader(importlib.abc.SourceLoader):
def __init__(self, name, src_code, filename=None):
self._dummy_name = name
self._src_code = src_code
if not filename:
filename = '{}.py'.format(self._dummy_name.replace('.', '/'))
self._filename = filename
def get_filename(self, path):
return self._filename
def get_data(self, path):
return self._src_code.encode('utf-8')
def create_module(self, spec):
mod = types.ModuleType(self._dummy_name)
mod.__file__ = self._filename
sys.modules[mod.__name__] = mod
return mod
def import_str(module_name, python_code, filename=None, exec_module=True):
"""
Creates a Python module from a string containing code
:param str module_name: apparent name of the created module
:param str python_code: the Python code
:param bool exec_module: if True (default), imported code will be executed.
else, you'll have to run loader.exec_module(module)
:param str filename: we emulate loading from a file,
you can specify the name here.
:return: tuple of module loader and module
"""
loader = DummyModuleLoader(module_name, python_code, filename=filename)
module = loader.create_module(None)
if exec_module:
loader.exec_module(module)
return loader, module
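# Minimal usage sketch (added for illustration; 'demo_mod' is an arbitrary
# module name, not part of the library):
if __name__ == '__main__':
    _loader, _mod = import_str('demo_mod', 'def greet():\n    return "hi"\n')
    assert _mod.greet() == 'hi'
    assert 'demo_mod' in sys.modules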
| {
"content_hash": "e3cbfa4ff5f2927bd078cd7c95abadb2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 30.5,
"alnum_prop": 0.6531147540983606,
"repo_name": "majerteam/stringimporter",
"id": "c3c39683360ffb72e280af062cfe055a2bf2df6f",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stringimporter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3197"
}
],
"symlink_target": ""
} |
from .chainpoint import ChainPointV2
__version__ = '0.2.1'
| {
"content_hash": "bdb9b7b1234ea3f01b2194cd7cf04835",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 20,
"alnum_prop": 0.7,
"repo_name": "karask/blockchain-proofs",
"id": "ba85dd38a5f328628643aec7b2232593b2969230",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blockchain_proofs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10642"
}
],
"symlink_target": ""
} |
import time
from upm import pyupm_temperature as upm
from upm import pyupm_servo as servo
from wiringx86 import GPIOGalileo as GPIO
from upm import pyupm_jhd1313m1 as lcd
pino_sensor_temperatura = 0
pino_rele = 5
pino_pot = 15
pino_servo = 8
pinos = GPIO(debug=False)
pinos.pinMode(pino_rele, pinos.OUTPUT)
pinos.pinMode(pino_pot, pinos.ANALOG_INPUT)
pinos.pinMode(pino_servo, pinos.OUTPUT)
temperatura = upm.Temperature(pino_sensor_temperatura)
sg_servo = servo.ES08A(pino_servo)
tela = lcd.Jhd1313m1(0, 0x3E, 0x62)
def leitura_temperatura():
    return temperatura.value()
def leitura_pot():
    resultado = pinos.analogRead(pino_pot)
    voltagem = resultado * 5.0 / 1023.0
return voltagem
def liga_rele():
pinos.digitalWrite(pino_rele, pinos.HIGH)
def desliga_rele():
pinos.digitalWrite(pino_rele, pinos.LOW)
def move_servo(posicao):
sg_servo.setAngle(posicao)
def escreve_lcd(texto_linha1, texto_linha2):
tela.clear()
tela.setCursor(0, 0)
tela.write(texto_linha1)
tela.setCursor(1, 0)
tela.write(texto_linha2)
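# Usage sketch (added for illustration; the update period and servo angle are
# arbitrary example values):
if __name__ == '__main__':
    for _ in range(10):
        escreve_lcd('Temp: %.1f C' % leitura_temperatura(),
                    'Pot: %.2f V' % leitura_pot())
        move_servo(90)
        time.sleep(1)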
| {
"content_hash": "120fa9ede52f413b8ba5a68c441b6efa",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 54,
"avg_line_length": 24.904761904761905,
"alnum_prop": 0.7351816443594646,
"repo_name": "Atzingen/curso-IoT-2017",
"id": "ae3f88632e2eb4202f8b30a15280686076dd4f80",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aula-09-ajax-jquery/sensores.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10044"
},
{
"name": "HTML",
"bytes": "11324"
},
{
"name": "Jupyter Notebook",
"bytes": "34332"
},
{
"name": "Python",
"bytes": "23685"
}
],
"symlink_target": ""
} |
from sql_translator.sql_parser.parser import SQLScript
from sql_translator.sql_parser.lexer import SQLLexer
def parse(sql):
return SQLScript.parse(SQLLexer(sql))
| {
"content_hash": "aa9eab0910fb6df354b12adbe0daf51b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 54,
"avg_line_length": 28,
"alnum_prop": 0.7976190476190477,
"repo_name": "GoogleCloudPlatform/datacatalog-tag-engine",
"id": "2683da5a534610887042129a1d68829c2d02f287",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/query_cookbook/top_queries/sql_translator/sql_parser/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1296"
},
{
"name": "HCL",
"bytes": "15021"
},
{
"name": "HTML",
"bytes": "156624"
},
{
"name": "Python",
"bytes": "333682"
},
{
"name": "Shell",
"bytes": "2982"
}
],
"symlink_target": ""
} |
from core.moduleguess import ModuleGuess
from core.moduleexception import ProbeException, ProbeSucceed
from core.argparse import ArgumentParser
from tempfile import mkdtemp
from os import path
mysqlphpdump = """
function dmp ($tableQ)
{
$result = "\n-- Dumping data for table `$tableQ`\n";
$query = mysql_query("SELECT * FROM ".$tableQ);
$numrow = mysql_num_rows($query);
$numfields = mysql_num_fields($query);
print $numrow . " " . $numfields;
if ($numrow > 0)
{
$result .= "INSERT INTO `".$tableQ."` (";
$i = 0;
for($k=0; $k<$numfields; $k++ )
{
$result .= "`".mysql_field_name($query, $k)."`";
if ($k < ($numfields-1))
$result .= ", ";
}
$result .= ") VALUES ";
while ($row = mysql_fetch_row($query))
{
$result .= " (";
for($j=0; $j<$numfields; $j++)
{
if (mysql_field_type($query, $j) == "string" ||
mysql_field_type($query, $j) == "timestamp" ||
mysql_field_type($query, $j) == "time" ||
mysql_field_type($query, $j) == "datetime" ||
mysql_field_type($query, $j) == "blob")
{
$row[$j] = addslashes($row[$j]);
$row[$j] = ereg_replace("\n","\\n",$row[$j]);
$row[$j] = ereg_replace("\r","",$row[$j]);
$result .= "'$row[$j]'";
}
else if (is_null($row[$j]))
$result .= "NULL";
else
$result .= $row[$j];
if ( $j<($numfields-1))
$result .= ", ";
}
$result .= ")";
$i++;
if ($i < $numrow)
$result .= ",";
else
$result .= ";";
$result .= "\n";
}
}
else
$result .= "-- table is empty";
return $result . "\n\n";
}
ini_set('mysql.connect_timeout',1);
$res=mysql_connect("$host", "$user", "$pass");
if(!$res) { print("-- DEFAULT\n"); }
else {
$db_name = "$db";
$db_table_name = "$table";
mysql_select_db($db_name);
$tableQ = mysql_list_tables ($db_name);
$i = 0;
$num_rows = mysql_num_rows ($tableQ);
if($num_rows) {
while ($i < $num_rows)
{
$tb_names[$i] = mysql_tablename ($tableQ, $i);
if(($db_table_name == $tb_names[$i]) || $db_table_name == "") {
print(dmp($tb_names[$i]));
}
$i++;
}
}
mysql_close();
}"""
WARN_DUMP_ERR_SAVING = 'Can\'t save dump file'
WARN_DUMP_SAVED = 'Dump file saved'
WARN_DUMP_INCOMPLETE = 'Dump failed, saving anyway for debug purposes'
WARN_NO_DUMP = 'Dump failed, check credentials and DBMS information'
class Dump(ModuleGuess):
'''Get SQL database dump'''
def _set_vectors(self):
self.vectors.add_vector('mysqlphpdump', 'shell.php', [ mysqlphpdump ] )
self.vectors.add_vector('mysqldump', 'shell.sh', "mysqldump -h $host -u $user --password=$pass $db $table --single-transaction")
# --single-transaction to avoid bug http://bugs.mysql.com/bug.php?id=21527
def _set_args(self):
self.argparser.add_argument('-user', help='SQL username')
self.argparser.add_argument('-pass', help='SQL password')
self.argparser.add_argument('db', help='Database to dump')
self.argparser.add_argument('-table', help='Table to dump')
self.argparser.add_argument('-host', help='DBMS host or host:port', default='127.0.0.1')
#argparser.add_argument('-dbms', help='DBMS', choices = ['mysql', 'postgres'], default='mysql')
self.argparser.add_argument('-vector', choices = self.vectors.keys())
self.argparser.add_argument('-ldump', help='Local path to save dump (default: temporary folder)')
def _prepare_vector(self):
if not self.args['table']:
self.args['table'] = ''
self.formatted_args = self.args.copy()
def _verify_vector_execution(self):
if self._result and '-- Dumping data for table' in self._result:
raise ProbeSucceed(self.name,'Dumped')
def _stringify_result(self):
if self._result:
if not '-- Dumping data for table' in self._result:
self.mprint(WARN_DUMP_INCOMPLETE)
if not self.args['ldump']:
temporary_folder = mkdtemp(prefix='weev_')
self.args['ldump'] = path.join(temporary_folder, '%s:%s@%s-%s.txt' % (self.args['user'], self.args['pass'], self.args['host'], self.args['db']))
try:
lfile = open(self.args['ldump'],'w').write(self._result)
except:
raise ProbeException(self.name, "\'%s\' %s" % (self.args['ldump'], WARN_DUMP_ERR_SAVING))
else:
self.mprint("\'%s\' %s" % (self.args['ldump'], WARN_DUMP_SAVED))
else:
raise ProbeException(self.name, WARN_NO_DUMP) | {
"content_hash": "6d4ca3a3627eca916ab66042e2d82162",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 160,
"avg_line_length": 37.4,
"alnum_prop": 0.5109922756981581,
"repo_name": "JeyZeta/Dangerous",
"id": "3ecb60ae3d3d327bf81afb57c45963772b09c5b4",
"size": "5049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Dangerous/Weevely/modules/sql/dump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import os, time, json
from openml.apiconnector import APIConnector
from scipy.io.arff import loadarff
import numpy as np
from SGDDataset import SGDDataSet
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 100000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 50, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('batch_size', 600, 'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
def get_dataset(did):
home_dir = os.path.expanduser("~")
openml_dir = os.path.join(home_dir, ".openml")
cache_dir = os.path.join(openml_dir, "cache")
with open(os.path.join(openml_dir, "apikey.txt"), 'r') as fh:
key = fh.readline().rstrip('\n')
fh.close()
openml = APIConnector(cache_directory = cache_dir, apikey = key)
dataset = openml.download_dataset(did)
# print('Data-set name: %s'%dataset.name)
# print(dataset.description)
_, meta = loadarff(dataset.data_file)
target_attribute = dataset.default_target_attribute
target_attribute_names = meta[target_attribute][1]
X, y, attribute_names = dataset.get_dataset(target = target_attribute, return_attribute_names = True)
return X, y, attribute_names, target_attribute_names
def inference(images, hidden1_units):
"""Build the MNIST model up to where it may be used for inference.
Args:
images: Images placeholder, from inputs().
hidden1_units: Size of the first hidden layer.
Returns:
softmax_linear: Output tensor with the computed logits.
"""
# Hidden 1
with tf.name_scope('hidden1'):
'''
A Variable is a modifiable tensor that lives in TensorFlow's graph of interacting operations.
It can be used and even modified by the computation. For machine learning applications,
one generally has the model parameters be Variables.
'''
weights = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS, hidden1_units], stddev=1.0 / np.sqrt(float(IMAGE_PIXELS))), name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Linear
with tf.name_scope('softmax_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, NUM_CLASSES], stddev=1.0 / np.sqrt(float(hidden1_units))), name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
logits = tf.matmul(hidden1, weights) + biases
return logits
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name = 'xentropy')
loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean')
return loss
def training(loss, learning_rate):
"""Sets up the training Ops.
Creates a summarizer to track the loss over time in TensorBoard.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train.
Args:
loss: Loss tensor, from loss().
learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
"""
# Add a scalar summary for the snapshot loss.
tf.scalar_summary(loss.op.name, loss)
# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Create a variable to track the global step.
global_step = tf.Variable(0, name = 'global_step', trainable = False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step = global_step)
return train_op
def evaluation(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the range [0, NUM_CLASSES).
Returns:
A scalar float32 tensor with the rate of examples (out of batch_size) that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
# the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct = tf.nn.in_top_k(logits, labels, 1)
# Return the number of true entries.
return tf.reduce_mean(tf.cast(correct, tf.float32))
def run(X_train, y_train, train_data, X_test, y_test):
it_counts, loss_values, train_scores, test_scores = [], [], [], []
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
'''
A placeholder, a value that we'll input when we ask TensorFlow to run a computation.
Here None means that a dimension can be of any length.
'''
images_placeholder = tf.placeholder(tf.float32, shape = (None, IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape = (None))
# Build a Graph that computes predictions from the inference model.
logits = inference(images_placeholder, FLAGS.hidden1)
# Add to the Graph the Ops for loss calculation.
loss_op = loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = training(loss_op, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = evaluation(logits, labels_placeholder)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
sess = tf.Session()
# add an operation to initialize the variables we created
init = tf.initialize_all_variables()
sess.run(init)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
for step in range(FLAGS.max_steps):
start_time = time.time()
batch_xs, batch_ys = train_data.next_batch(FLAGS.batch_size)
feed_dict = {images_placeholder: batch_xs, labels_placeholder: batch_ys}
_, loss_value = sess.run([train_op, loss_op], feed_dict = feed_dict)
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
'''
# Update the events file.
summary_str = sess.run(summary_op, feed_dict = feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
'''
it_counts.append(step)
loss_values.append(float(loss_value)) # to json serializable
train_score = sess.run(eval_correct, feed_dict={images_placeholder: X_train, labels_placeholder: y_train})
train_scores.append(float(train_score))
test_score = sess.run(eval_correct, feed_dict={images_placeholder: X_test, labels_placeholder: y_test})
test_scores.append(float(test_score))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) == FLAGS.max_steps:
saver.save(sess, FLAGS.train_dir, global_step=step)
# Evaluate against the training set.
print('Training Data Eval: {}'.format(train_score))
# Evaluate against the test set.
print('Test Data Eval: {}'.format(test_score))
return it_counts, loss_values, train_scores, test_scores
if __name__ == '__main__':
## get dataset - MNIST
X, y, attribute_names, target_attribute_names = get_dataset(554)
'''
# vectorize y
vec_y = np.zeros((y.shape[0], 10), dtype = np.int32)
for vec_y_i, y_i in zip(vec_y, y):
vec_y_i[y_i] = 1
'''
## 60,000 as training data, 10,000 as test data
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
train_data = SGDDataSet(X_train, y_train, dtype = tf.float32)
for hidden1 in [10, 50, 100, 150]:
FLAGS.hidden1 = hidden1
it_counts, loss_values, train_scores, test_scores = run(X_train, y_train, train_data, X_test, y_test)
## save train process, iterative counts and corresponding train error, test error and loss
train_process = {
'it_counts': it_counts,
'loss_values': loss_values,
'train_scores': train_scores,
'test_scores': test_scores
}
with open('train_process_hidden1_' + str(hidden1) + '.json', 'w+') as f:
json.dump(train_process, f)
f.close() | {
"content_hash": "9f301fa8da073dd283524cef273b96ef",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 122,
"avg_line_length": 42.48739495798319,
"alnum_prop": 0.6275712025316456,
"repo_name": "lidalei/DataMining",
"id": "4e7e71751e656731d3137153985fedc1187e8214",
"size": "10789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nn_with_nodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "160586"
}
],
"symlink_target": ""
} |
import logging
from google.appengine.ext import db
from rogerthat.bizz.job import run_job
from rogerthat.utils import now
from shop.models import CustomerSignup, CustomerSignupStatus
def all_pending_signups():
return CustomerSignup.all(keys_only=True).filter('status', CustomerSignupStatus.PENDING)
def remove_if_expired(signup_key, current_timestamp):
signup = CustomerSignup.get(signup_key)
timestamp = signup.timestamp
# Delete signups which have not verified their email after a certain time
# If they have verified their email, inbox_message_key will be set
diff = (current_timestamp - timestamp)
if not signup.inbox_message_key and (diff > CustomerSignup.EXPIRE_TIME):
logging.info('Deleting CustomerSignup:\n%s', db.to_dict(signup))
db.delete(signup)
def job():
run_job(all_pending_signups, [], remove_if_expired, [now()])
| {
"content_hash": "3d8d08b8965483aca60c562cd553a025",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 92,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.7415349887133182,
"repo_name": "our-city-app/oca-backend",
"id": "527f6fcf89c87ebdf6090e2747662a3fd0f2c410",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shop/jobs/clean_unverified_signups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "CSS",
"bytes": "62142"
},
{
"name": "HTML",
"bytes": "697349"
},
{
"name": "JavaScript",
"bytes": "1023951"
},
{
"name": "PostScript",
"bytes": "4694678"
},
{
"name": "Python",
"bytes": "3149982"
},
{
"name": "Shell",
"bytes": "5839"
},
{
"name": "TypeScript",
"bytes": "690248"
}
],
"symlink_target": ""
} |
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
from pytz import timezone
import authenticate
def team_books ( config, session = False ):
url = "https://www.lectio.dk/lectio/%s/BD/HoldReservations.aspx?HoldID=%s" % ( str(config["school_id"]), str(config["team_id"]) )
if session is False:
session = authenticate.authenticate(config)
if session == False:
return {"status" : "error", "type" : "authenticate"}
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("div", attrs={"id" : "m_Content_ebooks_island_pa"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
books = []
for row in soup.find(attrs={"id" : "m_Content_ebooks_island_pa"}).find("table").findAll("tr")[1:]:
elements = row.findAll("td")
books.append({
"team_id" : str(config["team_id"]),
"type" : "ebook",
"title" : unicode(elements[0].text.replace("\r\n", "").replace("\t", "")),
"read" : unicode(elements[1].text.replace("\r\n", "").replace("\t", ""))
})
for row in soup.find(attrs={"id" : "m_Content_reservationsStudentGV"}).findAll("tr")[1:]:
elements = row.findAll("td")
books.append({
"type" : "book",
"team_id" : str(config["team_id"]),
"title" : unicode(elements[0].text.replace("\r\n", "").replace("\t", ""))
})
return {
"status" : "ok",
'books' : books
} | {
"content_hash": "1d9cd09e43d215a4d8fa5d88ce925e48",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 130,
"avg_line_length": 29.36986301369863,
"alnum_prop": 0.6464552238805971,
"repo_name": "boh1996/LectioAPI",
"id": "b6e452f4c2a33c1a0ed200f87c45b9672cb9b267",
"size": "2186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/team_books.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "396682"
}
],
"symlink_target": ""
} |
"""
Multi cut -- Thai word segmentation with maximum matching.
Original code from Korakot Chaovavanich.
:See Also:
* `Facebook post \
<https://www.facebook.com/groups/408004796247683/permalink/431283740586455/>`_
* `GitHub Gist \
<https://gist.github.com/korakot/fe26c65dc9eed467f4497f784a805716>`_
"""
import re
from collections import defaultdict
from typing import Iterator, List
from pythainlp.tokenize import DEFAULT_WORD_DICT_TRIE
from pythainlp.util import Trie
class LatticeString(str):
"""String that keeps possible tokenizations"""
def __new__(cls, value, multi=None, in_dict=True):
return str.__new__(cls, value)
def __init__(self, value, multi=None, in_dict=True):
self.unique = True
if multi:
self.multi = list(multi)
if len(self.multi) > 1:
self.unique = False
else:
self.multi = [value]
self.in_dict = in_dict # if in dictionary
_RE_NONTHAI = r"""(?x)
[-a-zA-Z]+| # Latin characters
\d+([,\.]\d+)*| # number
[ \t]+| # space
\r?\n # newline
"""
_PAT_NONTHAI = re.compile(_RE_NONTHAI)
def _multicut(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> Iterator[LatticeString]:
"""Return LatticeString"""
if not custom_dict:
custom_dict = DEFAULT_WORD_DICT_TRIE
len_text = len(text)
words_at = defaultdict(list) # main data structure
def serialize(p, p2): # helper function
for w in words_at[p]:
p_ = p + len(w)
if p_ == p2:
yield w
elif p_ < p2:
for path in serialize(p_, p2):
yield w + "/" + path
q = {0}
last_p = 0 # last position for yield
while min(q) < len_text:
p = min(q)
q -= {p} # q.pop, but for set
for w in custom_dict.prefixes(text[p:]):
words_at[p].append(w)
q.add(p + len(w))
len_q = len(q)
if len_q == 1:
q0 = min(q)
yield LatticeString(text[last_p:q0], serialize(last_p, q0))
last_p = q0
elif len_q == 0: # len(q) == 0 means not found in dictionary
m = _PAT_NONTHAI.match(text[p:])
            if m: # non-Thai token
i = p + m.span()[1]
else: # non-Thai token, find minimum skip
for i in range(p, len_text):
ww = custom_dict.prefixes(text[i:])
m = _PAT_NONTHAI.match(text[i:])
if ww or m:
break
else:
i = len_text
w = text[p:i]
words_at[p].append(w)
yield LatticeString(w, in_dict=False)
last_p = i
q.add(i)
def mmcut(text: str) -> List[str]:
res = []
for w in _multicut(text):
mm = min(w.multi, key=lambda x: x.count("/"))
res.extend(mm.split("/"))
return res
def _combine(ww: List[LatticeString]) -> Iterator[str]:
if ww == []:
yield ""
else:
w = ww[0]
for tail in _combine(ww[1:]):
if w.unique:
yield w + "|" + tail
else:
for m in w.multi:
yield m.replace("/", "|") + "|" + tail
def segment(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
"""Dictionary-based maximum matching word segmentation.
:param text: text to be tokenized
:type text: str
:param custom_dict: tokenization dictionary,\
defaults to DEFAULT_WORD_DICT_TRIE
:type custom_dict: Trie, optional
:return: list of segmented tokens
:rtype: List[str]
"""
if not text or not isinstance(text, str):
return []
return list(_multicut(text, custom_dict=custom_dict))
def find_all_segment(
text: str, custom_dict: Trie = DEFAULT_WORD_DICT_TRIE
) -> List[str]:
"""Get all possible segment variations.
:param text: input string to be tokenized
:type text: str
:param custom_dict: tokenization dictionary,\
defaults to DEFAULT_WORD_DICT_TRIE
:type custom_dict: Trie, optional
:return: list of segment variations
:rtype: List[str]
"""
if not text or not isinstance(text, str):
return []
ww = list(_multicut(text, custom_dict=custom_dict))
return list(_combine(ww))
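# Minimal usage sketch (added for illustration):
if __name__ == "__main__":
    sample = "ผมรักคุณ"
    print(segment(sample))           # dictionary-based maximum matching
    print(mmcut(sample))             # keeps the fewest splits per ambiguous span
    print(find_all_segment(sample))  # every possible segmentation, "|"-joined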
| {
"content_hash": "568ec74c748fcdcc5770bfedf083c1de",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 86,
"avg_line_length": 27.962025316455698,
"alnum_prop": 0.5425531914893617,
"repo_name": "PyThaiNLP/pythainlp",
"id": "8f2df023a588c84c27ca6de63f403e7f3f67b83a",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pythainlp/tokenize/multi_cut.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "361"
},
{
"name": "Jupyter Notebook",
"bytes": "20668"
},
{
"name": "Makefile",
"bytes": "1924"
},
{
"name": "Python",
"bytes": "807713"
}
],
"symlink_target": ""
} |
"""Test utils."""
import functools
from typing import Any, Callable
import chex
from etils import enp
from jax3d.utils.typing import Tree
import numpy as np
def assert_tree_all(
fn: Callable[..., Any],
*trees: Tree[Any],
) -> None:
"""`chex.assert_tree_all_equal_comparator` with automatic error message.
```
jax3d.testing.assert_tree_all(jnp.allclose, params0, params1)
```
Args:
fn: Comparator function
    *trees: Nested trees on which to apply the function
Returns:
None
"""
return chex.assert_tree_all_equal_comparator(
fn,
lambda x, y: f'Got: {fn}({x}, {y})',
*trees,
      # By default chex raises an error if the Tree contains None. Unclear why.
ignore_nones=True,
)
def assert_tree_all_equal_spec(
*trees: Tree[Any],
) -> None:
"""Check that arrays in the given trees have the same `dtype`/`shape`."""
return chex.assert_tree_all_equal_comparator(
lambda x, y: x.shape == y.shape and x.dtype == y.dtype,
lambda x, y: f'{_repr_spec(x)} != {_repr_spec(y)}',
*trees,
      # By default chex raises an error if the Tree contains None. Unclear why.
ignore_nones=True,
)
def _compare_array(x, y, *, return_err: bool, **kwargs):
"""Comparte 2 arrays."""
err_msg = 'Error in value equality check: Values not approximately equal'
try:
if enp.is_array_str(x):
# str arrays can't be compared with `assert_allclose`
np.testing.assert_equal(x, y)
else:
np.testing.assert_allclose(x, y, err_msg=err_msg, **kwargs)
except AssertionError as e:
if return_err:
return str(e)
else:
return False
else:
if return_err:
return ''
else:
return True
def assert_trees_all_close(
*trees: Tree[Any],
**kwargs
) -> None:
"""Assert that 2 trees are close, but also works for `str` arrays."""
chex.assert_tree_all_equal_comparator(
functools.partial(_compare_array, return_err=False, **kwargs),
functools.partial(_compare_array, return_err=True, **kwargs),
*trees,
      # By default chex raises an error if the Tree contains None. Unclear why.
ignore_nones=True,
)
def _repr_spec(arr) -> str:
"""Returns the Spec repr string of the given tensor."""
return f'{type(arr).__qualname__}(shape={arr.shape}, dtype={arr.dtype})'
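# Usage sketch (illustrative; extra keyword arguments such as `atol` are
# forwarded to `np.testing.assert_allclose`):
#
#   assert_trees_all_close(
#       {'a': np.array([1.0, 2.0])},
#       {'a': np.array([1.0, 2.0 + 1e-9])},
#       atol=1e-6,
#   )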
| {
"content_hash": "de159f61e4ebf90de6c2cf041eee0639",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 75,
"avg_line_length": 25.797752808988765,
"alnum_prop": 0.6371951219512195,
"repo_name": "google-research/jax3d",
"id": "3e66c27fcc02cb3e23e8896f0f2e8427f803852f",
"size": "2879",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax3d/utils/testing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "47972"
},
{
"name": "Python",
"bytes": "1239944"
}
],
"symlink_target": ""
} |
"""
TileMap loader for python for Tiled, a generic tile map editor
from http://mapeditor.org/ .
It loads the \*.tmx files produced by Tiled.
"""
# Versioning scheme based on: http://en.wikipedia.org/wiki/Versioning#Designating_development_stage
#
# +-- api change, probably incompatible with older versions
# | +-- enhancements but no api change
# | |
# major.minor[.build[.revision]]
# |
# +-|* 0 for alpha (status)
# |* 1 for beta (status)
# |* 2 for release candidate
# |* 3 for (public) release
#
# For instance:
# * 1.2.0.1 instead of 1.2-a
# * 1.2.1.2 instead of 1.2-b2 (beta with some bug fixes)
# * 1.2.2.3 instead of 1.2-rc (release candidate)
# * 1.2.3.0 instead of 1.2-r (commercial distribution)
# * 1.2.3.5 instead of 1.2-r5 (commercial distribution with many bug fixes)
__revision__ = "$Rev: 115 $"
__version__ = "3.1.0." + __revision__[6:-2]
__author__ = 'DR0ID @ 2009-2011'
# import logging
# #the following few lines are needed to use logging if this module used without
# # a previous call to logging.basicConfig()
# if 0 == len(logging.root.handlers):
# logging.basicConfig(level=logging.DEBUG)
# _LOGGER = logging.getLogger('tiledtmxloader')
# if __debug__:
# _LOGGER.debug('%s loading ...' % (__name__))
# -----------------------------------------------------------------------------
import sys
from xml.dom import minidom, Node
try:
import StringIO
from StringIO import StringIO
except:
from io import StringIO
import os.path
import struct
import array
# -----------------------------------------------------------------------------
class TileMap(object):
"""
The TileMap holds all the map data.
:Ivariables:
orientation : string
orthogonal or isometric or hexagonal or shifted
tilewidth : int
width of the tiles (for all layers)
tileheight : int
height of the tiles (for all layers)
width : int
width of the map (number of tiles)
height : int
height of the map (number of tiles)
version : string
version of the map format
tile_sets : list
list of TileSet
properties : dict
            the properties set in the editor, name-value pairs, strings
pixel_width : int
width of the map in pixels
pixel_height : int
height of the map in pixels
layers : list
list of TileLayer
map_file_name : dict
file name of the map
        named_layers : dict of string:TileLayer
dict containing {name : TileLayer}
named_tile_sets : dict
dict containing {name : TileSet}
"""
def __init__(self):
# This is the top container for all data. The gid is the global id
        # (for an image).
        # Before calling convert most of the values are strings. Some additional
        # values are also calculated, see convert() for details. After calling
        # convert, most values are integers or floats where appropriate.
"""
The TileMap holds all the map data.
"""
# set through parser
self.orientation = None
self.tileheight = 0
self.tilewidth = 0
self.width = 0
self.height = 0
self.version = 0
self.tile_sets = [] # TileSet
# ISSUE 9: object groups should be in the same order as layers
self.layers = [] # WorldTileLayer <- what order? back to front (guessed)
# self.object_groups = []
self.properties = {} # {name: value}
# additional info
self.pixel_width = 0
self.pixel_height = 0
self.named_layers = {} # {name: layer}
self.named_tile_sets = {} # {name: tile_set}
self.map_file_name = ""
def convert(self):
"""
Converts numerical values from strings to numerical values.
It also calculates or set additional data:
pixel_width
pixel_height
named_layers
named_tile_sets
"""
self.tilewidth = int(self.tilewidth)
self.tileheight = int(self.tileheight)
self.width = int(self.width)
self.height = int(self.height)
self.pixel_width = self.width * self.tilewidth
self.pixel_height = self.height * self.tileheight
for layer in self.layers:
# ISSUE 9
if not layer.is_object_group:
layer.tilewidth = self.tilewidth
layer.tileheight = self.tileheight
self.named_layers[layer.name] = layer
layer.convert()
for tile_set in self.tile_sets:
self.named_tile_sets[tile_set.name] = tile_set
tile_set.spacing = int(tile_set.spacing)
tile_set.margin = int(tile_set.margin)
for img in tile_set.images:
if img.trans:
img.trans = (int(img.trans[:2], 16), \
int(img.trans[2:4], 16), \
int(img.trans[4:], 16))
def decode(self):
"""
Decodes the TileLayer encoded_content and saves it in decoded_content.
"""
for layer in self.layers:
if not layer.is_object_group:
layer.decode()
# -----------------------------------------------------------------------------
class TileSet(object):
"""
A tileset holds the tiles and its images.
:Ivariables:
firstgid : int
the first gid of this tileset
name : string
the name of this TileSet
images : list
list of TileImages
tiles : list
list of Tiles
indexed_images : dict
            after calling load() it is a dict containing id: image
        spacing : int
            the spacing between tiles
        margin : int
            the margin of the tiles
        properties : dict
            the properties set in the editor, name-value pairs
        tilewidth : int
            the actual width of the tile, can be different from the tilewidth
            of the map
        tileheight : int
            the actual height of the tile, can be different from the tileheight
of the map
"""
def __init__(self):
self.firstgid = 0
self.name = None
self.images = [] # TileImage
self.tiles = [] # Tile
self.indexed_images = {} # {id:image}
self.spacing = 0
self.margin = 0
self.properties = {}
self.tileheight = 0
self.tilewidth = 0
# -----------------------------------------------------------------------------
class TileImage(object):
"""
An image of a tile or just an image.
:Ivariables:
id : int
id of this image (has nothing to do with gid)
format : string
the format as string, only 'png' at the moment
source : string
            filename of the image; either this or the content is set
encoding : string
encoding of the content
trans : tuple of (r,g,b)
the colorkey color, raw as hex, after calling convert just a
(r,g,b) tuple
properties : dict
            the properties set in the editor, name-value pairs
image : TileImage
after calling load the pygame surface
"""
def __init__(self):
self.id = 0
self.format = None
self.source = None
self.encoding = None # from <data>...</data>
self.content = None # from <data>...</data>
self.image = None
self.trans = None
self.properties = {} # {name: value}
# -----------------------------------------------------------------------------
class Tile(object):
"""
A single tile.
:Ivariables:
id : int
id of the tile gid = TileSet.firstgid + Tile.id
images : list of :class:TileImage
list of TileImage, either its 'id' or 'image data' will be set
properties : dict of name:value
            the properties set in the editor, name-value pairs
"""
# [20:22] DR0ID_: to sum up: there are two use cases,
# if the tile element has a child element 'image' then tile is
# standalone with its own id and
# the other case where a tileset is present then it
# referes to the image with that id in the tileset
def __init__(self):
self.id = 0
self.images = [] # uses TileImage but either only id will be set or image data
self.properties = {} # {name: value}
# -----------------------------------------------------------------------------
class TileLayer(object):
"""
A layer of the world.
:Ivariables:
x : int
position of layer in the world in number of tiles (not pixels)
y : int
position of layer in the world in number of tiles (not pixels)
width : int
number of tiles in x direction
height : int
number of tiles in y direction
pixel_width : int
width of layer in pixels
pixel_height : int
height of layer in pixels
name : string
name of this layer
opacity : float
float from 0 (full transparent) to 1.0 (opaque)
decoded_content : list
list of graphics id going through the map::
e.g [1, 1, 1, ]
where decoded_content[0] is (0,0)
decoded_content[1] is (1,0)
...
decoded_content[w] is (width,0)
decoded_content[w+1] is (0,1)
...
decoded_content[w * h] is (width,height)
usage: graphics id = decoded_content[tile_x + tile_y * width]
content2D : list
list of list, usage: graphics id = content2D[x][y]
"""
def __init__(self):
self.width = 0
self.height = 0
self.x = 0
self.y = 0
self.pixel_width = 0
self.pixel_height = 0
self.name = None
self.opacity = -1
self.encoding = None
self.compression = None
self.encoded_content = None
self.decoded_content = []
self.visible = True
self.properties = {} # {name: value}
self.content2D = None
self.is_object_group = False # ISSUE 9
def decode(self):
"""
        Converts the contents into a list of integers which are the gids of the
        used tiles. If necessary it decodes and uncompresses the contents.
"""
self.decoded_content = []
if self.encoded_content:
content = self.encoded_content
if self.encoding:
if self.encoding.lower() == 'base64':
content = decode_base64(content)
elif self.encoding.lower() == 'csv':
list_of_lines = content.split()
for line in list_of_lines:
self.decoded_content.extend(line.split(','))
self.decoded_content = list(map(int, \
[val for val in self.decoded_content if val]))
content = ""
else:
raise Exception('unknown data encoding %s' % \
(self.encoding))
else:
# in the case of xml the encoded_content already contains a
# list of integers
self.decoded_content = list(map(int, self.encoded_content))
content = ""
if self.compression:
if self.compression == 'gzip':
content = decompress_gzip(content)
elif self.compression == 'zlib':
content = decompress_zlib(content)
else:
raise Exception('unknown data compression %s' % \
(self.compression))
else:
raise Exception('no encoded content to decode')
struc = struct.Struct("<" + "I" * self.width)
struc_unpack_from = struc.unpack_from
self_decoded_content_extend = self.decoded_content.extend
for idx in range(0, len(content), 4 * self.width):
val = struc_unpack_from(content, idx)
self_decoded_content_extend(val)
arr = array.array('I')
arr.fromlist(self.decoded_content)
self.decoded_content = arr
# TODO: generate property grid here??
self._gen_2D()
def _gen_2D(self):
self.content2D = []
# generate the needed lists and fill them
for xpos in range(self.width):
self.content2D.append(array.array('I'))
for ypos in range(self.height):
self.content2D[xpos].append( \
self.decoded_content[xpos + ypos * self.width])
def pretty_print(self):
num = 0
for y in range(int(self.height)):
output = ""
for x in range(int(self.width)):
output += str(self.decoded_content[num])
num += 1
print(output)
def convert(self):
self.opacity = float(self.opacity)
self.x = int(self.x)
self.y = int(self.y)
self.width = int(self.width)
self.height = int(self.height)
self.pixel_width = self.width * self.tilewidth
self.pixel_height = self.height * self.tileheight
self.visible = bool(int(self.visible))
# def get_visible_tile_range(self, xmin, ymin, xmax, ymax):
# tile_w = self.pixel_width / self.width
# tile_h = self.pixel_height / self.height
# left = int(round(float(xmin) / tile_w)) - 1
# right = int(round(float(xmax) / tile_w)) + 2
# top = int(round(float(ymin) / tile_h)) - 1
# bottom = int(round(float(ymax) / tile_h)) + 2
# return (left, top, left - right, top - bottom)
# def get_tiles(self, xmin, ymin, xmax, ymax):
# tiles = []
# if self.visible:
# for ypos in range(ymin, ymax):
# for xpos in range(xmin, xmax):
# try:
# img_idx = self.content2D[xpos][ypos]
# if img_idx:
# tiles.append((xpos, ypos, img_idx))
# except IndexError:
# pass
# return tiles
# -----------------------------------------------------------------------------
class MapObjectGroupLayer(object):
"""
Group of objects on the map.
:Ivariables:
x : int
the x position
y : int
the y position
width : int
width of the bounding box (usually 0, so no use)
height : int
height of the bounding box (usually 0, so no use)
name : string
name of the group
objects : list
list of the map objects
"""
def __init__(self):
self.width = 0
self.height = 0
self.name = None
self.objects = []
self.x = 0
self.y = 0
self.visible = True
self.properties = {} # {name: value}
self.is_object_group = True # ISSUE 9
def convert(self):
self.x = int(self.x)
self.y = int(self.y)
self.width = int(self.width)
self.height = int(self.height)
for map_obj in self.objects:
map_obj.x = int(map_obj.x)
map_obj.y = int(map_obj.y)
map_obj.width = int(map_obj.width)
map_obj.height = int(map_obj.height)
# -----------------------------------------------------------------------------
class MapObject(object):
"""
A single object on the map.
:Ivariables:
x : int
x position relative to group x position
y : int
y position relative to group y position
width : int
width of this object
height : int
height of this object
type : string
the type of this object
image_source : string
source path of the image for this object
image : :class:TileImage
after loading this is the pygame surface containing the image
"""
def __init__(self):
self.name = None
self.x = 0
self.y = 0
self.width = 0
self.height = 0
self.type = None
self.image_source = None
self.image = None
self.properties = {} # {name: value}
# -----------------------------------------------------------------------------
def decode_base64(in_str):
"""
Decodes a base64 string and returns it.
:Parameters:
in_str : string
base64 encoded string
:returns: decoded string
"""
import base64
return base64.decodestring(in_str.encode('latin-1'))
# -----------------------------------------------------------------------------
def decompress_gzip(in_str):
"""
Uncompresses a gzip string and returns it.
:Parameters:
in_str : string
gzip compressed string
:returns: uncompressed string
"""
import gzip
if sys.version_info > (2, ):
from io import BytesIO
        compressed_stream = BytesIO(in_str)
    else:
        # gzip can only handle file objects, therefore using StringIO
        compressed_stream = StringIO(in_str.decode("latin-1"))
    gzipper = gzip.GzipFile(fileobj=compressed_stream)
content = gzipper.read()
gzipper.close()
return content
# -----------------------------------------------------------------------------
def decompress_zlib(in_str):
"""
Uncompresses a zlib string and returns it.
:Parameters:
in_str : string
zlib compressed string
:returns: uncompressed string
"""
import zlib
content = zlib.decompress(in_str)
return content
# -----------------------------------------------------------------------------
def printer(obj, ident=''):
"""
    Helper function, prints a hierarchy of objects.
"""
import inspect
print(ident + obj.__class__.__name__.upper())
ident += ' '
lists = []
for name in dir(obj):
elem = getattr(obj, name)
if isinstance(elem, list) and name != 'decoded_content':
lists.append(elem)
elif not inspect.ismethod(elem):
if not name.startswith('__'):
if name == 'data' and elem:
print(ident + 'data = ')
printer(elem, ident + ' ')
else:
print(ident + '%s\t= %s' % (name, getattr(obj, name)))
for objt_list in lists:
for _obj in objt_list:
printer(_obj, ident + ' ')
# -----------------------------------------------------------------------------
class VersionError(Exception): pass
# -----------------------------------------------------------------------------
class TileMapParser(object):
"""
    Allows parsing and decoding of map files for 'Tiled', an open source map
    editor written in Java. It can be found here: http://mapeditor.org/
"""
def _build_tile_set(self, tile_set_node, world_map):
tile_set = TileSet()
self._set_attributes(tile_set_node, tile_set)
if hasattr(tile_set, "source"):
tile_set = self._parse_tsx(tile_set.source, tile_set, world_map)
else:
tile_set = self._get_tile_set(tile_set_node, tile_set, \
self.map_file_name)
world_map.tile_sets.append(tile_set)
def _parse_tsx(self, file_name, tile_set, world_map):
# ISSUE 5: the *.tsx file is probably relative to the *.tmx file
if not os.path.isabs(file_name):
# print "map file name", self.map_file_name
file_name = self._get_abs_path(self.map_file_name, file_name)
# print "tsx filename: ", file_name
# would be more elegant to use "with open(file_name, "rb") as file:"
# but that is python 2.6
file = None
try:
file = open(file_name, "rb")
dom = minidom.parseString(file.read())
finally:
if file:
file.close()
for node in self._get_nodes(dom.childNodes, 'tileset'):
tile_set = self._get_tile_set(node, tile_set, file_name)
break
return tile_set
def _get_tile_set(self, tile_set_node, tile_set, base_path):
for node in self._get_nodes(tile_set_node.childNodes, 'image'):
self._build_tile_set_image(node, tile_set, base_path)
for node in self._get_nodes(tile_set_node.childNodes, 'tile'):
self._build_tile_set_tile(node, tile_set)
self._set_attributes(tile_set_node, tile_set)
return tile_set
def _build_tile_set_image(self, image_node, tile_set, base_path):
image = TileImage()
self._set_attributes(image_node, image)
# id of TileImage has to be set! -> Tile.TileImage will only have id set
for node in self._get_nodes(image_node.childNodes, 'data'):
self._set_attributes(node, image)
image.content = node.childNodes[0].nodeValue
image.source = self._get_abs_path(base_path, image.source) # ISSUE 5
tile_set.images.append(image)
def _get_abs_path(self, base, relative):
if os.path.isabs(relative):
return relative
if os.path.isfile(base):
base = os.path.dirname(base)
return os.path.abspath(os.path.join(base, relative))
def _build_tile_set_tile(self, tile_set_node, tile_set):
tile = Tile()
self._set_attributes(tile_set_node, tile)
for node in self._get_nodes(tile_set_node.childNodes, 'image'):
self._build_tile_set_tile_image(node, tile)
tile_set.tiles.append(tile)
def _build_tile_set_tile_image(self, tile_node, tile):
tile_image = TileImage()
self._set_attributes(tile_node, tile_image)
for node in self._get_nodes(tile_node.childNodes, 'data'):
self._set_attributes(node, tile_image)
tile_image.content = node.childNodes[0].nodeValue
tile.images.append(tile_image)
def _build_layer(self, layer_node, world_map):
layer = TileLayer()
self._set_attributes(layer_node, layer)
for node in self._get_nodes(layer_node.childNodes, 'data'):
self._set_attributes(node, layer)
if layer.encoding:
layer.encoded_content = node.lastChild.nodeValue
else:
#print 'has childnodes', node.hasChildNodes()
layer.encoded_content = []
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == "tile":
val = child.attributes["gid"].nodeValue
#print child, val
layer.encoded_content.append(val)
world_map.layers.append(layer)
def _build_world_map(self, world_node):
world_map = TileMap()
self._set_attributes(world_node, world_map)
if world_map.version != "1.0":
raise VersionError('this parser was made for maps of version 1.0, found version %s' % world_map.version)
for node in self._get_nodes(world_node.childNodes, 'tileset'):
self._build_tile_set(node, world_map)
for node in self._get_nodes(world_node.childNodes, 'layer'):
self._build_layer(node, world_map)
for node in self._get_nodes(world_node.childNodes, 'objectgroup'):
self._build_object_groups(node, world_map)
return world_map
def _build_object_groups(self, object_group_node, world_map):
object_group = MapObjectGroupLayer()
self._set_attributes(object_group_node, object_group)
for node in self._get_nodes(object_group_node.childNodes, 'object'):
tiled_object = MapObject()
self._set_attributes(node, tiled_object)
for img_node in self._get_nodes(node.childNodes, 'image'):
tiled_object.image_source = \
img_node.attributes['source'].nodeValue
object_group.objects.append(tiled_object)
# ISSUE 9
world_map.layers.append(object_group)
# -- helpers -- #
def _get_nodes(self, nodes, name):
for node in nodes:
if node.nodeType == Node.ELEMENT_NODE and node.nodeName == name:
yield node
def _set_attributes(self, node, obj):
attrs = node.attributes
for attr_name in list(attrs.keys()):
setattr(obj, attr_name, attrs.get(attr_name).nodeValue)
self._get_properties(node, obj)
def _get_properties(self, node, obj):
props = {}
for properties_node in self._get_nodes(node.childNodes, 'properties'):
for property_node in self._get_nodes(properties_node.childNodes, 'property'):
try:
props[property_node.attributes['name'].nodeValue] = \
property_node.attributes['value'].nodeValue
except KeyError:
props[property_node.attributes['name'].nodeValue] = \
property_node.lastChild.nodeValue
obj.properties.update(props)
# -- parsers -- #
def parse(self, file_name):
"""
Parses the given map. Does no decoding nor loading of the data.
:return: instance of TileMap
"""
# would be more elegant to use
# "with open(file_name, "rb") as tmx_file:" but that is python 2.6
self.map_file_name = os.path.abspath(file_name)
tmx_file = None
try:
tmx_file = open(self.map_file_name, "rb")
dom = minidom.parseString(tmx_file.read())
finally:
if tmx_file:
tmx_file.close()
for node in self._get_nodes(dom.childNodes, 'map'):
world_map = self._build_world_map(node)
break
world_map.map_file_name = self.map_file_name
world_map.convert()
return world_map
def parse_decode(self, file_name):
"""
Parses the map but additionally decodes the data.
:return: instance of TileMap
"""
world_map = self.parse(file_name)
world_map.decode()
return world_map
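# Usage sketch (added for illustration; "level1.tmx" is a placeholder path):
#
#     parser = TileMapParser()
#     world_map = parser.parse_decode("level1.tmx")
#     for layer in world_map.layers:
#         if not layer.is_object_group:
#             gid = layer.content2D[0][0]  # graphics id of the tile at (0, 0)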
# -----------------------------------------------------------------------------
class AbstractResourceLoader(object):
"""
Abstract base class for the resource loader.
"""
FLIP_X = 1 << 31
FLIP_Y = 1 << 30
def __init__(self):
self.indexed_tiles = {} # {gid: (offsetx, offsety, image}
self.world_map = None
self._img_cache = {}
def _load_image(self, filename, colorkey=None): # -> image
"""
Load a single image.
:Parameters:
filename : string
Path to the file to be loaded.
colorkey : tuple
The (r, g, b) color that should be used as colorkey
(or magic color).
Default: None
:rtype: image
"""
        raise NotImplementedError('This should be implemented in an inherited class')
def _load_image_file_like(self, file_like_obj, colorkey=None): # -> image
"""
Load a image from a file like object.
:Parameters:
file_like_obj : file
This is the file like object to load the image from.
colorkey : tuple
The (r, g, b) color that should be used as colorkey
(or magic color).
Default: None
:rtype: image
"""
        raise NotImplementedError('This should be implemented in an inherited class')
def _load_image_parts(self, filename, margin, spacing, tilewidth, tileheight, colorkey=None): #-> [images]
"""
Load different tile images from one source image.
:Parameters:
filename : string
Path to image to be loaded.
margin : int
The margin around the image.
spacing : int
The space between the tile images.
tilewidth : int
The width of a single tile.
tileheight : int
The height of a single tile.
colorkey : tuple
The (r, g, b) color that should be used as colorkey
(or magic color).
Default: None
        Iterating over the tile images is straightforward in Python::
...
w, h = image_size
for y in xrange(margin, h, tileheight + spacing):
for x in xrange(margin, w, tilewidth + spacing):
...
:rtype: a list of images
"""
        raise NotImplementedError('This should be implemented in an inherited class')
    def load(self, tile_map):
        """
        Load all tile images referenced by the given tile map and index
        them by gid in self.indexed_tiles.
        """
self.world_map = tile_map
for tile_set in tile_map.tile_sets:
# do images first, because tiles could reference it
for img in tile_set.images:
if img.source:
self._load_image_from_source(tile_map, tile_set, img)
else:
tile_set.indexed_images[img.id] = self._load_tile_image(img)
# tiles
for tile in tile_set.tiles:
for img in tile.images:
if not img.content and not img.source:
# only image id set
indexed_img = tile_set.indexed_images[img.id]
self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)
else:
if img.source:
self._load_image_from_source(tile_map, tile_set, img)
else:
indexed_img = self._load_tile_image(img)
self.indexed_tiles[int(tile_set.firstgid) + int(tile.id)] = (0, 0, indexed_img)
def _load_image_from_source(self, tile_map, tile_set, a_tile_image):
# relative path to file
img_path = os.path.join(os.path.dirname(tile_map.map_file_name), \
a_tile_image.source)
tile_width = int(tile_map.tilewidth)
tile_height = int(tile_map.tileheight)
        if tile_set.tilewidth:
            tile_width = int(tile_set.tilewidth)
        if tile_set.tileheight:
            tile_height = int(tile_set.tileheight)
offsetx = 0
offsety = 0
# the offset is used for pygame because the origin is topleft in pygame
if tile_height > tile_map.tileheight:
offsety = tile_height - tile_map.tileheight
idx = 0
for image in self._load_image_parts(img_path, \
tile_set.margin, tile_set.spacing, \
tile_width, tile_height, a_tile_image.trans):
self.indexed_tiles[int(tile_set.firstgid) + idx] = \
(offsetx, -offsety, image)
idx += 1
def _load_tile_image(self, a_tile_image):
img_str = a_tile_image.content
if a_tile_image.encoding:
if a_tile_image.encoding == 'base64':
img_str = decode_base64(a_tile_image.content)
else:
raise Exception('unknown image encoding %s' % a_tile_image.encoding)
sio = StringIO(img_str)
new_image = self._load_image_file_like(sio, a_tile_image.trans)
return new_image
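# Illustrative sketch of how the pieces above fit together. The parser class
# name and the concrete loader subclass below are assumptions for illustration
# only; parse_decode() and AbstractResourceLoader.load() are the methods
# defined in this module.
#
#   parser = TileMapParser()                      # assumed parser class name
#   world_map = parser.parse_decode("level1.tmx") # parse and decode layer data
#   loader = MyPygameResourceLoader()             # hypothetical concrete
#                                                 # AbstractResourceLoader subclass
#   loader.load(world_map)                        # fills loader.indexed_tiles
#                                                 # with {gid: (offsetx, offsety, image)}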
# -----------------------------------------------------------------------------
| {
"content_hash": "238ef7ceefbc5ac8a8a196a5b0d00ba7",
"timestamp": "",
"source": "github",
"line_count": 909,
"max_line_length": 116,
"avg_line_length": 35.42684268426843,
"alnum_prop": 0.5221252678321896,
"repo_name": "Knowlege/tiledtmxloader",
"id": "fc06bdb3dab3deb0be86156c4177a63ec0a20b61",
"size": "32228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmxreader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "85553"
}
],
"symlink_target": ""
} |
from datetime import datetime
from trac.core import *
from trac.resource import Resource
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
from trac.util.translation import _
from trac.wiki.api import WikiSystem
class WikiPage(object):
"""Represents a wiki page (new or existing)."""
realm = 'wiki'
def __init__(self, env, name=None, version=None, db=None):
self.env = env
if isinstance(name, Resource):
self.resource = name
name = self.resource.id
else:
if version:
version = int(version) # must be a number or None
self.resource = Resource('wiki', name, version)
self.name = name
if name:
self._fetch(name, version, db)
else:
self.version = 0
self.text = self.comment = self.author = ''
self.time = None
self.readonly = 0
self.old_text = self.text
self.old_readonly = self.readonly
def _fetch(self, name, version=None, db=None):
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
if version is not None:
cursor.execute("SELECT version,time,author,text,comment,readonly "
"FROM wiki "
"WHERE name=%s AND version=%s",
(name, int(version)))
else:
cursor.execute("SELECT version,time,author,text,comment,readonly "
"FROM wiki "
"WHERE name=%s ORDER BY version DESC LIMIT 1",
(name,))
row = cursor.fetchone()
if row:
version, time, author, text, comment, readonly = row
self.version = int(version)
self.author = author
self.time = from_utimestamp(time)
self.text = text
self.comment = comment
self.readonly = readonly and int(readonly) or 0
else:
self.version = 0
self.text = self.comment = self.author = ''
self.time = None
self.readonly = 0
exists = property(fget=lambda self: self.version > 0)
def delete(self, version=None, db=None):
assert self.exists, 'Cannot delete non-existent page'
@self.env.with_transaction(db)
def do_delete(db):
cursor = db.cursor()
if version is None:
# Delete a wiki page completely
cursor.execute("DELETE FROM wiki WHERE name=%s", (self.name,))
self.env.log.info('Deleted page %s' % self.name)
else:
# Delete only a specific page version
cursor.execute("DELETE FROM wiki WHERE name=%s and version=%s",
(self.name, version))
self.env.log.info('Deleted version %d of page %s'
% (version, self.name))
if version is None or version == self.version:
self._fetch(self.name, None, db)
if not self.exists:
# Invalidate page name cache
del WikiSystem(self.env).pages
# Delete orphaned attachments
from trac.attachment import Attachment
Attachment.delete_all(self.env, 'wiki', self.name, db)
# Let change listeners know about the deletion
if not self.exists:
for listener in WikiSystem(self.env).change_listeners:
listener.wiki_page_deleted(self)
else:
for listener in WikiSystem(self.env).change_listeners:
if hasattr(listener, 'wiki_page_version_deleted'):
listener.wiki_page_version_deleted(self)
def save(self, author, comment, remote_addr, t=None, db=None):
new_text = self.text != self.old_text
if not new_text and self.readonly == self.old_readonly:
raise TracError(_('Page not modified'))
t = t or datetime.now(utc)
@self.env.with_transaction(db)
def do_save(db):
cursor = db.cursor()
if new_text:
cursor.execute("""
INSERT INTO wiki (name,version,time,author,ipnr,text,
comment,readonly)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
""", (self.name, self.version + 1, to_utimestamp(t),
author, remote_addr, self.text, comment,
self.readonly))
self.version += 1
self.resource = self.resource(version=self.version)
else:
cursor.execute("UPDATE wiki SET readonly=%s WHERE name=%s",
(self.readonly, self.name))
if self.version == 1:
# Invalidate page name cache
del WikiSystem(self.env).pages
self.author = author
self.comment = comment
self.time = t
for listener in WikiSystem(self.env).change_listeners:
if self.version == 1:
listener.wiki_page_added(self)
else:
listener.wiki_page_changed(self, self.version, t, comment,
author, remote_addr)
self.old_readonly = self.readonly
self.old_text = self.text
def rename(self, new_name):
"""Rename wiki page in-place, keeping the history intact.
Renaming a page this way will eventually leave dangling references
        to the old page - which literally doesn't exist anymore.
"""
assert self.exists, 'Cannot rename non-existent page'
old_name = self.name
@self.env.with_transaction()
def do_rename(db):
cursor = db.cursor()
new_page = WikiPage(self.env, new_name, db=db)
if new_page.exists:
raise TracError(_("Can't rename to existing %(name)s page.",
name=new_name))
cursor.execute("UPDATE wiki SET name=%s WHERE name=%s",
(new_name, old_name))
# Invalidate page name cache
del WikiSystem(self.env).pages
# Reparent attachments
from trac.attachment import Attachment
Attachment.reparent_all(self.env, 'wiki', old_name, 'wiki',
new_name)
self.name = new_name
self.env.log.info('Renamed page %s to %s', old_name, new_name)
for listener in WikiSystem(self.env).change_listeners:
if hasattr(listener, 'wiki_page_renamed'):
listener.wiki_page_renamed(self, old_name)
def get_history(self, db=None):
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT version,time,author,comment,ipnr FROM wiki "
"WHERE name=%s AND version<=%s "
"ORDER BY version DESC", (self.name, self.version))
for version, ts, author, comment, ipnr in cursor:
yield version, from_utimestamp(ts), author, comment, ipnr
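# Illustrative sketch of the typical WikiPage lifecycle; `env` (a Trac
# Environment) is assumed to exist, everything else uses the methods defined
# above.
#
#   page = WikiPage(env, 'SandBox')
#   page.text = 'Hello world'
#   page.save(author='admin', comment='initial version',
#             remote_addr='127.0.0.1')
#   page.rename('PlayGround')
#   for version, time, author, comment, ipnr in page.get_history():
#       pass  # inspect the page history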
| {
"content_hash": "b1613be5081a559e3079f2aba8e8b775",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 39.74863387978142,
"alnum_prop": 0.5299697552928238,
"repo_name": "zjj/trac_hack",
"id": "fe2b0bfebc23f7d9b1fa0a1a7a790e3961cb1620",
"size": "7982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trac/wiki/model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "11612"
},
{
"name": "JavaScript",
"bytes": "52106"
},
{
"name": "Python",
"bytes": "2442997"
},
{
"name": "Shell",
"bytes": "8336"
}
],
"symlink_target": ""
} |
import csv
import os
import scrapy
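# For each root URL in RootList.csv: write it to RootPath.dat, run the
# listnode.py spider via "scrapy runspider", then append the rows of the
# generated NodeList.csv to the combined NodeListMul.csv (keeping the CSV
# header from the first root only).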
file = open('RootList.csv', 'rb')
reader = csv.reader(file)
RootList = list(reader)
file.close()
print RootList
if os.path.isfile('NodeListMul.csv'):
os.system('rm NodeListMul.csv')
for element in RootList:
    file = open('RootPath.dat','wb') # write the current root URL into RootPath.dat;
file.write(element[0])
file.close()
os.system('scrapy runspider listnode.py')
# combine generated file with current file;
fout=open("NodeListMul.csv","a")
file = open('NodeList.csv', 'rb')
reader = csv.reader(file)
NodeList = list(reader)
file.close()
if RootList.index(element) > 0:
FirstRow = NodeList.pop(0)
for i in NodeList:
for j in i:
fout.write(j)
fout.write(',')
fout.write('\n')
fout.close()
| {
"content_hash": "766632452029884ae51a92baf40a9196",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 19.28205128205128,
"alnum_prop": 0.6861702127659575,
"repo_name": "sortsimilar/Citation-Tree",
"id": "78cea4c0fd0f83a4ff40064306207b33c2631bf7",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "listnodemul.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "273849"
},
{
"name": "Python",
"bytes": "42504"
}
],
"symlink_target": ""
} |
'''BonjourMeal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
'''
from django.urls import path
from . import views
app_name = 'bopis'
urlpatterns = [
path('checkout/<uuid:conversation_id>', views.show_cart_to_checkout),
path('create-checkout-session', views.create_checkout_session),
path('checkout_success', views.checkout_success),
path('checkout_failure', views.checkout_failure),
]
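# Illustrative routing example (paths are hypothetical): with this
# configuration a request such as
#   GET <prefix>/checkout/123e4567-e89b-12d3-a456-426614174000
# is dispatched to views.show_cart_to_checkout(request, conversation_id=...),
# where <prefix> is wherever the project urlconf includes these patterns.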
| {
"content_hash": "55448ac62afa1c22220a2dfd9e5bba48",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 36.074074074074076,
"alnum_prop": 0.7094455852156057,
"repo_name": "google-business-communications/bm-bonjour-meal-django-starter-code",
"id": "69633a455b87b70bd54d641e8e737d85acf45a38",
"size": "1571",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bonjourmeal-codelab/full-sample/bopis/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24824"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2012, Vukasin Toroman <[email protected]>
"""
import subprocess
import tornado.ioloop
import time
import fcntl
import functools
import os
class GenericSubprocess (object):
def __init__(self, timeout=-1, **popen_args):
self.args = dict()
self.args["stdout"] = subprocess.PIPE
self.args["stderr"] = subprocess.PIPE
self.args["close_fds"] = True
self.args.update(popen_args)
self.ioloop = None
self.expiration = None
self.pipe = None
self.timeout = timeout
self.streams = []
self.has_timed_out = False
def start(self):
"""Spawn the task.
Throws RuntimeError if the task was already started."""
if self.pipe is not None:
raise RuntimeError("Cannot start task twice")
self.ioloop = tornado.ioloop.IOLoop.instance()
if self.timeout > 0:
self.expiration = self.ioloop.add_timeout(time.time() + self.timeout, self.on_timeout)
self.pipe = subprocess.Popen(**self.args)
self.streams = [(self.pipe.stdout.fileno(), []),
(self.pipe.stderr.fileno(), [])]
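        # Switch both pipe fds to non-blocking mode and register them with the
        # IOLoop so stdout/stderr can be drained without blocking the loop.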
for fd, d in self.streams:
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
self.ioloop.add_handler(fd,
self.stat,
self.ioloop.READ | self.ioloop.ERROR)
def on_timeout(self):
self.has_timed_out = True
self.cancel()
def cancel(self):
"""Cancel task execution
Sends SIGKILL to the child process."""
try:
self.pipe.kill()
except:
pass
def stat(self, *args):
'''Check process completion and consume pending I/O data'''
self.pipe.poll()
if self.pipe.returncode is not None:
'''cleanup handlers and timeouts'''
if self.expiration is not None:
self.ioloop.remove_timeout(self.expiration)
for fd, dest in self.streams:
self.ioloop.remove_handler(fd)
            '''schedule callback (first try to read all pending data)'''
self.ioloop.add_callback(self.on_finish)
for fd, dest in self.streams:
while True:
try:
data = os.read(fd, 4096)
if len(data) == 0:
break
dest.extend([data])
except:
break
@property
def stdout(self):
return self.get_output(0)
@property
def stderr(self):
return self.get_output(1)
@property
def status(self):
return self.pipe.returncode
def get_output(self, index):
return b"".join(self.streams[index][1])
def on_finish(self):
        raise NotImplementedError()
class Subprocess (GenericSubprocess):
"""Create new instance
Arguments:
        callback: method to be called after completion. This method should take 4 arguments: statuscode(int), stdout(str), stderr(str), has_timed_out(boolean)
timeout: wall time allocated for the process to complete. After this expires Task.cancel is called. A negative timeout value means no limit is set
The task is not started until start is called. The process will then be spawned using subprocess.Popen(**popen_args). The stdout and stderr are always set to subprocess.PIPE.
"""
def __init__(self, callback, *args, **kwargs):
"""Create new instance
Arguments:
            callback: method to be called after completion. This method should take 4 arguments: statuscode(int), stdout(str), stderr(str), has_timed_out(boolean)
timeout: wall time allocated for the process to complete. After this expires Task.cancel is called. A negative timeout value means no limit is set
The task is not started until start is called. The process will then be spawned using subprocess.Popen(**popen_args). The stdout and stderr are always set to subprocess.PIPE.
"""
self.callback = callback
self.done_callback = False
GenericSubprocess.__init__(self, *args, **kwargs)
def on_finish(self):
if not self.done_callback:
self.done_callback = True
'''prevent calling callback twice'''
self.ioloop.add_callback(functools.partial(self.callback, self.status, self.stdout, self.stderr, self.has_timed_out))
if __name__ == "__main__":
ioloop = tornado.ioloop.IOLoop.instance()
def print_timeout(status, stdout, stderr, has_timed_out):
assert(status != 0)
assert(has_timed_out)
print("OK status:", repr(status), "stdout:", repr(stdout), "stderr:", repr(stderr), "timeout:", repr(has_timed_out))
def print_ok(status, stdout, stderr, has_timed_out):
assert(status == 0)
assert(not has_timed_out)
print("OK status:", repr(status), "stdout:", repr(stdout), "stderr:", repr(stderr), "timeout:", repr(has_timed_out))
def print_error(status, stdout, stderr, has_timed_out):
assert(status != 0)
assert(not has_timed_out)
print("OK status:", repr(status), "stdout:", repr(stdout), "stderr:", repr(stderr), "timeout:", repr(has_timed_out))
def stop_test():
ioloop.stop()
t1 = Subprocess(print_timeout, timeout=3, args=["sleep", "5"])
t2 = Subprocess(print_ok, timeout=3, args=["sleep", "1"])
t3 = Subprocess(print_ok, timeout=3, args=["sleepdsdasdas", "1"])
t4 = Subprocess(print_error, timeout=3, args=["cat", "/etc/sdfsdfsdfsdfsdfsdfsdf"])
t1.start()
t2.start()
try:
t3.start()
        assert False, "expected start() to fail for a missing executable"
    except AssertionError:
        raise
    except Exception:
        print("OK")
t4.start()
ioloop.add_timeout(time.time() + 10, stop_test)
ioloop.start() | {
"content_hash": "df0a1c5dd25755d86fbdcc45762cabed",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 182,
"avg_line_length": 36.22222222222222,
"alnum_prop": 0.5990115882753919,
"repo_name": "golden-tech-native/gd_facerecognize",
"id": "63f527b9b7f03e40f7f37e6106f6a578d6051b56",
"size": "5868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testfolder/test2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "366195"
},
{
"name": "HTML",
"bytes": "3520040"
},
{
"name": "JavaScript",
"bytes": "4511474"
},
{
"name": "Lua",
"bytes": "50338"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "194076"
},
{
"name": "Shell",
"bytes": "3059"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
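# Schema migration: set base_manager_name on Membership and make the slug
# fields on Bill, Event, Organization and Person unique.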
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0048_post_shape'),
]
operations = [
migrations.AlterModelOptions(
name='membership',
options={'base_manager_name': 'objects'},
),
migrations.AlterField(
model_name='bill',
name='slug',
field=models.SlugField(unique=True),
),
migrations.AlterField(
model_name='event',
name='slug',
field=models.SlugField(max_length=200, unique=True),
),
migrations.AlterField(
model_name='organization',
name='slug',
field=models.SlugField(max_length=200, unique=True),
),
migrations.AlterField(
model_name='person',
name='slug',
field=models.SlugField(unique=True),
),
]
| {
"content_hash": "afa34f13a8c3b467db1024240f1db794",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 64,
"avg_line_length": 27.17142857142857,
"alnum_prop": 0.5331230283911672,
"repo_name": "datamade/django-councilmatic",
"id": "a980e54b6736ecb55321a9724b4c96855bcfd841",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.5",
"path": "councilmatic_core/migrations/0049_auto_20191114_1142.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "73072"
},
{
"name": "HTML",
"bytes": "164464"
},
{
"name": "Java",
"bytes": "504830"
},
{
"name": "JavaScript",
"bytes": "78854"
},
{
"name": "Python",
"bytes": "202625"
}
],
"symlink_target": ""
} |
import abc
class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def update_amphora_listeners(self, loadbalancer, amphora,
timeout_dict):
"""Update the amphora with a new configuration.
:param loadbalancer: List of listeners to update.
:type loadbalancer: list(octavia.db.models.Listener)
:param amphora: The index of the specific amphora to update
:type amphora: octavia.db.models.Amphora
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
:type timeout_dict: dict
:returns: None
Builds a new configuration, pushes it to the amphora, and reloads
the listener on one amphora.
"""
@abc.abstractmethod
def update(self, loadbalancer):
"""Update the amphora with a new configuration.
:param loadbalancer: loadbalancer object, need to use its
vip.ip_address property
:type loadbalancer: octavia.db.models.LoadBalancer
:returns: None
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues.
"""
@abc.abstractmethod
def start(self, loadbalancer, amphora, timeout_dict=None):
"""Start the listeners on the amphora.
:param loadbalancer: loadbalancer object to start listeners
:type loadbalancer: octavia.db.models.LoadBalancer
:param amphora: Amphora to start. If None, start on all amphora
:type amphora: octavia.db.models.Amphora
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
:type timeout_dict: dict
:returns: return a value list (listener, vip, status flag--enable)
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues.
"""
@abc.abstractmethod
def reload(self, loadbalancer, amphora, timeout_dict=None):
"""Reload the listeners on the amphora.
:param loadbalancer: loadbalancer object to reload listeners
:type loadbalancer: octavia.db.models.LoadBalancer
:param amphora: Amphora to start. If None, reload on all amphora
:type amphora: octavia.db.models.Amphora
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
:type timeout_dict: dict
:returns: return a value list (listener, vip, status flag--enable)
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues.
"""
@abc.abstractmethod
def delete(self, listener):
"""Delete the listener on the vip.
:param listener: listener object,
need to use its protocol_port property
:type listener: octavia.db.models.Listener
:returns: return a value list (listener, vip, status flag--delete)
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues.
"""
@abc.abstractmethod
def get_info(self, amphora, raise_retry_exception=False):
"""Returns information about the amphora.
:param amphora: amphora object, need to use its id property
:type amphora: octavia.db.models.Amphora
:param raise_retry_exception: Flag if outside task should be retried
:type boolean: False by default
:returns: return a value list (amphora.id, status flag--'info')
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues. Eventually we
        want it to return information such as:
        {"Rest Interface": "1.0", "Amphorae": "1.0",
        "packages": {"ha proxy": "1.5"}}
        Some of this information might come from querying the amphora.
"""
@abc.abstractmethod
def get_diagnostics(self, amphora):
"""Return ceilometer ready diagnostic data.
:param amphora: amphora object, need to use its id property
:type amphora: octavia.db.models.Amphora
        :returns: return a value list (amphora.id, status
                  flag--'get_diagnostics')
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues. Eventually we
        want it to run some expensive self tests to determine whether the
        amphora and the load balancers are healthy; the idea is that those
        tests are triggered less frequently than the health gathering.
"""
@abc.abstractmethod
def finalize_amphora(self, amphora):
"""Finalize the amphora before any listeners are configured.
:param amphora: amphora object, need to use its id property
:type amphora: octavia.db.models.Amphora
:returns: None
        At this moment we just build the basic structure for testing; more
        functionality will be added as development continues. This is a hook for
drivers who need to do additional work before an amphora becomes ready
to accept listeners. Please keep in mind that amphora might be kept in
an offline pool after this call.
"""
def post_vip_plug(self, amphora, load_balancer, amphorae_network_config,
vrrp_port=None, vip_subnet=None,
additional_vip_data=None):
"""Called after network driver has allocated and plugged the VIP
:param amphora:
:type amphora: octavia.db.models.Amphora
:param load_balancer: A load balancer that just had its vip allocated
and plugged in the network driver.
:type load_balancer: octavia.common.data_models.LoadBalancer
:param amphorae_network_config: A data model containing information
about the subnets and ports that an
amphorae owns.
:type amphorae_network_config: octavia.network.data_models.
AmphoraNetworkConfig
:param vrrp_port: VRRP port associated with the load balancer
:type vrrp_port: octavia.network.data_models.Port
:param vip_subnet: VIP subnet associated with the load balancer
:type vip_subnet: octavia.network.data_models.Subnet
:type vip_network: octavia.network.data_models.AmphoraNetworkConfig
:type additional_vip_data: list of
octavia.network.data_models.AdditionalVipData
:returns: None
This is to do any additional work needed on the amphorae to plug
the vip, such as bring up interfaces.
"""
def post_network_plug(self, amphora, port, amphora_network_config):
"""Called after amphora added to network
:param amphora: amphora object, needs id and network ip(s)
:type amphora: octavia.db.models.Amphora
:param port: contains information of the plugged port
:type port: octavia.network.data_models.Port
:param amphora_network_config: A data model containing information
about the subnets and ports that an
amphorae owns.
:type amphora_network_config: octavia.network.data_models.
AmphoraNetworkConfig
This method is optional to implement. After adding an amphora to a
network, there may be steps necessary on the amphora to allow it to
access said network. Ex: creating an interface on an amphora for a
neutron network to utilize.
"""
def upload_cert_amp(self, amphora, pem_file):
"""Upload cert info to the amphora.
:param amphora: amphora object, needs id and network ip(s)
:type amphora: octavia.db.models.Amphora
:param pem_file: a certificate file
:type pem_file: file object
Upload cert file to amphora for Controller Communication.
"""
def update_amphora_agent_config(self, amphora, agent_config):
"""Upload and update the amphora agent configuration.
:param amphora: amphora object, needs id and network ip(s)
:type amphora: octavia.db.models.Amphora
:param agent_config: The new amphora agent configuration file.
:type agent_config: string
"""
@abc.abstractmethod
def get_interface_from_ip(self, amphora, ip_address, timeout_dict=None):
"""Get the interface name from an IP address.
:param amphora: The amphora to query.
:type amphora: octavia.db.models.Amphora
:param ip_address: The IP address to lookup. (IPv4 or IPv6)
:type ip_address: string
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
:type timeout_dict: dict
"""
class VRRPDriverMixin(object, metaclass=abc.ABCMeta):
"""Abstract mixin class for VRRP support in loadbalancer amphorae
Usage: To plug VRRP support in another service driver XYZ, use:
@plug_mixin(XYZ)
class XYZ: ...
"""
@abc.abstractmethod
def update_vrrp_conf(self, loadbalancer, amphorae_network_config, amphora,
timeout_dict=None):
"""Update amphorae of the loadbalancer with a new VRRP configuration
:param loadbalancer: loadbalancer object
:param amphorae_network_config: amphorae network configurations
:param amphora: The amphora object to update.
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
"""
@abc.abstractmethod
def stop_vrrp_service(self, loadbalancer):
"""Stop the vrrp services running on the loadbalancer's amphorae
:param loadbalancer: loadbalancer object
"""
@abc.abstractmethod
def start_vrrp_service(self, amphora, timeout_dict=None):
"""Start the VRRP services on the amphora
:param amphora: The amphora object to start the service on.
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
"""
@abc.abstractmethod
def reload_vrrp_service(self, loadbalancer):
"""Reload the VRRP services of all amphorae of the loadbalancer
:param loadbalancer: loadbalancer object
"""
| {
"content_hash": "232efd3740a332c44173660491b8fbff",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 42.42962962962963,
"alnum_prop": 0.6274441340782123,
"repo_name": "openstack/octavia",
"id": "1886cb4cf8acd1a2e7565a8a2e94bf8dd9d04f3b",
"size": "12136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/amphorae/drivers/driver_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "60600"
},
{
"name": "Mako",
"bytes": "922"
},
{
"name": "Python",
"bytes": "6651664"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Shell",
"bytes": "117966"
}
],
"symlink_target": ""
} |
from datetime import datetime
from app import create_uuid, db
from app.dao.dao_utils import autocommit, version_class
from app.models import (
COMPLAINT_CALLBACK_TYPE,
DELIVERY_STATUS_CALLBACK_TYPE,
ServiceCallbackApi,
)
@autocommit
@version_class(ServiceCallbackApi)
def save_service_callback_api(service_callback_api):
service_callback_api.id = create_uuid()
service_callback_api.created_at = datetime.utcnow()
db.session.add(service_callback_api)
@autocommit
@version_class(ServiceCallbackApi)
def reset_service_callback_api(service_callback_api, updated_by_id, url=None, bearer_token=None):
if url:
service_callback_api.url = url
if bearer_token:
service_callback_api.bearer_token = bearer_token
service_callback_api.updated_by_id = updated_by_id
service_callback_api.updated_at = datetime.utcnow()
db.session.add(service_callback_api)
def get_service_callback_api(service_callback_api_id, service_id):
return ServiceCallbackApi.query.filter_by(id=service_callback_api_id, service_id=service_id).first()
def get_service_delivery_status_callback_api_for_service(service_id):
return ServiceCallbackApi.query.filter_by(
service_id=service_id, callback_type=DELIVERY_STATUS_CALLBACK_TYPE
).first()
def get_service_complaint_callback_api_for_service(service_id):
return ServiceCallbackApi.query.filter_by(service_id=service_id, callback_type=COMPLAINT_CALLBACK_TYPE).first()
@autocommit
def delete_service_callback_api(service_callback_api):
db.session.delete(service_callback_api)
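# Illustrative sketch using only the helpers defined above; service_id, user_id
# and the URL are placeholders.
#
#   callback_api = get_service_delivery_status_callback_api_for_service(
#       service_id)
#   if callback_api:
#       reset_service_callback_api(callback_api, updated_by_id=user_id,
#                                  url='https://example.com/callback')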
| {
"content_hash": "be76a66c43eba04a78acc91d49b20c1a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 115,
"avg_line_length": 32.30612244897959,
"alnum_prop": 0.7567909033480733,
"repo_name": "alphagov/notifications-api",
"id": "dae4a01bfce5454003aaf160f6758357457a3fd4",
"size": "1583",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "app/dao/service_callback_api_dao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
} |
"""
Test HeatMap
------------
"""
import folium
from folium.plugins import HeatMap
from folium.utilities import normalize
from jinja2 import Template
import numpy as np
import pytest
def test_heat_map():
np.random.seed(3141592)
data = (np.random.normal(size=(100, 2)) * np.array([[1, 1]]) +
np.array([[48, 5]]))
m = folium.Map([48., 5.], tiles='stamentoner', zoom_start=6)
hm = HeatMap(data)
m.add_child(hm)
m._repr_html_()
out = normalize(m._parent.render())
# We verify that the script import is present.
script = '<script src="https://cdn.jsdelivr.net/gh/python-visualization/folium@main/folium/templates/leaflet_heat.min.js"></script>' # noqa
assert script in out
# We verify that the script part is correct.
tmpl = Template("""
var {{this.get_name()}} = L.heatLayer(
{{this.data}},
{
minOpacity: {{this.min_opacity}},
maxZoom: {{this.max_zoom}},
radius: {{this.radius}},
blur: {{this.blur}},
gradient: {{this.gradient}}
})
.addTo({{this._parent.get_name()}});
""")
assert tmpl.render(this=hm)
bounds = m.get_bounds()
np.testing.assert_allclose(
bounds,
[[46.218566840847025, 3.0302801394447734],
[50.75345011431167, 7.132453997672826]])
def test_heatmap_data():
data = HeatMap(np.array([[3, 4, 1], [5, 6, 1], [7, 8, 0.5]])).data
assert isinstance(data, list)
assert len(data) == 3
for i in range(len(data)):
assert isinstance(data[i], list)
assert len(data[i]) == 3
def test_heat_map_exception():
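    # HeatMap rejects bad input up front: NaN values raise ValueError, and data
    # that is not a sequence of (lat, lon[, weight]) rows raises an exception.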
with pytest.raises(ValueError):
HeatMap(np.array([[4, 5, 1], [3, 6, np.nan]]))
with pytest.raises(Exception):
HeatMap(np.array([3, 4, 5]))
| {
"content_hash": "90dc69d230ce6296517d1da0d0ea196d",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 144,
"avg_line_length": 27.852941176470587,
"alnum_prop": 0.5575501583949314,
"repo_name": "ocefpaf/folium",
"id": "2515399b2cd572cbd19d47b77cff3f89dd2cfe5e",
"size": "1894",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/plugins/test_heat_map.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39887"
},
{
"name": "JavaScript",
"bytes": "268"
},
{
"name": "Python",
"bytes": "349858"
}
],
"symlink_target": ""
} |
from google.cloud.vision_helpers.decorators import add_single_feature_methods
from google.cloud.vision_helpers import VisionHelpers
from .services.image_annotator import ImageAnnotatorClient as IacImageAnnotatorClient
from .services.image_annotator import ImageAnnotatorAsyncClient
from .services.product_search import ProductSearchClient
from .services.product_search import ProductSearchAsyncClient
from .types.geometry import BoundingPoly
from .types.geometry import NormalizedVertex
from .types.geometry import Position
from .types.geometry import Vertex
from .types.image_annotator import AnnotateFileRequest
from .types.image_annotator import AnnotateFileResponse
from .types.image_annotator import AnnotateImageRequest
from .types.image_annotator import AnnotateImageResponse
from .types.image_annotator import AsyncAnnotateFileRequest
from .types.image_annotator import AsyncAnnotateFileResponse
from .types.image_annotator import AsyncBatchAnnotateFilesRequest
from .types.image_annotator import AsyncBatchAnnotateFilesResponse
from .types.image_annotator import AsyncBatchAnnotateImagesRequest
from .types.image_annotator import AsyncBatchAnnotateImagesResponse
from .types.image_annotator import BatchAnnotateFilesRequest
from .types.image_annotator import BatchAnnotateFilesResponse
from .types.image_annotator import BatchAnnotateImagesRequest
from .types.image_annotator import BatchAnnotateImagesResponse
from .types.image_annotator import ColorInfo
from .types.image_annotator import CropHint
from .types.image_annotator import CropHintsAnnotation
from .types.image_annotator import CropHintsParams
from .types.image_annotator import DominantColorsAnnotation
from .types.image_annotator import EntityAnnotation
from .types.image_annotator import FaceAnnotation
from .types.image_annotator import Feature
from .types.image_annotator import GcsDestination
from .types.image_annotator import GcsSource
from .types.image_annotator import Image
from .types.image_annotator import ImageAnnotationContext
from .types.image_annotator import ImageContext
from .types.image_annotator import ImageProperties
from .types.image_annotator import ImageSource
from .types.image_annotator import InputConfig
from .types.image_annotator import LatLongRect
from .types.image_annotator import LocalizedObjectAnnotation
from .types.image_annotator import LocationInfo
from .types.image_annotator import OperationMetadata
from .types.image_annotator import OutputConfig
from .types.image_annotator import Property
from .types.image_annotator import SafeSearchAnnotation
from .types.image_annotator import TextDetectionParams
from .types.image_annotator import WebDetectionParams
from .types.image_annotator import Likelihood
from .types.product_search import ProductSearchParams
from .types.product_search import ProductSearchResults
from .types.product_search_service import AddProductToProductSetRequest
from .types.product_search_service import BatchOperationMetadata
from .types.product_search_service import CreateProductRequest
from .types.product_search_service import CreateProductSetRequest
from .types.product_search_service import CreateReferenceImageRequest
from .types.product_search_service import DeleteProductRequest
from .types.product_search_service import DeleteProductSetRequest
from .types.product_search_service import DeleteReferenceImageRequest
from .types.product_search_service import GetProductRequest
from .types.product_search_service import GetProductSetRequest
from .types.product_search_service import GetReferenceImageRequest
from .types.product_search_service import ImportProductSetsGcsSource
from .types.product_search_service import ImportProductSetsInputConfig
from .types.product_search_service import ImportProductSetsRequest
from .types.product_search_service import ImportProductSetsResponse
from .types.product_search_service import ListProductSetsRequest
from .types.product_search_service import ListProductSetsResponse
from .types.product_search_service import ListProductsInProductSetRequest
from .types.product_search_service import ListProductsInProductSetResponse
from .types.product_search_service import ListProductsRequest
from .types.product_search_service import ListProductsResponse
from .types.product_search_service import ListReferenceImagesRequest
from .types.product_search_service import ListReferenceImagesResponse
from .types.product_search_service import Product
from .types.product_search_service import ProductSet
from .types.product_search_service import ProductSetPurgeConfig
from .types.product_search_service import PurgeProductsRequest
from .types.product_search_service import ReferenceImage
from .types.product_search_service import RemoveProductFromProductSetRequest
from .types.product_search_service import UpdateProductRequest
from .types.product_search_service import UpdateProductSetRequest
from .types.text_annotation import Block
from .types.text_annotation import Page
from .types.text_annotation import Paragraph
from .types.text_annotation import Symbol
from .types.text_annotation import TextAnnotation
from .types.text_annotation import Word
from .types.web_detection import WebDetection
@add_single_feature_methods
class ImageAnnotatorClient(VisionHelpers, IacImageAnnotatorClient):
__doc__ = IacImageAnnotatorClient.__doc__
Feature = Feature
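# Illustrative sketch of the helper-augmented client; the bucket URI is a
# placeholder and credentials are assumed to be configured in the environment.
#
#   client = ImageAnnotatorClient()
#   image = Image(source=ImageSource(image_uri="gs://my-bucket/photo.jpg"))
#   response = client.label_detection(image=image)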
__all__ = (
"ImageAnnotatorAsyncClient",
"ProductSearchAsyncClient",
"AddProductToProductSetRequest",
"AnnotateFileRequest",
"AnnotateFileResponse",
"AnnotateImageRequest",
"AnnotateImageResponse",
"AsyncAnnotateFileRequest",
"AsyncAnnotateFileResponse",
"AsyncBatchAnnotateFilesRequest",
"AsyncBatchAnnotateFilesResponse",
"AsyncBatchAnnotateImagesRequest",
"AsyncBatchAnnotateImagesResponse",
"BatchAnnotateFilesRequest",
"BatchAnnotateFilesResponse",
"BatchAnnotateImagesRequest",
"BatchAnnotateImagesResponse",
"BatchOperationMetadata",
"Block",
"BoundingPoly",
"ColorInfo",
"CreateProductRequest",
"CreateProductSetRequest",
"CreateReferenceImageRequest",
"CropHint",
"CropHintsAnnotation",
"CropHintsParams",
"DeleteProductRequest",
"DeleteProductSetRequest",
"DeleteReferenceImageRequest",
"DominantColorsAnnotation",
"EntityAnnotation",
"FaceAnnotation",
"Feature",
"GcsDestination",
"GcsSource",
"GetProductRequest",
"GetProductSetRequest",
"GetReferenceImageRequest",
"Image",
"ImageAnnotationContext",
"ImageAnnotatorClient",
"ImageContext",
"ImageProperties",
"ImageSource",
"ImportProductSetsGcsSource",
"ImportProductSetsInputConfig",
"ImportProductSetsRequest",
"ImportProductSetsResponse",
"InputConfig",
"LatLongRect",
"Likelihood",
"ListProductSetsRequest",
"ListProductSetsResponse",
"ListProductsInProductSetRequest",
"ListProductsInProductSetResponse",
"ListProductsRequest",
"ListProductsResponse",
"ListReferenceImagesRequest",
"ListReferenceImagesResponse",
"LocalizedObjectAnnotation",
"LocationInfo",
"NormalizedVertex",
"OperationMetadata",
"OutputConfig",
"Page",
"Paragraph",
"Position",
"Product",
"ProductSearchClient",
"ProductSearchParams",
"ProductSearchResults",
"ProductSet",
"ProductSetPurgeConfig",
"Property",
"PurgeProductsRequest",
"ReferenceImage",
"RemoveProductFromProductSetRequest",
"SafeSearchAnnotation",
"Symbol",
"TextAnnotation",
"TextDetectionParams",
"UpdateProductRequest",
"UpdateProductSetRequest",
"Vertex",
"WebDetection",
"WebDetectionParams",
"Word",
)
| {
"content_hash": "ae6411b7effd835a95c938ec0323f95e",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 85,
"avg_line_length": 40.54736842105263,
"alnum_prop": 0.8155503634475597,
"repo_name": "googleapis/python-vision",
"id": "bfe927d259d6bcbf8d72f5051a2439a2c361434d",
"size": "8305",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/vision_v1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3254393"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from shinymud.lib.sport_plugins import SportError
from shinymud.models.area import Area
import traceback
import json
import re
def format(world, raw_data):
"""Deserialize an area saved in ShinyAreaFormat and adds it to the world.
raw_data - the data to be deserialized into a Shiny Area object.
world - The World instance
"""
area = json.loads(_match_shiny_tag('Area', raw_data))
scripts = json.loads(_match_shiny_tag('Scripts', raw_data))
items = json.loads(_match_shiny_tag('Items', raw_data))
itypes = json.loads(_match_shiny_tag('Item Types', raw_data))
npcs = json.loads(_match_shiny_tag('Npcs', raw_data))
npc_events = json.loads(_match_shiny_tag('Npc Events', raw_data))
rooms = json.loads(_match_shiny_tag('Rooms', raw_data))
room_exits = json.loads(_match_shiny_tag('Room Exits', raw_data))
room_spawns = json.loads(_match_shiny_tag('Room Spawns', raw_data))
# Build the area from the assembled dictionary data
try:
new_area = Area.create(area)
for script in scripts:
new_area.new_script(script)
world.log.debug('Finished Scripts.')
for item in items:
world.log.debug('In item, %s' % item['id'])
new_area.new_item(item)
world.log.debug('Finished Items.')
for itype in itypes:
# Get this itype's item by that item's id
my_item = new_area.get_item(itype['item'])
my_item.load_type(itype['item_type'], itype)
world.log.debug('Finished Item types.')
for npc in npcs:
new_area.new_npc(npc)
for event in npc_events:
my_script = new_area.get_script(str(event['script']))
event['script'] = my_script
my_npc = new_area.get_npc(event['prototype'])
my_npc.new_event(event)
for room in rooms:
new_room = new_area.new_room(room)
my_spawns = room_spawns.get(new_room.id)
if my_spawns:
new_room.load_spawns(my_spawns)
for exit in room_exits:
world.log.debug(exit['room'])
my_room = new_area.get_room(str(exit['room']))
my_room.new_exit(exit)
except Exception as e:
# if anything went wrong, make sure we destroy whatever parts of
# the area that got created. This way, we won't run into problems
# if they try to import it again, and we won't leave orphaned or
# erroneous data in the db.
world.log.error(traceback.format_exc())
world.destroy_area(area.get('name'), 'SPort Error')
raise SportError('There was a horrible error on import! '
'Aborting! Check logfile for details.')
new_area.reset()
return '%s has been successfully imported.' % new_area.title
def _match_shiny_tag(tag, text):
"""Match a ShinyTag from the ShinyAreaFormat.
tag -- the name of the tag you wish to match
text -- the text to be searched for the tags
Returns the string between the tag and its matching end-tag.
Raises an exception if the tag is not found.
"""
exp = r'\[' + tag + r'\](\n)?(?P<tag_body>.*?)(\n)?\[End ' + tag +\
r'\](\n)?'
match = re.search(exp, text, re.I | re.S)
if not match:
raise SportError('Corrupted file: missing or malformed %s tag.' % tag)
return match.group('tag_body')
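# For reference, an abridged sketch of the ShinyAreaFormat consumed above:
# each section is a JSON payload wrapped in a named tag pair, e.g.
#
#   [Area]
#   {"name": "foo", "title": "Foo Town", ...}
#   [End Area]
#   [Scripts]
#   [...]
#   [End Scripts]
#
# The exact keys inside each payload are whatever the corresponding model
# constructors above expect; the values shown here are placeholders.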
| {
"content_hash": "08e35631acc5ca0753fb52c302b07c5b",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 42.55,
"alnum_prop": 0.6089894242068156,
"repo_name": "shinymud/ShinyMUD",
"id": "ff4cbbb4ab241253694e72ad2ee31a04a4fa03f2",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shinymud/lib/sport_plugins/formatters/area_read_shiny_format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "545213"
}
],
"symlink_target": ""
} |
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <[email protected]>'
__docformat__ = 'epytext'
try:
from django.conf.urls import url
except ImportError:
from django.conf.urls.defaults import url
from publications import views
app_name = 'publications'
urlpatterns = [
url(r'^$', views.year, name='index'),
url(r'^(?P<publication_id>\d+)/$', views.id, name='id'),
url(r'^year/(?P<year>\d+)/$', views.year, name='year'),
url(r'^tag/(?P<keyword>.+)/$', views.keyword, name='keyword'),
url(r'^list/(?P<list>.+)/$', views.list, name='list'),
url(r'^unapi/$', views.unapi, name='unapi'),
url(r'^(?P<name>.+)/$', views.author, name='author'),
]
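# Django tries URL patterns in order, so the catch-all author pattern
# (?P<name>.+)/$ must remain last; placed earlier it would also match the
# year/, tag/, list/ and unapi/ URLs above.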
| {
"content_hash": "a760d89c58804deba3f9e74cc4d8b9d5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 34.904761904761905,
"alnum_prop": 0.616643929058663,
"repo_name": "lucastheis/django-publications",
"id": "58d2c15ba9bfb0c1da0261cb7d92dead0bafb85c",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "publications/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13804"
},
{
"name": "Python",
"bytes": "104525"
},
{
"name": "TeX",
"bytes": "1438"
}
],
"symlink_target": ""
} |
import unittest
from telemetry.internal.backends.chrome import gpu_compositing_checker
from telemetry.internal.platform import system_info
class GpuCompositingChecker(unittest.TestCase):
def testAssertGpuCompositingEnabledFailed(self):
data = {
'model_name': 'MacBookPro 10.1',
'gpu': {
'devices': [
{'vendor_id': 1000, 'device_id': 2000,
'vendor_string': 'a', 'device_string': 'b'},
],
'feature_status': {'gpu_compositing': 'disabled'},
}
}
info = system_info.SystemInfo.FromDict(data)
with self.assertRaises(
gpu_compositing_checker.GpuCompositingAssertionFailure):
gpu_compositing_checker.AssertGpuCompositingEnabled(info)
def testAssertGpuCompositingEnabledPassed(self):
data = {
'model_name': 'MacBookPro 10.1',
'gpu': {
'devices': [
{'vendor_id': 1000, 'device_id': 2000,
'vendor_string': 'a', 'device_string': 'b'},
],
'feature_status': {'gpu_compositing': 'enabled'},
}
}
info = system_info.SystemInfo.FromDict(data)
gpu_compositing_checker.AssertGpuCompositingEnabled(info)
| {
"content_hash": "0b424d7b7457b92eb51776fde8b608b8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 31.435897435897434,
"alnum_prop": 0.6027732463295269,
"repo_name": "endlessm/chromium-browser",
"id": "91c0900f2563e6974980493ec91de4f5b4f678db",
"size": "1388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/telemetry/telemetry/internal/backends/chrome/gpu_compositing_checker_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 470
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
9*num_epochs_train/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
def filter_samples(folders):
# don't use patients who don't have 2ch
import glob
def has_2ch(f):
return len(glob.glob(f+"/2ch_*.pkl")) > 0
return [folder for folder in folders if has_2ch(folder)]
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(128,128)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 200 # More augmentations since a we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on it's output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
# Architecture
def build_model():
#################
# Regular model #
#################
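    # VGG-style stack on a single 2ch slice: five conv blocks
    # (2x64, 2x128, 3x256, 3x512, 3x512 filters), each followed by 2x2
    # max-pooling, then two independent dense heads (systole and diastole)
    # of 512-512 units that each predict a (mu, sigma) pair which
    # MuSigmaErfLayer turns into a cumulative distribution over volumes.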
input_size = list(data_sizes["sliced:data:singleslice:2ch"])
input_size[0] = None
l0 = nn.layers.InputLayer(tuple(input_size))
l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3mu = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
ldsys3sigma = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(50.0), nonlinearity=lb_softplus(3))
ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)
l_systole = layers.MuSigmaErfLayer(ldsys3musigma)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3mu = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
lddia3sigma = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(50.0), nonlinearity=lb_softplus(3))
lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)
l_diastole = layers.MuSigmaErfLayer(lddia3musigma)
return {
"inputs":{
"sliced:data:singleslice:2ch": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3mu: l2_weight_out,
ldsys3sigma: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3mu: l2_weight_out,
lddia3sigma: l2_weight_out,
},
"meta_outputs": {
"systole": ldsys2,
"diastole": lddia2,
}
}
| {
"content_hash": "7c22cfe06e1bafb06ecf044c3f14c9e3",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 175,
"avg_line_length": 44.924882629107984,
"alnum_prop": 0.6996551363778869,
"repo_name": "317070/kaggle-heart",
"id": "bd1f254cdf1306609809e207053494d9e77cfc1b",
"size": "9569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configurations/j6_2ch_gauss.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2686608"
}
],
"symlink_target": ""
} |