content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
import structlog
from pathlib import Path
from typing import Any, Dict, Generator, Iterable, Optional, Tuple
from normality import normalize, WS
from followthemoney.schema import Schema
from followthemoney.types import registry
from opensanctions import settings
from nomenklatura.loader import Loader
from nomenklatura.index import Index
from opensanctions.core.entity import Entity
from opensanctions.core.dataset import Dataset
log = structlog.get_logger(__name__)
def get_index_path(dataset: Dataset) -> Path:
index_dir = settings.DATA_PATH.joinpath("index")
index_dir.mkdir(exist_ok=True)
return index_dir.joinpath(f"{dataset.name}.pkl")
def get_index(
dataset: Dataset, loader: Loader[Dataset, Entity]
) -> Index[Dataset, Entity]:
"""Load the search index for the given dataset or generate one if it does
not exist."""
path = get_index_path(dataset)
index = Index.load(loader, path)
return index
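# A minimal usage sketch (hypothetical dataset and loader objects; how the
# Loader is constructed depends on the calling code):
#
#     index = get_index(my_dataset, my_loader)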
| 30.451613 | 77 | 0.774364 | ["MIT"] | alephdata/opensanctions | opensanctions/core/index.py | 944 | Python |
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import ssl
import sys
import time
import math
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies,
compat_etree_Element,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
str_or_none,
str_to_int,
strip_or_none,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either a URL
or a path. If a URL is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all optional, except that at least one of text or html is required):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists who appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
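For illustration, a minimal "video" result could look like the following
(all field values here are hypothetical):
    {
        '_type': 'video',
        'id': '4234987',
        'title': 'Dancing naked mole rats',
        'formats': [{
            'url': 'https://example.com/video.mp4',
            'ext': 'mp4',
            'format_id': 'http-720',
            'height': 720,
        }],
    }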
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url", "duration" attributes with the same semantics
as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or a TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
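As an illustration, a "url_transparent" result could look like this
(hypothetical values):
    {
        '_type': 'url_transparent',
        'url': 'https://videohost.example/embed/abc123',
        'ie_key': 'VideoHost',
        'title': 'Title taken from the embedding page',
    }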
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
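For example, a hypothetical extractor known to work from German and French
IP addresses might declare:
    class ExampleIE(InfoExtractor):
        _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>\d+)'
        _GEO_COUNTRIES = ['DE', 'FR']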
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize the geo bypass mechanism, which is based
on faking the X-Forwarded-For HTTP header. A random country from the
provided country list is selected and a random IP belonging to this country
is generated. This IP will be passed as the X-Forwarded-For HTTP header in
all subsequent HTTP requests.
This method is used for the initial geo bypass mechanism initialization
during instance initialization with _GEO_COUNTRIES and _GEO_IP_BLOCKS.
You may also call it manually from an extractor's code if geo bypass
information is not available beforehand (e.g. obtained during extraction)
or for some other reason. In this case you should pass this information in
the geo bypass context passed as the first argument. It may contain the
following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
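Example (hypothetical values):
    self._initialize_geo_bypass({
        'countries': ['DE', 'FR'],
        'ip_blocks': ['198.51.100.0/24'],
    })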
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self._downloader.params.get('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self._downloader.params.get('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None)
and self._GEO_BYPASS
and self._downloader.params.get('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
exceptions.append(ssl.CertificateError)
try:
return self._downloader.urlopen(url_or_request)
except tuple(exceptions) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content
and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether the error should be considered fatal,
i.e. whether it should cause ExtractorError to be raised;
otherwise a warning will be reported and extraction will continue
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows accepting failed HTTP requests (non-2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
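For example (a hypothetical call), to also treat a 404 response as a
successfully downloaded page:
    webpage = self._download_webpage(
        url, video_id, expected_status=404)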
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
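Example (hypothetical pattern and webpage contents):
    video_id = self._search_regex(
        r'data-video-id="(\d+)"', webpage, 'video id')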
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
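Example (hypothetical netrc machine name):
    username, password = self._get_login_info(netrc_machine='example')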
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
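# For reference, the returned patterns are meant to match meta tags such as
# (hypothetical markup):
#   <meta property="og:title" content="Some title">
#   <meta content="Some title" property="og:title">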
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld_list = list(re.finditer(JSON_LD_RE, html))
default = kwargs.get('default', NO_DEFAULT)
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
json_ld = []
for mobj in json_ld_list:
json_ld_item = self._parse_json(
mobj.group('json_ld'), video_id, fatal=fatal)
if not json_ld_item:
continue
if isinstance(json_ld_item, dict):
json_ld.append(json_ld_item)
elif isinstance(json_ld_item, (list, tuple)):
json_ld.extend(json_ld_item)
if json_ld:
json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
if json_ld:
return json_ld
if default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_type(e):
interaction_type = e.get('interactionType')
if isinstance(interaction_type, dict):
interaction_type = interaction_type.get('@type')
return str_or_none(interaction_type)
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if isinstance(interaction_statistic, dict):
interaction_statistic = [interaction_statistic]
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = extract_interaction_type(is_e)
if not interaction_type:
continue
# For the interaction count some sites provide a string instead of
# an integer (as per spec), possibly with non-digit characters (e.g. ","),
# so extract the count with the more relaxed str_to_int
interaction_count = str_to_int(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'uploader': str_or_none(e.get('author')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
for e in json_ld:
if '@context' in e:
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
continue
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
if expected_type is None:
continue
else:
break
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
if expected_type is None:
continue
else:
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
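# For reference, an input element this helper would pick up looks like
# (hypothetical markup):
#   <input type="hidden" name="csrf_token" value="abc123">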
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
self.to_screen(
'%s: %s URL is invalid, skipping: %s'
% (video_id, item, error_to_compat_str(e.cause)))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal, data=data, headers=headers, query=query)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
if not isinstance(manifest, compat_etree_Element) and not fatal:
return []
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself an f4m manifest, do the recursive extraction,
# since bitrates in the parent manifest (this one) and the media_url
# manifest may differ, making it impossible to resolve the format by the
# requested bitrate in the f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes a stream-level manifest contains a single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/ytdl-org/youtube-dl/issues/12211
# 3. https://github.com/ytdl-org/youtube-dl/issues/18923
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in master playlist thus we can
# clearly detect media playlist with this criterion.
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (m3u8_id, group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Although the specification does not mention the NAME attribute for the
# EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH')
or last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
# for DailyMotion
progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
if progressive_uri:
http_f = f.copy()
del http_f['manifest_url']
http_f.update({
'format_id': f['format_id'].replace('hls-', 'http-'),
'protocol': 'http',
'url': progressive_uri,
})
formats.append(http_f)
last_stream_inf = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
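# Illustrative example: _xpath_ns('./head/meta', 'http://www.w3.org/2001/SMIL20/Language')
# expands to './{http://www.w3.org/2001/SMIL20/Language}head/{http://www.w3.org/2001/SMIL20/Language}meta',
# i.e. the fully qualified path that ElementTree expects for namespaced documents.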
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
elif src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
xspf_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
res = self._download_xml_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
mpd_doc, urlh = res
if mpd_doc is None:
return []
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We will only extract the ones that are
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
# First off, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by % operator string formatting used further (see
# https://github.com/ytdl-org/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
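# Illustrative example: a template such as '$Number%05d$.m4s' ends up as
# '%(Number)05d.m4s', which is then filled in with the Python '%' operator
# using the segment number/time/bandwidth values computed below.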
# Finally, '$$' in the template is an escaped literal '$' and must be
# unescaped here (str.replace returns a new string, so the result has to
# be assigned back).
t = t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/ytdl-org/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
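# Fragment entries may carry either an absolute 'url' or a relative 'path';
# in the latter case the downloader joins it with 'fragment_base_url',
# which is set further below.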
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
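# e.g. a 600 s period with 4 s segments gives ceil(600 / 4) = 150
# numbered fragments (illustrative values).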
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/ytdl-org/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# If there is a fragments key available then we correctly recognized fragmented media.
# Otherwise we will assume unfragmented media with direct access. Technically, such
# assumption is not necessarily correct since we may simply have no support for
# some forms of fragmented media renditions yet, but for now we'll use this fallback.
if 'fragments' in representation_ms_info:
f.update({
# NB: mpd_url may be empty when MPD manifest is parsed from a string
'url': mpd_url or base_url,
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
# is not necessarily unique within a Period thus formats with
# the same `format_id` are quite possible. There are numerous examples
# of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
# https://github.com/ytdl-org/youtube-dl/issues/13919)
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
ism_doc, urlh = res
if ism_doc is None:
return []
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
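# Illustrative example: a Url pattern such as
# 'QualityLevels({bitrate})/Fragments(video={start time})' becomes
# 'QualityLevels(1500000)/Fragments(video={start time})'; the
# '{start time}' placeholder is substituted per fragment below.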
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type, type_info={}):
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
# so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
# For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
media_tags = [(media_tag, media_tag_name, media_type, '')
for media_tag, media_tag_name, media_type
in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
for media_tag, _, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = strip_or_none(media_attributes.get('src'))
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
s_attr = extract_attributes(source_tag)
# data-video-src and data-src are non standard but seen
# several times in the wild
src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
if not src:
continue
f = parse_content_type(s_attr.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# width, height, res, label and title attributes are
# all not standard but seen several times in the wild
labels = [
s_attr.get(lbl)
for lbl in ('label', 'title')
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
height = (int_or_none(s_attr.get('height'))
or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
if not resolution:
continue
width = width or resolution.get('width')
height = height or resolution.get('height')
for lbl in labels:
tbr = parse_bitrate(lbl)
if tbr:
break
else:
tbr = None
f.update({
'width': width,
'height': height,
'tbr': tbr,
'format_id': s_attr.get('label') or s_attr.get('title'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = strip_or_none(track_attributes.get('src'))
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
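# Illustrative example (placeholder host): a master playlist URL like
# 'https://ex.akamaihd.net/i/foo/,500,800,.mp4.csmil/master.m3u8' is
# rewritten to 'https://ex.akamaihd.net/z/foo/,500,800,.mp4.csmil/manifest.f4m',
# i.e. the HDS variant of the same Akamai stream.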
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
m3u8_formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
http_host = hosts.get('http')
if http_host and m3u8_formats and 'hdnea=' not in m3u8_url:
REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
qualities_length = len(qualities)
if len(m3u8_formats) in (qualities_length, qualities_length + 1):
i = 0
for f in m3u8_formats:
if f['vcodec'] != 'none':
for protocol in ('http', 'https'):
http_f = f.copy()
del http_f['manifest_url']
http_url = re.sub(
REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
http_f.update({
'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
'url': http_url,
'protocol': protocol,
})
formats.append(http_f)
i += 1
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
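# Illustrative example (placeholder host): for url_base
# '//example.com/vod/mp4:sample.mp4', manifest_url('playlist.m3u8') yields
# 'http://example.com/vod/mp4:sample.mp4/playlist.m3u8', plus the original
# query string, if any.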
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
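# Looks for an embedded jwplayer("...").setup({...}) call in the page and,
# if found, returns its options object parsed as JSON (via js_to_json by
# default).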
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': clean_html(video_data.get('description')),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in a
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in Set-Cookie header and expect the first (old) one to be set rather
than the second (new) one. However, as per RFC 6265 the newer cookie
should be set into the cookie store, which is what actually happens.
We will work around this issue by resetting the cookie to
the first one manually.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
"""
for header, cookies in url_handle.headers.items():
if header.lower() != 'set-cookie':
continue
if sys.version_info[0] >= 3:
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(
r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
if cookie_value:
value, domain = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False)
or self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False)
or self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False)
and (self._get_login_info()[0] is not None
or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
| 46.903595 | 172 | 0.546574 | [
"Unlicense"
] | DevSecOpsGuy/youtube-dl-1 | youtube_dl/extractor/common.py | 143,548 | Python |
from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.urls import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_text, smart_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote, DJANGO_11
import copy
class WidgetTypeSelect(forms.Widget):
def __init__(self, widgets, attrs=None):
super(WidgetTypeSelect, self).__init__(attrs)
self._widgets = widgets
def render(self, name, value, attrs=None, renderer=None):
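# Renders the available widget types as a Bootstrap "nav-pills" list plus a
# hidden <input> holding the currently selected widget_type; clicking a pill
# updates that hidden input via the inline onclick handler built in
# render_option() below.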
if value is None:
value = ''
if DJANGO_11:
final_attrs = self.build_attrs(attrs, extra_attrs={'name': name})
else:
final_attrs = self.build_attrs(attrs, name=name)
final_attrs['class'] = 'nav nav-pills nav-stacked'
output = [u'<ul%s>' % flatatt(final_attrs)]
options = self.render_options(force_text(value), final_attrs['id'])
if options:
output.append(options)
output.append(u'</ul>')
output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
(final_attrs['id'], name, force_text(value)))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choice, widget, id):
if widget.widget_type == selected_choice:
selected_html = u' class="active"'
else:
selected_html = ''
return (u'<li%s><a onclick="' +
'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
'$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
'"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
selected_html,
widget.widget_icon,
widget.widget_title or widget.widget_type,
widget.description)
def render_options(self, selected_choice, id):
# Normalize to strings.
output = []
for widget in self._widgets:
output.append(self.render_option(selected_choice, widget, id))
return u'\n'.join(output)
class UserWidgetAdmin(object):
model_icon = 'fa fa-dashboard'
list_display = ('widget_type', 'page_id', 'user')
list_filter = ['user', 'widget_type', 'page_id']
list_display_links = ('widget_type',)
user_fields = ['user']
hidden_menu = True
wizard_form_list = (
(_(u"Widget Type"), ('page_id', 'widget_type')),
(_(u"Widget Params"), {'callback':
"get_widget_params_form", 'convert': "convert_widget_params"})
)
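# The add-widget form is a two-step wizard: step one picks the page and
# widget type; step two is built dynamically by get_widget_params_form()
# from the chosen widget's fields and folded back into the model instance
# by convert_widget_params().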
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'widget_type':
widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
form_widget = WidgetTypeSelect(widgets)
return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
widget=form_widget, label=_('Widget Type'))
if 'page_id' in self.request.GET and db_field.name == 'page_id':
kwargs['widget'] = forms.HiddenInput
field = super(
UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
def get_widget_params_form(self, wizard):
data = wizard.get_cleaned_data_for_step(wizard.steps.first)
widget_type = data['widget_type']
widget = widget_manager.get(widget_type)
fields = copy.deepcopy(widget.base_fields)
if 'id' in fields:
del fields['id']
return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
def convert_widget_params(self, wizard, cleaned_data, form):
widget = UserWidget()
value = dict([(f.name, f.value()) for f in form])
widget.set_value(value)
cleaned_data['value'] = widget.value
cleaned_data['user'] = self.user
def get_list_display(self):
list_display = super(UserWidgetAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('user')
return list_display
def queryset(self):
if self.user.is_superuser:
return super(UserWidgetAdmin, self).queryset()
return UserWidget.objects.filter(user=self.user)
def update_dashboard(self, obj):
try:
portal_pos = UserSettings.objects.get(
user=obj.user, key="dashboard:%s:pos" % obj.page_id)
except UserSettings.DoesNotExist:
return
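# The dashboard layout is stored as a UserSettings value of the form
# 'id,id|id,id|...' (widget ids per column, columns separated by '|');
# drop this widget's id from whichever column contains it.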
pos = [[w for w in col.split(',') if w != str(
obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
def delete_model(self):
self.update_dashboard(self.obj)
super(UserWidgetAdmin, self).delete_model()
def delete_models(self, queryset):
for obj in queryset:
self.update_dashboard(obj)
super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
_widgets = None
def __init__(self):
self._widgets = {}
def register(self, widget_class):
self._widgets[widget_class.widget_type] = widget_class
return widget_class
def get(self, name):
return self._widgets[name]
def get_widgets(self, page_id):
return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
def __init__(self, widget, errors):
super(WidgetDataError, self).__init__(str(errors))
self.widget = widget
self.errors = errors
class BaseWidget(forms.Form):
template = 'xadmin/widgets/base.html'
description = 'Base Widget, don\'t use it.'
widget_title = None
widget_icon = 'fa fa-plus-square'
widget_type = 'base'
base_title = None
id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
self.admin_site = dashboard.admin_site
self.request = dashboard.request
self.user = dashboard.request.user
self.convert(data)
super(BaseWidget, self).__init__(data)
if not self.is_valid():
raise WidgetDataError(self, self.errors.as_text())
self.setup()
def setup(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
self.helper = helper
self.id = self.cleaned_data['id']
self.title = self.cleaned_data['title'] or self.base_title
if not (self.user.is_superuser or self.has_perm()):
raise PermissionDenied
@property
def widget(self):
context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
'widget_type': self.widget_type, 'form': self, 'widget': self}
context.update(csrf(self.request))
self.context(context)
return loader.render_to_string(self.template, context)
def context(self, context):
pass
def convert(self, data):
pass
def has_perm(self):
return False
def save(self):
value = dict([(f.name, f.value()) for f in self])
user_widget = UserWidget.objects.get(id=self.id)
user_widget.set_value(value)
user_widget.save()
def static(self, path):
return self.dashboard.static(path)
def vendor(self, *tags):
return self.dashboard.vendor(*tags)
def media(self):
return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
widget_type = 'html'
widget_icon = 'fa fa-file-o'
description = _(
u'Html Content Widget, can write any html content in widget.')
content = forms.CharField(label=_(
'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
def has_perm(self):
return True
def context(self, context):
context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
def __iter__(self):
from xadmin import site as g_admin_site
for m, ma in g_admin_site._registry.items():
yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
m._meta.verbose_name)
class ModelChoiceField(forms.ChoiceField):
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
forms.Field.__init__(self, required=required, widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
self.widget.choices = self.choices
def __deepcopy__(self, memo):
result = forms.Field.__deepcopy__(self, memo)
return result
def _get_choices(self):
return ModelChoiceIterator(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
def to_python(self, value):
if isinstance(value, ModelBase):
return value
app_label, model_name = value.lower().split('.')
return apps.get_model(app_label, model_name)
def prepare_value(self, value):
if isinstance(value, ModelBase):
value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
return value
def valid_value(self, value):
value = self.prepare_value(value)
for k, v in self.choices:
if value == smart_text(k):
return True
return False
class ModelBaseWidget(BaseWidget):
app_label = None
model_name = None
model_perm = 'change'
model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
super(ModelBaseWidget, self).__init__(dashboard, data)
def setup(self):
self.model = self.cleaned_data['model']
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
super(ModelBaseWidget, self).setup()
def has_perm(self):
return self.dashboard.has_model_perm(self.model, self.model_perm)
def filte_choices_model(self, model, modeladmin):
return self.dashboard.has_model_perm(model, self.model_perm)
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
def get_view_class(self, view_class, model=None, **opts):
admin_class = self.admin_site._registry.get(model) if model else None
return self.admin_site.get_view_class(view_class, admin_class, **opts)
def get_factory(self):
return RequestFactory()
def setup_request(self, request):
request.user = self.user
request.session = self.request.session
return request
def make_get_request(self, path, data={}, **extra):
req = self.get_factory().get(path, data, **extra)
return self.setup_request(req)
def make_post_request(self, path, data={}, **extra):
req = self.get_factory().post(path, data, **extra)
return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
widget_type = 'qbutton'
description = _(u'Quick button Widget, quickly open any page.')
template = "xadmin/widgets/qbutton.html"
base_title = _(u"Quick Buttons")
widget_icon = 'fa fa-caret-square-o-right'
def convert(self, data):
self.q_btns = data.pop('btns', [])
def get_model(self, model_or_label):
if isinstance(model_or_label, ModelBase):
return model_or_label
else:
return apps.get_model(*model_or_label.lower().split('.'))
def context(self, context):
btns = []
for b in self.q_btns:
btn = {}
if 'model' in b:
model = self.get_model(b['model'])
if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
continue
btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, b.get('view', 'changelist')))
btn['title'] = model._meta.verbose_name
btn['icon'] = self.dashboard.get_model_icon(model)
else:
try:
btn['url'] = reverse(b['url'])
except NoReverseMatch:
btn['url'] = b['url']
if 'title' in b:
btn['title'] = b['title']
if 'icon' in b:
btn['icon'] = b['icon']
btns.append(btn)
context.update({'btns': btns})
def has_perm(self):
return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'list'
description = _(u'Any Objects list Widget.')
template = "xadmin/widgets/list.html"
model_perm = 'view'
widget_icon = 'fa fa-align-justify'
def convert(self, data):
self.list_params = data.pop('params', {})
self.list_count = data.pop('count', 10)
def setup(self):
super(ListWidget, self).setup()
if not self.title:
self.title = self.model._meta.verbose_name_plural
req = self.make_get_request("", self.list_params)
self.list_view = self.get_view_class(ListAdminView, self.model)(req)
if self.list_count:
self.list_view.list_per_page = self.list_count
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [[o for i, o in
enumerate(filter(lambda c:c.field_name in base_fields, r.cells))]
for r in list_view.results()]
context['result_count'] = list_view.result_count
context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'addform'
description = _(u'Add any model object Widget.')
template = "xadmin/widgets/addform.html"
model_perm = 'add'
widget_icon = 'fa fa-plus'
def setup(self):
super(AddFormWidget, self).setup()
if self.title is None:
self.title = _('Add %s') % self.model._meta.verbose_name
req = self.make_get_request("")
self.add_view = self.get_view_class(
CreateAdminView, self.model, list_per_page=10)(req)
self.add_view.instance_forms()
def context(self, context):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
context.update({
'addform': self.add_view.form_obj,
'addhelper': helper,
'addurl': self.add_view.model_admin_url('add'),
'model': self.model
})
def media(self):
return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in widgets:
portal_col = []
for opts in col:
try:
widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
widget.set_value(opts)
widget.save()
portal_col.append(self.get_widget(widget))
except (PermissionDenied, WidgetDataError):
widget.delete()
continue
portal.append(portal_col)
UserSettings(
user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
return portal
@filter_hook
def get_widgets(self):
if self.widget_customiz:
portal_pos = UserSettings.objects.filter(
user=self.user, key=self.get_portal_key())
if len(portal_pos):
portal_pos = portal_pos[0].value
widgets = []
if portal_pos:
user_widgets = dict([(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
for col in portal_pos.split('|'):
ws = []
for wid in col.split(','):
try:
widget = user_widgets.get(int(wid))
if widget:
ws.append(self.get_widget(widget))
except Exception as e:
import logging
logging.error(e, exc_info=True)
widgets.append(ws)
return widgets
return self.get_init_widget()
@filter_hook
def get_title(self):
return self.title
@filter_hook
def get_context(self):
new_context = {
'title': self.get_title(),
'icon': self.icon,
'portal_key': self.get_portal_key(),
'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
'add_widget_url': self.get_admin_url('%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
"?user=%s&page_id=%s&_redirect=%s" % (self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
}
context = super(Dashboard, self).get_context()
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response('xadmin/views/dashboard.html', self.get_context())
@csrf_protect_m
def post(self, request, *args, **kwargs):
if 'id' in request.POST:
widget_id = request.POST['id']
if request.POST.get('_delete', None) != 'on':
widget = self.get_widget(widget_id, request.POST.copy())
widget.save()
else:
try:
widget = UserWidget.objects.get(
user=self.user, page_id=self.get_page_id(), id=widget_id)
widget.delete()
try:
portal_pos = UserSettings.objects.get(user=self.user, key="dashboard:%s:pos" % self.get_page_id())
pos = [[w for w in col.split(',') if w != str(
widget_id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
except Exception:
pass
except UserWidget.DoesNotExist:
pass
return self.get(request)
@filter_hook
def get_media(self):
media = super(Dashboard, self).get_media() + \
self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
if self.widget_customiz:
media = media + self.vendor('xadmin.plugin.portal.js')
for ws in self.widgets:
for widget in ws:
media = media + widget.media()
return media
class ModelDashboard(Dashboard, ModelAdminView):
title = _(u"%s Dashboard")
def get_page_id(self):
return 'model:%s/%s' % self.model_info
@filter_hook
def get_title(self):
return self.title % force_text(self.obj)
def init_request(self, object_id, *args, **kwargs):
self.obj = self.get_object(unquote(object_id))
if not self.has_view_permission(self.obj):
raise PermissionDenied
if self.obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(self.opts.verbose_name), 'key': escape(object_id)})
@filter_hook
def get_context(self):
new_context = {
'has_change_permission': self.has_change_permission(self.obj),
'object': self.obj,
}
context = Dashboard.get_context(self)
context.update(ModelAdminView.get_context(self))
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
| 35.285075 | 134 | 0.609069 | [
"BSD-3-Clause"
] | edwardvon/xadmin-django3 | xadmin/views/dashboard.py | 23,641 | Python |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class TransferDeviceClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.dts.TransferDeviceClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
"""
Creates a new TransferDeviceClientCompositeOperations object
:param TransferDeviceClient client:
The service client which will be wrapped by this object
"""
self.client = client
def update_transfer_device_and_wait_for_state(self, id, transfer_device_label, update_transfer_device_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device` and waits for the :py:class:`~oci.dts.models.TransferDevice` acted upon
to enter the given state(s).
:param str id: (required)
ID of the Transfer Job
:param str transfer_device_label: (required)
Label of the Transfer Device
:param oci.dts.models.UpdateTransferDeviceDetails update_transfer_device_details: (required)
fields to update
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.dts.models.TransferDevice.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device`
:param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_transfer_device(id, transfer_device_label, update_transfer_device_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_transfer_device(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
| 51.014493 | 245 | 0.70767 | [
"Apache-2.0",
"BSD-3-Clause"
] | CentroidChef/oci-python-sdk | src/oci/dts/transfer_device_client_composite_operations.py | 3,520 | Python |
#!/usr/bin/env python
#
# Create daily QC HTML report
#
# USAGE : cbicqc_report.py <QA Directory>
#
# AUTHOR : Mike Tyszka
# PLACE : Caltech
# DATES : 09/25/2013 JMT From scratch
# 10/23/2013 JMT Add com external call
# 10/24/2013 JMT Move stats calcs to new cbicqc_stats.py
#
# This file is part of CBICQC.
#
# CBICQC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CBICQC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CBICQC. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2013-2014 California Institute of Technology.
import os
import string
import argparse
from pylab import *
# Define template
TEMPLATE_FORMAT = """
<html>
<head>
<STYLE TYPE="text/css">
BODY {
font-family : arial, sans-serif;
}
td, th {
padding-left : 10px;
padding-right : 10px;
padding-top : 0px;
padding-bottom : 0px;
text-align : "left";
}
</STYLE>
</head>
<body>
<h1 style="background-color:#E0E0FF">CBIC In Vivo Quality Control</h1>
<div>
<table>
<tr>
<td> QC Directory
<td> $qc_dir_abs
</tr>
<tr>
<td> Repetition Time (seconds)
<td> $TR_secs
</tr>
<tr>
<td> Image Volumes
<td> $N_vols
</tr>
<tr style="background-color:#AFFF9F">
<td> <b> Median tSFNR </b>
<td> <b> $tSFNR </b>
</tr>
</table>
</div>
<br>
<div>
<table>
<tr>
<td> <b> Parameter </b>
<td> <b> tMean </b>
<td> <b> Threshold </b>
        <td> <b> Percent Outliers </b>
    </tr>
    <tr>
        <td> Signal
<td> $signal_tmean
<td> $signal_thresh
<td> $signal_pout%
</tr>
<tr>
<td> Nyquist Ghost
<td> $ghost_tmean
<td> $ghost_thresh
<td> $ghost_pout%
</tr>
<tr>
<td> Air
<td> $air_tmean
<td> $air_thresh
<td> $air_pout%
</tr>
<tr>
<td> DVARS
<td> $dvars_tmean
<td> $dvars_thresh
<td> $dvars_pout%
</tr>
<tr>
<td> F-F Displacement (microns)
<td> $dd_um_tmean
<td> $dd_um_thresh
<td> $dd_um_pout%
</tr>
<tr>
<td> F-F Rotation (mdeg)
<td> $dr_mdeg_tmean
<td> $dr_mdeg_thresh
<td> $dr_mdeg_pout%
</tr>
</table>
</div>
<br>
<div>
<table>
<tr>
<td> <br><b>Motion Timeseries</b><br> <img src=qc_motion_timeseries.png />
<td> <br><b>ROI Timeseries</b><br> <img src=qc_roi_timeseries.png />
    </tr>
</table>
</div>
<div>
<table>
<tr>
<td> <br><b>Temporal Mean Signal</b><br> <img src=qc_tmean_ortho.png />
<td> <br><b>Fluctuation Noise SD</b><br> <img src=qc_tsd_ortho.png />
</tr>
<tr>
<td> <b>Temporal Signal-to-Fluctuation-Noise Ratio (SFNR)</b><br> <img src=qc_tsfnr_ortho.png />
<td> <br><b>Region Mask</b><br> <img src=qc_labels_ortho.png />
</tr>
</table>
</div>
</body>
"""
# Main function
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='QC reporting for in vivo fMRI timeseries')
parser.add_argument('-i', '--qc_dir', help="CBICQClive directory (*.qclive)")
# Parse command line arguments
args = parser.parse_args()
qc_dir = args.qc_dir
print(' Creating in vivo QC report for ' + qc_dir)
# Determine full path for QC directory
qc_dir_abs = os.path.abspath(qc_dir)
# Load dataset info and stats from QC directory
info_fname = os.path.join(qc_dir, 'qc_info.csv')
info = genfromtxt(info_fname, delimiter=',')
# Create substitution dictionary for HTML report
qc_dict = dict([
('qc_dir_abs', "%s" % qc_dir_abs),
('TR_secs', "%0.3f" % info[1]),
('N_vols', "%d" % info[2]),
('tSFNR', "%0.1f" % info[3]),
('signal_tmean', "%0.1f" % info[4]),
('signal_thresh', "%0.1f" % info[5]),
('signal_pout', "%0.1f" % info[6]),
('ghost_tmean', "%0.1f" % info[7]),
('ghost_thresh', "%0.1f" % info[8]),
('ghost_pout', "%0.1f" % info[9]),
('air_tmean', "%0.1f" % info[10]),
('air_thresh', "%0.1f" % info[11]),
('air_pout', "%0.1f" % info[12]),
('dvars_tmean', "%0.1f" % info[13]),
('dvars_thresh', "%0.1f" % info[14]),
('dvars_pout', "%0.1f" % info[15]),
('dd_um_tmean', "%0.1f" % info[16]),
('dd_um_thresh', "%0.1f" % info[17]),
('dd_um_pout', "%0.1f" % info[18]),
('dr_mdeg_tmean', "%0.1f" % info[19]),
('dr_mdeg_thresh', "%0.1f" % info[20]),
('dr_mdeg_pout', "%0.1f" % info[21]),
])
# Generate HTML report from template (see above)
TEMPLATE = string.Template(TEMPLATE_FORMAT)
html_data = TEMPLATE.safe_substitute(qc_dict)
# Write HTML report page
qc_report_html = os.path.join(qc_dir, 'index.html')
open(qc_report_html, "w").write(html_data)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| 27.657277 | 109 | 0.529961 | [
"MIT"
] | jmtyszka/CBICQAlive | cbicqclive_report.py | 5,891 | Python |
from typing import Callable
import unittest
# test
from .pipe import pipe
class TestPipe(unittest.TestCase):
def test_pipe_should_return_a_function(self) -> None:
# given
def echo(x: str) -> str:
return f"echo {x}"
# when
output = pipe(echo)
# then
self.assertTrue(isinstance(output, Callable)) # type: ignore
    def test_pipe_should_return_the_command_output(self) -> None:
# given
def echo(x: str) -> str:
return f"echo {x}"
# when
param = "hello world"
output = pipe(echo)(param)
# then
self.assertEqual(output, f"echo {param}")
def test_pipe_should_pipe_two_function(self) -> None:
# given
def echo(x: str) -> str:
return f"echo {x}"
def grep() -> str:
return "grep world"
# when
param = "hello world"
output = pipe(echo, grep)(param)
# then
self.assertEqual(output, f"echo {param} | grep world")
| 22.06383 | 69 | 0.555448 | [
"MIT"
] | romainPrignon/unshellPy | src/unshell/utils/test_pipe.py | 1,037 | Python |
# -*- coding: utf-8 -*-
# Author:Qiujie Yao
# Email: [email protected]
# @Time: 2019-06-26 14:15
| 20.8 | 33 | 0.653846 | [
"Apache-2.0"
] | iyaoqiujie/VehicleInspection | VehicleInspection/apps/appointment/permissions.py | 106 | Python |
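# Note: each entry below appears to be [USB vendor ID, product ID, interface
# index]; this reading of the third field (-1 vs. an explicit interface
# number) is an assumption, not documented in this file.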
default_ids = [
[0x0E8D, 0x0003, -1], # MTK Brom
[0x0E8D, 0x6000, 2], # MTK Preloader
[0x0E8D, 0x2000, -1], # MTK Preloader
[0x0E8D, 0x2001, -1], # MTK Preloader
[0x0E8D, 0x20FF, -1], # MTK Preloader
[0x1004, 0x6000, 2], # LG Preloader
[0x22d9, 0x0006, -1], # OPPO Preloader
[0x0FCE, 0xF200, -1], # Sony Brom
]
| 31.181818 | 42 | 0.588921 | [
"MIT"
] | CrackerCat/mtkclient | mtkclient/config/usb_ids.py | 343 | Python |
########################################### Global Variables #################################
#sklearn pickled SGDClassifier where pre-trained clf.coef_ matrix is casted to a scipy.sparse.csr_matrix for efficiency and scalability
clf = None
#sklearn pickled TfidfVectorizer
vectorizer = None
#dictionary of labelid: (latitude, longitude) It is pre-computed as the median value of all training points in a region/cluster
label_coordinate = {}
#dictionary of (latitude,longitude):location (dictionary)
coordinate_address = {}
#check if model is loaded
model_loaded = False
#dictionary of hashed user name:(latitude, longitude) pre-trained by label propagation on TwitterWorld dataset
userhash_coordinate = {}
#check if lpworld model is loaded
lp_model_loaded = False
| 47.875 | 135 | 0.72846 | [
"Apache-2.0"
] | juanc5ibanez/WLM-WLMN | WLM-WLMN/TextAnalyzer/TextAnalyzer/Pigeo/pigeo-master/params.py | 766 | Python |
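# Reads n and prints a (2*n + 1) x (2*n + 1) rhombus of digits: the centre
# value is n and values decrease towards the edges, with blanks elsewhere and
# trailing spaces stripped from each line.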
n = int(input())
arr = [[None for i in range(2*n+1)]for i in range(2*n+1)]
m = (2*n + 1) // 2
for i in range(n):
arr[i][m] = i
arr[n][m] = n
for i in range(n+1,2*n+1):
arr[i][m] = arr[i-1][m]-1
for y in range(1,m+1):
for x in range(len(arr[0])):
if x < m:
arr[y][x] = arr[y-1][x+1]
if x > m:
arr[y][x] = arr[y-1][x-1]
for y in range(2*n-1,m,-1):
for x in range(len(arr[0])):
if x < m:
arr[y][x] = arr[y+1][x+1]
if x > m:
arr[y][x] = arr[y+1][x-1]
for y in range(len(arr)):
for x in range(len(arr[0])):
if arr[y][x] is None:
arr[y][x] = ' '
else:
arr[y][x] = str(arr[y][x])
out = [" ".join(xs).rstrip() for xs in arr]
print("\n".join(out)) | 21.833333 | 57 | 0.431298 | [
"MIT"
] | neshdev/competitive-prog | ao2j/lt1300/055/A.py | 786 | Python |
"""
We recover the original divergence-free velocity field via
Ud,new = Ustar - Gphi
"""
import numpy
import pylab
import operator
def do_plots_c(Ud, Unew):
""" plot Ud,new and Ud with zoom on the bug """
pylab.clf()
pylab.cla()
f = pylab.figure()
f.text(.5, .95, r"$U_{\rm d}$ (left) and $U_{\rm d, new}$ (right) ", horizontalalignment='center')
pylab.subplot(221)
pylab.imshow(Ud[0])
pylab.ylabel("# of cells", size =8)
pylab.subplot(223)
pylab.imshow(Ud[1])
pylab.xlim(1,32)
pylab.xlabel("# of cells", size =8)
pylab.ylabel("# of cells", size =8)
pylab.subplot(222)
pylab.imshow(Unew[0])
pylab.ylabel("# of cells", size =8)
pylab.subplot(224)
pylab.imshow(Unew[1])
pylab.xlim(1,32)
pylab.xlabel("# of cells", size =8)
pylab.ylabel("# of cells", size =8)
pylab.savefig("plots/item_c_Udnew.png")
def doPartC(Ustar, phi_num, Ud, nx, ny, xmin, xmax, ymin, ymax, DO_PLOTS):
""" coordinates of centers """
dx = (xmax - xmin)/nx
dy = (ymax - ymin)/ny
""" calcuates the new gradient"""
Gphi = numpy.gradient(phi_num, dx, dy)
""" recover Ud, new """
    Unew = list(map(operator.sub, Ustar, Gphi))  # list() so Unew can be indexed in do_plots_c under Python 3
if (DO_PLOTS == 1):
do_plots_c(Ud, Unew)
return 0
| 21.770492 | 103 | 0.579819 | [
"Apache-2.0"
] | aquario-crypto/Numerical_Methods_for_Physics | homework5_elliptic_PDES/part_c.py | 1,328 | Python |
"""Module containing sacred functions for handling ML models."""
import inspect
from sacred import Ingredient
from src import models
ingredient = Ingredient('model')
@ingredient.config
def cfg():
"""Model configuration."""
name = ''
parameters = {
}
@ingredient.named_config
def TopologicalSurrogateAutoencoder():
"""TopologicalSurrogateAutoencoder."""
name = 'TopologicalSurrogateAutoencoder'
parameters = {
'd_latent': 8*2*2,
'batch_size': 32,
'arch': [256, 256, 256, 256]
}
@ingredient.named_config
def Vanilla():
name = 'VanillaAutoencoderModel'
@ingredient.named_config
def VAE():
name = 'VanillaAutoencoderModel'
parameters = {
'autoencoder_model': 'MLPVAE'
}
@ingredient.named_config
def TopoReg():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'sort_selected': False
}
}
@ingredient.named_config
def TopoRegSorted():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'sort_selected': True
}
}
@ingredient.named_config
def TopoAE():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'DeepAE',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoAERandomConv():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'DeepAE',
'input_distance': 'rp'
}
@ingredient.named_config
def TopoAEvgg():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'DeepAE',
'input_distance': 'vgg'
}
@ingredient.named_config
def TopoAEOrtho():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'input_distance': 'ortho'
}
@ingredient.named_config
def TopoAEOrthoSpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'MLPAutoencoder_Spheres',
'input_distance': 'ortho'
}
@ingredient.named_config
def TopoPCAOrtho():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAEOrtho',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoPCA():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAE',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoPCAOrthoSpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAEOrtho_Spheres',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoPCASpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAE_Spheres',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoAESpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'MLPAutoencoder_Spheres',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoRegEdgeRandom():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'random'
}
}
@ingredient.capture
def get_instance(name, parameters, _log, _seed):
"""Get an instance of a model according to parameters in the configuration.
    Also, check whether the provided parameters fit the signature of the model
    class and log default values for any that are not defined via the configuration.
"""
    # Get the model class
model_cls = getattr(models, name)
# Inspect if the constructor specification fits with additional_parameters
signature = inspect.signature(model_cls)
available_parameters = signature.parameters
for key in parameters.keys():
if key not in available_parameters.keys():
# If a parameter is defined which does not fit to the constructor
# raise an error
raise ValueError(
f'{key} is not available in {name}\'s Constructor'
)
# Now check if optional parameters of the constructor are not defined
optional_parameters = list(available_parameters.keys())[4:]
for parameter_name in optional_parameters:
# Copy list beforehand, so we can manipulate the parameter dict in the
# loop
parameter_keys = list(parameters.keys())
if parameter_name not in parameter_keys:
if parameter_name != 'random_state':
# If an optional parameter is not defined warn and run with
# default
default = available_parameters[parameter_name].default
_log.warning(
f'Optional parameter {parameter_name} not explicitly '
f'defined, will run with {parameter_name}={default}'
)
else:
_log.info(
f'Passing seed of experiment to model parameter '
'`random_state`.'
)
parameters['random_state'] = _seed
return model_cls(**parameters)
| 26.277533 | 79 | 0.619279 | [
"BSD-3-Clause"
] | BorgwardtLab/topo-ae-distances | exp/ingredients/model.py | 5,965 | Python |
from core.advbase import *
from module.bleed import Bleed
from slot.a import *
def module():
return Botan
class Botan(Adv):
a3 = [('prep',1.00), ('scharge_all', 0.05)]
conf = {}
conf['slots.a'] = RR() + United_by_One_Vision()
conf['acl'] = """
`dragon.act('c3 s end')
`s3, not self.s3_buff and prep
`s4
`s2
`s1, cancel
"""
coab = ['Blade','Wand','Dagger']
share = ['Ranzal']
def d_coabs(self):
if self.sim_afflict:
self.coab = ['Blade','Wand','Bow']
def init(self):
self.buff_class = Teambuff if self.condition('buff all team') else Selfbuff
def prerun(self):
self.bleed = Bleed("g_bleed",0).reset()
def s1_proc(self, e):
Bleed(e.name, 1.46).on()
def s2_proc(self, e):
self.buff_class(e.name,0.1,15,'crit','chance').on()
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) | 24.6 | 83 | 0.571138 | [
"Apache-2.0"
] | b1ueb1ues/dl | adv/botan.py | 984 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Translate Hook.
"""
from google.cloud.translate_v2 import Client
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class CloudTranslateHook(GoogleCloudBaseHook):
"""
Hook for Google Cloud translate APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_client = None
def __init__(self, gcp_conn_id='google_cloud_default'):
super().__init__(gcp_conn_id)
def get_conn(self):
"""
Retrieves connection to Cloud Translate
:return: Google Cloud Translate client object.
:rtype: Client
"""
if not self._client:
self._client = Client(credentials=self._get_credentials())
return self._client
def translate(
self, values, target_language, format_=None, source_language=None, model=None
):
"""Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str or None
:param source_language: (Optional) The language of the text to
be translated.
:type model: str or None
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
"""
client = self.get_conn()
return client.translate(
values=values,
target_language=target_language,
format_=format_,
source_language=source_language,
model=model,
)
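# Example usage (illustrative sketch; assumes an Airflow connection with id
# 'google_cloud_default' and valid Google Cloud credentials are configured):
#
#   hook = CloudTranslateHook()
#   result = hook.translate(values='Hello world', target_language='de',
#                           format_='text', source_language='en', model='base')
#   # For a single input value, `result` is a dict and
#   # result['translatedText'] holds the translation (see docstring above).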
| 37.45098 | 85 | 0.63377 | [
"Apache-2.0"
] | CatarinaSilva/airflow | airflow/contrib/hooks/gcp_translate_hook.py | 3,820 | Python |
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal)
from scipy.interpolate import interp1d
import pytest
import mne
from mne.forward import _make_surface_mapping, make_field_map
from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
_get_legen_table, _do_cross_dots)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._field_interpolation import _setup_dots
from mne.surface import get_meg_helmet_surf, get_head_surf
from mne.datasets import testing
from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs
from mne.io import read_raw_fif
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
data_path = testing.data_path(download=False)
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
def test_field_map_ctf():
"""Test that field mapping can be done with CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster
# smoke test
make_field_map(evoked, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir)
def test_legendre_val():
"""Test Legendre polynomial (derivative) equivalence."""
rng = np.random.RandomState(0)
# check table equiv
xs = np.linspace(-1., 1., 1000)
n_terms = 100
# True, numpy
vals_np = legendre.legvander(xs, n_terms - 1)
# Table approximation
for nc, interp in zip([100, 50], ['nearest', 'linear']):
lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp,
axis=0)
vals_i = lut_fun(xs)
# Need a "1:" here because we omit the first coefficient in our table!
assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
rtol=1e-2, atol=5e-3)
# Now let's look at our sums
ctheta = rng.rand(20, 30) * 2.0 - 1.0
beta = rng.rand(20, 30) * 0.8
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
# compare to numpy
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros((n_terms,) + beta.shape)
coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
(2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
# can't use tensor=False here b/c it isn't in old numpy
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...
# compare fast and slow for MEG
ctheta = rng.rand(20 * 30) * 2.0 - 1.0
beta = rng.rand(20 * 30) * 0.8
lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
def test_legendre_table():
"""Test Legendre table calculation."""
# double-check our table generation
n = 10
for ch_type in ['eeg', 'meg']:
lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :n - 1].copy()
n_fact1 = n_fact1[:n - 1].copy()
lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
@testing.requires_testing_data
def test_make_field_map_eeg():
"""Test interpolation of EEG field onto head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads
surf = get_head_surf('sample', subjects_dir=subjects_dir)
# we must have trans if surface is in MRI coords
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
# trans is necessary for EEG only
pytest.raises(RuntimeError, make_field_map, evoked, None,
subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 59
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
"""Test interpolation of MEG field onto helmet | head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
# let's reduce the number of channels by a bunch to speed it up
info['bads'] = info['ch_names'][:200]
# bad ch_type
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
# bad mode
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg',
mode='foo')
# no picks
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
surf, 'meg')
# bad surface def
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
# now do it with make_field_map
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj() # avoid projection warnings
fmd = make_field_map(evoked, None,
subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
# now test the make_field_map on head surf for MEG
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head',
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar',
subjects_dir=subjects_dir, trans=trans_fname)
@testing.requires_testing_data
def test_make_field_map_meeg():
"""Test making a M/EEG field map onto helmet & head."""
evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head
assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet
# reasonable ranges
maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0)
mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2)
assert_equal(len(maxs), len(maps))
for map_, max_, min_ in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=5e-2)
assert_allclose(map_['data'].min(), min_, rtol=5e-2)
# calculated from correct looking mapping on 2015/12/26
assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088,
atol=1e-3, rtol=1e-3)
assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245,
atol=1e-3, rtol=1e-3)
def _setup_args(info):
"""Configure args for test_as_meg_type_evoked."""
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg')
my_origin = np.array([0., 0., 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
@testing.requires_testing_data
def test_as_meg_type_evoked():
"""Test interpolation of data on to virtual channels."""
# validation tests
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
# channel names
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
# pick from and to channels
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
# set up things
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
# test cross dots
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
# correlation test
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[0, 1] > 0.95)
# Do it with epochs
virt_epochs = \
epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
| 41.195652 | 79 | 0.652858 | [
"BSD-3-Clause"
] | 0reza/mne-python | mne/forward/tests/test_field_interpolation.py | 11,370 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from targets.firefox.fx_testcase import *
class Test(FirefoxTest):
@pytest.mark.details(
description='Browser controls work as expected.',
locale=['en-US'],
test_case_id='119481',
test_suite_id='1998'
)
def run(self, firefox):
window_controls_minimize_pattern = Pattern('window_controls_minimize.png')
hover_minimize_control_pattern = Pattern('hover_minimize_control.png')
window_controls_restore_pattern = Pattern('window_controls_restore.png')
hover_restore_control_pattern = Pattern('hover_restore_control.png')
window_controls_maximize_pattern = Pattern('window_controls_maximize.png')
hover_maximize_control_pattern = Pattern('hover_maximize_control.png')
window_controls_close_pattern = Pattern('window_controls_close.png')
hover_close_control_pattern = Pattern('hover_close_control.png')
navigate(LocalWeb.FIREFOX_TEST_SITE)
assert exists(LocalWeb.FIREFOX_LOGO, 10), 'Page successfully loaded, firefox logo found.'
if OSHelper.is_linux():
Mouse().move(Location(0, 0))
hover(window_controls_minimize_pattern)
assert exists(hover_minimize_control_pattern, 10), 'Hover over the \'minimize\' button works correctly.'
if OSHelper.is_windows() or OSHelper.is_linux():
hover(window_controls_restore_pattern)
assert exists(hover_restore_control_pattern, 10), 'Hover over the \'restore\' button works correctly.'
if OSHelper.is_mac():
middle = find(hover_maximize_control_pattern)
Mouse().move(Location(middle.x + 7, middle.y + 5))
assert exists(hover_maximize_control_pattern, 10), 'Hover over the \'maximize\' button works correctly.'
Mouse().move(Location(middle.x - 35, middle.y + 5))
assert exists(hover_close_control_pattern.similar(0.7), 10), \
'Hover over the \'close\' button works correctly.'
else:
hover(window_controls_close_pattern)
assert exists(hover_close_control_pattern, 10), 'Hover over the \'close\' button works correctly.'
if OSHelper.is_windows() or OSHelper.is_linux():
click_window_control('restore', 'main')
time.sleep(Settings.DEFAULT_UI_DELAY)
hover(window_controls_maximize_pattern)
assert exists(hover_maximize_control_pattern, 10), \
'Hover over the \'maximize\' button works correctly; Window successfully restored.'
if OSHelper:
hover(Pattern('home_button.png'))
click_window_control('minimize', 'main')
time.sleep(Settings.DEFAULT_UI_DELAY)
try:
assert wait_vanish(LocalWeb.FIREFOX_LOGO, 10), 'Window successfully minimized.'
except FindError:
raise FindError('Window not minimized.')
restore_window_from_taskbar()
if OSHelper.is_windows():
click_window_control('maximize', 'main')
assert exists(LocalWeb.FIREFOX_LOGO, 10), 'Window successfully opened again.'
click_window_control('close', 'main')
try:
assert wait_vanish(LocalWeb.FIREFOX_LOGO, 10), 'Window successfully closed.'
except FindError:
assert False, 'Window successfully closed.'
| 44.125 | 116 | 0.679037 | [
"MPL-2.0"
] | mwxfr/mattapi | tests/firefox/toolbars_window_controls/browser_controls_upper_corner.py | 3,530 | Python |
from typing import Tuple
from chiavdf import prove
from apple.consensus.constants import ConsensusConstants
from apple.types.blockchain_format.classgroup import ClassgroupElement
from apple.types.blockchain_format.sized_bytes import bytes32
from apple.types.blockchain_format.vdf import VDFInfo, VDFProof
from apple.util.ints import uint8, uint64
def get_vdf_info_and_proof(
constants: ConsensusConstants,
vdf_input: ClassgroupElement,
challenge_hash: bytes32,
number_iters: uint64,
normalized_to_identity: bool = False,
) -> Tuple[VDFInfo, VDFProof]:
form_size = ClassgroupElement.get_size(constants)
result: bytes = prove(
bytes(challenge_hash),
vdf_input.data,
constants.DISCRIMINANT_SIZE_BITS,
number_iters,
)
output = ClassgroupElement.from_bytes(result[:form_size])
proof_bytes = result[form_size : 2 * form_size]
return VDFInfo(challenge_hash, number_iters, output), VDFProof(uint8(0), proof_bytes, normalized_to_identity)
| 33.7 | 113 | 0.771513 | [
"Apache-2.0"
] | Apple-Network/apple-blockchain | apple/util/vdf_prover.py | 1,011 | Python |
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
from __future__ import annotations
import random
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Callable, Dict
import numpy as np
from inferlo.base.factors.discrete_factor import DiscreteFactor
from inferlo.base import InferenceResult
if TYPE_CHECKING:
from inferlo import GraphModel
recordSentMessages = True
class Prob:
"""Equivalent of dai::Prob.
Wrapper around a vector - represents probability distribution.
"""
@staticmethod
def uniform(n):
"""Creates unifom probability distribution."""
return Prob.same_value(n, 1.0 / n)
@staticmethod
def same_value(n: int, val: float):
"""Creates vector filled with the same value."""
return Prob(np.ones(n, dtype=np.float64) * val)
def __init__(self, p: np.ndarray):
self.p = p
def fill(self, x):
"""Sets all entries to x."""
self.p = np.ones_like(self.p) * x
def clone(self):
"""Makes a copy."""
return Prob(np.array(self.p))
def __imul__(self, other):
self.p *= other.p
return self
def __iadd__(self, other):
self.p += other.p
return self
def normalize(self):
"""Normalize distribution."""
self.p /= np.sum(self.p)
def entropy(self) -> float:
"""Calculate entropy of the distribution."""
return - np.sum(self.p * np.log(self.p))
def __str__(self):
return str(self.p)
def dist_kl(p: Prob, q: Prob):
"""Kullback-Leibler divergence between two probability distributions."""
    kl_div = p.p * (np.log(p.p + (p.p == 0)) - np.log(q.p + (p.p == 0)))
return np.sum(kl_div)
def dist_linf(p: Prob, q: Prob):
"""Distance between two probability distributions in L_infinity norm."""
return np.max(np.abs(p.p - q.p))
@dataclass
class Neighbor:
"""Describes the neighbor relationship of two nodes in a graph.
Corresponds to dai::Neighbor.
"""
# Corresponds to the index of this Neighbor entry in the vector of
# neighbors.
iter: int
# Contains the absolute index of the neighboring node.
node: int
# Contains the "dual" index (i.e., the index of this node in the Neighbors
# vector of the neighboring node)
dual: int
@dataclass
class EdgeProp:
"""Type used for storing edge properties."""
index: np.ndarray # Index cached for this edge.
message: Prob # Old message living on this edge.
new_message: Prob # New message living on this edge
residual: float # Residual for this edge
class LDFactor:
"""Equivalent of dai::Factor.
Consists of set of variables and flattened values assigned to all var
combinations. Variables are assigned like in Inferlo, but tensor is
transposed before flattening.
"""
def __init__(self, model: GraphModel, var_idx: List[int], p: Prob):
self.model = model
self.var_idx = var_idx
self.p = p
@staticmethod
def uniform(model: GraphModel, var_idx: List[int]):
"""Creates factor defining uniform distribution."""
total_domain_size = 1
for i in var_idx:
total_domain_size *= model.get_variable(i).domain.size()
return LDFactor(model, var_idx, Prob.uniform(total_domain_size))
@staticmethod
def from_inferlo_factor(f: DiscreteFactor):
"""Converts inferlo.DiscreteFactor to LDFactor."""
rev_perm = list(range(len(f.var_idx)))[::-1]
prob = f.values.transpose(rev_perm).reshape(-1)
return LDFactor(f.model, f.var_idx, Prob(prob))
def to_inferlo_factor(self) -> DiscreteFactor:
"""Converts LDFactor to inferlo.DiscreteFactor."""
sizes = [self.model.get_variable(i).domain.size()
for i in self.var_idx[::-1]]
libdai_tensor = self.p.p.reshape(sizes)
rev_perm = list(range(len(self.var_idx)))[::-1]
inferlo_tensor = libdai_tensor.transpose(rev_perm)
return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)
def combine_with_factor(self, other: LDFactor,
func: Callable[[float, float], float]):
"""Applies binary function to two factors."""
# Check that variables of the other factor are subset of variables of
# the given factor.
for i in other.var_idx:
assert i in self.var_idx
# Now, update every value of given factor with corresponding value of
# the other factor.
for idx in range(len(self.p.p)):
j = other._encode_value_index(self._decode_value_index(idx))
self.p.p[idx] = func(self.p.p[idx], other.p.p[j])
return self
def __iadd__(self, other: LDFactor):
return self.combine_with_factor(other, lambda x, y: x + y)
def __imul__(self, other: LDFactor):
return self.combine_with_factor(other, lambda x, y: x * y)
def marginal(self, new_var_idx, normed=True) -> LDFactor:
"""Sums factor over some variables."""
result = self.to_inferlo_factor().marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
def max_marginal(self, new_var_idx, normed=True) -> LDFactor:
"""Eleiminates certain variables by finding maximum."""
result = self.to_inferlo_factor().max_marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
def clone(self):
"""Makes a copy of this factor."""
return LDFactor(self.model, self.var_idx, self.p.clone())
def _decode_value_index(self, idx):
"""Returns dict from variable id to variable value."""
ans = dict()
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans[var_id] = idx % size
idx //= size
return ans
def _encode_value_index(self, var_values: Dict[int, int]):
ans = 0
base = 1
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans += base * var_values[var_id]
base *= size
return ans
def __str__(self):
return "%s %s" % (self.var_idx, self.p.p)
class BP:
"""Belief propagation algorithm, equivalent to dai::BP.
    This class is ported from libDAI's dai::BP class. It runs the belief
    propagation algorithm on a graphical model with discrete variables and an
    arbitrary factor graph.
At the moment MAXPROD algorithm (for finding MAP state) is not supported.
Use BP.infer() to perform inference.
"""
@staticmethod
def infer(model, options=None):
"""Runs inference BP algorithm for given model.
Supports all options which libdai::BP supports. Refer to libDAI
documentation for options descritpion.
"""
if options is None:
options = {'tol': 1e-9, 'logdomain': 0, 'updates': 'SEQRND'}
inf_alg = BP(model, options)
inf_alg.init()
inf_alg.run()
return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())
def __init__(self, model: GraphModel, props: Dict[str, str]):
# Stores all edge properties
self._edges: List[List[EdgeProp]] = []
# Maximum difference between variable beliefs encountered so far
self._maxdiff = 0.0
# Number of iterations needed
self._iters = 0
        # The history of message updates (only recorded if
        # recordSentMessages is True)
self._sentMessages = []
# Stores variable beliefs of previous iteration
self._oldBeliefsV: List[LDFactor] = []
# Stores factor beliefs of previous iteration
self._old_beliefs_f: List[LDFactor] = []
# Stores the update schedule
self._update_seq = []
self.model = model
self.factors = [
LDFactor.from_inferlo_factor(
DiscreteFactor.from_factor(f)) for f in model.get_factors()]
self.nrVars = model.num_variables
self.nrFactors = len(self.factors)
# Prepare Neighbors.
# For every variable - factors, referencing it.
self.nbV: List[List[Neighbor]] = [[] for _ in range(self.nrVars)]
# For every factor - variables it references.
self.nbF: List[List[Neighbor]] = [[] for _ in range(self.nrFactors)]
for factor_id in range(len(self.factors)):
factor = self.factors[factor_id]
for var_iter_index in range(len(factor.var_idx)):
var_id = factor.var_idx[var_iter_index]
nbv_len = len(self.nbV[var_id])
nbf_len = len(self.nbF[factor_id])
assert var_iter_index == nbf_len
self.nbV[var_id].append(
Neighbor(
iter=nbv_len,
node=factor_id,
dual=nbf_len))
self.nbF[factor_id].append(
Neighbor(
iter=nbf_len,
node=var_id,
dual=nbv_len))
# Parse properties.
self.logdomain = bool(int(props.get('logdomain', 0)))
self.updates = props['updates']
self.inference = props.get('inference', 'SUMPROD')
self.verbose = int(props.get('verbose', 0))
self.damping = float(props.get('damping', 0.0))
self.maxiter = int(props.get('maxiter', 10000))
self.maxtime = float(props.get('maxtime', np.inf))
self.tol = float(props['tol'])
self._construct()
def _construct(self):
"""Helper function for constructors."""
# Create edge properties
self._edges = []
for i in range(self.nrVars):
self._edges.append([])
for _ in self.nbV[i]:
size = self._var_size(i)
new_ep = EdgeProp(
index=None,
message=Prob.uniform(size),
new_message=Prob.uniform(size),
residual=0.0)
self._edges[i].append(new_ep)
# Create old beliefs
self._oldBeliefsV = []
for i in range(self.nrVars):
self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))
self._old_beliefs_f = []
for ii in range(self.nrFactors):
self._old_beliefs_f.append(
LDFactor.uniform(
self.model,
self.factors[ii].var_idx))
# Create update sequence
self._update_seq = []
for ii in range(self.nrFactors):
for i in self.nbF[ii]:
self._update_seq.append((i.node, i.dual))
def init(self):
"""Initializes messages awith default values."""
c = 0.0 if self.logdomain else 1.0
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._edges[i][ii.iter].message.fill(c)
self._edges[i][ii.iter].new_message.fill(c)
if self.updates == 'SEQMAX':
self._update_residual(i, ii.iter, 0.0)
self._iters = 0
def find_max_residual(self):
"""Find max residual."""
# TODO: optimize with a lookup table.
max_r = -np.inf
best_edge = None
for i in range(self.nrVars):
for _I in range(len(self.nbV[i])):
if self._edges[i][_I].residual > max_r:
max_r = self._edges[i][_I].residual
best_edge = i, _I
return best_edge
def _calc_incoming_message_product(
self,
ii: int,
without_i: bool,
i: int) -> Prob:
"""Calculate the product of factor \a I and the incoming messages.
If without_i == True, the message coming from variable i is omitted
from the product.
This function is used by calc_new_message and calc_belief_f.
"""
f_prod = self.factors[ii].clone()
if self.logdomain:
f_prod.p.p = np.log(f_prod.p.p)
# Calculate product of incoming messages and factor I
for j in self.nbF[ii]:
if without_i and (j.node == i):
continue
# prod_j will be the product of messages coming into j
size = self._var_size(j.node)
default_val = 0.0 if self.logdomain else 1.0
prod_j = Prob.same_value(size, default_val)
for J in self.nbV[j.node]:
if J.node != ii: # for all J in nb(j) \ I
if self.logdomain:
prod_j += self._edges[j.node][J.iter].message
else:
prod_j *= self._edges[j.node][J.iter].message
# multiply prod with prod_j
if self.logdomain:
f_prod += LDFactor(self.model, [j.node], prod_j)
else:
f_prod *= LDFactor(self.model, [j.node], prod_j)
return f_prod.p
def _calc_new_message(self, i: int, _I: int):
# calculate updated message I->i
ii = self.nbV[i][_I].node
if len(self.factors[ii].var_idx) == 1: # optimization
marg = self.factors[ii].p.clone()
else:
Fprod = self.factors[ii].clone()
Fprod.p = self._calc_incoming_message_product(ii, True, i)
if self.logdomain:
Fprod.p.p = np.exp(Fprod.p.p - np.max(Fprod.p.p))
# Marginalize onto i
if self.inference == 'SUMPROD':
marg = Fprod.marginal([i]).p
else:
marg = Fprod.max_marginal([i]).p
# Store result
if self.logdomain:
self._edges[i][_I].new_message = Prob(np.log(marg.p))
else:
self._edges[i][_I].new_message = marg
# Update the residual if necessary
if self.updates == 'SEQMAX':
self._update_residual(
i,
_I,
dist_linf(
self._edges[i][_I].new_message,
self._edges[i][_I].message))
# BP::run does not check for NANs for performance reasons
# Somehow NaNs do not often occur in BP...
def run(self):
"""Runs BP algorithm."""
tic = time.time()
# Do several passes over the network until maximum number of iterations
# has been reached or until the maximum belief difference is smaller
# than tolerance.
max_diff = np.inf
while (self._iters < self.maxiter) and (
max_diff > self.tol) and (time.time() - tic) < self.maxtime:
if self.updates == 'SEQMAX':
if self._iters == 0:
# do the first pass
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
# Maximum-Residual BP [\ref EMK06]
for _ in range(len(self._update_seq)):
# Update the message with the largest residual.
i, _I = self.find_max_residual()
self._update_message(i, _I)
# I->i has been updated, which means that residuals for all
# J->j with J in nb[i]\I and j in nb[J]\i have to be
# updated
for J in self.nbV[i]:
if J.iter != _I:
for j in self.nbF[J.node]:
_J = j.dual
                                if j.node != i:  # j in nb(J) \ {i}: compare variable indices
self._calc_new_message(j.node, _J)
elif self.updates == 'PARALL':
# Parallel updates
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._update_message(i, ii.iter)
else:
# Sequential updates
if self.updates == 'SEQRND':
random.shuffle(self._update_seq)
for e in self._update_seq:
self._calc_new_message(e[0], e[1])
self._update_message(e[0], e[1])
# Calculate new beliefs and compare with old ones
max_diff = -np.inf
for i in range(self.nrVars):
b = self._belief_v(i).clone()
max_diff = max(max_diff,
dist_linf(b.p, self._oldBeliefsV[i].p))
self._oldBeliefsV[i] = b
for ii in range(self.nrFactors):
b = self._belief_f(ii).clone()
max_diff = max(max_diff,
dist_linf(b.p, self._old_beliefs_f[ii].p))
self._old_beliefs_f[ii] = b
self._iters += 1
if max_diff > self._maxdiff:
self._maxdiff = max_diff
return max_diff
def _calc_belief_v(self, i: int) -> Prob:
p = Prob.same_value(self.model.get_variable(i).domain.size(),
0.0 if self.logdomain else 1.0)
for ii in self.nbV[i]:
if self.logdomain:
p += self._edges[i][ii.iter].new_message
else:
p *= self._edges[i][ii.iter].new_message
return p
def _belief_v(self, i: int) -> LDFactor:
p = self._calc_belief_v(i)
if self.logdomain:
p.p = np.exp(p.p - np.max(p.p))
p.normalize()
return LDFactor(self.model, [i], p)
def _belief_f(self, ii) -> LDFactor:
p = self._calc_belief_f(ii)
if self.logdomain:
p.p = np.exp(p.p - np.max(p.p))
p.normalize()
return LDFactor(self.model, self.factors[ii].var_idx, p)
def _calc_belief_f(self, ii: int) -> Prob:
return self._calc_incoming_message_product(ii, False, 0)
def log_z(self) -> float:
"""Calculates logarithm of the partition function."""
ans = 0.0
for i in range(self.nrVars):
ans += (1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy()
for ii in range(self.nrFactors):
ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)
return ans
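    # Note on log_z(): it evaluates the Bethe approximation
    #   log Z  ~=  sum_i (1 - |nb(i)|) * H(b_i)  -  sum_I KL(b_I || f_I)
    # using the variable beliefs b_i and factor beliefs b_I computed above.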
def marg_prob(self) -> np.ndarray:
"""Calculates marginal probabilities."""
max_domain_size = np.max([self._var_size(i)
for i in range(self.nrVars)])
ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)
for var_id in range(self.nrVars):
ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p
return ans
def _var_size(self, var_idx):
return self.model.get_variable(var_idx).domain.size()
def _update_message(self, i: int, _I: int):
if recordSentMessages:
self._sentMessages.append((i, _I))
if self.damping == 0.0:
self._edges[i][_I].message = self._edges[i][_I].new_message.clone()
if self.updates == 'SEQMAX':
self._update_residual(i, _I, 0.0)
else:
d = self.damping
old_msg = self._edges[i][_I].message.p
new_msg = self._edges[i][_I].new_message.p
if self.logdomain:
self._edges[i][_I].message.p = (
(old_msg * d) + (new_msg * (1.0 - d)))
else:
self._edges[i][_I].message.p = (
(old_msg ** d) * (new_msg ** (1.0 - d)))
if self.updates == 'SEQMAX':
new_res = dist_linf(
self._edges[i][_I].new_message,
self._edges[i][_I].message)
self._update_residual(i, _I, new_res)
def _update_residual(self, i, _I, r):
self._edges[i][_I].residual = r
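# Typical call order for this solver (sketch; how the instance is constructed
# depends on the surrounding module): init() -> run() -> marg_prob() / log_z().
# run() returns the final maximum belief difference that was reached.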
| 35.932384 | 79 | 0.561305 | [
"Apache-2.0"
] | InferLO/inferlo | inferlo/generic/libdai_bp.py | 20,194 | Python |
import os
import shutil
def setup_vscode():
def _get_vscode_cmd(port):
executable = "code-server"
if not shutil.which(executable):
raise FileNotFoundError("Can not find code-server in PATH")
# Start vscode in CODE_WORKINGDIR env variable if set
# If not, start in 'current directory', which is $REPO_DIR in mybinder
# but /home/jovyan (or equivalent) in JupyterHubs
working_dir = os.getenv("CODE_WORKINGDIR", ".")
extensions_dir = os.getenv("CODE_EXTENSIONSDIR", None)
extra_extensions_dir = os.getenv("CODE_EXTRA_EXTENSIONSDIR", None)
cmd = [
executable,
"--auth",
"none",
"--allow-http",
"--disable-telemetry",
"--port=" + str(port),
]
if extensions_dir:
cmd += ["--extensions-dir", extensions_dir]
if extra_extensions_dir:
cmd += ["--extra-extensions-dir", extra_extensions_dir]
cmd.append(working_dir)
return cmd
return {
"command": _get_vscode_cmd,
"timeout": 20,
"new_browser_tab": True,
"launcher_entry": {
"title": "VS Code",
"icon_path": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "icons", "vscode.svg"
),
},
}
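# jupyter-server-proxy discovers this configuration through an entry point in
# the "jupyter_serverproxy_servers" group; a sketch of the packaging metadata
# (the entry point name shown is illustrative, the package's own setup files
# are authoritative):
#
#   [options.entry_points]
#   jupyter_serverproxy_servers =
#       vscode = jupyter_vscode_proxy:setup_vscode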
| 28.666667 | 81 | 0.554506 | [
"BSD-3-Clause"
] | sylus/vscode-binder | jupyter_vscode_proxy/__init__.py | 1,376 | Python |
import time
import logging
import os
import openpathsampling as paths
from .path_simulator import PathSimulator, MCStep
from ..ops_logging import initialization_logging
logger = logging.getLogger(__name__)
init_log = logging.getLogger('openpathsampling.initialization')
class PathSampling(PathSimulator):
"""
General path sampling code.
Takes a single move_scheme and generates samples from that, keeping one
per replica after each move.
"""
calc_name = "PathSampling"
def __init__(self, storage, move_scheme=None, sample_set=None,
initialize=True):
"""
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage where all results should be stored in
move_scheme : :class:`openpathsampling.MoveScheme`
the move scheme used for the pathsampling cycle
sample_set : :class:`openpathsampling.SampleSet`
the initial SampleSet for the Simulator
initialize : bool
if `False` the new PathSimulator will continue at the step and
not create a new SampleSet object to cut the connection to previous
steps
"""
super(PathSampling, self).__init__(storage)
self.move_scheme = move_scheme
if move_scheme is not None:
self.root_mover = move_scheme.move_decision_tree()
self._mover = paths.PathSimulatorMover(self.root_mover, self)
else:
self.root_mover = None
self._mover = None
initialization_logging(init_log, self,
['move_scheme', 'sample_set'])
self.live_visualizer = None
self.status_update_frequency = 1
if initialize:
# NOTE: why aren't we using save_initial_step here?
samples = []
if sample_set is not None:
for sample in sample_set:
samples.append(sample.copy_reset())
self.sample_set = paths.SampleSet(samples)
mcstep = MCStep(
simulation=self,
mccycle=self.step,
active=self.sample_set,
change=paths.AcceptedSampleMoveChange(self.sample_set.samples)
)
self._current_step = mcstep
else:
self.sample_set = sample_set
self._current_step = None
self.root = self.sample_set
if self.storage is not None:
template_trajectory = self.sample_set.samples[0].trajectory
self.storage.save(template_trajectory)
self.storage.save([self.move_scheme, self.root_mover,
self._mover])
self.save_current_step()
def to_dict(self):
return {
'root': self.root,
'move_scheme': self.move_scheme,
'root_mover': self.root_mover,
}
@classmethod
def from_dict(cls, dct):
# create empty object
obj = cls(None)
# and correct the content
obj.move_scheme = dct['move_scheme']
obj.root = dct['root']
obj.root_mover = dct['root_mover']
obj._mover = paths.PathSimulatorMover(obj.root_mover, obj)
return obj
@property
def current_step(self):
return self._current_step
def save_current_step(self):
"""
Save the current step to the storage
"""
if self.storage is not None and self._current_step is not None:
try:
# new storage does a stash here, not a save
self.storage.stash(self._current_step)
except AttributeError:
self.storage.steps.save(self._current_step)
@classmethod
def from_step(cls, storage, step, initialize=True):
"""
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage to be used to hold the simulation results
step : :class:`openpathsampling.MCStep`
the step used to fill the initial parameters
initialize : bool
if `False` the new PathSimulator will continue at the given step and
not create a new SampleSet object to cut the connection to previous
steps.
Returns
-------
:class:`openpathsampling.PathSampling`
the new simulator object
"""
obj = cls(
storage,
step.simulation.move_scheme,
step.sample_set,
initialize=initialize
)
return obj
def restart_at_step(self, step, storage=None):
"""
Continue with a loaded pathsampling at a given step
Notes
-----
You can only continue from a step that is compatible in the sense
that it was previously generated from the pathsampling instance.
If you want to switch the move scheme you need to create a new
pathsampling instance. You can do so with the constructor or using
the classmethod `from_step` which simplifies the setup process
Parameters
----------
step : :class:`MCStep`
the step to be continued from. You are always free to chose any step
which can be used to fork a simulation but for analysis you may
only use one path of steps.
storage : :class:`Storage`
If given this will change the storage used to store the generated
steps
"""
if step.simulation is not self:
raise RuntimeWarning(
'Trying to continue from other step. Please use the '
'`.from_step` method to create a new PathSampling object '
'instead.')
if storage is not None:
self.storage = storage
self.step = step.mccycle
self.sample_set = step.active
self.root = step.simulation.root
self._current_step = step
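    # Sketch of continuing a stored simulation (illustrative; how the step is
    # retrieved depends on the storage backend in use):
    #   step = storage.steps[-1]
    #   sampler = PathSampling.from_step(storage, step, initialize=False)
    #   sampler.run(100)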
def run_until(self, n_steps):
# if self.storage is not None:
# if len(self.storage.steps) > 0:
# self.step = len(self.storage.steps)
n_steps_to_run = n_steps - self.step
self.run(n_steps_to_run)
def run_until_decorrelated(self, time_reversal=True):
"""Run until all trajectories are decorrelated.
This runs until all the replicas in ``self.sample_set`` have
decorrelated from their initial conditions. "Decorrelated" here is
meant in the sense commonly used in one-way shooting: this runs
until no configurations from the original trajectories remain.
"""
originals = {s.replica: s.trajectory for s in self.sample_set}
current = self.sample_set
# cache the output stream; force the primary `run` method to not
# output anything
original_output_stream = self.output_stream
self.output_stream = open(os.devnull, 'w')
def n_correlated(sample_set, originals):
return sum([originals[r].is_correlated(sample_set[r],
time_reversal)
for r in originals])
original_output_stream.write("Decorrelating trajectories....\n")
to_decorrelate = n_correlated(self.sample_set, originals)
# walrus in py38!
while to_decorrelate:
out_str = "Step {}: {} of {} trajectories still correlated\n"
paths.tools.refresh_output(
out_str.format(self.step + 1, to_decorrelate, len(originals)),
refresh=False,
output_stream=original_output_stream
)
self.run(1)
to_decorrelate = n_correlated(self.sample_set, originals)
paths.tools.refresh_output(
"Step {}: All trajectories decorrelated!\n".format(self.step+1),
refresh=False,
output_stream=original_output_stream
)
self.output_stream = original_output_stream
def run(self, n_steps):
mcstep = None
# cvs = list()
# n_samples = 0
# if self.storage is not None:
# n_samples = len(self.storage.snapshots)
# cvs = list(self.storage.cvs)
initial_time = time.time()
for nn in range(n_steps):
self.step += 1
logger.info("Beginning MC cycle " + str(self.step))
refresh = self.allow_refresh
if self.step % self.status_update_frequency == 0:
# do we visualize this step?
if self.live_visualizer is not None and mcstep is not None:
# do we visualize at all?
self.live_visualizer.draw_ipynb(mcstep)
refresh = False
elapsed = time.time() - initial_time
if nn > 0:
time_per_step = elapsed / nn
else:
time_per_step = 1.0
paths.tools.refresh_output(
"Working on Monte Carlo cycle number " + str(self.step)
+ "\n" + paths.tools.progress_string(nn, n_steps,
elapsed),
refresh=refresh,
output_stream=self.output_stream
)
time_start = time.time()
movepath = self._mover.move(self.sample_set, step=self.step)
samples = movepath.results
new_sampleset = self.sample_set.apply_samples(samples)
time_elapsed = time.time() - time_start
# TODO: we can save this with the MC steps for timing? The bit
# below works, but is only a temporary hack
setattr(movepath.details, "timing", time_elapsed)
mcstep = MCStep(
simulation=self,
mccycle=self.step,
previous=self.sample_set,
active=new_sampleset,
change=movepath
)
self._current_step = mcstep
self.save_current_step()
# if self.storage is not None:
# # I think this is done automatically when saving snapshots
# # for cv in cvs:
# # n_len = len(self.storage.snapshots)
# # cv(self.storage.snapshots[n_samples:n_len])
# # n_samples = n_len
#
# self.storage.steps.save(mcstep)
if self.step % self.save_frequency == 0:
self.sample_set.sanity_check()
self.sync_storage()
self.sample_set = new_sampleset
self.sync_storage()
if self.live_visualizer is not None and mcstep is not None:
self.live_visualizer.draw_ipynb(mcstep)
paths.tools.refresh_output(
"DONE! Completed " + str(self.step) + " Monte Carlo cycles.\n",
refresh=False,
output_stream=self.output_stream
)
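# Minimal usage sketch (illustrative; building the storage, move scheme and
# initial sample set is handled elsewhere in openpathsampling):
#   sampler = PathSampling(storage, move_scheme=scheme, sample_set=initial_set)
#   sampler.run(n_steps=1000)
#   sampler.sync_storage()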
| 34.326087 | 80 | 0.5774 | [
"MIT"
] | bolhuis/openpathsampling | openpathsampling/pathsimulators/path_sampling.py | 11,053 | Python |
"""
WSGI config for rush00 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rush00.settings")
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 | [
"MIT"
] | 42bbichero/MovieMon | rush00/wsgi.py | 389 | Python |
"""
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
import time
import win32ui
class WptRecord:
def __init__(self):
self.proc = None
self.window = None
self.UWM_PREPARE = (0x8000 + 0)
self.UWM_START = (0x8000 + 1)
self.UWM_STOP = (0x8000 + 2)
self.UWM_PROCESS = (0x8000 + 3)
self.UWM_DONE = (0x8000 + 4)
self.UWM_WAIT_FOR_IDLE = (0x8000 + 5)
def Prepare(self, test):
recorder = test.GetRecorder()
file_base = test.GetFileBase()
if recorder is not None and file_base is not None:
args = [recorder, '--filebase', file_base, '--histograms']
if test.TcpDump():
args.append('--tcpdump')
if test.Video():
args.append('--video')
if test.FullSizeVideo():
args.append('--noresize')
args.extend(['--quality', str(test.GetImageQuality())])
try:
self.proc = subprocess.Popen(args)
except:
logging.debug('Error launching recorder "{0}"'.format(recorder))
# Wait for the recorder window to be available for 30 seconds
start = time.time()
while self.window is None and time.time() - start < 30:
try:
self.window = win32ui.FindWindow("wptRecord", "wptRecord")
except:
time.sleep(0.1)
if self.window is not None:
try:
self.window.SendMessage(self.UWM_PREPARE, 0, 0)
except:
pass
def Start(self):
if self.window is not None:
try:
self.window.PostMessage(self.UWM_START, 0, 0)
except:
pass
def WaitForIdle(self, wait_seconds):
if self.window is not None:
try:
self.window.SendMessage(self.UWM_WAIT_FOR_IDLE, wait_seconds, 0)
except:
pass
def Stop(self):
if self.window is not None:
try:
self.window.SendMessage(self.UWM_STOP, 0, 0)
except:
pass
def Process(self, start_offset):
if self.window is not None:
try:
self.window.SendMessage(self.UWM_PROCESS, start_offset, 0)
except:
pass
def Done(self):
if self.window is not None:
try:
self.window.SendMessage(self.UWM_DONE, 0, 0)
except:
pass
if self.proc is not None:
self.proc.wait() | 28.306122 | 72 | 0.645999 | [
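# Typical call order for a recording session (sketch inferred from the methods
# above): Prepare(test) -> Start() -> optional WaitForIdle(seconds) -> Stop()
# -> Process(start_offset) -> Done().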
"BSD-3-Clause"
] | vgno/webpagetest | agent/webdriver/recorder.py | 2,774 | Python |
import urllib.request
from urllib.parse import urlencode
import json
import pprint
import socket
import struct
#from src import etri2conll
def getETRI_rest(text):
url = "http://143.248.135.20:31235/etri_parser"
contents = {}
contents['text'] = text
contents = json.dumps(contents).encode('utf-8')
u = urllib.request.Request(url, contents)
response = urllib.request.urlopen(u)
result = response.read().decode('utf-8')
result = json.loads(result)
return result
def read_blob(sock, size):
buf = ''
while len(buf) != size:
ret = sock.recv(size - len(buf))
if not ret:
raise Exception("Socket closed")
    buf += ret  # accumulate the received chunk
return buf
def read_long(sock):
size = struct.calcsize("L")
  data = read_blob(sock, size)
  return struct.unpack("L", data)[0]  # unpack returns a tuple; take the single value
def getETRI(text):
host = '143.248.135.60'
port = 33222
ADDR = (host, port)
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
clientSocket.connect(ADDR)
except Exception as e:
return None
try:
clientSocket.sendall(str.encode(text))
#clientSocket.sendall(text.encode('unicode-escape'))
#clientSocket.sendall(text.encode('utf-8'))
buffer = bytearray()
while True:
data = clientSocket.recv(1024)
if not data:
break
buffer.extend(data)
result = json.loads(buffer.decode(encoding='utf-8'))
return result['sentence']
except Exception as e:
return None
def lemmatizer(word, pos):
etri = getETRI(word)
lemmas = etri[0]['WSD']
lemma = word
for i in lemmas:
p = i['type']
if pos == 'v' or pos == 'VV':
if p == 'VV':
lemma = i['text']
break
elif pos == 'n' or pos == 'NN' or pos == 'NNG' or pos == 'NNP' or pos =='NNB' or pos =='NR' or pos == 'NP':
if 'NN' in p:
lemma = i['text']
break
elif pos == 'adj' or pos == 'VA':
if p == 'VA':
lemma = i['text']
break
else:
pass
return lemma
def getPOS(word):
etri = getETRI(word)
pos = etri[0]['WSD'][0]['type']
if pos.startswith('N'):
pos = 'n'
elif pos == 'VV':
pos = 'v'
elif pos == 'VA':
pos = 'adj'
else:
        pos = 'n'  # default to noun
return pos
def getMorpEval(tid, nlp):
result = '_'
for i in nlp[0]['morp_eval']:
if i['id'] == tid:
morp = i['result']
morps = morp.split('+')
pos_sequence = []
for m in morps:
if '/' not in m:
pass
else:
p = m.split('/')[1]
pos_sequence.append(p)
pos = '+'.join(pos_sequence)
result = pos
else:
pass
return result
def getMorhWithWord(tid, nlp):
    result = '_'
    for i in nlp[0]['morp_eval']:
        if i['id'] == tid:
            result = i['result']
            break
    return result
def getETRI_CoNLL2006(text):
nlp = getETRI(text)
result = []
for i in nlp[0]['dependency']:
tid = i['id']
token = i['text']
third = getMorhWithWord(tid, nlp)
pos = getMorpEval(tid, nlp)
five = '_'
arc = i['head']
pt = i['label']
eight = '_'
nine = '_'
line = [tid, token, third, pos, five, arc, pt, eight, nine]
result.append(line)
return result
def getETRI_CoNLL2009(text):
nlp = getETRI(text)
result = []
for i in nlp[0]['dependency']:
tid = i['id']
token = i['text']
third = getMorhWithWord(tid, nlp)
plemma = token
pos = getMorpEval(tid, nlp)
ppos = pos
feat = '_'
pfeat = '_'
head = i['head']
phead = head
deprel = i['label']
pdeprel = i['label']
line = [tid, token, third, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel]
result.append(line)
return result
#def test():
#conll = getETRI_CoNLL2006(text)
#conll = getETRI_CoNLL2009(text)
#pprint.pprint(conll)
#test()
| 27.42236 | 116 | 0.495583 | [
"Apache-2.0"
] | shingiyeon/KoreanCoreferenceResolution | etri.py | 4,415 | Python |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '4.7.3'
| 23 | 59 | 0.717391 | [
"BSD-3-Clause"
] | flowcommerce/integrations-core | gitlab/datadog_checks/gitlab/__about__.py | 138 | Python |
from django.contrib.auth import models as auth_models
from django.core.mail import send_mail
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import gettext_lazy as _
from oscar.core.compat import AUTH_USER_MODEL
class UserManager(auth_models.BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""
Creates and saves a User with the given email and
password.
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = UserManager.normalize_email(email)
user = self.model(
email=email, is_staff=False, is_active=True,
is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, **extra_fields):
u = self.create_user(email, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
class AbstractUser(auth_models.AbstractBaseUser,
auth_models.PermissionsMixin):
"""
An abstract base user suitable for use in Oscar projects.
This is basically a copy of the core AbstractUser model but without a
username field
"""
email = models.EmailField(_('email address'), unique=True)
first_name = models.CharField(
_('First name'), max_length=255, blank=True)
last_name = models.CharField(
_('Last name'), max_length=255, blank=True)
is_staff = models.BooleanField(
_('Staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(
_('Active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'),
default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
class Meta:
abstract = True
verbose_name = _('User')
verbose_name_plural = _('Users')
def clean(self):
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Return the short name for the user.
"""
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Send an email to this user.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
def _migrate_alerts_to_user(self):
"""
Transfer any active alerts linked to a user's email address to the
newly registered user.
"""
ProductAlert = self.alerts.model
alerts = ProductAlert.objects.filter(
email=self.email, status=ProductAlert.ACTIVE)
alerts.update(user=self, key='', email='')
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
# Migrate any "anonymous" product alerts to the registered user
# Ideally, this would be done via a post-save signal. But we can't
# use get_user_model to wire up signals to custom user models
# see Oscar ticket #1127, Django ticket #19218
self._migrate_alerts_to_user()
class AbstractProductAlert(models.Model):
"""
An alert for when a product comes back in stock
"""
product = models.ForeignKey(
'catalogue.Product',
on_delete=models.CASCADE)
    # A user is only required if the notification is created by a
    # registered user; anonymous users will only have an email address
    # attached to the notification.
user = models.ForeignKey(
AUTH_USER_MODEL,
blank=True,
null=True,
on_delete=models.CASCADE,
related_name="alerts",
verbose_name=_('User'))
email = models.EmailField(_("Email"), db_index=True, blank=True)
    # This key is used to confirm and cancel alerts for anonymous users
key = models.CharField(_("Key"), max_length=128, blank=True, db_index=True)
# An alert can have two different statuses for authenticated
# users ``ACTIVE`` and ``CANCELLED`` and anonymous users have an
# additional status ``UNCONFIRMED``. For anonymous users a confirmation
# and unsubscription key are generated when an instance is saved for
# the first time and can be used to confirm and unsubscribe the
# notifications.
UNCONFIRMED, ACTIVE, CANCELLED, CLOSED = (
'Unconfirmed', 'Active', 'Cancelled', 'Closed')
STATUS_CHOICES = (
(UNCONFIRMED, _('Not yet confirmed')),
(ACTIVE, _('Active')),
(CANCELLED, _('Cancelled')),
(CLOSED, _('Closed')),
)
status = models.CharField(_("Status"), max_length=20,
choices=STATUS_CHOICES, default=ACTIVE)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_confirmed = models.DateTimeField(_("Date confirmed"), blank=True,
null=True)
date_cancelled = models.DateTimeField(_("Date cancelled"), blank=True,
null=True)
date_closed = models.DateTimeField(_("Date closed"), blank=True, null=True)
class Meta:
abstract = True
app_label = 'customer'
verbose_name = _('Product alert')
verbose_name_plural = _('Product alerts')
@property
def is_anonymous(self):
return self.user is None
@property
def can_be_confirmed(self):
return self.status == self.UNCONFIRMED
@property
def can_be_cancelled(self):
return self.status in (self.ACTIVE, self.UNCONFIRMED)
@property
def is_cancelled(self):
return self.status == self.CANCELLED
@property
def is_active(self):
return self.status == self.ACTIVE
def confirm(self):
self.status = self.ACTIVE
self.date_confirmed = timezone.now()
self.save()
confirm.alters_data = True
def cancel(self):
self.status = self.CANCELLED
self.date_cancelled = timezone.now()
self.save()
cancel.alters_data = True
def close(self):
self.status = self.CLOSED
self.date_closed = timezone.now()
self.save()
close.alters_data = True
def get_email_address(self):
if self.user:
return self.user.email
else:
return self.email
def save(self, *args, **kwargs):
if not self.id and not self.user:
self.key = self.get_random_key()
self.status = self.UNCONFIRMED
# Ensure date fields get updated when saving from modelform (which just
# calls save, and doesn't call the methods cancel(), confirm() etc).
if self.status == self.CANCELLED and self.date_cancelled is None:
self.date_cancelled = timezone.now()
if not self.user and self.status == self.ACTIVE \
and self.date_confirmed is None:
self.date_confirmed = timezone.now()
if self.status == self.CLOSED and self.date_closed is None:
self.date_closed = timezone.now()
return super().save(*args, **kwargs)
def get_random_key(self):
return get_random_string(length=40, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')
def get_confirm_url(self):
return reverse('customer:alerts-confirm', kwargs={'key': self.key})
def get_cancel_url(self):
return reverse('customer:alerts-cancel-by-key', kwargs={'key': self.key})
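    # Lifecycle note: alerts created without a user are saved as UNCONFIRMED
    # with a random key (see save()); get_confirm_url() and get_cancel_url()
    # embed that key so the corresponding views can confirm or cancel the
    # subscription for the anonymous subscriber.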
| 34.531646 | 97 | 0.633065 | [
"BSD-3-Clause"
] | Abirami15/django-oscar | src/oscar/apps/customer/abstract_models.py | 8,184 | Python |
"""A training script of TD3 on OpenAI Gym Mujoco environments.
This script follows the settings of http://arxiv.org/abs/1802.09477 as much
as possible.
"""
import argparse
import logging
import sys
import gym
import gym.wrappers
import numpy as np
import torch
from torch import nn
import pfrl
from pfrl import experiments, explorers, replay_buffers, utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--outdir",
type=str,
default="results",
help=(
"Directory path to save output files."
" If it does not exist, it will be created."
),
)
parser.add_argument(
"--env",
type=str,
default="Hopper-v2",
help="OpenAI Gym MuJoCo env to perform algorithm on.",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
parser.add_argument(
"--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
)
parser.add_argument(
"--load", type=str, default="", help="Directory to load agent from."
)
parser.add_argument(
"--steps",
type=int,
default=10**6,
help="Total number of timesteps to train the agent.",
)
parser.add_argument(
"--eval-n-runs",
type=int,
default=10,
help="Number of episodes run for each evaluation.",
)
parser.add_argument(
"--eval-interval",
type=int,
default=5000,
help="Interval in timesteps between evaluations.",
)
parser.add_argument(
"--replay-start-size",
type=int,
default=10000,
help="Minimum replay buffer size before " + "performing gradient updates.",
)
parser.add_argument("--batch-size", type=int, default=100, help="Minibatch size")
parser.add_argument(
"--render", action="store_true", help="Render env states in a GUI window."
)
parser.add_argument(
"--demo", action="store_true", help="Just run evaluation, not training."
)
parser.add_argument("--load-pretrained", action="store_true", default=False)
parser.add_argument(
"--pretrained-type", type=str, default="best", choices=["best", "final"]
)
parser.add_argument(
"--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
)
parser.add_argument(
"--log-level", type=int, default=logging.INFO, help="Level of the root logger."
)
args = parser.parse_args()
logging.basicConfig(level=args.log_level)
args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
print("Output files are saved in {}".format(args.outdir))
# Set a random seed used in PFRL
utils.set_random_seed(args.seed)
def make_env(test):
env = gym.make(args.env)
# Unwrap TimeLimit wrapper
assert isinstance(env, gym.wrappers.TimeLimit)
env = env.env
# Use different random seeds for train and test envs
env_seed = 2**32 - 1 - args.seed if test else args.seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = pfrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = pfrl.wrappers.Monitor(env, args.outdir)
if args.render and not test:
env = pfrl.wrappers.Render(env)
return env
env = make_env(test=False)
timestep_limit = env.spec.max_episode_steps
obs_space = env.observation_space
action_space = env.action_space
print("Observation space:", obs_space)
print("Action space:", action_space)
obs_size = obs_space.low.size
action_size = action_space.low.size
policy = nn.Sequential(
nn.Linear(obs_size, 400),
nn.ReLU(),
nn.Linear(400, 300),
nn.ReLU(),
nn.Linear(300, action_size),
nn.Tanh(),
pfrl.policies.DeterministicHead(),
)
policy_optimizer = torch.optim.Adam(policy.parameters())
def make_q_func_with_optimizer():
q_func = nn.Sequential(
pfrl.nn.ConcatObsAndAction(),
nn.Linear(obs_size + action_size, 400),
nn.ReLU(),
nn.Linear(400, 300),
nn.ReLU(),
nn.Linear(300, 1),
)
q_func_optimizer = torch.optim.Adam(q_func.parameters())
return q_func, q_func_optimizer
q_func1, q_func1_optimizer = make_q_func_with_optimizer()
q_func2, q_func2_optimizer = make_q_func_with_optimizer()
rbuf = replay_buffers.ReplayBuffer(10**6)
explorer = explorers.AdditiveGaussian(
scale=0.1, low=action_space.low, high=action_space.high
)
def burnin_action_func():
"""Select random actions until model is updated one or more times."""
return np.random.uniform(action_space.low, action_space.high).astype(np.float32)
# Hyperparameters in http://arxiv.org/abs/1802.09477
agent = pfrl.agents.TD3(
policy,
q_func1,
q_func2,
policy_optimizer,
q_func1_optimizer,
q_func2_optimizer,
rbuf,
gamma=0.99,
soft_update_tau=5e-3,
explorer=explorer,
replay_start_size=args.replay_start_size,
gpu=args.gpu,
minibatch_size=args.batch_size,
burnin_action_func=burnin_action_func,
)
if len(args.load) > 0 or args.load_pretrained:
# either load or load_pretrained must be false
assert not len(args.load) > 0 or not args.load_pretrained
if len(args.load) > 0:
agent.load(args.load)
else:
agent.load(
utils.download_model("TD3", args.env, model_type=args.pretrained_type)[
0
]
)
eval_env = make_env(test=True)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit,
)
print(
"n_runs: {} mean: {} median: {} stdev {}".format(
args.eval_n_runs,
eval_stats["mean"],
eval_stats["median"],
eval_stats["stdev"],
)
)
import json
import os
with open(os.path.join(args.outdir, "demo_scores.json"), "w") as f:
json.dump(eval_stats, f)
else:
experiments.train_agent_with_evaluation(
agent=agent,
env=env,
steps=args.steps,
eval_env=eval_env,
eval_n_steps=None,
eval_n_episodes=args.eval_n_runs,
eval_interval=args.eval_interval,
outdir=args.outdir,
train_max_episode_len=timestep_limit,
)
if __name__ == "__main__":
main()
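# Example invocation (flags defined above; values are illustrative):
#   python train_td3.py --env Hopper-v2 --gpu -1 --steps 1000000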
| 30.400881 | 88 | 0.60542 | [
"MIT"
] | toslunar/pfrl | examples/mujoco/reproduction/td3/train_td3.py | 6,901 | Python |
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from artapp.models import Artist, Art
from django.template.defaultfilters import slugify
class RegistrationForm(ModelForm):
username = forms.CharField(label=(u'User Name'))
email = forms.EmailField(label=(u'Email Address'))
password = forms.CharField(label=(u'Password'), widget=forms.PasswordInput(render_value=False))
password1 = forms.CharField(label=(u'Verify Password'), widget=forms.PasswordInput(render_value=False))
class Meta:
model = Artist
exclude = ('user',)
def clean_username(self):
username = self.cleaned_data['username']
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError('That username is already taken, please select another')
def clean(self):
if self.cleaned_data['password'] != self.cleaned_data['password1']:
raise forms.ValidationError("The passwords did not match. Please try again")
myslug=slugify(self.cleaned_data['username'])
try:
Artist.objects.get(slug=myslug)
except Artist.DoesNotExist:
return self.cleaned_data
#return self.cleaned_data
raise forms.ValidationError("That username is already taken, please select another")
class LoginForm(forms.Form):
username = forms.CharField(label=(u'User Name'))
password = forms.CharField(label=(u'Password'), widget=forms.PasswordInput(render_value=False))
#next_url = forms.CharField(label=(u'next url'), widget=forms.HiddenInput()) #hidden
class VoteForm(forms.Form):
VOTECHOICES = [('upvote', 'downvote')]
vote = forms.MultipleChoiceField(required=False, widget=forms.CheckboxSelectMultiple, choices=VOTECHOICES)
artslug = forms.CharField()
class SubmitArtForm(ModelForm):
class Meta:
model = Art
fields = ['title', 'drawing']
#exclude = ('slug','created_at', 'likes',)
def clean(self):
myslug=slugify(self.cleaned_data['title'])
try:
Art.objects.get(slug=myslug)
except Art.DoesNotExist:
return self.cleaned_data
raise forms.ValidationError("Ascii Art with that slug already exists, please pick another title")
"""
def clean_title(self):
title = self.cleaned_data['title']
try:
Art.objects.get(title=title)
except Art.DoesNotExist:
return title
raise forms.ValidationError("Ascii Art with that title already exists, please pick another title")
""" | 34.385714 | 107 | 0.750312 | [
"Apache-2.0"
] | khenness/asciiartdatabase | artapp/forms.py | 2,407 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.discovery_v1alpha1_api import DiscoveryV1alpha1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestDiscoveryV1alpha1Api(unittest.TestCase):
"""DiscoveryV1alpha1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.discovery_v1alpha1_api.DiscoveryV1alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_create_namespaced_endpoint_slice(self):
"""Test case for create_namespaced_endpoint_slice
"""
pass
def test_delete_collection_namespaced_endpoint_slice(self):
"""Test case for delete_collection_namespaced_endpoint_slice
"""
pass
def test_delete_namespaced_endpoint_slice(self):
"""Test case for delete_namespaced_endpoint_slice
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_endpoint_slice_for_all_namespaces(self):
"""Test case for list_endpoint_slice_for_all_namespaces
"""
pass
def test_list_namespaced_endpoint_slice(self):
"""Test case for list_namespaced_endpoint_slice
"""
pass
def test_patch_namespaced_endpoint_slice(self):
"""Test case for patch_namespaced_endpoint_slice
"""
pass
def test_read_namespaced_endpoint_slice(self):
"""Test case for read_namespaced_endpoint_slice
"""
pass
def test_replace_namespaced_endpoint_slice(self):
"""Test case for replace_namespaced_endpoint_slice
"""
pass
if __name__ == '__main__':
unittest.main()
| 23.034091 | 124 | 0.692649 | [
"Apache-2.0"
] | ACXLM/python | kubernetes/test/test_discovery_v1alpha1_api.py | 2,027 | Python |
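# Reads an integer from stdin and prints "par" (even) or "impar" (odd).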
numero = int(input("Digite um número: "))
if ((numero % 2) == 0):
print("par")
else:
print("impar") | 21.8 | 41 | 0.550459 | [
"MIT"
] | valdirsjr/learning.data | python/paridade.py | 110 | Python |
# -*- coding:utf-8 -*-
"""
Sections organize movement between pages in an experiment.
.. moduleauthor:: Johannes Brachem <[email protected]>, Paul Wiemann <[email protected]>
"""
import time
import typing as t
from ._core import ExpMember
from ._helper import inherit_kwargs
from .page import _PageCore, _DefaultFinalPage
from .exceptions import AlfredError, ValidationError, AbortMove
from random import shuffle
@inherit_kwargs
class Section(ExpMember):
"""
The basic section, allows forward and backward movements.
Args:
shuffle (bool): If True, the order of all members in this
section will be randomized every time the section is entered.
Shuffling is not recursive, it only affects direct members
of a section. That means, if there are subsections,
their position in the parent section will be randomized,
but the members within the subsection will not be affected.
Defaults to False. Can be defined as a class attribute.
{kwargs}
Examples:
Using a basic section and filling it with a page in instance
style::
import alfred3 as al
exp = al.Experiment()
exp += al.Section(name="main")
exp.main += al.Page(title="Demo", name="DemoPage")
Using a basic section and filling it with a page in class style::
import alfred3 as al
exp = al.Experiment()
@exp.member
class Main(al.Section): pass
@exp.member(of_section="Main")
class DemoPage(al.Page):
title = "Demo"
"""
#: Controls, whether participants can move forward from pages in
#: this section.
allow_forward: bool = True
#: Controls, whether participants can move backward from *and to*
#: pages in this section.
allow_backward: bool = True
#: Controls, whether participants can jump *from* pages in this
#: section
allow_jumpfrom: bool = True
#: Controls, whether participants can jump *to* pages in this
#: section.
allow_jumpto: bool = True
#: If *True*, pages in this section will be closed on leaving
close_pages_on_leave: bool = False
#: If True, the members of this section will be randomized every
#: time the section is entered.
shuffle: bool = False
def __init__(self, title: str = None, name: str = None, shuffle: bool = None, **kwargs):
super().__init__(title=title, name=name, **kwargs)
self._members = {}
self._should_be_shown = True
#: bool: Boolean flag, indicating whether the experiment session
#: is currently operating within this section
self.active: bool = False
if shuffle is not None:
self.shuffle = shuffle
self._catch_page_hooks()
def __contains__(self, member):
try:
return member.name in self.all_members or member.name in self.all_elements
except AttributeError:
return member in self.all_members or member in self.all_elements
def __iadd__(self, other):
self.append(other)
return self
def __getitem__(self, name):
return self.all_members[name]
def __setitem__(self, name, value):
if "members" in self.__dict__ and name in self.members:
if self.members[name] is value:
return
else:
raise AlfredError(f"{name} is a member of {self}. The name is reserved.")
else:
raise KeyError(
(
f"{name} not found in members of {self}. "
"You can use square bracket syntax only for changing existing pages, not for adding "
"new ones. Please use the augmented assignment operator '+=' for this purpose."
)
)
def __getattr__(self, name):
try:
return self.all_members[name]
except KeyError:
raise AttributeError(f"{self} has no attribute '{name}'.")
def __setattr__(self, name, value):
if "members" in self.__dict__ and name in self.members:
if self.members[name] is value:
return
else:
raise AlfredError(f"{name} is a member of {self}. The name is reserved.")
else:
self.__dict__[name] = value
def _shuffle_members(self):
"""Non-recursive shuffling of this section's members."""
members = list(self.members.items())
shuffle(members)
self._members = dict(members)
@property
def members(self) -> dict:
"""
Dictionary of the section's members.
"""
return self._members
@members.setter
def members(self, value):
self._members = value
@property
def empty(self) -> bool:
"""
True, if there are no pages or subsections in this section.
"""
return False if self.members else True
@property
def all_updated_members(self) -> dict:
"""
Returns a dict of all members that already have exp access.
Operates recursively, i.e. pages and subsections of subsections
are included.
"""
return {name: m for name, m in self.all_members.items() if m.exp is not None}
@property
def all_updated_pages(self) -> dict:
"""
Returns a dict of all pages in the current section that have
access to the experiment session. Operates recursively, i.e.
pages in subsections are included.
"""
pages = {}
for name, member in self.all_updated_members.items():
if isinstance(member, _PageCore):
pages[name] = member
return pages
@property
def all_updated_elements(self) -> dict:
"""
Returns a dict of all elements in the current section that have
access to the experiment session. Operates recursively, i.e.
elements on pages in subsections are included.
"""
elements = {}
for page in self.all_updated_pages.values():
elements.update(page.updated_elements)
return elements
@property
def all_members(self) -> dict:
"""
Returns a flat dict of all members in this section and its subsections.
The order is preserved, i.e. members are listed in this dict in
the same order in which they appear in the experiment.
"""
members = {}
for name, member in self.members.items():
members[name] = member
if isinstance(member, Section):
members.update(member.all_members)
return members
@property
def last_member(self):
"""
Returns the last member of the current section. Can be a page
or a subsection.
"""
try:
return list(self.members.values())[-1]
except IndexError:
return None
@property
def first_member(self):
"""
Returns the first member of the current section. Can be a page
or a subsection.
"""
try:
return list(self.members.values())[0]
except IndexError:
return None
@property
def first_page(self):
"""
Returns the first page inside the current section.
"""
try:
return list(self.all_pages.values())[0]
except IndexError:
return None
@property
def last_page(self):
"""
Returns the last page inside the current section.
"""
try:
return list(self.all_pages.values())[-1]
except IndexError:
return None
@property
def all_subsections(self) -> dict:
"""
Returns a flat dict of all sections in this section and its subsections.
The order is preserved, i.e. sections are listed in this dict in
the same order in which they appear in the experiment.
"""
subsections = {}
for name, member in self.members.items():
if isinstance(member, Section):
subsections[name] = member
subsections.update(member.all_subsections)
return subsections
@property
def subsections(self) -> dict:
"""
Returns a flat dict of all subsections in this section.
Subsections in subsections are not included. Use
:attr:`.all_subsections` for that purpose.
"""
return {name: sec for name, sec in self.members.items() if isinstance(sec, Section)}
@property
def all_pages(self) -> dict:
"""
Returns a flat dict of all pages in this section and its subsections.
The order is preserved, i.e. pages are listed in this dict in
the same order in which they appear in the experiment.
"""
pages = {}
for name, member in self.members.items():
if isinstance(member, _PageCore):
pages[name] = member
elif isinstance(member, Section):
pages.update(member.all_pages)
return pages
@property
def all_closed_pages(self) -> dict:
"""
Returns a flat dict of all *closed* pages in this section and its
subsections.
The order is preserved, i.e. pages are listed in this dict in
the same order in which they appear in the experiment.
"""
return {name: page for name, page in self.all_pages.items() if page.is_closed}
@property
def all_shown_pages(self) -> dict:
"""
Returns a flat dict of all pages in this section and its
subsections that have already been shown.
The order is preserved, i.e. pages are listed in this dict in
the same order in which they appear in the experiment.
"""
return {name: page for name, page in self.all_pages.items() if page.has_been_shown}
@property
def pages(self) -> dict:
"""
Returns a flat dict of all pages in this section.
Pages in subsections are not included. Use :attr:`.all_pages`
for that purpose.
"""
return {name: page for name, page in self.members.items() if isinstance(page, _PageCore)}
@property
def all_elements(self) -> dict:
"""
Returns a flat dict of all elements in this section.
Recursive: Includes elements from pages in this section and all
its subsections.
"""
elements = {}
for page in self.all_pages.values():
elements.update(page.elements)
return elements
@property
def all_input_elements(self) -> dict:
"""
Returns a flat dict of all input elements in this section.
Recursive: Includes elements from pages in this section and all
its subsections.
"""
elements = {}
for page in self.all_pages.values():
elements.update(page.input_elements)
return elements
@property
def all_shown_input_elements(self) -> dict:
"""
Returns a flat dict of all shown input elements in this section.
Recursive: Includes elements from pages in this section and all
its subsections.
"""
elements = {}
for page in self.all_pages.values():
if page.has_been_shown:
elements.update(page.input_elements)
return elements
@property
def data(self) -> dict:
"""
Returns a dictionary of user input data for all pages in this
section and its subsections.
"""
data = {}
for page in self.all_pages.values():
data.update(page.data)
return data
@property
def unlinked_data(self) -> dict:
"""
Returns a dictionary of user input data for all *unlinked* pages
in this section and its subsections.
"""
data = {}
for page in self.all_pages.values():
data.update(page.unlinked_data)
return data
def added_to_experiment(self, exp):
# docstring inherited
super().added_to_experiment(exp)
self.log.add_queue_logger(self, __name__)
self.on_exp_access()
self._update_members_recursively()
def _update_members(self):
for member in self.members.values():
if not member.experiment:
member.added_to_experiment(self.exp)
if not member.section:
member.added_to_section(self)
def _update_members_recursively(self):
self._update_members()
for member in self.members.values():
member._update_members_recursively()
def _generate_unset_tags_in_subtree(self):
for i, member in enumerate(self.members.values(), start=1):
if member.tag is None:
member.tag = str(i)
if isinstance(member, Section):
member._generate_unset_tags_in_subtree()
def append(self, *items):
"""
Appends a variable number of pages or subsections to the section.
In practice, it is recommended to use the augmented assignment
operator ``+=`` instead in order to add pages or subsections.
"""
for item in items:
if item.name in dir(self):
raise ValueError(f"Name of {item} is also an attribute of {self}.")
if item.name in self.members:
raise AlfredError(f"Name '{item.name}' is already present in the experiment.")
item.added_to_section(self)
self.members[item.name] = item
if self.experiment is not None:
item.added_to_experiment(self.experiment)
item._update_members_recursively()
if not item.tag:
item.tag = str(len(self.members) + 1)
def on_exp_access(self):
"""
Executed *once*, when the :class:`.ExperimentSession` becomes
available to the section.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
"""
pass
def on_enter(self):
"""
Executed *every time* this section is entered.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
"""
pass
def on_leave(self):
"""
Executed *every time* this section is left.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
"""
pass
def on_resume(self):
"""
Executed *every time* the experiment resumes from a direct subsection to this section.
        Resuming takes place when a child section is left and the
        next page is a direct child of the parent section. Then the
        parent section becomes the primary current section again: it
        resumes its status.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
"""
pass
def on_hand_over(self):
"""
Executed *every time* a direct subsection of this section is entered.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
"""
pass
def _enter(self):
self.active = True
self.log.debug(f"Entering {self}.")
self.on_enter()
self._update_members()
if self.shuffle:
self._shuffle_members()
if isinstance(self.first_member, Section) and not self.first_member.active:
self._hand_over()
self.first_member._enter()
def _leave(self):
self.log.debug(f"Leaving {self}.")
self.on_leave()
try:
self.validate_on_leave()
except ValidationError:
raise AbortMove
if self.close_pages_on_leave:
for page in self.pages.values():
page.close()
if self is self.parent.last_member:
self.parent._leave()
def _resume(self):
self.log.debug(f"Resuming to {self}.")
self.on_resume()
def _hand_over(self):
self.log.debug(f"{self} handing over to child section.")
self.on_hand_over()
def _forward(self):
pass
def _backward(self):
pass
def _jumpfrom(self):
pass
def _jumpto(self):
pass
def _move(self, direction, from_page, to_page):
"""
Conducts a section's part of moving in an alfred experiment.
Raises:
ValidationError: If validation of the current page fails.
"""
if direction == "forward":
self._forward()
elif direction == "backward":
self._backward()
elif direction == "jumpto":
self._jumpto()
elif direction.startswith("jump"):
self._jumpfrom()
if to_page.section.name in self.all_members:
self._hand_over()
elif not to_page.section is self:
self._leave()
if direction.startswith("jump"):
to_page.section._jumpto()
if self.name in to_page.section.all_members:
to_page.section._resume()
elif not to_page.section is self:
to_page.section._enter()
if self.exp.aborted:
raise AbortMove
def _validate(self, direction: str):
if direction == "forward":
self.validate_on_forward()
elif direction == "backward":
self.validate_on_backward()
elif direction.startswith("jump"):
self.validate_on_jump()
def validate_on_leave(self):
"""
Validates pages and their input elements within the section.
Can be overloaded to change the validating behavior of a derived
section.
Notes:
Validation is conducted only for pages that are direct
children of this section. Pages in subsections are not
validated.
Raises:
ValidationError: If validation fails.
"""
for page in self.pages.values():
if not page._validate():
raise ValidationError()
if not page._validate_elements():
msg = self.exp.config.get("hints", "no_input_section_validation")
msg = msg.format(n=len(self.pages))
self.exp.post_message(msg, level="danger")
raise ValidationError()
def validate_on_move(self):
"""
Validates the current page and its elements.
Can be overloaded to change the validating behavior of a derived
section. By default, this validation method is called on each
foward and backward move, as well as when participants jump
*from* the section, but not when they jump *to* the section.
Raises:
ValidationError: If validation fails.
See Also:
Use the individual methods :meth:`.validate_on_forward`,
:meth:`.validate_on_backward`, :meth:`.validate_on_jumpfrom`,
and :meth:`.validate_on_jumpto` if you want even more fine-
grained control over validation behavior.
.. versionchanged:: 2.1.0
Switched the default order of validation. Now, elements are
validated first, then the page itself is validated.
"""
if not self.exp.current_page._validate_elements():
raise ValidationError()
if not self.exp.current_page._validate():
raise ValidationError()
def validate_on_forward(self):
"""
Called for validation on each forward move.
Overload this method to customize validation behavior.
See Also:
By default, sections use :meth:`.validate_on_move` for
validation on all kinds of moves.
"""
self.validate_on_move()
def validate_on_backward(self):
"""
Called for validation on each backward move.
Overload this method to customize validation behavior.
See Also:
By default, sections use :meth:`.validate_on_move` for
validation on all kinds of moves.
"""
self.validate_on_move()
def validate_on_jump(self):
"""
Called for validation on jumping from this section.
Overload this method to customize validation behavior.
See Also:
By default, sections use :meth:`.validate_on_move` for
validation on all kinds of moves.
"""
self.validate_on_move()
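    # Overloading sketch (illustrative, not part of the original module): a
    # derived section can tighten forward validation, for example:
    #
    #   class StrictSection(Section):
    #       def validate_on_forward(self):
    #           super().validate_on_forward()
    #           if some_condition_fails:  # hypothetical check
    #               raise ValidationError()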
def _catch_page_hooks(self):
"""
Raises errors, if users define page hooks on a section.
"""
explanation = " This does not work. Remove the method to continue."
try:
self.on_first_show()
msg = f"You tried to use the page-only hook method 'on_first_show' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
try:
self.on_each_show()
msg = f"You tried to use the page-only hook method 'on_each_show' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
try:
self.on_each_hide()
msg = f"You tried to use the page-only hook method 'on_each_hide' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
try:
self.on_first_hide()
msg = f"You tried to use the page-only hook method 'on_first_hide' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
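# Illustrative sketch (not part of the original module): the validate_on_*
# hooks documented above can be overridden individually. The class name
# SkipBackwardValidationSection is an assumption chosen for this example.
class SkipBackwardValidationSection(Section):
    """A section that validates the current page only on forward moves and jumps."""
    def validate_on_backward(self):
        # deliberately skip validation when participants move backward
        pass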
@inherit_kwargs
class RevisitSection(Section):
"""
A section that disables all input elements upon moving forward (and
    jumping) from it, but still allows participants to revisit previous
pages.
Args:
{kwargs}
Examples:
Using a RevisitSection and filling it with a page in instance
style::
import alfred3 as al
exp = al.Experiment()
exp += al.RevisitSection(name="main")
exp.main += al.Page(title="Demo", name="DemoPage")
        Using a RevisitSection and filling it with a page in class style::
import alfred3 as al
exp = al.Experiment()
@exp.member
class Main(al.RevisitSection): pass
@exp.member(of_section="Main")
class DemoPage(al.Page):
title = "Demo"
"""
allow_forward: bool = True
allow_backward: bool = True
allow_jumpfrom: bool = True
allow_jumpto: bool = True
def _forward(self):
super()._forward()
self.exp.movement_manager.current_page.close()
def _jumpfrom(self):
super()._jumpfrom()
self.exp.movement_manager.current_page.close()
@inherit_kwargs
class HideOnForwardSection(Section):
"""
A section that hides pages once they have been submitted.
Args:
{kwargs}
This section enables the following behaviors:
1. Once a participant has entered their data on a page and submitted
it by pressing "forward", the participant cannot revisit that
page – it is hidden.
2. The participant can, however, go back to pages in previous sections.
For instance, they may revisit the instructions page. A press on
"forward" will then ignore the hidden pages and take the participant
back to the most recent page.
.. versionadded:: 2.3.0
Examples:
You can test the section's behavior with this example::
import alfred3 as al
exp = al.Experiment()
main = al.Section(name="main")
main += al.Page(name="first")
hide = al.HideOnForwardSection(name="hide")
hide += al.Page(name="second")
            hide += al.Page(name="third")
exp += main
exp += hide
"""
allow_forward: bool = True
allow_backward: bool = True
allow_jumpfrom: bool = True
allow_jumpto: bool = True
def _forward(self):
super()._forward()
self.exp.movement_manager.current_page.close()
self.exp.movement_manager.current_page.should_be_shown = False
def _jumpfrom(self):
super()._jumpfrom()
self.exp.movement_manager.current_page.close()
self.exp.movement_manager.current_page.should_be_shown = False
@inherit_kwargs
class ForwardOnlySection(RevisitSection):
"""
A section that allows only a single step forward; no jumping and no
backwards steps.
Args:
{kwargs}
Examples:
        Using a ForwardOnlySection and filling it with a page in instance
style::
import alfred3 as al
exp = al.Experiment()
exp += al.ForwardOnlySection(name="main")
exp.main += al.Page(title="Demo", name="DemoPage")
        Using a ForwardOnlySection and filling it with a page in class style::
import alfred3 as al
exp = al.Experiment()
@exp.member
class Main(al.ForwardOnlySection): pass
@exp.member(of_section="Main")
class DemoPage(al.Page):
title = "Demo"
"""
allow_forward: bool = True
allow_backward: bool = False
allow_jumpfrom: bool = False
allow_jumpto: bool = False
@inherit_kwargs
class _FinishedSection(Section):
"""
A section that finishes the experiment on entering it.
Args:
{kwargs}
"""
allow_forward: bool = False
allow_backward: bool = False
allow_jumpfrom: bool = False
allow_jumpto: bool = True
def _enter(self):
super()._enter()
self.experiment.finish()
class _AbortSection(Section):
allow_forward: bool = False
allow_backward: bool = False
allow_jumpfrom: bool = False
allow_jumpto: bool = True
@inherit_kwargs
class _RootSection(Section):
"""
A section that serves as parent for all other sections in the
experiment.
Args:
{kwargs}
Defines the '_content' section and the '__finished_section' as its
only direct children.
"""
name = "_root"
def __init__(self, experiment):
super().__init__()
self._experiment = experiment
self.log.add_queue_logger(self, __name__)
self.content = Section(name="_content")
self.admin_section = None
self.finished_section = _FinishedSection(name="__finished_section")
self.finished_section += _DefaultFinalPage(name="_final_page")
self._all_pages_list = None
self._all_page_names = None
def append_root_sections(self):
if self.exp.admin_mode:
from .admin import _AdminSection
self += _AdminSection(name="_content")
self += self.finished_section
else:
self += self.content
self += self.finished_section
@property
def all_page_names(self):
"""
Improvised caching mechanism for the list of all page names.
Danger: The caching is not compatible with sections that shuffle their members.
The shuffling does not affect this list, which is unexpected
behavior in most cases.
For this reason, the caching is currently deactivated.
"""
return list(self.all_pages.keys())
# if not self._all_page_names:
# self._all_page_names = list(self.all_pages.keys())
# elif not len(self._all_page_names) == len(self.all_pages):
# self._all_page_names = list(self.all_pages.keys())
# return self._all_page_names
@property
def all_pages_list(self):
"""
Improvised caching mechanism for the list of all pages.
Danger: The caching is not compatible with sections that shuffle their members.
The shuffling does not affect this list, which is unexpected
behavior in most cases.
For this reason, the caching is currently deactivated.
"""
return list(self.all_pages.values())
# if not self._all_pages_list:
# self._all_pages_list = list(self.all_pages.values())
# elif not len(self._all_pages_list) == len(self.all_pages):
# self._all_pages_list = list(self.all_pages.values())
# return self._all_pages_list
@property
def final_page(self):
return self.finished_section._final_page
| 31.623327 | 105 | 0.607231 | ["MIT"] | ctreffe/alfred | src/alfred3/section.py | 33,080 | Python |
import bottle, logging, argparse, json, sys, tempfile
from beaker.middleware import SessionMiddleware
from . import database, processing, routing
logger = logging.getLogger("snuggle.api.server")
def load_config(filename):
try:
f = open(filename)
return json.load(f)
except Exception as e:
raise Exception("Could not load configuration file: %s" % e)
def application(config):
#configure db
db = database.DB(config)
#configure processors
processing.configure(db, config)
#construct app
return SessionMiddleware(
bottle.default_app(),
{
'session.type': "memory",
'session.key': "s_id",
'session.secret': config['sessions']['secret'],
'session.timeout': 60*30, #30 minutes
'session.auto': True
}
)
def main():
parser = argparse.ArgumentParser(
		description='Loads a JSON API for snuggle'
)
parser.add_argument(
'config',
type=load_config,
help='the path to the configuration file'
)
parser.add_argument(
'-p', "--profile",
action="store_true",
default=False,
help='run in profile mode?'
)
parser.add_argument(
'-d', "--debug",
action="store_true",
default=False,
help='print debugging output?'
)
args = parser.parse_args()
LOGGING_STREAM = sys.stderr
logging.basicConfig(
level=logging.DEBUG if args.debug else logging.INFO,
stream=LOGGING_STREAM,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%b-%d %H:%M:%S'
)
if args.profile:
try:
import pstats
import cProfile as profile
except ImportError:
import profile
f = tempfile.NamedTemporaryFile()
profile.runctx("run(args.config)", globals(), locals(), f.name)
p = pstats.Stats(f.name)
p.strip_dirs().sort_stats("time").print_stats(10)
else:
run(args.config)
def run(config):
logger.info("Configuring system.")
app = application(config)
logger.info("Running server.")
bottle.run(
app=app,
host=config['server']['host'],
port=config['server']['port'],
server='cherrypy'
)
if __name__ == "__main__":
logging.debug("calling main()")
	main()
| 21.56383 | 65 | 0.690183 | ["MIT"] | wikimedia/analytics-snuggle | snuggle/api/server.py | 2,027 | Python |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from registration.backends.simple.views import RegistrationView
from plan.models import Plan, UserProfile
from plan.forms import UserForm, RegistrationFormDHHD
def index(request):
# Query the database for a list of all the plans currently stored.
# Order the plans by the number of likes in descending order.
# Retrieve the top 5 only - or all if less than 5.
# Place the list in the context_dict dictionary which will be passed to the template engine.
#popular_plan_list = Plan.objects.order_by('-views')[:3]
popular_plan_list = reformat_plan(Plan.objects.filter(active=True).order_by('-views')[:3])
# Most recent plans
recent_plan_list = reformat_plan(Plan.objects.filter(active=True).order_by('-pub_date')[:3])
context_dict = {'popular_plans': popular_plan_list, 'recent_plans': recent_plan_list}
return render(request, 'index.html', context_dict)
def about(request):
return render(request, 'about.html', {})
def reformat_plan(plan_list):
for plan in plan_list:
# Re-format the width and depth from floats to FF'-II" format.
plan.width = str(int(plan.width)) + "'-" + str(round((plan.width - int(plan.width))*12)) + '"'
plan.depth = str(int(plan.depth)) + "'-" + str(round((plan.depth - int(plan.depth))*12)) + '"'
# Re-format bedrooms from float to int
plan.bed = int(plan.bed)
# Re-format bathrooms to int if the number of bathrooms is whole
if not plan.bath%1:
plan.bath = int(plan.bath)
return plan_list
@login_required
def myplans(request):
user_name = request.user.get_username()
user = User.objects.get(username=user_name)
profile = UserProfile.objects.get(user=user)
plan_list = profile.fav_plans.all()
return render(request, 'myplans.html', {'plan_list': plan_list})
class MyRegistrationView(RegistrationView):
form_class = RegistrationFormDHHD
def get_success_url(self, request, user):
return '/'
def register(self, request, **cleaned_data):
new_user = RegistrationView.register(self, request, **cleaned_data)
UserProfile(user=new_user).save()
return new_user
| 37.622951 | 96 | 0.754684 | ["Apache-2.0"] | smhilde/dhhd_project | dhhd/dhhd/views.py | 2,295 | Python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore.common import dtype as mstype
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")
class ForwardNet(nn.Cell):
def __init__(self, max_cycles=10):
super(ForwardNet, self).__init__()
self.max_cycles = max_cycles
self.i = Tensor(np.array(0), mstype.int32)
self.zero = Tensor(np.array(0), mstype.int32)
self.weight = Parameter(Tensor(np.array(0), mstype.int32))
def construct(self, x, y):
i = self.i
out = self.zero
while i < self.max_cycles:
self.weight = i
if out <= 20:
self.weight = i
out = x * y + out
i = i + 1
if out >= 30:
self.weight = out
out = out - 30
return out, self.weight
class BackwardNet(nn.Cell):
def __init__(self, net):
super(BackwardNet, self).__init__(auto_prefix=False)
self.forward_net = net
self.grad = C.GradOperation(get_all=True)
def construct(self, *inputs):
grads = self.grad(self.forward_net)(*inputs)
return grads
def test_forward():
x = Tensor(np.array(1), mstype.int32)
y = Tensor(np.array(3), mstype.int32)
# Graph Mode
context.set_context(mode=context.GRAPH_MODE)
graph_forward_net = ForwardNet(max_cycles=10)
graph_mode_out = graph_forward_net(x, y)
# Pynative Mode
context.set_context(mode=context.PYNATIVE_MODE)
pynative_forward_net = ForwardNet(max_cycles=10)
pynative_mode_out = pynative_forward_net(x, y)
assert graph_mode_out == pynative_mode_out
def test_backward():
x = Tensor(np.array(1), mstype.int32)
y = Tensor(np.array(3), mstype.int32)
# Graph Mode
context.set_context(mode=context.GRAPH_MODE)
graph_forward_net = ForwardNet(max_cycles=10)
graph_backward_net = BackwardNet(graph_forward_net)
graph_mode_grads = graph_backward_net(x, y)
# Pynative Mode
context.set_context(mode=context.PYNATIVE_MODE)
pynative_forward_net = ForwardNet(max_cycles=10)
pynative_backward_net = BackwardNet(pynative_forward_net)
pynative_mode_grads = pynative_backward_net(x, y)
assert graph_mode_grads == pynative_mode_grads
| 35.181818 | 87 | 0.67991 | ["Apache-2.0"] | 233-puchi/mindspore | tests/st/control/inner/test_111_if_after_if_in_while.py | 3,096 | Python |
import re
from datetime import date, datetime, time, timedelta, timezone
ISO_8601_DATETIME_REGEX = re.compile(
r"^(\d{4})-?([0-1]\d)-?([0-3]\d)[t\s]?([0-2]\d:?[0-5]\d:?[0-5]\d|23:59:60|235960)(\.\d+)?(z|[+-]\d{2}:\d{2})?$",
re.I,
)
ISO_8601_DATE_REGEX = re.compile(r"^(\d{4})-?([0-1]\d)-?([0-3]\d)$", re.I)
ISO_8601_TIME_REGEX = re.compile(
r"^(?P<time>[0-2]\d:?[0-5]\d:?[0-5]\d|23:59:60|235960)(?P<microseconds>\.\d+)?(?P<tzpart>z|[+-]\d{2}:\d{2})?$",
re.I,
)
ISO_8601_TIME_DURATION_REGEX = re.compile(
r"^(?P<sign>-?)P(?=\d|T\d)(?:(?P<weeks>\d+)W)?(?:(?P<days>\d+)D)?(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+(?:\.\d+)?)S)?)?$",
re.I,
)
def parse_iso_datetime(value: str) -> datetime:
if not ISO_8601_DATETIME_REGEX.match(value):
raise ValueError(f"passed value {value!r} is not valid ISO-8601 datetime.")
date_parts = ISO_8601_DATETIME_REGEX.findall(value)[0]
time_part = date_parts[3]
if ":" in time_part:
time_part = time_part.split(":")
else:
time_part = list(map("".join, zip(*[iter(time_part)] * 2)))
microseconds_part = date_parts[4]
microseconds = microseconds_part.lstrip(".") if microseconds_part else "0"
time_part.append(microseconds)
if date_parts[5] and date_parts[5].lower() != "z":
sign = 1 if date_parts[5][0] == "+" else -1
hours, minutes = date_parts[5][1:].split(":")
offset = timezone(timedelta(hours=int(hours) * sign, minutes=int(minutes) * sign))
elif date_parts[5] and date_parts[5].lower() == "z":
offset = timezone.utc
else:
offset = None # type: ignore
return datetime(
year=int(date_parts[0]),
month=int(date_parts[1]),
day=int(date_parts[2]),
hour=int(time_part[0]),
minute=int(time_part[1]),
second=int(time_part[2]),
microsecond=int(time_part[3]),
tzinfo=offset,
)
def parse_iso_date(value: str) -> date:
if not ISO_8601_DATE_REGEX.match(value):
raise ValueError("Passed value is not valid ISO-8601 date.")
date_parts = ISO_8601_DATE_REGEX.findall(value)[0]
return date(year=int(date_parts[0]), month=int(date_parts[1]), day=int(date_parts[2]))
def parse_iso_duration(value: str) -> timedelta:
"""
Parses duration string according to ISO 8601 and returns timedelta representation (it excludes year and month)
http://www.datypic.com/sc/xsd/t-xsd_dayTimeDuration.html
:param str value:
:return dict:
"""
if not ISO_8601_TIME_DURATION_REGEX.match(value):
raise ValueError(f"Passed value {value} is not valid ISO-8601 duration.")
duration = ISO_8601_TIME_DURATION_REGEX.fullmatch(value)
sign = -1 if duration.group("sign") else 1 # type: ignore
kwargs = {
"weeks": int(duration.group("weeks")) * sign if duration.group("weeks") else 0, # type: ignore
"days": int(duration.group("days")) * sign if duration.group("days") else 0, # type: ignore
"hours": int(duration.group("hours")) * sign if duration.group("hours") else 0, # type: ignore
"minutes": int(duration.group("minutes")) * sign # type: ignore
if duration.group("minutes") # type: ignore
else 0,
"seconds": float(duration.group("seconds")) * sign # type: ignore
if duration.group("seconds") # type: ignore
else 0,
}
return timedelta(**kwargs) # type: ignore
def parse_iso_time(value: str) -> time:
if not ISO_8601_TIME_REGEX.match(value):
raise ValueError(f"Passed value {value} is not valid ISO-8601 time.")
time_parts = ISO_8601_TIME_REGEX.fullmatch(value)
hour_parts = time_parts.group("time") # type: ignore
if ":" in hour_parts:
hour_parts = hour_parts.split(":")
else:
hour_parts = list(map("".join, zip(*[iter(hour_parts)] * 2)))
microseconds = time_parts.group("microseconds") # type: ignore
if microseconds is not None:
microseconds = int(microseconds[1:])
else:
microseconds = 0
tz_part = time_parts.group("tzpart") # type: ignore
if tz_part and tz_part.lower() != "z":
sign = 1 if tz_part[0] == "+" else -1
hours, minutes = tz_part[1:].split(":")
offset = timezone(timedelta(hours=int(hours) * sign, minutes=int(minutes) * sign))
elif tz_part and tz_part.lower() == "z":
offset = timezone.utc
else:
offset = None # type: ignore
return time(
hour=int(hour_parts[0]),
minute=int(hour_parts[1]),
second=int(hour_parts[2]),
microsecond=microseconds,
tzinfo=offset,
)
def timedelta_to_iso_duration(value: timedelta) -> str:
seconds = value.total_seconds()
sign = "-" if seconds < 0 else ""
seconds = abs(seconds)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
weeks, days, hours, minutes = map(int, (weeks, days, hours, minutes))
seconds = round(seconds, 6)
iso_8601 = sign + "P"
iso_8601_date = ""
iso_8601_time = ""
if weeks:
iso_8601_date += f"{weeks}W"
if days:
iso_8601_date += f"{days}D"
if hours:
iso_8601_time += f"{hours}H"
if minutes:
iso_8601_time += f"{minutes}M"
if seconds:
if seconds.is_integer():
iso_8601_time += f"{int(seconds)}S"
else:
iso_8601_time += f"{seconds}S"
return f"{iso_8601}{iso_8601_date}" + (f"T{iso_8601_time}" if iso_8601_time else "")
__all__ = [
"parse_iso_datetime",
"parse_iso_date",
"parse_iso_duration",
"parse_iso_time",
"timedelta_to_iso_duration",
]
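# Usage sketch (added for illustration, not part of the original module).
# The example values are assumptions; the round trip relies only on the
# functions defined above.
if __name__ == "__main__":
    duration = parse_iso_duration("P1DT2H30M")
    print(duration)                              # 1 day, 2:30:00
    print(timedelta_to_iso_duration(duration))   # P1DT2H30M
    print(parse_iso_datetime("2021-06-28T12:30:00+02:00"))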
| 33.567251 | 152 | 0.612369 | ["MIT"] | kodemore/chili | chili/iso_datetime.py | 5,740 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('report', '0003_auto_20151015_1921'),
]
operations = [
migrations.AlterField(
model_name='report',
name='client',
field=models.CharField(default=None, max_length=40, null=True, verbose_name='Zg\u0142aszaj\u0105cy', blank=True),
),
migrations.AlterField(
model_name='report',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Utworzone'),
),
migrations.AlterField(
model_name='report',
name='description',
field=models.TextField(verbose_name='Opis'),
),
migrations.AlterField(
model_name='report',
name='resolved_at',
field=models.DateTimeField(null=True, verbose_name='Rozpatrzone', blank=True),
),
migrations.AlterField(
model_name='report',
name='resolved_by',
field=models.ForeignKey(verbose_name='Rozpatrzone przez', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),
),
]
| 32.146341 | 148 | 0.611533 | ["BSD-3-Clause"] | MatPerowicz/pola-backend | report/migrations/0004_auto_20151031_0721.py | 1,318 | Python |
# Import the required packages
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import sklearn.linear_model
import matplotlib
# Display plots inline and change default figure size
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
# Generate a dataset and plot it
np.random.seed(0)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
# Train a logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates the contour plot below.
def plot_decision_boundary(pred_func):
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole gid
Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
# Plot the decision boundary
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")
plt.show()
num_examples = len(X) # training set size
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01 # learning rate for gradient descent
reg_lambda = 0.01 # regularization strength
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation to calculate our predictions
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Calculating the loss
corect_logprobs = -np.log(probs[range(num_examples), y])
data_loss = np.sum(corect_logprobs)
# Add regulatization term to loss (optional)
data_loss += reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1. / num_examples * data_loss
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):
# Initialize the parameters to random values. We need to learn these.
np.random.seed(0)
W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
b1 = np.zeros((1, nn_hdim))
W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
b2 = np.zeros((1, nn_output_dim))
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Backpropagation
delta3 = probs
delta3[range(num_examples), y] -= 1
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
dW1 = np.dot(X.T, delta2)
db1 = np.sum(delta2, axis=0)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 += reg_lambda * W2
dW1 += reg_lambda * W1
# Gradient descent parameter update
W1 += -epsilon * dW1
b1 += -epsilon * db1
W2 += -epsilon * dW2
b2 += -epsilon * db2
# Assign new parameters to the model
model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print("Loss after iteration %i: %f" % (i, calculate_loss(model)))
return model
# Helper function to predict an output (0 or 1)
def predict(model, x):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
plt.show()
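# A possible extension (illustrative, not part of the original script): compare
# decision boundaries for several hidden layer sizes. The sizes listed in
# hidden_layer_dimensions are assumptions chosen for demonstration.
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(4, 2, i + 1)
    plt.title('Hidden Layer size %d' % nn_hdim)
    model = build_model(nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x))
plt.show()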
| 35.106383 | 100 | 0.655152 | ["MIT"] | vuhe/LearnPython | artificial_intelligence/experiment_7.py | 5,000 | Python |
# Distributed DL Client runs on the master node
# @author: Trung Phan
# @created date: 2021-06-28
# @last modified date:
# @note:
from ddlf.cluster import *
async def main():
cluster = Cluster()
await cluster.connect()
await cluster.show_data()
await cluster.clean()
await cluster.show_data()
await cluster.close()
asyncio.run(main())
| 20.111111 | 47 | 0.685083 | ["Apache-2.0"] | trungphansg/DDLF | examples/task-clean.py | 362 | Python |
from django import forms
from django.db.models.loading import get_model
from django.utils.translation import ugettext_lazy as _
from oscar.forms import widgets
Voucher = get_model('voucher', 'Voucher')
Benefit = get_model('offer', 'Benefit')
Range = get_model('offer', 'Range')
class VoucherForm(forms.Form):
"""
A specialised form for creating a voucher and offer
model.
"""
name = forms.CharField(label=_("Name"))
code = forms.CharField(label=_("Code"))
start_date = forms.DateField(
label=_("Start date"), widget=widgets.DatePickerInput())
end_date = forms.DateField(
label=_("End date"), widget=widgets.DatePickerInput())
usage = forms.ChoiceField(choices=Voucher.USAGE_CHOICES, label=_("Usage"))
benefit_range = forms.ModelChoiceField(
label=_('Which products get a discount?'),
queryset=Range.objects.all(),
)
type_choices = (
(Benefit.PERCENTAGE, _('Percentage off of products in range')),
(Benefit.FIXED, _('Fixed amount off of products in range')),
)
benefit_type = forms.ChoiceField(
choices=type_choices,
label=_('Discount type'),
)
benefit_value = forms.DecimalField(
label=_('Discount value'))
def __init__(self, voucher=None, *args, **kwargs):
self.voucher = voucher
super(VoucherForm, self).__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data['name']
try:
voucher = Voucher.objects.get(name=name)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The name '%s' is already in"
" use") % name)
return name
def clean_code(self):
code = self.cleaned_data['code'].strip().upper()
if not code:
raise forms.ValidationError(_("Please enter a voucher code"))
try:
voucher = Voucher.objects.get(code=code)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The code '%s' is already in"
" use") % code)
return code
def clean(self):
cleaned_data = super(VoucherForm, self).clean()
start_date = cleaned_data.get('start_date', None)
end_date = cleaned_data.get('end_date', None)
if start_date and end_date and end_date < start_date:
raise forms.ValidationError(_("The start date must be before the"
" end date"))
return cleaned_data
class VoucherSearchForm(forms.Form):
name = forms.CharField(required=False, label=_("Name"))
code = forms.CharField(required=False, label=_("Code"))
is_active = forms.BooleanField(required=False, label=_("Is Active?"))
def clean_code(self):
return self.cleaned_data['code'].upper()
| 35.102273 | 78 | 0.609582 | ["BSD-3-Clause"] | Idematica/django-oscar | oscar/apps/dashboard/vouchers/forms.py | 3,089 | Python |
import collections
import itertools
import string
import unittest
# noinspection PyUnusedLocal
# skus = unicode string
def getItemPrices():
itemPrices = {}
itemPrices['A'] = {1:50, 3:130, 5:200}
itemPrices['B'] = {1:30, 2:45}
itemPrices['C'] = {1:20}
itemPrices['D'] = {1:15}
itemPrices['E'] = {1:40}
itemPrices['F'] = {1:10}
itemPrices['G'] = {1:20}
itemPrices['H'] = {1:10, 5:45, 10:80}
itemPrices['I'] = {1:35}
itemPrices['J'] = {1:60}
itemPrices['K'] = {1:70, 2:120}
itemPrices['L'] = {1:90}
itemPrices['M'] = {1:15}
itemPrices['N'] = {1:40}
itemPrices['O'] = {1:10}
itemPrices['P'] = {1:50, 5:200}
itemPrices['Q'] = {1:30, 3:80}
itemPrices['R'] = {1:50}
itemPrices['S'] = {1:20}
itemPrices['T'] = {1:20}
itemPrices['U'] = {1:40}
itemPrices['V'] = {1:50, 2:90, 3:130}
itemPrices['W'] = {1:20}
itemPrices['X'] = {1:17}
itemPrices['Y'] = {1:20}
itemPrices['Z'] = {1:21}
return itemPrices
def getGroupItemPrices():
itemPrices = getItemPrices()
groupPrices = {}
for combination in itertools.combinations_with_replacement("STXYZ", 3):
regularCost = sum(itemPrices[item][1] for item in combination)
saving = regularCost - 45
# FIXME: Using 0 to denote saving from using group
groupPrices["".join(combination)] = {1:45, 0:saving}
return groupPrices
def getItemFreebies():
itemFreebies = {}
itemFreebies['E'] = {2:'B'}
itemFreebies['F'] = {3:'F'}
itemFreebies['N'] = {3:'M'}
itemFreebies['R'] = {3:'Q'}
itemFreebies['U'] = {4:'U'}
return itemFreebies
def generateItemCounts(skus):
itemCounts = collections.defaultdict(int)
for item in skus:
invalidItem = item not in string.ascii_uppercase
if invalidItem:
raise ValueError
else:
itemCounts[item] += 1
return itemCounts
def removeFreeItems(itemCounts):
itemFreebies = getItemFreebies()
freeItems = {}
for item, count in itemCounts.items():
freebies = itemFreebies.get(item, {})
for itemsNeededForFreebe, freeItem in freebies.items():
freebeeCount = int(count/itemsNeededForFreebe)
freeItems[freeItem] = freebeeCount
for freeItem, count in freeItems.items():
itemCounts[freeItem] = max(0, itemCounts[freeItem] - count)
def applyItemGroupings(itemCounts):
groupItemPrices = getGroupItemPrices()
groupsByLargestSaving = sorted(list(groupItemPrices.keys()), key = lambda group: groupItemPrices[group][0], reverse=True)
for group in groupsByLargestSaving:
while True:
groupCounts = collections.defaultdict(int)
for groupItem in group:
if itemCounts[groupItem]:
groupCounts[groupItem] += 1
itemCounts[groupItem] -= 1
else:
for item, count in groupCounts.items():
itemCounts[item] += count
break
else:
itemCounts[group] += 1
continue
break
def calculateItemCosts(itemCounts):
itemPrices = getItemPrices()
itemPrices.update(getGroupItemPrices())
totalCost = 0
for item, count in itemCounts.items():
prices = itemPrices[item]
for n in reversed(list(prices.keys())):
if n == 0:
continue
price = prices[n]
offerCount = int(count/n)
totalCost += offerCount * price
count -= offerCount * n
return totalCost
def checkout(skus):
try:
itemCounts = generateItemCounts(skus)
except ValueError:
return -1
removeFreeItems(itemCounts)
applyItemGroupings(itemCounts)
return calculateItemCosts(itemCounts)
class TestCheckOut(unittest.TestCase):
def test_invalidSKUItemReturnsMinus1(self):
self.assertEqual(checkout("AB32"), -1)
self.assertEqual(checkout("ABc"), -1)
self.assertEqual(checkout("AB!"), -1)
def test_emptySKUCostsNothing(self):
self.assertEqual(checkout(""), 0)
def test_singlePrices(self):
self.assertEqual(checkout('A'), 50)
self.assertEqual(checkout('B'), 30)
self.assertEqual(checkout('C'), 20)
self.assertEqual(checkout('D'), 15)
self.assertEqual(checkout('E'), 40)
self.assertEqual(checkout('F'), 10)
self.assertEqual(checkout('G'), 20)
self.assertEqual(checkout('H'), 10)
self.assertEqual(checkout('I'), 35)
self.assertEqual(checkout('J'), 60)
self.assertEqual(checkout('K'), 70)
self.assertEqual(checkout('L'), 90)
self.assertEqual(checkout('M'), 15)
self.assertEqual(checkout('N'), 40)
self.assertEqual(checkout('O'), 10)
self.assertEqual(checkout('P'), 50)
self.assertEqual(checkout('Q'), 30)
self.assertEqual(checkout('R'), 50)
self.assertEqual(checkout('S'), 20)
self.assertEqual(checkout('T'), 20)
self.assertEqual(checkout('U'), 40)
self.assertEqual(checkout('V'), 50)
self.assertEqual(checkout('W'), 20)
self.assertEqual(checkout('X'), 17)
self.assertEqual(checkout('Y'), 20)
self.assertEqual(checkout('Z'), 21)
def test_multipleItemOffers(self):
self.assertEqual(checkout('AAA'), 130)
self.assertEqual(checkout('AAAAA'), 200)
self.assertEqual(checkout('BB'), 45)
self.assertEqual(checkout("HHHHH"), 45)
self.assertEqual(checkout("HHHHHHHHHH"), 80)
self.assertEqual(checkout("KK"), 120)
self.assertEqual(checkout("PPPPP"), 200)
self.assertEqual(checkout("QQQ"), 80)
self.assertEqual(checkout("VV"), 90)
self.assertEqual(checkout("VVV"), 130)
def test_multipleNonOfferItemsAreMultiplesOfSingleItemPrice(self):
self.assertEqual(checkout('CC'), checkout('C') * 2)
self.assertEqual(checkout('DD'), checkout('D') * 2)
def test_mixedSingleItemsAreSumOfIndividualPrices(self):
self.assertEqual(checkout("BADC"), checkout("A") + checkout("B") + checkout("C") + checkout("D"))
def test_multipleSpecialOffserAreMultipleOfSpecialOfferPrice(self):
self.assertEqual(checkout("AAAAAAAAAA"), checkout("AAAAA") * 2)
self.assertEqual(checkout("BBBB"), checkout("BB") * 2)
def test_mixedOffersAreSumOfSpecialAndIndividualPrices(self):
self.assertEqual(checkout("AAAAAAA"), checkout("AAAAA") + checkout("AA"))
self.assertEqual(checkout("BBB"), checkout("BB") + checkout("B"))
def test_mixedSpecialOffersAreSumsOfOffers(self):
self.assertEqual(checkout("ABABA"), checkout("BB") + checkout("AAA"))
def test_mixedItemsAreSumed(self):
self.assertEqual(checkout("ABCCABADDA"), checkout("BB") + checkout("AAA") + checkout("A") + checkout("CC") + checkout("DD"))
def test_specialOfferCombinationsMinimisePrice(self):
self.assertEqual(checkout("AAAAAAAAA"), checkout("AAAAA") + checkout("AAA") + checkout("A"))
def test_2ESpecialOfferGivesOneFreeB(self):
self.assertEqual(checkout("EE"), checkout("E") + checkout("E"))
self.assertEqual(checkout("EEB"), checkout("E") + checkout("E"))
self.assertEqual(checkout("EEBEE"), checkout("E") * 4)
self.assertEqual(checkout("EEBEEB"), checkout("E") * 4)
self.assertEqual(checkout("EEBEEBB"), checkout("E") * 4 + checkout("B"))
def test_3FSpecialOfferGivesOneFreeF(self):
self.assertEqual(checkout("FFF"), checkout("F") * 2)
self.assertEqual(checkout("FFFFF"), checkout("F") * 4)
self.assertEqual(checkout("FFFFFF"), checkout("F") * 4)
def test_3NSpecialOfferGivesOneFreeM(self):
self.assertEqual(checkout("NNNM"), checkout("NNN"))
def test_3RSpecialOfferGivesOneFreeQ(self):
self.assertEqual(checkout("RRRQ"), checkout("RRR"))
def test_4USpecialOfferGivesOneFreeU(self):
self.assertEqual(checkout("UUUU"), checkout("UUU"))
def test_groupDiscount(self):
for combination in itertools.combinations_with_replacement("STXYZ", 3):
self.assertEqual(checkout("".join(combination)), 45)
def test_maximumGroupDiscount(self):
self.assertEqual(checkout("STXYZ"), 45 + checkout("XY"))
self.assertEqual(checkout("SSSX"), 45 + checkout("X"))
def test_multipleGroupDiscountsAreGiven(self):
self.assertEqual(checkout("STXYZTYX"), 90 + checkout("XX"))
if __name__ == '__main__':
unittest.main()
| 33.486381 | 132 | 0.622821 | ["Apache-2.0"] | DPNT-Sourcecode/CHK-dykz01 | lib/solutions/CHK/checkout_solution.py | 8,606 | Python |
from .backend import BackendTest
from .field_db_conversion import FieldDBConversionTest
from .field_options import FieldOptionsTest
from .filter import FilterTest
from .order import OrderTest
from .not_return_sets import NonReturnSetsTest
from .decimals import DecimalTest
| 34.125 | 54 | 0.871795 | ["BSD-3-Clause"] | aprefontaine/TMScheduler | djangoappengine/tests/__init__.py | 273 | Python |
import pdb
if __name__ == "__main__":
with open("21input.txt") as f:
data = f.read().split("\n")
data.pop(-1)
print(data)
all_food = []
for food in data:
allergens = False
ings = []
alle = []
for ingredient in food.split(" "):
if "(contains" == ingredient:
allergens = True
elif allergens:
alle.append(ingredient[:-1])
else:
ings.append(ingredient)
all_food.append([ings, alle])
print(all_food)
alg_dico = {}
assigned = {}
for food in all_food:
for alg in food[1]:
if alg in alg_dico:
alg_dico[alg] &= set(food[0])
else:
alg_dico[alg] = set(food[0])
solved = []
unsolved = []
for alg, val in alg_dico.items():
if (len(val) == 1):
solved.append(alg)
else:
unsolved.append(alg)
for alg in alg_dico.keys():
alg_dico[alg] = list(alg_dico[alg])
print(alg_dico, solved, unsolved)
while (len(unsolved)>0) :
for alg in solved:
val = alg_dico[alg][0]
for algx in unsolved:
if val in (alg_dico[algx]):
alg_dico[algx].remove(val)
if len(alg_dico[algx]) == 1:
solved.append(algx)
unsolved.remove(algx)
used_ing = list(alg_dico.values())
used_ing = [x[0] for x in used_ing]
# for alg, val in alg_dico.items():
# if (len(val) == 1):
# for valx in alg_dico.values():
# if val in valx and valx != val:
# valx.remove(val)
print(used_ing)
cpt = 0
for ings, algs in all_food:
for ing in ings:
if ing not in used_ing:
cpt+=1
print(cpt)
algs = list(alg_dico.keys())
algs.sort()
used_ing_sorted = []
for alg in algs:
used_ing_sorted.append(alg_dico[alg][0])
print(used_ing_sorted, ",".join(used_ing_sorted))
| 26.820513 | 53 | 0.494264 | ["MIT"] | dxkkxn/advent-of-code | 2020/21day.py | 2,092 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def update_upload_to_ia_field(apps, schema_editor):
Link = apps.get_model('perma', 'Link')
Link.objects.filter(uploaded_to_internet_archive=True).update(internet_archive_upload_status='completed')
Link.objects.filter(uploaded_to_internet_archive=False).update(internet_archive_upload_status='not_started')
HistoricalLink = apps.get_model('perma','HistoricalLink')
HistoricalLink.objects.filter(uploaded_to_internet_archive=True).update(internet_archive_upload_status='completed')
HistoricalLink.objects.filter(uploaded_to_internet_archive=False).update(internet_archive_upload_status='not_started')
def reverse_update_upload_to_ia_field(apps, schema_editor):
Link = apps.get_model('perma', 'Link')
Link.objects.filter(internet_archive_upload_status='completed').update(uploaded_to_internet_archive=True)
Link.objects.filter(
Q(internet_archive_upload_status='deleted') | Q(internet_archive_upload_status='not_started') | Q(internet_archive_upload_status='failed') | Q(internet_archive_upload_status='failed_permanently')
).update(uploaded_to_internet_archive=False)
HistoricalLink = apps.get_model('perma', 'HistoricalLink')
HistoricalLink.objects.filter(internet_archive_upload_status='completed').update(uploaded_to_internet_archive=True)
HistoricalLink.objects.filter(
Q(internet_archive_upload_status='deleted') | Q(internet_archive_upload_status='not_started') | Q(internet_archive_upload_status='failed') | Q(internet_archive_upload_status='failed_permanently')
).update(uploaded_to_internet_archive=False)
class Migration(migrations.Migration):
dependencies = [
('perma', '0005_auto_20160513_2006'),
]
operations = [
migrations.AddField(
model_name='historicallink',
name='internet_archive_upload_status',
field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
),
migrations.AddField(
model_name='link',
name='internet_archive_upload_status',
field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
),
migrations.RunPython(update_upload_to_ia_field, reverse_code=reverse_update_upload_to_ia_field),
migrations.RemoveField(
model_name='historicallink',
name='uploaded_to_internet_archive',
),
migrations.RemoveField(
model_name='link',
name='uploaded_to_internet_archive',
),
]
| 50.627119 | 246 | 0.73619 | ["Unlicense", "MIT"] | peterk/perma | perma_web/perma/migrations/0006_add_internetarchive_status.py | 2,987 | Python |
import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
class Freestream:
"""
Freestream conditions.
"""
def __init__(self, u_inf=1.0, alpha=0.0):
"""
Sets the freestream speed and angle (in degrees).
Parameters
----------
u_inf: float, optional
Freestream speed;
default: 1.0.
alpha: float, optional
Angle of attack in degrees;
default 0.0.
"""
self.u_inf = u_inf
self.alpha = alpha*numpy.pi/180.0 # degrees to radians
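# Usage sketch (added for illustration, not part of the original module).
# The freestream speed and angle below are assumptions.
if __name__ == "__main__":
    freestream = Freestream(u_inf=1.0, alpha=5.0)
    print('u_inf =', freestream.u_inf)
    print('alpha =', freestream.alpha, 'rad')   # 5 degrees converted to radians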
| 23.25 | 62 | 0.574501 | ["MIT"] | Sparsh-Sharma/SteaPy | steapy/freestream.py | 651 | Python |
from decimal import Decimal
from pytest import raises
from typedpy import Structure, Positive, DecimalNumber
class PositiveDecimal(DecimalNumber, Positive): pass
class Foo(Structure):
_required = []
a = DecimalNumber
b = DecimalNumber(maximum=100, multiplesOf=5)
c = PositiveDecimal
def test_not_decimal_value():
with raises(ValueError) as excinfo:
Foo(a='1 1 ')
assert "a: [<class 'decimal.ConversionSyntax'>]" in str(excinfo.value)
def test_not_decimal_type():
with raises(TypeError) as excinfo:
Foo(a={})
assert "a: " in str(excinfo.value)
def test_basic_operation():
f = Foo(a=Decimal('3.14'))
assert f.a - 1 == Decimal('2.14')
def test_too_large():
with raises(ValueError) as excinfo:
Foo(b=1000)
assert "b: Got 1000; Expected a maximum of 100" in str(excinfo.value)
def test_too_large2():
f = Foo(b=90)
with raises(ValueError) as excinfo:
f.b += 20
assert "b: Got 110; Expected a maximum of 100" in str(excinfo.value)
def test_not_multiple():
with raises(ValueError) as excinfo:
Foo(b=93)
assert "b: Got 93; Expected a a multiple of 5" in str(excinfo.value)
def test_positivedecimal_err():
with raises(ValueError) as excinfo:
Foo(c=Decimal(-5))
assert "c: Got -5; Expected a positive number" in str(excinfo.value)
def test_positivedecimal_valid():
f = Foo(c=Decimal(5))
assert int(f.c) == 5
| 23.031746 | 74 | 0.669883 | ["MIT"] | reesmanp/typedpy | tests/test_decimal.py | 1,451 | Python |
#!/usr/bin/python
"""
IO Module
"""
import sys
import logging
from time import time as _time
import threading
import cPickle
from bisect import bisect_left
from collections import deque
from bacpypes.debugging import bacpypes_debugging, DebugContents, ModuleLogger
from bacpypes.core import deferred
from bacpypes.comm import PDU, Client, bind
from bacpypes.task import FunctionTask
from bacpypes.udp import UDPDirector
# some debugging
_debug = 0
_log = ModuleLogger(globals())
_commlog = logging.getLogger(__name__ + "._commlog")
#
# IOCB States
#
IDLE = 0 # has not been submitted
PENDING = 1 # queued, waiting for processing
ACTIVE = 2 # being processed
COMPLETED = 3 # finished
ABORTED = 4 # finished in a bad way
_stateNames = {
IDLE: 'IDLE',
PENDING: 'PENDING',
ACTIVE: 'ACTIVE',
COMPLETED: 'COMPLETED',
ABORTED: 'ABORTED',
}
#
# IOQController States
#
CTRL_IDLE = 0 # nothing happening
CTRL_ACTIVE = 1 # working on an iocb
CTRL_WAITING = 2    # waiting between iocb requests (throttled)
_ctrlStateNames = {
CTRL_IDLE: 'IDLE',
CTRL_ACTIVE: 'ACTIVE',
CTRL_WAITING: 'WAITING',
}
# dictionary of local controllers
_local_controllers = {}
_proxy_server = None
# special abort error
TimeoutError = RuntimeError("timeout")
#
# _strftime
#
def _strftime():
return "%011.6f" % (_time() % 3600,)
#
# IOCB - Input Output Control Block
#
_identNext = 1
_identLock = threading.Lock()
@bacpypes_debugging
class IOCB(DebugContents):
_debug_contents = \
( 'args', 'kwargs'
, 'ioState', 'ioResponse-', 'ioError'
, 'ioController', 'ioServerRef', 'ioControllerRef', 'ioClientID', 'ioClientAddr'
, 'ioComplete', 'ioCallback+', 'ioQueue', 'ioPriority', 'ioTimeout'
)
def __init__(self, *args, **kwargs):
global _identNext
# lock the identity sequence number
_identLock.acquire()
# generate a unique identity for this block
ioID = _identNext
_identNext += 1
# release the lock
_identLock.release()
# debugging postponed until ID acquired
if _debug: IOCB._debug("__init__(%d) %r %r", ioID, args, kwargs)
# save the ID
self.ioID = ioID
# save the request parameters
self.args = args
self.kwargs = kwargs
# start with an idle request
self.ioState = IDLE
self.ioResponse = None
self.ioError = None
# blocks are bound to a controller
self.ioController = None
# blocks could reference a local or remote server
self.ioServerRef = None
self.ioControllerRef = None
self.ioClientID = None
self.ioClientAddr = None
# each block gets a completion event
self.ioComplete = threading.Event()
self.ioComplete.clear()
# applications can set a callback functions
self.ioCallback = []
# request is not currently queued
self.ioQueue = None
# extract the priority if it was given
self.ioPriority = kwargs.get('_priority', 0)
if '_priority' in kwargs:
if _debug: IOCB._debug(" - ioPriority: %r", self.ioPriority)
del kwargs['_priority']
# request has no timeout
self.ioTimeout = None
def add_callback(self, fn, *args, **kwargs):
"""Pass a function to be called when IO is complete."""
if _debug: IOCB._debug("add_callback(%d) %r %r %r", self.ioID, fn, args, kwargs)
# store it
self.ioCallback.append((fn, args, kwargs))
# already complete?
if self.ioComplete.isSet():
self.trigger()
def wait(self, *args):
"""Wait for the completion event to be set."""
if _debug: IOCB._debug("wait(%d) %r", self.ioID, args)
# waiting from a non-daemon thread could be trouble
self.ioComplete.wait(*args)
def trigger(self):
"""Set the event and make the callback."""
if _debug: IOCB._debug("trigger(%d)", self.ioID)
# if it's queued, remove it from its queue
if self.ioQueue:
if _debug: IOCB._debug(" - dequeue")
self.ioQueue.remove(self)
# if there's a timer, cancel it
if self.ioTimeout:
if _debug: IOCB._debug(" - cancel timeout")
self.ioTimeout.suspend_task()
# set the completion event
self.ioComplete.set()
# make the callback
for fn, args, kwargs in self.ioCallback:
if _debug: IOCB._debug(" - callback fn: %r %r %r", fn, args, kwargs)
fn(self, *args, **kwargs)
def complete(self, msg):
"""Called to complete a transaction, usually when process_io has
shipped the IOCB off to some other thread or function."""
if _debug: IOCB._debug("complete(%d) %r", self.ioID, msg)
if self.ioController:
# pass to controller
self.ioController.complete_io(self, msg)
else:
# just fill in the data
self.ioState = COMPLETED
self.ioResponse = msg
self.trigger()
def abort(self, err):
"""Called by a client to abort a transaction."""
if _debug: IOCB._debug("abort(%d) %r", self.ioID, err)
if self.ioController:
# pass to controller
self.ioController.abort_io(self, err)
elif self.ioState < COMPLETED:
# just fill in the data
self.ioState = ABORTED
self.ioError = err
self.trigger()
def set_timeout(self, delay, err=TimeoutError):
"""Called to set a transaction timer."""
if _debug: IOCB._debug("set_timeout(%d) %r err=%r", self.ioID, delay, err)
# if one has already been created, cancel it
if self.ioTimeout:
self.ioTimeout.suspend_task()
else:
self.ioTimeout = FunctionTask(self.abort, err)
# (re)schedule it
self.ioTimeout.install_task(_time() + delay)
def __repr__(self):
xid = id(self)
if (xid < 0): xid += (1 << 32)
sname = self.__module__ + '.' + self.__class__.__name__
desc = "(%d)" % (self.ioID,)
return '<' + sname + desc + ' instance at 0x%08x' % (xid,) + '>'
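# Illustrative sketch (not part of the original module): a stand-alone IOCB can
# be completed without a controller. The function and callback names are
# assumptions; nothing below runs at import time.
def _example_iocb_usage():
    def print_result(iocb):
        # runs when the block is triggered
        print("completed with %r" % (iocb.ioResponse,))
    iocb = IOCB(1, 2, 3)
    iocb.add_callback(print_result)
    iocb.complete("done")   # no controller bound, so the data is filled in directly
    iocb.wait()             # returns immediately, the completion event is already set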
#
# IOChainMixIn
#
@bacpypes_debugging
class IOChainMixIn(DebugContents):
_debugContents = ( 'ioChain++', )
def __init__(self, iocb):
if _debug: IOChainMixIn._debug("__init__ %r", iocb)
# save a refence back to the iocb
self.ioChain = iocb
# set the callback to follow the chain
self.add_callback(self.chain_callback)
# if we're not chained, there's no notification to do
if not self.ioChain:
return
# this object becomes its controller
iocb.ioController = self
# consider the parent active
iocb.ioState = ACTIVE
try:
if _debug: IOChainMixIn._debug(" - encoding")
# let the derived class set the args and kwargs
            self.encode()
if _debug: IOChainMixIn._debug(" - encode complete")
except:
# extract the error and abort the request
err = sys.exc_info()[1]
if _debug: IOChainMixIn._exception(" - encoding exception: %r", err)
iocb.abort(err)
def chain_callback(self, iocb):
"""Callback when this iocb completes."""
if _debug: IOChainMixIn._debug("chain_callback %r", iocb)
# if we're not chained, there's no notification to do
if not self.ioChain:
return
# refer to the chained iocb
iocb = self.ioChain
try:
if _debug: IOChainMixIn._debug(" - decoding")
# let the derived class transform the data
            self.decode()
if _debug: IOChainMixIn._debug(" - decode complete")
except:
# extract the error and abort
err = sys.exc_info()[1]
if _debug: IOChainMixIn._exception(" - decoding exception: %r", err)
iocb.ioState = ABORTED
iocb.ioError = err
# break the references
self.ioChain = None
iocb.ioController = None
# notify the client
iocb.trigger()
def abort_io(self, iocb, err):
"""Forward the abort downstream."""
if _debug: IOChainMixIn._debug("abort_io %r %r", iocb, err)
# make sure we're being notified of an abort request from
# the iocb we are chained from
if iocb is not self.ioChain:
raise RuntimeError("broken chain")
# call my own abort(), which may forward it to a controller or
# be overridden by IOGroup
self.abort(err)
def encode(self):
"""Hook to transform the request, called when this IOCB is
chained."""
if _debug: IOChainMixIn._debug("encode (pass)")
# by default do nothing, the arguments have already been supplied
def decode(self):
"""Hook to transform the response, called when this IOCB is
completed."""
if _debug: IOChainMixIn._debug("decode")
# refer to the chained iocb
iocb = self.ioChain
# if this has completed successfully, pass it up
if self.ioState == COMPLETED:
if _debug: IOChainMixIn._debug(" - completed: %r", self.ioResponse)
# change the state and transform the content
iocb.ioState = COMPLETED
iocb.ioResponse = self.ioResponse
# if this aborted, pass that up too
elif self.ioState == ABORTED:
if _debug: IOChainMixIn._debug(" - aborted: %r", self.ioError)
# change the state
iocb.ioState = ABORTED
iocb.ioError = self.ioError
else:
raise RuntimeError("invalid state: %d" % (self.ioState,))
#
# IOChain
#
class IOChain(IOCB, IOChainMixIn):
def __init__(self, chain, *args, **kwargs):
"""Initialize a chained control block."""
if _debug: IOChain._debug("__init__ %r %r %r", chain, args, kwargs)
# initialize IOCB part to pick up the ioID
IOCB.__init__(self, *args, **kwargs)
IOChainMixIn.__init__(self, chain)
#
# IOGroup
#
@bacpypes_debugging
class IOGroup(IOCB, DebugContents):
_debugContents = ('ioMembers',)
def __init__(self):
"""Initialize a group."""
if _debug: IOGroup._debug("__init__")
IOCB.__init__(self)
# start with an empty list of members
self.ioMembers = []
# start out being done. When an IOCB is added to the
# group that is not already completed, this state will
# change to PENDING.
self.ioState = COMPLETED
self.ioComplete.set()
def add(self, iocb):
"""Add an IOCB to the group, you can also add other groups."""
if _debug: IOGroup._debug("Add %r", iocb)
# add this to our members
self.ioMembers.append(iocb)
# assume all of our members have not completed yet
self.ioState = PENDING
self.ioComplete.clear()
# when this completes, call back to the group. If this
# has already completed, it will trigger
iocb.add_callback(self.group_callback)
def group_callback(self, iocb):
"""Callback when a child iocb completes."""
if _debug: IOGroup._debug("group_callback %r", iocb)
# check all the members
for iocb in self.ioMembers:
if not iocb.ioComplete.isSet():
if _debug: IOGroup._debug(" - waiting for child: %r", iocb)
break
else:
if _debug: IOGroup._debug(" - all children complete")
# everything complete
self.ioState = COMPLETED
self.trigger()
def abort(self, err):
"""Called by a client to abort all of the member transactions.
When the last pending member is aborted the group callback
function will be called."""
if _debug: IOGroup._debug("abort %r", err)
# change the state to reflect that it was killed
self.ioState = ABORTED
self.ioError = err
# abort all the members
for iocb in self.ioMembers:
iocb.abort(err)
# notify the client
self.trigger()
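# Illustrative sketch (not part of the original module): group two blocks and
# watch the group complete once both members have completed. The names and
# payloads are assumptions; the function is not executed at import time.
def _example_iogroup_usage():
    group = IOGroup()
    first, second = IOCB(), IOCB()
    group.add(first)
    group.add(second)
    first.complete("one")    # group still pending, second member not done yet
    second.complete("two")   # group_callback now marks the group complete
    return group.ioState == COMPLETED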
#
# IOQueue - Input Output Queue
#
@bacpypes_debugging
class IOQueue:
def __init__(self, name):
if _debug: IOQueue._debug("__init__ %r", name)
self.queue = []
self.notempty = threading.Event()
self.notempty.clear()
def put(self, iocb):
"""Add an IOCB to a queue. This is usually called by the function
that filters requests and passes them out to the correct processing
thread."""
if _debug: IOQueue._debug("put %r", iocb)
# requests should be pending before being queued
if iocb.ioState != PENDING:
raise RuntimeError("invalid state transition")
# save that it might have been empty
wasempty = not self.notempty.isSet()
# add the request to the end of the list of iocb's at same priority
priority = iocb.ioPriority
item = (priority, iocb)
self.queue.insert(bisect_left(self.queue, (priority+1,)), item)
# point the iocb back to this queue
iocb.ioQueue = self
# set the event, queue is no longer empty
self.notempty.set()
return wasempty
def get(self, block=1, delay=None):
"""Get a request from a queue, optionally block until a request
is available."""
if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
# if the queue is empty and we do not block return None
if not block and not self.notempty.isSet():
return None
# wait for something to be in the queue
if delay:
self.notempty.wait(delay)
if not self.notempty.isSet():
return None
else:
self.notempty.wait()
# extract the first element
priority, iocb = self.queue[0]
del self.queue[0]
iocb.ioQueue = None
# if the queue is empty, clear the event
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
# return the request
return iocb
def remove(self, iocb):
"""Remove a control block from the queue, called if the request
is canceled/aborted."""
if _debug: IOQueue._debug("remove %r", iocb)
# remove the request from the queue
for i, item in enumerate(self.queue):
if iocb is item[1]:
if _debug: IOQueue._debug(" - found at %d", i)
del self.queue[i]
# if the queue is empty, clear the event
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
break
else:
if _debug: IOQueue._debug(" - not found")
def abort(self, err):
"""abort all of the control blocks in the queue."""
if _debug: IOQueue._debug("abort %r", err)
# send aborts to all of the members
try:
for iocb in self.queue:
iocb.ioQueue = None
iocb.abort(err)
# flush the queue
self.queue = []
# the queue is now empty, clear the event
self.notempty.clear()
except ValueError:
pass
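# Illustrative sketch (not part of the original module): pending blocks are
# queued by numeric priority and come back lowest value first. The priorities
# used here are assumptions; the function is not executed at import time.
def _example_ioqueue_usage():
    queue = IOQueue("example")
    later = IOCB(_priority=1)
    later.ioState = PENDING     # blocks must be pending before being queued
    queue.put(later)
    sooner = IOCB(_priority=0)
    sooner.ioState = PENDING
    queue.put(sooner)
    first = queue.get(block=0)  # returns the block with the lower priority value
    second = queue.get(block=0)
    return first is sooner and second is later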
#
# IOController
#
@bacpypes_debugging
class IOController:
def __init__(self, name=None):
"""Initialize a controller."""
if _debug: IOController._debug("__init__ name=%r", name)
# save the name
self.name = name
# register the name
if name is not None:
if name in _local_controllers:
raise RuntimeError("already a local controller called '%s': %r" % (name, _local_controllers[name]))
_local_controllers[name] = self
def abort(self, err):
"""Abort all requests, no default implementation."""
pass
def request_io(self, iocb):
"""Called by a client to start processing a request."""
if _debug: IOController._debug("request_io %r", iocb)
# bind the iocb to this controller
iocb.ioController = self
try:
# hopefully there won't be an error
err = None
# change the state
iocb.ioState = PENDING
# let derived class figure out how to process this
self.process_io(iocb)
except:
# extract the error
err = sys.exc_info()[1]
# if there was an error, abort the request
if err:
self.abort_io(iocb, err)
def process_io(self, iocb):
"""Figure out how to respond to this request. This must be
provided by the derived class."""
raise NotImplementedError("IOController must implement process_io()")
def active_io(self, iocb):
"""Called by a handler to notify the controller that a request is
being processed."""
if _debug: IOController._debug("active_io %r", iocb)
# requests should be idle or pending before coming active
if (iocb.ioState != IDLE) and (iocb.ioState != PENDING):
raise RuntimeError("invalid state transition (currently %d)" % (iocb.ioState,))
# change the state
iocb.ioState = ACTIVE
def complete_io(self, iocb, msg):
"""Called by a handler to return data to the client."""
if _debug: IOController._debug("complete_io %r %r", iocb, msg)
# if it completed, leave it alone
if iocb.ioState == COMPLETED:
pass
# if it already aborted, leave it alone
elif iocb.ioState == ABORTED:
pass
else:
# change the state
iocb.ioState = COMPLETED
iocb.ioResponse = msg
# notify the client
iocb.trigger()
def abort_io(self, iocb, err):
"""Called by a handler or a client to abort a transaction."""
if _debug: IOController._debug("abort_io %r %r", iocb, err)
# if it completed, leave it alone
if iocb.ioState == COMPLETED:
pass
# if it already aborted, leave it alone
elif iocb.ioState == ABORTED:
pass
else:
# change the state
iocb.ioState = ABORTED
iocb.ioError = err
# notify the client
iocb.trigger()
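# Illustrative sketch (not part of the original module): a minimal IOController
# subclass whose process_io() answers every request immediately. The class name
# and the echo behaviour are assumptions.
class _ExampleEchoController(IOController):
    def process_io(self, iocb):
        # mark the block as being worked on, then hand its arguments back
        self.active_io(iocb)
        self.complete_io(iocb, iocb.args)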
#
# IOQController
#
@bacpypes_debugging
class IOQController(IOController):
wait_time = 0.0
def __init__(self, name=None):
"""Initialize a queue controller."""
if _debug: IOQController._debug("__init__ name=%r", name)
# give ourselves a nice name
if not name:
name = self.__class__.__name__
IOController.__init__(self, name)
# start idle
self.state = CTRL_IDLE
# no active iocb
self.active_iocb = None
# create an IOQueue for iocb's requested when not idle
self.ioQueue = IOQueue(str(name) + "/Queue")
def abort(self, err):
"""Abort all pending requests."""
if _debug: IOQController._debug("abort %r", err)
if (self.state == CTRL_IDLE):
if _debug: IOQController._debug(" - idle")
return
while True:
iocb = self.ioQueue.get()
if not iocb:
break
if _debug: IOQController._debug(" - iocb: %r", iocb)
# change the state
iocb.ioState = ABORTED
iocb.ioError = err
# notify the client
iocb.trigger()
if (self.state != CTRL_IDLE):
if _debug: IOQController._debug(" - busy after aborts")
def request_io(self, iocb):
"""Called by a client to start processing a request."""
if _debug: IOQController._debug("request_io %r", iocb)
# bind the iocb to this controller
iocb.ioController = self
# if we're busy, queue it
if (self.state != CTRL_IDLE):
if _debug: IOQController._debug(" - busy, request queued")
iocb.ioState = PENDING
self.ioQueue.put(iocb)
return
try:
# hopefully there won't be an error
err = None
# let derived class figure out how to process this
self.process_io(iocb)
except:
# extract the error
err = sys.exc_info()[1]
# if there was an error, abort the request
if err:
self.abort_io(iocb, err)
def process_io(self, iocb):
"""Figure out how to respond to this request. This must be
provided by the derived class."""
raise NotImplementedError("IOController must implement process_io()")
def active_io(self, iocb):
"""Called by a handler to notify the controller that a request is
being processed."""
if _debug: IOQController._debug("active_io %r", iocb)
# base class work first, setting iocb state and timer data
IOController.active_io(self, iocb)
# change our state
self.state = CTRL_ACTIVE
# keep track of the iocb
self.active_iocb = iocb
def complete_io(self, iocb, msg):
"""Called by a handler to return data to the client."""
if _debug: IOQController._debug("complete_io %r %r", iocb, msg)
# check to see if it is completing the active one
if iocb is not self.active_iocb:
raise RuntimeError("not the current iocb")
# normal completion
IOController.complete_io(self, iocb, msg)
# no longer an active iocb
self.active_iocb = None
# check to see if we should wait a bit
if self.wait_time:
# change our state
self.state = CTRL_WAITING
# schedule a call in the future
task = FunctionTask(IOQController._wait_trigger, self)
task.install_task(_time() + self.wait_time)
else:
# change our state
self.state = CTRL_IDLE
# look for more to do
deferred(IOQController._trigger, self)
def abort_io(self, iocb, err):
"""Called by a handler or a client to abort a transaction."""
if _debug: IOQController._debug("abort_io %r %r", iocb, err)
# normal abort
IOController.abort_io(self, iocb, err)
# check to see if it is completing the active one
if iocb is not self.active_iocb:
if _debug: IOQController._debug(" - not current iocb")
return
# no longer an active iocb
self.active_iocb = None
# change our state
self.state = CTRL_IDLE
# look for more to do
deferred(IOQController._trigger, self)
def _trigger(self):
"""Called to launch the next request in the queue."""
if _debug: IOQController._debug("_trigger")
# if we are busy, do nothing
if self.state != CTRL_IDLE:
if _debug: IOQController._debug(" - not idle")
return
# if there is nothing to do, return
if not self.ioQueue.queue:
if _debug: IOQController._debug(" - empty queue")
return
# get the next iocb
iocb = self.ioQueue.get()
try:
# hopefully there won't be an error
err = None
# let derived class figure out how to process this
self.process_io(iocb)
except:
# extract the error
err = sys.exc_info()[1]
# if there was an error, abort the request
if err:
self.abort_io(iocb, err)
# if we're idle, call again
if self.state == CTRL_IDLE:
deferred(IOQController._trigger, self)
def _wait_trigger(self):
"""Called to launch the next request in the queue."""
if _debug: IOQController._debug("_wait_trigger")
# make sure we are waiting
if (self.state != CTRL_WAITING):
raise RuntimeError("not waiting")
# change our state
self.state = CTRL_IDLE
# look for more to do
IOQController._trigger(self)
#
# IOProxy
#
@bacpypes_debugging
class IOProxy:
def __init__(self, controllerName, serverName=None, requestLimit=None):
"""Create an IO client. It implements request_io like a controller, but
passes requests on to a local controller if it happens to be in the
same process, or the IOProxyServer instance to forward on for processing."""
if _debug: IOProxy._debug("__init__ %r serverName=%r, requestLimit=%r", controllerName, serverName, requestLimit)
# save the server reference
self.ioControllerRef = controllerName
self.ioServerRef = serverName
# set a limit on how many requests can be submitted
self.ioRequestLimit = requestLimit
self.ioPending = set()
self.ioBlocked = deque()
# bind to a local controller if possible
if not serverName:
self.ioBind = _local_controllers.get(controllerName, None)
if self.ioBind:
if _debug: IOProxy._debug(" - local bind successful")
else:
if _debug: IOProxy._debug(" - local bind deferred")
else:
self.ioBind = None
if _debug: IOProxy._debug(" - bind deferred")
def request_io(self, iocb, urgent=False):
"""Called by a client to start processing a request."""
if _debug: IOProxy._debug("request_io %r urgent=%r", iocb, urgent)
global _proxy_server
# save the server and controller reference
iocb.ioServerRef = self.ioServerRef
iocb.ioControllerRef = self.ioControllerRef
# check to see if it needs binding
if not self.ioBind:
# if the server is us, look for a local controller
if not self.ioServerRef:
self.ioBind = _local_controllers.get(self.ioControllerRef, None)
if not self.ioBind:
iocb.abort("no local controller %s" % (self.ioControllerRef,))
return
if _debug: IOProxy._debug(" - local bind successful")
else:
if not _proxy_server:
_proxy_server = IOProxyServer()
self.ioBind = _proxy_server
if _debug: IOProxy._debug(" - proxy bind successful: %r", self.ioBind)
# if this isn't urgent and there is a limit, see if we've reached it
if (not urgent) and self.ioRequestLimit:
# call back when this is completed
iocb.add_callback(self._proxy_trigger)
# check for the limit
if len(self.ioPending) < self.ioRequestLimit:
if _debug: IOProxy._debug(" - cleared for launch")
self.ioPending.add(iocb)
self.ioBind.request_io(iocb)
else:
# save it for later
if _debug: IOProxy._debug(" - save for later")
self.ioBlocked.append(iocb)
else:
# just pass it along
self.ioBind.request_io(iocb)
def _proxy_trigger(self, iocb):
"""This has completed, remove it from the set of pending requests
and see if it's OK to start up the next one."""
if _debug: IOProxy._debug("_proxy_trigger %r", iocb)
if iocb not in self.ioPending:
if _debug: IOProxy._warning("iocb not pending: %r", iocb)
else:
self.ioPending.remove(iocb)
# check to send another one
if (len(self.ioPending) < self.ioRequestLimit) and self.ioBlocked:
nextio = self.ioBlocked.popleft()
if _debug: IOProxy._debug(" - cleared for launch: %r", nextio)
# this one is now pending
self.ioPending.add(nextio)
self.ioBind.request_io(nextio)
#
# IOServer
#
PORT = 8002
SERVER_TIMEOUT = 60
@bacpypes_debugging
class IOServer(IOController, Client):
def __init__(self, addr=('',PORT)):
"""Initialize the remote IO handler."""
if _debug: IOServer._debug("__init__ %r", addr)
IOController.__init__(self)
# create a UDP director
self.server = UDPDirector(addr)
bind(self, self.server)
# dictionary of IOCBs as a server
self.remoteIOCB = {}
def confirmation(self, pdu):
if _debug: IOServer._debug('confirmation %r', pdu)
addr = pdu.pduSource
request = pdu.pduData
try:
# parse the request
request = cPickle.loads(request)
if _debug: _commlog.debug(">>> %s: S %s %r" % (_strftime(), str(addr), request))
# pick the message
if (request[0] == 0):
self.new_iocb(addr, *request[1:])
elif (request[0] == 1):
self.complete_iocb(addr, *request[1:])
elif (request[0] == 2):
self.abort_iocb(addr, *request[1:])
except:
# extract the error
err = sys.exc_info()[1]
IOServer._exception("error %r processing %r from %r", err, request, addr)
def callback(self, iocb):
"""Callback when an iocb is completed by a local controller and the
result needs to be sent back to the client."""
if _debug: IOServer._debug("callback %r", iocb)
# make sure it's one of ours
        if iocb not in self.remoteIOCB:
IOServer._warning("IOCB not owned by server: %r", iocb)
return
# get the client information
clientID, clientAddr = self.remoteIOCB[iocb]
# we're done with this
del self.remoteIOCB[iocb]
# build a response
if iocb.ioState == COMPLETED:
response = (1, clientID, iocb.ioResponse)
elif iocb.ioState == ABORTED:
response = (2, clientID, iocb.ioError)
else:
raise RuntimeError("IOCB invalid state")
if _debug: _commlog.debug("<<< %s: S %s %r" % (_strftime(), clientAddr, response))
response = cPickle.dumps( response, 1 )
# send it to the client
self.request(PDU(response, destination=clientAddr))
def abort(self, err):
"""Called by a local application to abort all transactions."""
if _debug: IOServer._debug("abort %r", err)
for iocb in self.remoteIOCB.keys():
self.abort_io(iocb, err)
def abort_io(self, iocb, err):
"""Called by a local client or a local controlled to abort a transaction."""
if _debug: IOServer._debug("abort_io %r %r", iocb, err)
# if it completed, leave it alone
if iocb.ioState == COMPLETED:
pass
# if it already aborted, leave it alone
elif iocb.ioState == ABORTED:
pass
        elif iocb in self.remoteIOCB:
# get the client information
clientID, clientAddr = self.remoteIOCB[iocb]
# we're done with this
del self.remoteIOCB[iocb]
# build an abort response
response = (2, clientID, err)
if _debug: _commlog.debug("<<< %s: S %s %r" % (_strftime(), clientAddr, response))
response = cPickle.dumps( response, 1 )
# send it to the client
            self.request(PDU(response, destination=clientAddr))
else:
IOServer._error("no reference to aborting iocb: %r", iocb)
# change the state
iocb.ioState = ABORTED
iocb.ioError = err
# notify the client
iocb.trigger()
def new_iocb(self, clientAddr, iocbid, controllerName, args, kwargs):
"""Called when the server receives a new request."""
if _debug: IOServer._debug("new_iocb %r %r %r %r %r", clientAddr, iocbid, controllerName, args, kwargs)
# look for a controller
controller = _local_controllers.get(controllerName, None)
if not controller:
# create a nice error message
err = RuntimeError("no local controller '%s'" % (controllerName, ))
# build an abort response
response = (2, iocbid, err)
if _debug: _commlog.debug("<<< %s: S %s %r" % (_strftime(), clientAddr, response))
response = cPickle.dumps( response, 1 )
# send it to the server
self.request(PDU(response, destination=clientAddr))
else:
# create an IOCB
iocb = IOCB(*args, **kwargs)
if _debug: IOServer._debug(" - local IOCB %r bound to remote %r", iocb.ioID, iocbid)
# save a reference to it
self.remoteIOCB[iocb] = (iocbid, clientAddr)
# make sure we're notified when it completes
iocb.add_callback(self.callback)
# pass it along
controller.request_io(iocb)
def abort_iocb(self, addr, iocbid, err):
"""Called when the client or server receives an abort request."""
if _debug: IOServer._debug("abort_iocb %r %r %r", addr, iocbid, err)
# see if this came from a client
for iocb in self.remoteIOCB.keys():
clientID, clientAddr = self.remoteIOCB[iocb]
if (addr == clientAddr) and (clientID == iocbid):
break
else:
IOServer._error("no reference to aborting iocb %r from %r", iocbid, addr)
return
if _debug: IOServer._debug(" - local IOCB %r bound to remote %r", iocb.ioID, iocbid)
# we're done with this
del self.remoteIOCB[iocb]
# clear the callback, we already know
iocb.ioCallback = []
# tell the local controller about the abort
iocb.abort(err)
#
# IOProxyServer
#
SERVER_TIMEOUT = 60
@bacpypes_debugging
class IOProxyServer(IOController, Client):
def __init__(self, addr=('', 0), name=None):
"""Initialize the remote IO handler."""
if _debug: IOProxyServer._debug("__init__")
IOController.__init__(self, name=name)
# create a UDP director
self.server = UDPDirector(addr)
bind(self, self.server)
if _debug: IOProxyServer._debug(" - bound to %r", self.server.socket.getsockname())
# dictionary of IOCBs as a client
self.localIOCB = {}
def confirmation(self, pdu):
if _debug: IOProxyServer._debug('confirmation %r', pdu)
addr = pdu.pduSource
request = pdu.pduData
try:
# parse the request
request = cPickle.loads(request)
if _debug: _commlog.debug(">>> %s: P %s %r" % (_strftime(), addr, request))
# pick the message
if (request[0] == 1):
self.complete_iocb(addr, *request[1:])
elif (request[0] == 2):
self.abort_iocb(addr, *request[1:])
except:
# extract the error
err = sys.exc_info()[1]
IOProxyServer._exception("error %r processing %r from %r", err, request, addr)
def process_io(self, iocb):
"""Package up the local IO request and send it to the server."""
if _debug: IOProxyServer._debug("process_io %r", iocb)
# save a reference in our dictionary
self.localIOCB[iocb.ioID] = iocb
# start a default timer if one hasn't already been set
if not iocb.ioTimeout:
iocb.set_timeout( SERVER_TIMEOUT, RuntimeError("no response from " + iocb.ioServerRef))
# build a message
request = (0, iocb.ioID, iocb.ioControllerRef, iocb.args, iocb.kwargs)
if _debug: _commlog.debug("<<< %s: P %s %r" % (_strftime(), iocb.ioServerRef, request))
request = cPickle.dumps( request, 1 )
# send it to the server
self.request(PDU(request, destination=(iocb.ioServerRef, PORT)))
def abort(self, err):
"""Called by a local application to abort all transactions, local
and remote."""
if _debug: IOProxyServer._debug("abort %r", err)
for iocb in self.localIOCB.values():
self.abort_io(iocb, err)
def abort_io(self, iocb, err):
"""Called by a local client or a local controlled to abort a transaction."""
if _debug: IOProxyServer._debug("abort_io %r %r", iocb, err)
# if it completed, leave it alone
if iocb.ioState == COMPLETED:
pass
# if it already aborted, leave it alone
elif iocb.ioState == ABORTED:
pass
        elif iocb.ioID in self.localIOCB:
# delete the dictionary reference
del self.localIOCB[iocb.ioID]
# build an abort request
request = (2, iocb.ioID, err)
if _debug: _commlog.debug("<<< %s: P %s %r" % (_strftime(), iocb.ioServerRef, request))
request = cPickle.dumps( request, 1 )
# send it to the server
self.request(PDU(request, destination=(iocb.ioServerRef, PORT)))
else:
raise RuntimeError("no reference to aborting iocb: %r" % (iocb.ioID,))
# change the state
iocb.ioState = ABORTED
iocb.ioError = err
# notify the client
iocb.trigger()
def complete_iocb(self, serverAddr, iocbid, msg):
"""Called when the client receives a response to a request."""
if _debug: IOProxyServer._debug("complete_iocb %r %r %r", serverAddr, iocbid, msg)
# assume nothing
iocb = None
# make sure this is a local request
        if iocbid not in self.localIOCB:
IOProxyServer._error("no reference to IOCB %r", iocbid)
if _debug: IOProxyServer._debug(" - localIOCB: %r", self.localIOCB)
else:
# get the iocb
iocb = self.localIOCB[iocbid]
# delete the dictionary reference
del self.localIOCB[iocbid]
if iocb:
# change the state
iocb.ioState = COMPLETED
iocb.ioResponse = msg
# notify the client
iocb.trigger()
def abort_iocb(self, addr, iocbid, err):
"""Called when the client or server receives an abort request."""
if _debug: IOProxyServer._debug("abort_iocb %r %r %r", addr, iocbid, err)
        if iocbid not in self.localIOCB:
raise RuntimeError("no reference to aborting iocb: %r" % (iocbid,))
# get the iocb
iocb = self.localIOCB[iocbid]
# delete the dictionary reference
del self.localIOCB[iocbid]
# change the state
iocb.ioState = ABORTED
iocb.ioError = err
# notify the client
iocb.trigger()
#
# abort
#
@bacpypes_debugging
def abort(err):
"""Abort everything, everywhere."""
if _debug: abort._debug("abort %r", err)
# start with the server
if IOServer._highlander:
IOServer._highlander.abort(err)
# now do everything local
for controller in _local_controllers.values():
controller.abort(err)
| 30.422958 | 121 | 0.584137 | ["MIT"] | DB-CL/bacpypes | sandbox/io.py | 39,489 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from cli.output import CLIOutput
from cli.user_input import CLIUserInput
class CLIDay():
# constants
INTRO_TEXT_WIDTH = 60
CMD_HELP_ALIASES = [ "h", "help" ]
CMD_WORDS_ALIASES = [ "w", "words" ]
CMD_SKIP_ALIASES = [ "s", "skip" ]
CMD_EXIT_ALIASES = [ "e", "exit",
"q", "quit" ]
CMD_NEXT_ALIASES = [ "n", "next" ]
CMD_PREV_ALIASES = [ "p", "prev" ]
ACTION_EXIT = "exit"
ACTION_TITLE = "title"
ACTION_NEW_WORDS = "new words"
ACTION_INTRO_TEXT = "intro text"
ACTION_SAMPLE_SENTENCES = "sample sentences"
ACTION_DEFINITIONS = "definitions"
ACTION_MATCHING = "matching"
ACTION_OTHER_NEW_WORDS = "other new words"
# General variables #
_next_action = None
_day = None
@classmethod
def start(cls, day):
cls._day = day
cls.mainloop()
@classmethod
def mainloop(cls):
cls._next_action = "title"
while cls._next_action != cls.ACTION_EXIT:
if cls._next_action == cls.ACTION_TITLE:
cls._next_action = cls.ACTION_NEW_WORDS
cls.title()
elif cls._next_action == cls.ACTION_NEW_WORDS:
cls._next_action = cls.ACTION_INTRO_TEXT
cls.new_words()
CLIOutput.empty_line(1)
CLIUserInput.wait_for_enter()
elif cls._next_action == cls.ACTION_INTRO_TEXT:
cls._next_action = cls.ACTION_SAMPLE_SENTENCES
cls.intro_text()
CLIOutput.empty_line(1)
CLIUserInput.wait_for_enter()
elif cls._next_action == cls.ACTION_SAMPLE_SENTENCES:
cls._next_action = cls.ACTION_DEFINITIONS
cls.sample_sentences()
elif cls._next_action == cls.ACTION_DEFINITIONS:
cls._next_action = cls.ACTION_MATCHING
cls.definitions()
elif cls._next_action == cls.ACTION_MATCHING:
cls._next_action = cls.ACTION_OTHER_NEW_WORDS
cls.matching()
elif cls._next_action == cls.ACTION_OTHER_NEW_WORDS:
cls._next_action = cls.ACTION_EXIT
cls.other_new_words()
else:
raise KeyError("Unknown action request.")
# day displays ------------------------------------------------------- #
@classmethod
def title(cls):
"""Display title"""
CLIOutput.empty_line(1)
CLIOutput.center(cls._day.get_title())
@classmethod
def new_words(cls, display_in_full=True):
"""Display new words section"""
regular = list()
phonetic = list()
for unit in cls._day.get_new_words():
regular.append(unit["regular"])
phonetic.append(unit["phonetic"])
if display_in_full:
CLIOutput.section_title("NEW WORDS")
CLIOutput.empty_line(1)
CLIOutput.empty_line(1)
CLIOutput.words_table(regular, phonetic)
@classmethod
def intro_text(cls):
"""Display intro text"""
parts = cls._day.get_intro_text()
CLIOutput.empty_line(2)
CLIOutput.framed(parts, cls.INTRO_TEXT_WIDTH)
# task answer cycle -------------------------------------------------- #
@classmethod
def _answer_cycle(cls, prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
"""Answer cycle"""
while True:
CLIOutput.empty_line(1)
a_type, a_content = CLIUserInput.get_answer(prompt)
if a_type == CLIUserInput.TYPE_ANSWER:
if a_content in answers:
CLIOutput.empty_line(1)
l_pr_answer()
CLIOutput.empty_line(1)
CLIOutput.simple("Correct!")
return True
else:
CLIOutput.warning("Incorrect, try again.")
elif a_type == CLIUserInput.TYPE_COMMAND:
if a_content in cls.CMD_WORDS_ALIASES:
cls.new_words(False)
CLIOutput.empty_line(1)
l_pr_question()
elif a_content in cls.CMD_SKIP_ALIASES:
return True
elif a_content in cls.CMD_NEXT_ALIASES:
l_next_msg()
return False
elif a_content in cls.CMD_PREV_ALIASES:
l_prev_msg()
cls._next_action = prev_action
return False
elif a_content in cls.CMD_EXIT_ALIASES:
cls._next_action = cls.ACTION_EXIT
return False
elif a_content in cls.CMD_HELP_ALIASES:
cls.help_cmd_in_task()
else:
CLIOutput.warning("Invalid command.")
else:
raise ValueError("Unknown answer type.")
# tasks -------------------------------------------------------------- #
@classmethod
def sample_sentences(cls):
"""Display 'sample sentences' task"""
data = cls._day.get_sample_sentences()
CLIOutput.section_title("SAMPLE SENTENCES")
CLIOutput.empty_line(1)
CLIOutput.simple(data["prompt"])
CLIOutput.empty_line(1)
for sentence in data["sentences"]:
CLIOutput.numbered_sentence(sentence["id"], sentence["beginning"] + CLIOutput.BLANK + sentence["end"], CLIOutput.FORMAT_INDENTED)
new_words_extension = cls._day.get_new_words_extension()
CLIOutput.new_words_extension(new_words_extension)
CLIOutput.empty_line(1)
for sentence in data["sentences"]:
prompt = "{}. ".format(sentence["id"])
l_pr_question = lambda : CLIOutput.numbered_sentence(sentence["id"], sentence["beginning"] + CLIOutput.BLANK + sentence["end"], CLIOutput.FORMAT_REGULAR)
answers = list()
answers.append(sentence['answer'])
full_answer = sentence['answer']
if len(sentence["beginning"]) > 0:
full_answer = sentence["beginning"] + " " + full_answer
if len(sentence["end"]) > 0:
if sentence["end"] not in [".", "!", "?", "?!", "!?"]:
full_answer += " "
full_answer += sentence["end"]
l_pr_answer = lambda : CLIOutput.simple(full_answer)
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = lambda : CLIOutput.general_message("This is the first task: Starting from the beginning.")
l_next_msg = lambda : None
# answer cycle
cls.new_words(False)
CLIOutput.empty_line(1)
l_pr_question()
if not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
return
# return after answer cycle returns
@classmethod
def definitions(cls):
"""Display 'definitions' task"""
# skip until data files are complete
return
data = cls._day.get_definitions()
CLIOutput.section_title("DEFINITIONS")
CLIOutput.empty_line(1)
CLIOutput.simple(data["prompt"])
CLIOutput.empty_line(1)
for definition in data["definitions"]:
CLIOutput.numbered_sentence(definition["id"], definition["text"], CLIOutput.FORMAT_INDENTED)
l_words = lambda : [CLIOutput.numbered_sentence(word["id"], word["text"], CLIOutput.FORMAT_INDENTED) for word in data["words"]]
for definition in data["definitions"]:
prompt = "{}. ".format(definition["id"])
l_pr_question = lambda : CLIOutput.numbered_sentence(definition["id"], definition["text"], CLIOutput.FORMAT_REGULAR)
answers = list()
answer_id = [value for (id, value) in data["answers"] if id == definition["id"]][0]
answers.append(answer_id)
answer_text = [item["text"] for item in data["words"] if item["id"] == answer_id][0]
answers.append(answer_text)
l_pr_answer = lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR)
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = lambda : None
l_next_msg = lambda : None
# answer cycle
CLIOutput.empty_line(2)
l_words()
CLIOutput.empty_line(1)
l_pr_question()
if not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
return
# return after answer cycle returns
@classmethod
def matching(cls):
"""Display 'matching' task"""
# skip until data files are complete
return
data = cls._day.get_matching()
CLIOutput.section_title(data["name"])
CLIOutput.empty_line(1)
CLIOutput.simple(data["prompt"])
CLIOutput.empty_line(1)
for sentence in data["sentences"]:
CLIOutput.numbered_sentence(sentence["id"], sentence["text"], CLIOutput.FORMAT_INDENTED)
l_words = lambda : [CLIOutput.numbered_sentence(word["id"], word["text"], CLIOutput.FORMAT_INDENTED) for word in data["words"]]
for sentence in data["sentences"]:
prompt = "{}. ".format(sentence["id"])
l_pr_question = lambda : CLIOutput.numbered_sentence(sentence["id"], sentence["text"], CLIOutput.FORMAT_REGULAR)
answers = list()
answer_id = [value for (id, value) in data["answers"] if id == sentence["id"]][0]
answers.append(answer_id)
answer_text = [item["text"] for item in data["words"] if item["id"] == answer_id][0]
answers.append(answer_text)
l_pr_answer = lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR)
prev_action = cls.ACTION_SAMPLE_SENTENCES
l_prev_msg = lambda : None
l_next_msg = lambda : None
# answer cycle
CLIOutput.empty_line(2)
l_words()
CLIOutput.empty_line(1)
l_pr_question()
if not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
return
# return after answer cycle returns
@classmethod
def other_new_words(cls):
"""Display other new words section"""
data = cls._day.get_other_new_words()
CLIOutput.section_title("OTHER NEW WORDS:")
CLIOutput.empty_line(1)
CLIOutput.simple(data["prompt"])
CLIOutput.empty_line(1)
a_type, a_content = CLIUserInput.get_answer("")
CLIOutput.empty_line(1)
# helper ------------------------------------------------------------- #
@classmethod
def help_cmd_in_task(cls):
collection = [
["words", "Display New Words section again."],
["skip", "Move on to the next part of the task."],
["next", "Leave task and move on to the next one."],
["prev", "Leave task and jump back to the previous one."],
["exit", "Leave task an exit to top program level."]
]
CLIOutput.empty_line(1)
CLIOutput.simple("Within the task, the following commands are available:")
CLIOutput.value_pair_list(collection, CLIOutput.FORMAT_REGULAR, CLIOutput.SPACING_CLOSE)
# END ---------------------------------------------------------------- #
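# Usage sketch (hypothetical "day" object providing the get_title(),
# get_new_words(), get_intro_text(), ... accessors referenced above):
#
#     CLIDay.start(day)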
| 32.690411 | 165 | 0.570399 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | sunarch/woyo | cli/day.py | 11,932 | Python |
from __future__ import absolute_import
from desicos.abaqus.abaqus_functions import create_sketch_plane
from desicos.abaqus.utils import cyl2rec
class Imperfection(object):
"""Base class for all imperfections
This class should be sub-classed when a new imperfection is created.
"""
def __init__(self):
self.name = ''
self.thetadegs = []
self.pts = [] #NOTE zs, rs and pts are the same
self.zs = []
self.rs = []
self.cc = None
self.impconf = None
self.amplitude = None
self.sketch_plane = None
def create_sketch_plane(self):
self.sketch_plane = create_sketch_plane(self.impconf.conecyl,
self)
def get_xyz(self):
r, z = self.impconf.conecyl.r_z_from_pt(self.pt)
return cyl2rec(r, self.thetadeg, z)
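# Sketch (not part of desicos) of a minimal concrete subclass; the attribute
# names thetadeg/pt/amplitude follow the singular forms used by get_xyz()
# above, everything else is hypothetical:
#
#     class SingleDimple(Imperfection):
#         def __init__(self, thetadeg, z, amplitude):
#             super(SingleDimple, self).__init__()
#             self.name = 'single dimple'
#             self.thetadeg = thetadeg
#             self.pt = z
#             self.amplitude = amplitude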
| 30.137931 | 72 | 0.614416 | ["BSD-3-Clause"] | saullocastro/desicos | desicos/abaqus/imperfections/imperfection.py | 874 | Python |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Code to interact with the primersearch program from EMBOSS."""
class InputRecord(object):
"""Represent the input file into the primersearch program.
This makes it easy to add primer information and write it out to the
simple primer file format.
"""
def __init__(self):
self.primer_info = []
def __str__(self):
output = ""
for name, primer1, primer2 in self.primer_info:
output += "%s %s %s\n" % (name, primer1, primer2)
return output
def add_primer_set(self, primer_name, first_primer_seq,
second_primer_seq):
"""Add primer information to the record."""
self.primer_info.append((primer_name, first_primer_seq,
second_primer_seq))
class OutputRecord(object):
"""Represent the information from a primersearch job.
amplifiers is a dictionary where the keys are the primer names and
the values are a list of PrimerSearchAmplifier objects.
"""
def __init__(self):
self.amplifiers = {}
class Amplifier(object):
"""Represent a single amplification from a primer."""
def __init__(self):
self.hit_info = ""
self.length = 0
def read(handle):
"""Get output from primersearch into a PrimerSearchOutputRecord."""
record = OutputRecord()
for line in handle:
if not line.strip():
continue
elif line.startswith("Primer name"):
name = line.split()[-1]
record.amplifiers[name] = []
elif line.startswith("Amplimer"):
amplifier = Amplifier()
record.amplifiers[name].append(amplifier)
elif line.startswith("\tSequence: "):
amplifier.hit_info = line.replace("\tSequence: ", "")
elif line.startswith("\tAmplimer length: "):
length = line.split()[-2]
amplifier.length = int(length)
else:
amplifier.hit_info += line
for name in record.amplifiers:
for amplifier in record.amplifiers[name]:
amplifier.hit_info = amplifier.hit_info.rstrip()
return record
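# Usage sketch: build an input file for primersearch and parse its output
# (file names and primer sequences below are placeholders):
#
#     from Bio.Emboss import PrimerSearch
#
#     inp = PrimerSearch.InputRecord()
#     inp.add_primer_set("set1", "ACGTACGT", "TTGCATTG")
#     with open("primers.txt", "w") as out:
#         out.write(str(inp))
#
#     with open("out.primersearch") as handle:
#         record = PrimerSearch.read(handle)
#     for name, amplifiers in record.amplifiers.items():
#         for amp in amplifiers:
#             print(name, amp.length)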
| 30.012987 | 72 | 0.626136 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | EnjoyLifeFund/macHighSierra-py36-pkgs | Bio/Emboss/PrimerSearch.py | 2,311 | Python |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers a decomposition for the Entangle gate.
Applies a Hadamard gate to the first qubit and then, conditioned on this first
qubit, CNOT gates to all others.
"""
from projectq.cengines import DecompositionRule
from projectq.meta import Control, get_control_count
from projectq.ops import X, H, Entangle, All
def _decompose_entangle(cmd):
""" Decompose the entangle gate. """
qr = cmd.qubits[0]
eng = cmd.engine
with Control(eng, cmd.control_qubits):
H | qr[0]
with Control(eng, qr[0]):
All(X) | qr[1:]
all_defined_decomposition_rules = [
DecompositionRule(Entangle.__class__, _decompose_entangle)
]
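# Usage sketch (assumes a projectq MainEngine whose decomposition setup
# includes the rule above):
#
#     from projectq import MainEngine
#     from projectq.ops import All, Entangle, Measure
#
#     eng = MainEngine()
#     qureg = eng.allocate_qureg(3)
#     Entangle | qureg      # H on qureg[0], then CNOTs onto qureg[1:]
#     All(Measure) | qureg
#     eng.flush()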
| 31.268293 | 78 | 0.723869 | ["Apache-2.0"] | VirtueQuantumCloud/Ex | projectq/setups/decompositions/entangle.py | 1,282 | Python |
import re
import json
from math import log, sqrt
from jinja2 import Markup
from sklearn import cluster
from sklearn.decomposition import PCA
from scipy import stats
from sklearn import metrics
import numpy
from db import export_sql
from werkzeug.wrappers import Response
# create higher order transformations
def x2fs(X, fields, type=''):
if type == 'Interaction':
s2 = lambda x: x + 1
e2 = lambda x, y: y
elif type == 'Quadratic':
s2 = lambda x: x
e2 = lambda x, y: y
elif type == 'Purely Quadratic':
s2 = lambda x: x
e2 = lambda x, y: x + 1
else:
return
l1 = len(X[0])
l2 = len(X[0])
for i in range(len(X)):
r = X[i]
for j1 in range(l1):
for j2 in range(s2(j1), e2(j1, l2)):
r.append(r[j1] * r[j2])
for j1 in range(l1):
for j2 in range(s2(j1), e2(j1, l2)):
fields.append(fields[j1] + '*' + fields[j2])
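# Worked example of the transforms above, for X = [[2.0, 3.0]] and
# fields = ['a', 'b'] (values chosen only for illustration):
#   'Interaction'      -> row [2, 3, 6],       fields ['a', 'b', 'a*b']
#   'Quadratic'        -> row [2, 3, 4, 6, 9], fields ['a', 'b', 'a*a', 'a*b', 'b*b']
#   'Purely Quadratic' -> row [2, 3, 4, 9],    fields ['a', 'b', 'a*a', 'b*b']
# Note that x2fs modifies both X and fields in place.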
# fit_transform from sklearn doesn't return the loadings V. Here is a hacked version
def fit_transform(pca, X):
U, S, V = pca._fit(X)
if pca.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S
# transposing component matrix such that PCA_1 is in row
V = V.transpose()
return (U, V)
def evaluate(clust_dists, clustidx, X):
results = {}
sum = 0
count = 0
clustsum = [0 for i in range(len(clust_dists[0]))]
clustcount = [0 for i in range(len(clust_dists[0]))]
clustmean = [0 for i in range(len(clust_dists[0]))]
for i in range(len(clustidx)):
sum += clust_dists[i][clustidx[i]]
count += 1
clustsum[clustidx[i]] += clust_dists[i][clustidx[i]]
clustcount[clustidx[i]] += 1
averagedist = float(sum) / count
results['meandist'] = averagedist
for i in range(len(clust_dists[0])):
clustmean[i] = float(clustsum[i]) / clustcount[i]
return results, clustmean
def render(vis, request, info):
info["message"] = []
info["results"] = []
# module independent user inputs
table = request.args.get("table", '')
where = request.args.get("where", '1=1')
limit = request.args.get("limit", '1000')
start = request.args.get("start", '0') # start at 0
reload = int(request.args.get("reload", 0))
view = request.args.get("view", '')
# module dependent user inputs
field = request.args.get("field", '')
pre_process = request.args.get("pre_process", '')
pre_transform = request.args.get("pre_transform", '')
orderBy = request.args.get("orderBy", '')
groupBy = request.args.get("groupBy", '')
if orderBy and len(orderBy) > 0: orderBy = ' order by %s' % orderBy
if groupBy and len(groupBy) > 0: groupBy = ' group by %s' % groupBy
k = int(request.args.get("k", 2))
pfield = request.args.get("pfield", [])
# verify essential parameter details - smell test
if len(table) == 0 or len(field) == 0:
info["message"].append("Table or field missing")
info["message_class"] = "failure"
else:
# prepare sql query
sql = "select %s from %s where %s %s %s limit %s offset %s" % (
field, table, where, groupBy, orderBy, limit, start)
(datfile, reload, result) = export_sql(sql, vis.config, reload, None, view)
if len(result) > 0:
info["message"].append(result)
info["message_class"] = "failure"
else:
X = []
with open(datfile, 'r') as f:
for r in f:
row = r.rstrip().split(',')
X.append([float(r) for r in row])
xfield = pfield
# transform features
x2fs(X, xfield, pre_transform)
pfield = xfield
X = numpy.array(X)
if pre_process == "Z-Score":
X = stats.zscore(X, axis=0)
elif pre_process == "PCA":
pca = PCA()
(X, V) = fit_transform(pca, X)
pfield = ['PCA_%d' % (d + 1) for d in range(len(pfield))]
elif pre_process == "Whitened PCA":
pca = PCA(whiten=True)
(X, V) = fit_transform(pca, X)
pfield = ['PCA_%d' % (d + 1) for d in range(len(pfield))]
clust = cluster.KMeans(n_clusters=k)
cidx = clust.fit_predict(X)
cdists = clust.transform(X)
# summary results
results, clustmeans = evaluate(cdists, cidx, X)
info["results"].append('Clustering the data using K-means with k=%d' % k)
info["results"].append('Average distance to centroid: %.4f' % results['meandist'])
hashquery = datfile + hex(hash(request.args.get('query', datfile)) & 0xffffffff)
if pre_process == "PCA" or pre_process == "Whitened PCA":
#write pca matrix file
info["datfile_matrix"] = hashquery + '.pca.csv'
with open(info["datfile_matrix"], 'w') as f:
f.write("feature,%s\n" % (','.join(xfield)))
for i in range(len(V)):
f.write('PCA_%d,%s\n' % (i + 1, ','.join([str(v) for v in V[i]])))
info["pca_matrix_divs"] = Markup('<h2>PCA Components</h2><div id="svg-pca_matrix"></div>')
else:
info["pca_matrix_divs"] = ''
# preparing within cluster distances into a js array
f = []
for i in range(k):
f.append('{cluster:"%d", distance:%.3f}' % (i, clustmeans[i]))
info["clust_data"] = Markup('clust_data=[' + ','.join(f) + '];')
#provenance
#0:id,1:prediction result (grouping),2:actual label(shape),3:error,4:y,or features
info["datfile_provenance"] = hashquery + '.provenance.csv'
RES = ['Cluster %d' % (i + 1) for i in range(k)]
with open(info["datfile_provenance"], 'w') as f:
f.write('Cluster,Error,%s\n' % (','.join(pfield)))
for i in range(len(cidx)):
e = cdists[i][cidx[i]]
f.write('%s,%.4f,%s\n' % (RES[cidx[i]], e, ','.join([str(r) for r in X[i]])))
pfield = ['cluster'] + pfield
divs = [
'<div class="chart"><div class="title">%s<a href="javascript:reset(%d)" class="reset" style="display: none;">reset</a></div></div>' % (
pfield[d], d + 1) for d in range(len(pfield))]
divs = ''.join(divs)
divs = '<div class="chart"><div class="title">Distance to Centroid (<span id="active"></span> of <span id="total"></span> items selected.)<a href="javascript:reset(0)" class="reset" style="display: none;">reset</a></div></div>' + divs
info['provenance_divs'] = Markup(divs)
info["message_class"] = "success"
if reload > 0:
info["message"].append("Loaded fresh.")
else:
info["message"].append("Loading from cache. Use reload=1 to reload.")
info["datfile"] = info["datfile_provenance"]
# prepare some messages
info["title"] = "FIELD_X: <em>%s</em> from <br />TABLE: <em>%s</em>" % (','.join(pfield), table)
info["title"] = Markup(info["title"])
info["message"] = Markup(''.join('<p>%s</p>' % m for m in info["message"] if len(m) > 0))
info["results"] = Markup('<ul>' + ''.join('<li>%s</li>' % m for m in info["results"] if len(m) > 0) + '</ul>')
# format the message to encode HTML characters
info['query'] = Markup(request.args.get('query', ''))
t = vis.jinja_env.get_template('explore.html')
v1 = t.render(**info)
t = vis.jinja_env.get_template('ml_kmeans.html')
v2 = t.render(**info)
v3 = v1[:-7] + v2 + v1[-7:] + '</html>'
return Response(v3, mimetype='text/html')
| 37.037209 | 246 | 0.540374 | ["MIT"] | garthee/gnot | modules/ml_kmeans.py | 7,963 | Python |
# From http://rodp.me/2015/how-to-extract-data-from-the-web.html
import time
import sys
import uuid
import json
import markdown
from collections import Counter
from requests import get
from lxml import html
from unidecode import unidecode
import urllib
import lxml.html
from readability.readability import Document
def getDoc(url):
t = time.time()
t2 = time.time()
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
r = get(url,headers=headers)
print("*"*30)
print("Getting url took " + str(time.time()-t2))
print("*"*30)
redirectUrl = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(r.url)))[0:5]
newContent = r.content
parsed_doc = html.fromstring(newContent)
with open('doc.html','w') as f:
f.write(newContent)
parents_with_children_counts = []
parent_elements = parsed_doc.xpath('//body//*/..')
for parent in parent_elements:
children_counts = Counter([child.tag for child in parent.iterchildren()])
parents_with_children_counts.append((parent, children_counts))
parents_with_children_counts.sort(key=lambda x: x[1].most_common(1)[0][1], reverse=True)
docStrings = {}
last = len(parents_with_children_counts)
if last > 20:
last = 20
t2 = time.time()
for i in range(last):
docString = ""
numLines = 0
for child in parents_with_children_counts[i][0]: # Possibly [1][0]
tag = str(child.tag)
#print(tag)
if tag == 'style' or tag == 'iframe':
continue
if tag == 'font' or tag == 'div' or tag == 'script':
tag = 'p'
try:
startTag = "<" + tag + ">"
endTag = "</" + tag + ">"
except:
startTag = '<p>'
endTag = '</p>'
try:
str_text = child.text_content().encode('utf-8')
#str_text = " ".join(str_text.split())
str_text = json.dumps(str_text)
str_text = str_text.replace('\"','').replace('\\n','\n')
str_text = str_text.replace('\\t','').replace('\\r','')
str_text = str_text.replace('\u0092',"'").replace('\\u00e2\\u0080\\u0099',"'").replace('\u2019',"'")
str_text = str_text.replace('\u0093','"').replace('\u00e2\u0080\u009c','"').replace('\u00e2\u0080\u009d','"').replace('\u201c','"').replace('\u201d','"')
str_text = str_text.replace('\u0094','"').replace('\u00e2\u0080" ','')
for foo in range(5):
str_text = str_text.replace('<br> <br>','<br>')
str_text = str_text.replace('\u0096','-').replace('\u2014','-').replace('\\u00a0',' ')
str_text = str_text.replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ')
str_text = str_text.replace('\\','').replace('u2026 ','').replace('u00c2','')
newString = startTag + str_text + endTag + "\n"
newString = str_text + "\n\n"
if (len(newString) > 50000 or
len(newString)<14 or
'{ "' in newString or
'{"' in newString or
"function()" in newString or
'else {' in newString or
'.js' in newString or
'pic.twitter' in newString or
'("' in newString or
'ajax' in newString or
'var ' in newString or
('Advertisement' in newString and len(newString)<200) or
'Continue reading' in newString or
('Photo' in newString and 'Credit' in newString) or
'window.' in newString or
');' in newString or
'; }' in newString or
'CDATA' in newString or
'()' in newString):
continue
#print(len(newString))
if len(newString) > 50 and ':' not in newString:
numLines += 1
docString += newString
except:
#print('error')
pass
docStrings[i] = {}
docStrings[i]['docString'] = markdown.markdown(docString)
docStrings[i]['word_per_p'] = float(len(docString.split())) / float(len(docStrings[i]['docString'].split('<p>')))
docStrings[i]['numLines'] = numLines
docStrings[i]['docString_length'] = len(docString)
try:
docStrings[i]['score']=numLines*docStrings[i]['word_per_p']
#docStrings[i]['score']=1000*numLines / sum(1 for c in docString if c.isupper())
except:
docStrings[i]['score'] = 0
print("*"*30)
print("Looping took " + str(time.time()-t2))
print("*"*30)
with open('test.json','w') as f:
f.write(json.dumps(docStrings,indent=2))
bestI = 0
bestNumLines = 0
for i in range(len(docStrings)):
if (docStrings[i]['word_per_p']>12 and
docStrings[i]['score'] > bestNumLines and
docStrings[i]['docString_length'] > 300):
bestI = i
bestNumLines = docStrings[i]['score']
print("*"*24)
print(bestI)
print(bestNumLines)
print("*"*24)
docString = docStrings[bestI]['docString']
if len(docString)<100:
docString="<h1>There is no content on this page.</h1>"
title = parsed_doc.xpath(".//title")[0].text_content().strip()
try:
description = parsed_doc.xpath(".//meta[@name='description']")[0].get('content')
except:
description = ""
url = r.url
timeElapsed = int((time.time()-t)*1000)
docString = docString.decode('utf-8')
for s in docString.split('\n'):
print(len(s))
fileSize = 0.7 + float(sys.getsizeof(docString)/1000.0)
fileSize = round(fileSize,1)
return {'title':title,'description':description,'url':url,'timeElapsed':timeElapsed,'content':docString,'size':fileSize}
def getDoc2(url):
t = time.time()
# import urllib
# html = urllib.urlopen(url).read()
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
r = get(url,headers=headers)
html = r.content
doc = Document(html,url=url)
readable_article = doc.summary()
readable_title = doc.short_title()
readable_article = readable_article.replace("http","/?url=http")
timeElapsed = int((time.time()-t)*1000)
fileSize = 0.7 + float(sys.getsizeof(readable_article)/1000.0)
fileSize = round(fileSize,1)
return {'title':readable_title,'description':"",'url':url,'timeElapsed':timeElapsed,'content':readable_article,'size':fileSize}
#print(getDoc('http://www.bbc.co.uk/news/entertainment-arts-34768201'))
| 40.188571 | 169 | 0.54358 | ["MIT"] | schollz/justread | parseDoc.py | 7,033 | Python |
# -*- coding: utf-8 -*-
"""Identity Services Engine deleteDeviceAdminLocalExceptionById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorC7D6Bb4Abf53F6Aa2F40B6986F58A9(object):
"""deleteDeviceAdminLocalExceptionById request schema definition."""
def __init__(self):
super(JSONSchemaValidatorC7D6Bb4Abf53F6Aa2F40B6986F58A9, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"id": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
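# Usage sketch (the id value below is a placeholder):
#
#     JSONSchemaValidatorC7D6Bb4Abf53F6Aa2F40B6986F58A9().validate({'id': 'abc'})
#
# A request whose "id" is not a string raises MalformedRequest.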
| 36.190476 | 81 | 0.694298 | ["MIT"] | oianson/ciscoisesdk | tests/models/validators/v3_0_0/jsd_c7d6bb4abf53f6aa2f40b6986f58a9.py | 2,280 | Python |
# -*- coding: utf-8 -*-
u"""
Beta regression for modeling rates and proportions.
References
----------
Grün, Bettina, Ioannis Kosmidis, and Achim Zeileis. Extended beta regression
in R: Shaken, stirred, mixed, and partitioned. No. 2011-22. Working Papers in
Economics and Statistics, 2011.
Smithson, Michael, and Jay Verkuilen. "A better lemon squeezer?
Maximum-likelihood regression with beta-distributed dependent variables."
Psychological methods 11.1 (2006): 54.
"""
import numpy as np
from scipy.special import gammaln as lgamma
import patsy
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.model import (
GenericLikelihoodModel, GenericLikelihoodModelResults, _LLRMixin)
from statsmodels.genmod import families
_init_example = """
Beta regression with default of logit-link for exog and log-link
for precision.
>>> mod = BetaModel(endog, exog)
>>> rslt = mod.fit()
>>> print(rslt.summary())
We can also specify a formula and a specific structure and use the
identity-link for precision.
>>> from sm.families.links import identity
>>> Z = patsy.dmatrix('~ temp', dat, return_type='dataframe')
>>> mod = BetaModel.from_formula('iyield ~ C(batch, Treatment(10)) + temp',
... dat, exog_precision=Z,
... link_precision=identity())
In the case of proportion-data, we may think that the precision depends on
the number of measurements. E.g for sequence data, on the number of
sequence reads covering a site:
>>> Z = patsy.dmatrix('~ coverage', df)
>>> formula = 'methylation ~ disease + age + gender + coverage'
>>> mod = BetaModel.from_formula(formula, df, Z)
>>> rslt = mod.fit()
"""
class BetaModel(GenericLikelihoodModel):
__doc__ = """Beta Regression.
The Model is parameterized by mean and precision. Both can depend on
explanatory variables through link functions.
Parameters
----------
endog : array_like
1d array of endogenous response variable.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
exog_precision : array_like
2d array of variables for the precision.
link : link
Any link in sm.families.links for mean, should have range in
interval [0, 1]. Default is logit-link.
link_precision : link
Any link in sm.families.links for precision, should have
range in positive line. Default is log-link.
**kwds : extra keywords
Keyword options that will be handled by super classes.
Not all general keywords will be supported in this class.
Notes
-----
Status: experimental, new in 0.13.
Core results are verified, but api can change and some extra results
specific to Beta regression are missing.
Examples
--------
{example}
See Also
--------
:ref:`links`
""".format(example=_init_example)
def __init__(self, endog, exog, exog_precision=None,
link=families.links.Logit(),
link_precision=families.links.Log(), **kwds):
etmp = np.array(endog)
assert np.all((0 < etmp) & (etmp < 1))
if exog_precision is None:
extra_names = ['precision']
exog_precision = np.ones((len(endog), 1), dtype='f')
else:
extra_names = ['precision-%s' % zc for zc in
(exog_precision.columns
if hasattr(exog_precision, 'columns')
else range(1, exog_precision.shape[1] + 1))]
kwds['extra_params_names'] = extra_names
super(BetaModel, self).__init__(endog, exog,
exog_precision=exog_precision,
**kwds)
self.link = link
self.link_precision = link_precision
# not needed, handled by super:
# self.exog_precision = exog_precision
# inherited df do not account for precision params
self.nobs = self.endog.shape[0]
self.df_model = self.nparams - 1
self.df_resid = self.nobs - self.nparams
assert len(self.exog_precision) == len(self.endog)
self.hess_type = "oim"
if 'exog_precision' not in self._init_keys:
self._init_keys.extend(['exog_precision'])
self._init_keys.extend(['link', 'link_precision'])
self._null_drop_keys = ['exog_precision']
self.results_class = BetaResults
self.results_class_wrapper = BetaResultsWrapper
@classmethod
def from_formula(cls, formula, data, exog_precision_formula=None,
*args, **kwargs):
if exog_precision_formula is not None:
if 'subset' in kwargs:
                d = data.loc[kwargs['subset']]
Z = patsy.dmatrix(exog_precision_formula, d)
else:
Z = patsy.dmatrix(exog_precision_formula, data)
kwargs['exog_precision'] = Z
return super(BetaModel, cls).from_formula(formula, data, *args,
**kwargs)
def _get_exogs(self):
return (self.exog, self.exog_precision)
def predict(self, params, exog=None, exog_precision=None, which="mean"):
"""Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision parameter.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linear" : linear predictor for the mean function
- "linear-precision" : linear predictor for the precision parameter
Returns
-------
ndarray, predicted values
"""
# compatibility with old names and misspelling
if which == "linpred":
which = "linear"
if which in ["linpred_precision", "linear_precision"]:
which = "linear-precision"
k_mean = self.exog.shape[1]
if which in ["mean", "linear"]:
if exog is None:
exog = self.exog
params_mean = params[:k_mean]
# Zparams = params[k_mean:]
linpred = np.dot(exog, params_mean)
if which == "mean":
mu = self.link.inverse(linpred)
res = mu
else:
res = linpred
elif which in ["precision", "linear-precision"]:
if exog_precision is None:
exog_precision = self.exog_precision
params_prec = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_prec)
if which == "precision":
phi = self.link_precision.inverse(linpred_prec)
res = phi
else:
res = linpred_prec
elif which == "var":
res = self._predict_var(
params,
exog=exog,
exog_precision=exog_precision
)
else:
raise ValueError('which = %s is not available' % which)
return res
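    # Example (sketch): after fitting, the different prediction types can be
    # requested as, e.g.,
    #
    #     res = BetaModel(endog, exog, exog_precision=Z).fit()
    #     mu = res.model.predict(res.params, which="mean")
    #     phi = res.model.predict(res.params, which="precision")
    #
    # where endog, exog and Z are user-supplied arrays.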
def _predict_precision(self, params, exog_precision=None):
"""Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision.
"""
if exog_precision is None:
exog_precision = self.exog_precision
k_mean = self.exog.shape[1]
params_precision = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_precision)
phi = self.link_precision.inverse(linpred_prec)
return phi
def _predict_var(self, params, exog=None, exog_precision=None):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance.
"""
mean = self.predict(params, exog=exog)
precision = self._predict_precision(params,
exog_precision=exog_precision)
var_endog = mean * (1 - mean) / (1 + precision)
return var_endog
def loglikeobs(self, params):
"""
        Loglikelihood for observations of the Beta regression model.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
return self._llobs(self.endog, self.exog, self.exog_precision, params)
def _llobs(self, endog, exog, exog_precision, params):
"""
Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
y, X, Z = endog, exog, exog_precision
nz = Z.shape[1]
params_mean = params[:-nz]
params_prec = params[-nz:]
linpred = np.dot(X, params_mean)
linpred_prec = np.dot(Z, params_prec)
mu = self.link.inverse(linpred)
phi = self.link_precision.inverse(linpred_prec)
eps_lb = 1e-200
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ll = (lgamma(phi) - lgamma(alpha)
- lgamma(beta)
+ (mu * phi - 1) * np.log(y)
+ (((1 - mu) * phi) - 1) * np.log(1 - y))
return ll
def score(self, params):
"""
Returns the score vector of the log-likelihood.
http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score : ndarray
First derivative of loglikelihood function.
"""
sf1, sf2 = self.score_factor(params)
d1 = np.dot(sf1, self.exog)
d2 = np.dot(sf2, self.exog_precision)
return np.concatenate((d1, d2))
def _score_check(self, params):
"""Inherited score with finite differences
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score based on numerical derivatives
"""
return super(BetaModel, self).score(params)
def score_factor(self, params, endog=None):
"""Derivative of loglikelihood function w.r.t. linear predictors.
This needs to be multiplied with the exog to obtain the score_obs.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
Notes
-----
The score_obs can be obtained from score_factor ``sf`` using
- d1 = sf[:, :1] * exog
- d2 = sf[:, 1:2] * exog_precision
"""
from scipy import special
digamma = special.psi
y = self.endog if endog is None else endog
X, Z = self.exog, self.exog_precision
nz = Z.shape[1]
Xparams = params[:-nz]
Zparams = params[-nz:]
# NO LINKS
mu = self.link.inverse(np.dot(X, Xparams))
phi = self.link_precision.inverse(np.dot(Z, Zparams))
eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ystar = np.log(y / (1. - y))
dig_beta = digamma(beta)
mustar = digamma(alpha) - dig_beta
yt = np.log(1 - y)
mut = dig_beta - digamma(phi)
t = 1. / self.link.deriv(mu)
h = 1. / self.link_precision.deriv(phi)
#
sf1 = phi * t * (ystar - mustar)
sf2 = h * (mu * (ystar - mustar) + yt - mut)
return (sf1, sf2)
def score_hessian_factor(self, params, return_hessian=False,
observed=True):
"""Derivatives of loglikelihood function w.r.t. linear predictors.
This calculates score and hessian factors at the same time, because
there is a large overlap in calculations.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
return_hessian : bool
If False, then only score_factors are returned
If True, the both score and hessian factors are returned
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
(-jbb, -jbg, -jgg) : tuple
A tuple with 3 hessian factors, corresponding to the upper
triangle of the Hessian matrix.
TODO: check why there are minus
"""
from scipy import special
digamma = special.psi
y, X, Z = self.endog, self.exog, self.exog_precision
nz = Z.shape[1]
Xparams = params[:-nz]
Zparams = params[-nz:]
# NO LINKS
mu = self.link.inverse(np.dot(X, Xparams))
phi = self.link_precision.inverse(np.dot(Z, Zparams))
# We need to prevent mu = 0 and (1-mu) = 0 in digamma call
eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ystar = np.log(y / (1. - y))
dig_beta = digamma(beta)
mustar = digamma(alpha) - dig_beta
yt = np.log(1 - y)
mut = dig_beta - digamma(phi)
t = 1. / self.link.deriv(mu)
h = 1. / self.link_precision.deriv(phi)
ymu_star = (ystar - mustar)
sf1 = phi * t * ymu_star
sf2 = h * (mu * ymu_star + yt - mut)
if return_hessian:
trigamma = lambda x: special.polygamma(1, x) # noqa
trig_beta = trigamma(beta)
var_star = trigamma(alpha) + trig_beta
var_t = trig_beta - trigamma(phi)
c = - trig_beta
s = self.link.deriv2(mu)
q = self.link_precision.deriv2(phi)
jbb = (phi * t) * var_star
if observed:
jbb += s * t**2 * ymu_star
jbb *= t * phi
jbg = phi * t * h * (mu * var_star + c)
if observed:
jbg -= ymu_star * t * h
jgg = h**2 * (mu**2 * var_star + 2 * mu * c + var_t)
if observed:
jgg += (mu * ymu_star + yt - mut) * q * h**3 # **3 ?
return (sf1, sf2), (-jbb, -jbg, -jgg)
else:
return (sf1, sf2)
def score_obs(self, params):
"""
Score, first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
"""
sf1, sf2 = self.score_factor(params)
# elementwise product for each row (observation)
d1 = sf1[:, None] * self.exog
d2 = sf2[:, None] * self.exog_precision
return np.column_stack((d1, d2))
def hessian(self, params, observed=None):
"""Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
Parameter at which Hessian is evaluated.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
"""
if self.hess_type == "eim":
observed = False
else:
observed = True
_, hf = self.score_hessian_factor(params, return_hessian=True,
observed=observed)
hf11, hf12, hf22 = hf
# elementwise product for each row (observation)
d11 = (self.exog.T * hf11).dot(self.exog)
d12 = (self.exog.T * hf12).dot(self.exog_precision)
d22 = (self.exog_precision.T * hf22).dot(self.exog_precision)
return np.block([[d11, d12], [d12.T, d22]])
def hessian_factor(self, params, observed=True):
"""Derivatives of loglikelihood function w.r.t. linear predictors.
"""
_, hf = self.score_hessian_factor(params, return_hessian=True,
observed=observed)
return hf
def _start_params(self, niter=2, return_intermediate=False):
"""find starting values
Parameters
----------
niter : int
Number of iterations of WLS approximation
return_intermediate : bool
If False (default), then only the preliminary parameter estimate
will be returned.
If True, then also the two results instances of the WLS estimate
for mean parameters and for the precision parameters will be
returned.
Returns
-------
sp : ndarray
start parameters for the optimization
res_m2 : results instance (optional)
Results instance for the WLS regression of the mean function.
res_p2 : results instance (optional)
Results instance for the WLS regression of the precision function.
Notes
-----
This calculates a few iteration of weighted least squares. This is not
a full scoring algorithm.
"""
# WLS of the mean equation uses the implied weights (inverse variance),
# WLS for the precision equations uses weights that only take
# account of the link transformation of the precision endog.
from statsmodels.regression.linear_model import OLS, WLS
res_m = OLS(self.link(self.endog), self.exog).fit()
fitted = self.link.inverse(res_m.fittedvalues)
resid = self.endog - fitted
prec_i = fitted * (1 - fitted) / np.maximum(np.abs(resid), 1e-2)**2 - 1
res_p = OLS(self.link_precision(prec_i), self.exog_precision).fit()
prec_fitted = self.link_precision.inverse(res_p.fittedvalues)
# sp = np.concatenate((res_m.params, res_p.params))
for _ in range(niter):
y_var_inv = (1 + prec_fitted) / (fitted * (1 - fitted))
# y_var = fitted * (1 - fitted) / (1 + prec_fitted)
ylink_var_inv = y_var_inv / self.link.deriv(fitted)**2
res_m2 = WLS(self.link(self.endog), self.exog,
weights=ylink_var_inv).fit()
fitted = self.link.inverse(res_m2.fittedvalues)
resid2 = self.endog - fitted
prec_i2 = (fitted * (1 - fitted) /
np.maximum(np.abs(resid2), 1e-2)**2 - 1)
w_p = 1. / self.link_precision.deriv(prec_fitted)**2
res_p2 = WLS(self.link_precision(prec_i2), self.exog_precision,
weights=w_p).fit()
prec_fitted = self.link_precision.inverse(res_p2.fittedvalues)
sp2 = np.concatenate((res_m2.params, res_p2.params))
if return_intermediate:
return sp2, res_m2, res_p2
return sp2
def fit(self, start_params=None, maxiter=1000, disp=False,
method='bfgs', **kwds):
"""
Fit the model by maximum likelihood.
Parameters
----------
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
maxiter : integer
The maximum number of iterations
disp : bool
Show convergence stats.
method : str
The optimization method to use.
kwds :
Keyword arguments for the optimizer.
Returns
-------
BetaResults instance.
"""
if start_params is None:
start_params = self._start_params()
# # http://www.ime.usp.br/~sferrari/beta.pdf suggests starting phi
# # on page 8
if "cov_type" in kwds:
# this is a workaround because we cannot tell super to use eim
if kwds["cov_type"].lower() == "eim":
self.hess_type = "eim"
del kwds["cov_type"]
else:
self.hess_type = "oim"
res = super(BetaModel, self).fit(start_params=start_params,
maxiter=maxiter, method=method,
disp=disp, **kwds)
if not isinstance(res, BetaResultsWrapper):
            # currently GenericLikelihoodModel does not add a wrapper
res = BetaResultsWrapper(res)
return res
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
not verified yet
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
link = self.link
lin_pred = self.predict(params, which="linear")
idl = link.inverse_deriv(lin_pred)
dmat = self.exog * idl[:, None]
return np.column_stack((dmat, np.zeros(self.exog_precision.shape)))
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
sf = self.score_factor(params, endog=y)
return np.column_stack(sf)
dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
# deriv is 2d vector
d1 = dsf[:, :1] * self.exog
d2 = dsf[:, 1:2] * self.exog_precision
return np.column_stack((d1, d2))
# code duplication with results class
def get_distribution_params(self, params, exog=None, exog_precision=None):
"""
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
            Array of predictor variables for precision.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
"""
mean = self.predict(params, exog=exog)
precision = self.predict(params, exog_precision=exog_precision,
which="precision")
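        # alpha = mu * phi and beta = (1 - mu) * phi: the standard (alpha, beta)
        # parameterization of the beta distribution with mean mu and precision phi.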
return precision * mean, precision * (1 - mean)
def get_distribution(self, params, exog=None, exog_precision=None):
"""
        Return an instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
            Array of predictor variables for precision.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
from scipy import stats
args = self.get_distribution_params(params, exog=exog,
exog_precision=exog_precision)
distr = stats.beta(*args)
return distr
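# Minimal, hypothetical usage sketch (reviewer addition, not part of the original
# module). It assumes an endog vector with values in (0, 1) and an exog design
# matrix; the function is illustrative only and is never called at import time.
def _example_get_distribution_usage(endog, exog):
    """Illustrative only: fit a BetaModel and sample from the fitted distribution."""
    mod = BetaModel(endog, exog)
    res = mod.fit()
    # get_distribution returns a frozen scipy.stats.beta; per the Notes above,
    # rvs must be called with n equal to the number of observations.
    distr = mod.get_distribution(res.params)
    return distr.rvs(len(endog))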
class BetaResults(GenericLikelihoodModelResults, _LLRMixin):
"""Results class for Beta regression
This class inherits from GenericLikelihoodModelResults and not all
inherited methods might be appropriate in this case.
"""
    # GenericLikelihoodModel doesn't define fittedvalues, residuals and similar
@cache_readonly
def fittedvalues(self):
"""In-sample predicted mean, conditional expectation."""
return self.model.predict(self.params)
@cache_readonly
def fitted_precision(self):
"""In-sample predicted precision"""
return self.model.predict(self.params, which="precision")
@cache_readonly
def resid(self):
"""Response residual"""
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
"""Pearson standardize residual"""
std = np.sqrt(self.model.predict(self.params, which="var"))
return self.resid / std
@cache_readonly
def prsquared(self):
"""Cox-Snell Likelihood-Ratio pseudo-R-squared.
        1 - exp((llnull - llf) * (2 / nobs))
"""
return self.pseudo_rsquared(kind="lr")
def get_distribution_params(self, exog=None, exog_precision=None,
transform=True):
"""
Return distribution parameters converted from model prediction.
Parameters
----------
        exog : array_like
            Array of predictor variables for mean.
        exog_precision : array_like
            Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
"""
mean = self.predict(exog=exog, transform=transform)
precision = self.predict(exog_precision=exog_precision,
which="precision", transform=transform)
return precision * mean, precision * (1 - mean)
def get_distribution(self, exog=None, exog_precision=None, transform=True):
"""
        Return an instance of the predictive distribution.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
            Array of predictor variables for precision.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
from scipy import stats
args = self.get_distribution_params(exog=exog,
exog_precision=exog_precision,
transform=transform)
args = (np.asarray(arg) for arg in args)
distr = stats.beta(*args)
return distr
def bootstrap(self, *args, **kwargs):
raise NotImplementedError
class BetaResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(BetaResultsWrapper,
BetaResults)
| 33.704972 | 79 | 0.577812 | [
"BSD-3-Clause"
] | EC-AI/statsmodels | statsmodels/othermod/betareg.py | 30,504 | Python |
from sys import exit
def gold_room():
    print("This room is full of gold. How much do you take?")
choice = input("> ")
if "0" in choice or "1" in choice:
how_much = int(choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has a bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved:
gold_room()
else:
            print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| 25.025 | 83 | 0.564935 | [
"MIT"
] | Eithandarphyo51/python-exercises | ex35.py | 2,002 | Python |
import serial
import csv
import os
serialPort = serial.Serial("COM10", baudrate=115200)
try:
os.rename('output.csv', 'ALTERAR_MEU_NOME.csv')
except IOError:
print('')
finally:
while(True):
arduinoData = serialPort.readline().decode("ascii")
print(arduinoData)
#add the data to the file
file = open("output.csv", "a") #append the data to the file
file.write(arduinoData) #write data with a newline
#close out the file
file.close()
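        # Reviewer sketch: the open/append/close cycle above could also be written
        # with a context manager, which closes the file even if the write fails:
        #     with open("output.csv", "a") as f:
        #         f.write(arduinoData)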
| 24.619048 | 68 | 0.624758 | [
"MIT"
] | yaguts1/Receiver | receiveGeneratorData.py | 517 | Python |
from nonebot.default_config import *
SUPERUSERS = {513673369}
COMMAND_START = {'', '/', '!', '/', '!'} | 25.75 | 40 | 0.61165 | [
"MIT"
] | liangshicheng-daniel/Saren | Saren_bot/config.py | 107 | Python |
###############################################################################
#
# DONE:
#
# 1. READ the code below.
# 2. TRACE (by hand) the execution of the code,
# predicting what will get printed.
# 3. Run the code and compare your prediction to what actually was printed.
# 4. Decide whether you are 100% clear on the CONCEPTS and the NOTATIONS for:
# -- DEFINING a function that has PARAMETERS
# -- CALLING a function with actual ARGUMENTS.
#
# *****************************************************************************
# If you are NOT 100% clear on the above concepts,
# ask your instructor or a student assistant about them during class.
# *****************************************************************************
#
# After you have completed the above, mark this _TODO_ as DONE.
#
###############################################################################
def main():
hello("Snow White")
goodbye("Bashful")
hello("Grumpy")
hello("Sleepy")
hello_and_goodbye("Magic Mirror", "Cruel Queen")
def hello(friend):
print("Hello,", friend, "- how are things?")
def goodbye(friend):
print("Goodbye,", friend, '- see you later!')
print(' Ciao!')
print(' Bai bai!')
def hello_and_goodbye(person1, person2):
hello(person1)
goodbye(person2)
main()
| 28.808511 | 79 | 0.5 | [
"MIT"
] | ColinBalitewicz/03-AccumulatorsAndFunctionsWithParameters | src/m1r_functions.py | 1,354 | Python |
from . import db
class Account(db.Model):
__tablename__ = 'account'
account_id = db.Column(db.Integer, primary_key=True)
account_name = db.Column(db.String(16), unique=True)
account_pwd = db.Column(db.String(16), unique=True)
account_nick = db.Column(db.String(16), unique=True)
account_email = db.Column(db.String(320), unique=True)
def __repr__(self):
return '<Account %r>' % self.account_name
class Doc(db.Model):
__tablename__ = 'doc'
doc_id = db.Column(db.Integer, primary_key=True)
doc_name = db.Column(db.String(16), unique=True)
account_id = db.Column(db.Integer, unique=True)
def __repr__(self):
return '<Doc %r>' % self.doc_name
class DataSrc(db.Model):
__tablename__ = 'datasrc'
data_id = db.Column(db.Integer, primary_key=True)
data_name = db.Column(db.String(16), unique=True)
data_type = db.Column(db.String(16), unique=True)
have_data = db.Column(db.Integer, unique=True)
account_id = db.Column(db.Integer, unique=True)
def __repr__(self):
return '<DataSrc %r>' % self.data_name | 33.333333 | 58 | 0.677273 | [
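# Hypothetical usage sketch (reviewer addition), assuming ``db`` is the
# Flask-SQLAlchemy instance imported above:
#     db.create_all()
#     db.session.add(Account(account_name="demo", account_pwd="secret",
#                            account_nick="demo", account_email="demo@example.com"))
#     db.session.commit()
#     Account.query.filter_by(account_name="demo").first()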
"MIT"
] | Justyer/NightHeartDataPlatform | firefly/app/models.py | 1,100 | Python |
"""
Django settings for YoutubeFun project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xb103+!$k5uhqga^9$^g=t^bw-3zo-6j4c+cf8x8mflhq-*qm2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Youtube_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'YoutubeFun.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'YoutubeFun.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
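# dj_database_url.config() reads the DATABASE_URL environment variable (for example
# as set on Heroku); when present, it overrides the sqlite default configured above.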
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
| 25.855856 | 71 | 0.705575 | [
"MIT"
] | dimemp/YoutubeFun | YoutubeFun/settings.py | 2,870 | Python |
import urllib.request
import os
import random
import socket
def url_open(url):
#代理
iplist=['60.251.63.159:8080','118.180.15.152:8102','119.6.136.122:80','183.61.71.112:8888']
proxys= random.choice(iplist)
print (proxys)
proxy_support = urllib.request.ProxyHandler({'http': proxys})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')]
urllib.request.install_opener(opener)
#头文件
head={}
head['User-Agent']='Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
req=urllib.request.Request(url,headers=head)
res = urllib.request.urlopen(req)
html = res.read()
print(url)
return html
def get_page(url):
html=url_open(url).decode('utf-8')
a = html.find('current-comment-page')+23
b = html.find(']',a)
return html[a:b]
def find_imgs(url):
html = url_open(url).decode('utf-8')
img_addrs = []
a = html.find('img src=')
while a != -1:
b = html.find('.jpg',a,a+255)
if b != -1:
img_addrs.append(html[a+9:b+4])
else:
b=a+9
a=html.find('img src=',b)
    # for each in img_addrs:
    #     print(each)
    return img_addrs
def save_imgs(folder,img_addrs):
socket.setdefaulttimeout(3)
for each in img_addrs:
try:
filename = each.split('/')[-1]
with open(filename,'wb') as f:
img = url_open(each)
f.write(img)
except Exception:
continue
def download_mm(folder = 'ooxx',pages=10):
os.mkdir(folder)
os.chdir(folder)
url='http://jandan.net/ooxx/'
#拿到所在页面
page_num= int(get_page(url))
for i in range(pages):
        # step back one page per iteration without cumulatively mutating page_num
        page_url = url + 'page-' + str(page_num - i) + '#comments'
#查询页面中的图片
img_addrs = find_imgs(page_url)
#保存图片
save_imgs(folder,img_addrs)
if __name__ == '__main__':
download_mm()
| 23.461538 | 144 | 0.594848 | [
"Apache-2.0"
] | dodopeng/Pythons | mmParse.py | 2,181 | Python |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING, Any
# Bokeh imports
from ..models import glyphs
from ._decorators import glyph_method, marker_method
if TYPE_CHECKING:
from ..models.canvas import CoordinateMapping
from ..models.plots import Plot
from ..models.renderers import GlyphRenderer
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"GlyphAPI",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class GlyphAPI:
""" """
@property
def plot(self) -> Plot | None:
return self._parent
@property
def coordinates(self) -> CoordinateMapping | None:
return self._coordinates
def __init__(self, parent: Plot | None = None, coordinates: CoordinateMapping | None = None) -> None:
self._parent = parent
self._coordinates = coordinates
@glyph_method(glyphs.AnnularWedge)
def annular_wedge(self, **kwargs):
pass
@glyph_method(glyphs.Annulus)
def annulus(self, **kwargs):
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
inner_radius=0.2, outer_radius=0.5)
show(plot)
"""
@glyph_method(glyphs.Arc)
def arc(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
pass
@marker_method()
def asterisk(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
show(plot)
"""
@glyph_method(glyphs.Bezier)
def bezier(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
pass
@glyph_method(glyphs.Circle)
def circle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
defaults to |data units|.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
show(plot)
"""
@marker_method()
def circle_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
"""
@marker_method()
def circle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_color=None)
show(plot)
"""
@marker_method()
def circle_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
"""
@marker_method()
def circle_y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
"""
@marker_method()
def cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
"""
@marker_method()
def dash(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
"""
@marker_method()
def diamond(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
"""
@marker_method()
def diamond_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
"""
@marker_method()
def diamond_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None)
show(plot)
"""
@marker_method()
def dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")
show(plot)
"""
@glyph_method(glyphs.HArea)
def harea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],
fill_color="#99D594")
show(plot)
"""
@glyph_method(glyphs.HBar)
def hbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
show(plot)
"""
@glyph_method(glyphs.Ellipse)
def ellipse(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
"""
@marker_method()
def hex(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
"""
@marker_method()
def hex_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],
color="#74ADD1", fill_color=None)
show(plot)
"""
@glyph_method(glyphs.HexTile)
def hex_tile(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300, match_aspect=True)
plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")
show(plot)
"""
@glyph_method(glyphs.Image)
def image(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
exception will be raised. If neither is passed, then the ``Greys9``
palette will be used as a default.
"""
@glyph_method(glyphs.ImageRGBA)
def image_rgba(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
values (encoded as 32-bit integers).
"""
@glyph_method(glyphs.ImageURL)
def image_url(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
pass
@marker_method()
def inverted_triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
"""
@glyph_method(glyphs.Line)
def line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(title="line", width=300, height=300)
p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
show(p)
"""
@glyph_method(glyphs.MultiLine)
def multi_line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
color=['red','green'])
show(p)
"""
@glyph_method(glyphs.MultiPolygons)
def multi_polygons(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
For this glyph, the data is not simply an array of scalars, it is a
nested array.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],
ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],
color=['red', 'green'])
show(p)
"""
@glyph_method(glyphs.Oval)
def oval(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
angle=-0.7, color="#1D91C0")
show(plot)
"""
@glyph_method(glyphs.Patch)
def patch(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
show(p)
"""
@glyph_method(glyphs.Patches)
def patches(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
color=["#43a2ca", "#a8ddb5"])
show(p)
"""
@marker_method()
def plus(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
"""
@glyph_method(glyphs.Quad)
def quad(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
right=[1.2, 2.5, 3.7], color="#B3DE69")
show(plot)
"""
@glyph_method(glyphs.Quadratic)
def quadratic(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
pass
@glyph_method(glyphs.Ray)
def ray(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
line_width=2)
show(plot)
"""
@glyph_method(glyphs.Rect)
def rect(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
width_units="screen", height_units="screen")
show(plot)
"""
@glyph_method(glyphs.Step)
def step(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")
show(plot)
"""
@glyph_method(glyphs.Segment)
def segment(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
color="#F4A582", line_width=3)
show(plot)
"""
@marker_method()
def square(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
"""
@marker_method()
def square_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
"""
@marker_method()
def square_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F", fill_color=None)
show(plot)
"""
@marker_method()
def square_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
"""
@marker_method()
def square_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#FDAE6B",fill_color=None, line_width=2)
show(plot)
"""
@marker_method()
def star(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
"""
@marker_method()
def star_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
"""
@glyph_method(glyphs.Text)
def text(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
.. note::
The location and angle of the text relative to the ``x``, ``y`` coordinates
is indicated by the alignment and baseline text properties.
"""
@marker_method()
def triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
"""
@marker_method()
def triangle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", fill_color=None)
show(plot)
"""
@marker_method()
def triangle_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
"""
@glyph_method(glyphs.VArea)
def varea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],
fill_color="#99D594")
show(plot)
"""
@glyph_method(glyphs.VBar)
def vbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
show(plot)
"""
@glyph_method(glyphs.Wedge)
def wedge(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
end_angle=4.1, radius_units="screen", color="#2b8cbe")
show(plot)
"""
@marker_method()
def x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
show(plot)
"""
@marker_method()
def y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
"""
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
"""
# -------------------------------------------------------------------------
@glyph_method(glyphs.Scatter)
def _scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
pass
def scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
''' Creates a scatter plot of the given x and y items.
Args:
x (str or seq[float]) : values or field names of center x coordinates
y (str or seq[float]) : values or field names of center y coordinates
size (str or list[float]) : values or field names of sizes in |screen units|
marker (str, or list[str]): values or field names of marker types
color (color value, optional): shorthand to set both fill and line color
source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
if needed. If none is supplied, one is created for the user automatically.
**kwargs: |line properties| and |fill properties|
Examples:
>>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")
>>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)
.. note::
When passing ``marker="circle"`` it is also possible to supply a
``radius`` value in |data units|. When configuring marker type
from a data source column, *all* markers including circles may only
be configured with ``size`` in |screen units|.
'''
marker_type = kwargs.pop("marker", "circle")
if isinstance(marker_type, str) and marker_type in _MARKER_SHORTCUTS:
marker_type = _MARKER_SHORTCUTS[marker_type]
# The original scatter implementation allowed circle scatters to set a
# radius. We will leave this here for compatibility but note that it
# only works when the marker type is "circle" (and not referencing a
# data source column). Consider deprecating in the future.
if marker_type == "circle" and "radius" in kwargs:
return self.circle(*args, **kwargs)
else:
return self._scatter(*args, marker=marker_type, **kwargs)
_MARKER_SHORTCUTS = {
"*" : "asterisk",
"+" : "cross",
"o" : "circle",
"o+" : "circle_cross",
"o." : "circle_dot",
"ox" : "circle_x",
"oy" : "circle_y",
"-" : "dash",
"." : "dot",
"v" : "inverted_triangle",
"^" : "triangle",
"^." : "triangle_dot",
}
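# Reviewer note (not in the original source): the table above lets ``scatter`` accept
# abbreviated marker strings, e.g. ``marker="o+"`` is expanded to ``"circle_cross"``
# before being forwarded to the underlying Scatter glyph, while ``marker="circle"``
# combined with a ``radius`` keyword is routed to ``circle`` for compatibility.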
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 25.221095 | 112 | 0.536995 | [
"BSD-3-Clause"
] | AzureTech/bokeh | bokeh/plotting/glyph_api.py | 24,868 | Python |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""init for cli and clize"""
import pathlib
import shlex
from collections import OrderedDict
from functools import partial
import clize
from clize import parameters
from clize.help import ClizeHelp, HelpForAutodetectedDocstring
from clize.parser import value_converter
from clize.runner import Clize
from sigtools.wrappers import decorator
# Imports are done in their functions to make calls to -h quicker.
# selected clize imports/constants
IGNORE = clize.Parameter.IGNORE
LAST_OPTION = clize.Parameter.LAST_OPTION
REQUIRED = clize.Parameter.REQUIRED
UNDOCUMENTED = clize.Parameter.UNDOCUMENTED
# help helpers
def docutilize(obj):
"""Convert Numpy or Google style docstring into reStructuredText format.
Args:
obj (str or object):
Takes an object and changes it's docstrings to a reStructuredText
format.
Returns:
str or object:
A converted string or an object with replaced docstring depending
on the type of the input.
"""
from inspect import cleandoc, getdoc
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if isinstance(obj, str):
doc = cleandoc(obj)
else:
doc = getdoc(obj)
doc = str(NumpyDocstring(doc))
doc = str(GoogleDocstring(doc))
doc = doc.replace(":exc:", "")
doc = doc.replace(":data:", "")
doc = doc.replace(":keyword", ":param")
doc = doc.replace(":kwtype", ":type")
if isinstance(obj, str):
return doc
obj.__doc__ = doc
return obj
class HelpForNapoleonDocstring(HelpForAutodetectedDocstring):
"""Subclass to add support for google style docstrings"""
def add_docstring(self, docstring, *args, **kwargs):
"""Adds the updated docstring."""
docstring = docutilize(docstring)
super().add_docstring(docstring, *args, **kwargs)
class DocutilizeClizeHelp(ClizeHelp):
"""Subclass to build Napoleon docstring from subject."""
def __init__(self, subject, owner, builder=HelpForNapoleonDocstring.from_subject):
super().__init__(subject, owner, builder)
# input handling
class ObjectAsStr(str):
"""Hide object under a string to pass it through Clize parser."""
__slots__ = ("original_object",)
def __new__(cls, obj, name=None):
if isinstance(obj, cls): # pass object through if already wrapped
return obj
if name is None:
name = cls.obj_to_name(obj)
self = str.__new__(cls, name)
self.original_object = obj
return self
@staticmethod
def obj_to_name(obj, cls=None):
"""Helper function to create the string."""
if cls is None:
cls = type(obj)
try:
obj_id = hash(obj)
except TypeError:
obj_id = id(obj)
return "<%s.%s@%i>" % (cls.__module__, cls.__name__, obj_id)
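# Illustration (reviewer note, not original): wrapping an in-memory cube as
# ``ObjectAsStr(cube)`` lets it travel through clize's string-based argument
# handling; ``maybe_coerce_with`` below then recovers the object via
# ``original_object`` instead of treating the string as a filename.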
def maybe_coerce_with(converter, obj, **kwargs):
"""Apply converter if str, pass through otherwise."""
obj = getattr(obj, "original_object", obj)
return converter(obj, **kwargs) if isinstance(obj, str) else obj
@value_converter
def inputcube(to_convert):
"""Loads cube from file or returns passed object.
Args:
to_convert (string or iris.cube.Cube):
File name or Cube object.
Returns:
Loaded cube or passed object.
"""
from improver.utilities.load import load_cube
return maybe_coerce_with(load_cube, to_convert)
@value_converter
def inputcube_nolazy(to_convert):
"""Loads cube from file or returns passed object.
Where a load is performed, it will not have lazy data.
Args:
to_convert (string or iris.cube.Cube):
File name or Cube object.
Returns:
Loaded cube or passed object.
"""
from improver.utilities.load import load_cube
if getattr(to_convert, "has_lazy_data", False):
# Realise data if lazy
to_convert.data
return maybe_coerce_with(load_cube, to_convert, no_lazy_load=True)
@value_converter
def inputcubelist(to_convert):
"""Loads a cubelist from file or returns passed object.
Args:
to_convert (string or iris.cube.CubeList):
File name or CubeList object.
Returns:
Loaded cubelist or passed object.
"""
from improver.utilities.load import load_cubelist
return maybe_coerce_with(load_cubelist, to_convert)
@value_converter
def inputjson(to_convert):
"""Loads json from file or returns passed object.
Args:
to_convert (string or dict):
File name or json dictionary.
Returns:
Loaded json dictionary or passed object.
"""
from improver.utilities.cli_utilities import load_json_or_none
return maybe_coerce_with(load_json_or_none, to_convert)
@value_converter
def comma_separated_list(to_convert):
"""Converts comma separated string to list or returns passed object.
Args:
to_convert (string or list)
comma separated string or list
Returns:
list
"""
return maybe_coerce_with(lambda s: s.split(","), to_convert)
@value_converter
def comma_separated_list_of_float(to_convert):
"""Converts comma separated string to list of floats or returns passed object.
Args:
to_convert (string or list)
comma separated string or list
Returns:
list
"""
return maybe_coerce_with(
lambda string: [float(s) for s in string.split(",")], to_convert
)
@value_converter
def inputpath(to_convert):
"""Converts string paths to pathlib Path objects
Args:
to_convert (string or pathlib.Path):
path represented as string
Returns:
(pathlib.Path): Path object
"""
return maybe_coerce_with(pathlib.Path, to_convert)
@value_converter
def inputdatetime(to_convert):
"""Converts string to datetime or returns passed object.
Args:
to_convert (string or datetime):
datetime represented as string of the format YYYYMMDDTHHMMZ
Returns:
(datetime): datetime object
"""
from improver.utilities.temporal import cycletime_to_datetime
return maybe_coerce_with(cycletime_to_datetime, to_convert)
def create_constrained_inputcubelist_converter(*constraints):
"""Makes function that the input constraints are used in a loop.
The function is a @value_converter, this means it is used by clize to convert
strings into objects.
This is a way of not using the IMPROVER load_cube which will try to merge
cubes. Iris load on the other hand won't deal with meta data properly.
So an example is if you wanted to load an X cube and a Y cube from a cubelist
of 2. You call this function with a list of constraints.
These cubes get loaded and returned as a CubeList.
Args:
*constraints (tuple of str or callable or iris.Constraint):
Constraints to be used in extracting the required cubes.
Each constraint must match exactly one cube and extracted cubes
will be sorted to match their order.
A constraint can be an iris.Constraint object or a callable
or cube name that can be used to construct one.
Returns:
callable:
A function with the constraints used for a list comprehension.
"""
@value_converter
def constrained_inputcubelist_converter(to_convert):
"""Passes the cube and constraints onto maybe_coerce_with.
Args:
to_convert (str or iris.cube.CubeList):
A CubeList or a filename to be loaded into a CubeList.
Returns:
iris.cube.CubeList:
The loaded cubelist of constrained cubes.
"""
from iris import Constraint
from iris.cube import CubeList
from improver.utilities.load import load_cubelist
cubelist = maybe_coerce_with(load_cubelist, to_convert)
return CubeList(
cubelist.extract_cube(
Constraint(cube_func=constr) if callable(constr) else constr
)
for constr in constraints
)
return constrained_inputcubelist_converter
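# Hypothetical example (reviewer addition) of how the factory above might be used to
# build a converter that extracts two named cubes from a single input file:
#     wind_cubes = create_constrained_inputcubelist_converter(
#         "wind_speed", "wind_from_direction")
# The returned value converter can then annotate a CLI argument so that loading and
# extraction happen during argument parsing.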
# output handling
@decorator
def with_output(
wrapped,
*args,
output=None,
compression_level=1,
least_significant_digit: int = None,
**kwargs,
):
"""Add `output` keyword only argument.
Add `compression_level` option.
Add `least_significant_digit` option.
This is used to add extra `output`, `compression_level` and `least_significant_digit` CLI
options. If `output` is provided, it saves the result of calling `wrapped` to file and returns
None, otherwise it returns the result. If `compression_level` is provided, it compresses the
data with the provided compression level (or not, if `compression_level` 0). If
`least_significant_digit` provided, it will quantize the data to a certain number of
significant figures.
Args:
wrapped (obj):
The function to be wrapped.
output (str, optional):
Output file name. If not supplied, the output object will be
printed instead.
compression_level (int):
Will set the compression level (1 to 9), or disable compression (0).
least_significant_digit (int):
If specified will truncate the data to a precision given by
10**(-least_significant_digit), e.g. if least_significant_digit=2, then the data will
be quantized to a precision of 0.01 (10**(-2)). See
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
for details. When used with `compression level`, this will result in lossy
compression.
Returns:
Result of calling `wrapped` or None if `output` is given.
"""
from improver.utilities.save import save_netcdf
result = wrapped(*args, **kwargs)
if output and result:
save_netcdf(result, output, compression_level, least_significant_digit)
return
return result
# cli object creation
def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
"""Decorator for creating CLI objects.
"""
if obj is None:
return partial(clizefy, helper_class=helper_class, **kwargs)
if hasattr(obj, "cli"):
return obj
if not callable(obj):
return Clize.get_cli(obj, **kwargs)
return Clize.keep(obj, helper_class=helper_class, **kwargs)
# help command
@clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
"""Show command help."""
prog_name = prog_name.split()[0]
args = filter(None, [command, "--help", usage and "--usage"])
result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *args)
if not command and usage:
result = "\n".join(
line
for line in result.splitlines()
if not line.endswith("--help [--usage]")
)
return result
def _cli_items():
"""Dynamically discover CLIs."""
import importlib
import pkgutil
from improver.cli import __path__ as improver_cli_pkg_path
yield ("help", improver_help)
for minfo in pkgutil.iter_modules(improver_cli_pkg_path):
mod_name = minfo.name
if mod_name != "__main__":
mcli = importlib.import_module("improver.cli." + mod_name)
yield (mod_name, clizefy(mcli.process))
SUBCOMMANDS_TABLE = OrderedDict(sorted(_cli_items()))
# main CLI object with subcommands
SUBCOMMANDS_DISPATCHER = clizefy(
SUBCOMMANDS_TABLE,
description="""IMPROVER NWP post-processing toolbox""",
footnotes="""See also improver --help for more information.""",
)
# IMPROVER top level main
def unbracket(args):
"""Convert input list with bracketed items into nested lists.
>>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']
"""
outargs = []
stack = []
mismatch_msg = "Mismatched bracket at position %i."
for i, arg in enumerate(args):
if arg == "[":
stack.append(outargs)
outargs = []
elif arg == "]":
if not stack:
raise ValueError(mismatch_msg % i)
stack[-1].append(outargs)
outargs = stack.pop()
else:
outargs.append(arg)
if stack:
raise ValueError(mismatch_msg % len(args))
return outargs
def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
"""Common entry point for command execution."""
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, (list, tuple)):
# process nested commands recursively
arg = execute_command(
dispatcher, prog_name, *arg, verbose=verbose, dry_run=dry_run
)
if isinstance(arg, pathlib.PurePath):
arg = str(arg)
elif not isinstance(arg, str):
arg = ObjectAsStr(arg)
args[i] = arg
if verbose or dry_run:
print(" ".join([shlex.quote(x) for x in (prog_name, *args)]))
if dry_run:
return args
result = dispatcher(prog_name, *args)
if verbose and result is not None:
print(ObjectAsStr.obj_to_name(result))
return result
@clizefy()
def main(
prog_name: parameters.pass_name,
command: LAST_OPTION,
*args,
profile: value_converter(lambda _: _, name="FILENAME") = None, # noqa: F821
memprofile: value_converter(lambda _: _, name="FILENAME") = None, # noqa: F821
verbose=False,
dry_run=False,
):
"""IMPROVER NWP post-processing toolbox
Results from commands can be passed into file-like arguments
of other commands by surrounding them by square brackets::
improver command [ command ... ] ...
Spaces around brackets are mandatory.
Args:
prog_name:
The program name from argv[0].
command (str):
Command to execute
args (tuple):
Command arguments
profile (str):
If given, will write profiling to the file given.
To write to stdout, use a hyphen (-)
memprofile (str):
            Creates 2 files by adding a suffix to the provided argument -
a tracemalloc snapshot at the point of highest memory consumption
of your program (suffixed with _SNAPSHOT)
and a track of the maximum memory used by your program
over time (suffixed with _MAX_TRACKER).
verbose (bool):
Print executed commands
dry_run (bool):
Print commands to be executed
See improver help [--usage] [command] for more information
on available command(s).
"""
args = unbracket(args)
exec_cmd = execute_command
if profile is not None:
from improver.profile import profile_hook_enable
profile_hook_enable(dump_filename=None if profile == "-" else profile)
if memprofile is not None:
from improver.memprofile import memory_profile_decorator
exec_cmd = memory_profile_decorator(exec_cmd, memprofile)
result = exec_cmd(
SUBCOMMANDS_DISPATCHER,
prog_name,
command,
*args,
verbose=verbose,
dry_run=dry_run,
)
return result
def run_main(argv=None):
"""Overrides argv[0] to be 'improver' then runs main.
Args:
argv (list of str):
Arguments that were from the command line.
"""
import sys
from clize import run
# clize help shows module execution as `python -m improver.cli`
# override argv[0] and pass it explicitly in order to avoid this
# so that the help command reflects the way that we call improver.
if argv is None:
argv = sys.argv[:]
argv[0] = "improver"
run(main, args=argv)
| 30.660279 | 98 | 0.663504 | [
"BSD-3-Clause"
] | anja-bom/improver | improver/cli/__init__.py | 17,599 | Python |
"""
input: image
output: little squares with faces
"""
import face_recognition
image = face_recognition.load_image_file("people.png")
face_locations = face_recognition.face_locations(image)
print(face_locations) | 21.3 | 55 | 0.812207 | [
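# Reviewer sketch (assumption, not in the original file): face_locations holds
# (top, right, bottom, left) tuples, so the "little squares" mentioned in the
# docstring could be drawn with Pillow, which face_recognition already depends on.
from PIL import Image, ImageDraw
pil_image = Image.fromarray(image)
draw = ImageDraw.Draw(pil_image)
for top, right, bottom, left in face_locations:
    # Draw a red box around each detected face.
    draw.rectangle(((left, top), (right, bottom)), outline="red", width=3)
pil_image.save("people_with_boxes.png")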
"Apache-2.0"
] | sebastianmihai01/CG-FaceRecognition | Features/face_extraction.py | 213 | Python |
import main
main.recover_unseeded_items() | 20.5 | 29 | 0.878049 | [
"MIT"
] | maziara/deluge-feed-innoreader | recover_unseeded.py | 41 | Python |
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Yi Li
# --------------------------------------------------------
from __future__ import print_function, division
import numpy as np
class Symbol:
def __init__(self):
self.arg_shape_dict = None
self.out_shape_dict = None
self.aux_shape_dict = None
self.sym = None
@property
def symbol(self):
return self.sym
def get_symbol(self, cfg, is_train=True):
"""
        Return the generated symbol; it also needs to be assigned to self.sym.
"""
raise NotImplementedError()
def init_weights(self, cfg, arg_params, aux_params):
raise NotImplementedError()
def get_msra_std(self, shape):
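        # He/MSRA initialization scale: std = sqrt(2 / fan_in), where fan_in is the
        # input channel count times the kernel area for convolution weights.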
fan_in = float(shape[1])
if len(shape) > 2:
fan_in *= np.prod(shape[2:])
print(np.sqrt(2 / fan_in))
return np.sqrt(2 / fan_in)
def infer_shape(self, data_shape_dict):
# infer shape
arg_shape, out_shape, aux_shape = self.sym.infer_shape(**data_shape_dict)
self.arg_shape_dict = dict(zip(self.sym.list_arguments(), arg_shape))
self.out_shape_dict = dict(zip(self.sym.list_outputs(), out_shape))
self.aux_shape_dict = dict(zip(self.sym.list_auxiliary_states(), aux_shape))
def check_parameter_shapes(
self, arg_params, aux_params, data_shape_dict, is_train=True
):
for k in self.sym.list_arguments():
if k in data_shape_dict or (False if is_train else "label" in k):
continue
assert k in arg_params, k + " not initialized"
assert arg_params[k].shape == self.arg_shape_dict[k], (
"shape inconsistent for "
+ k
+ " inferred "
+ str(self.arg_shape_dict[k])
+ " provided "
+ str(arg_params[k].shape)
)
for k in self.sym.list_auxiliary_states():
assert k in aux_params, k + " not initialized"
assert aux_params[k].shape == self.aux_shape_dict[k], (
"shape inconsistent for "
+ k
+ " inferred "
+ str(self.aux_shape_dict[k])
+ " provided "
+ str(aux_params[k].shape)
)
| 35.072464 | 84 | 0.554132 | [
"Apache-2.0"
] | 571502680/mx-DeepIM | lib/utils/symbol.py | 2,420 | Python |
__author__ = 'Eugene'
class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def return_to_the_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def fill_group_form(self, group):
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# init group creation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_the_groups_page()
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
self.select_first_group()
        # submit deletion
wd.find_element_by_name("delete").click()
self.return_to_the_groups_page()
def modify_first_group(self, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_first_group()
# init modify group
wd.find_element_by_name("edit").click()
# fill group form
self.fill_group_form(new_group_data)
# submit modify group
wd.find_element_by_name("update").click()
self.return_to_the_groups_page()
def count(self):
wd = self.app.wd
self.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
| 29.851351 | 100 | 0.640109 | [
"Apache-2.0"
] | eugene1smith/homeworks | fixture/group.py | 2,209 | Python |
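# URI Online Judge problem 1080: read 100 integers, then print the highest value
# and its 1-based position in the input.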
vet = []
i = 1
while(i <= 100):
valor = int(input())
vet.append(valor)
i = i + 1
print(max(vet))
print(vet.index(max(vet)) + 1)
| 9.733333 | 30 | 0.513699 | [
"MIT"
] | AndreAlbu/Questoes---URI---Python | 1080.py | 146 | Python |
import unittest
from creds3 import paddedInt
class TestPadLeft(unittest.TestCase):
def test_zero(self):
i = 0
self.assertEqual(paddedInt(i), "0" * 19)
def test_ten(self):
i = 10
self.assertEqual(paddedInt(i), str(i).zfill(19))
def test_arbitrary_number(self):
i = 98218329123
self.assertEqual(paddedInt(i), str(i).zfill(19))
def test_huge_number(self):
i = 12345678901234567890123
self.assertEqual(paddedInt(i), str(i).zfill(19))
| 24.52381 | 56 | 0.640777 | [
"Apache-2.0"
] | romanrev/creds3 | tests/pad_left_tests.py | 515 | Python |
from typing import List
from pdip.data import Entity
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from process.domain.base import Base
from process.domain.base.operation.DataOperationBase import DataOperationBase
from process.domain.operation.DataOperationContact import DataOperationContact
from process.domain.operation.DataOperationIntegration import DataOperationIntegration
from process.domain.operation.DataOperationJob import DataOperationJob
class DataOperation(DataOperationBase, Entity, Base):
__tablename__ = "DataOperation"
__table_args__ = {"schema": "Operation"}
DefinitionId = Column(Integer, ForeignKey('Operation.Definition.Id'))
Name = Column(String(100), index=True, unique=False, nullable=False)
Definition = relationship("Definition", back_populates="DataOperations")
DataOperationJobs: List[DataOperationJob] = relationship("DataOperationJob", back_populates="DataOperation")
Integrations: List[DataOperationIntegration] = relationship("DataOperationIntegration",
back_populates="DataOperation")
Contacts: List[DataOperationContact] = relationship("DataOperationContact",
back_populates="DataOperation")
| 53.44 | 112 | 0.747754 | [
"MIT"
] | ahmetcagriakca/pythondataintegrator | src/process/process/domain/operation/DataOperation.py | 1,336 | Python |
import pandas as pd
import numpy as np
import os
import json
import requests
from requests.exceptions import HTTPError
from dotenv import load_dotenv
from PIL import Image
from io import BytesIO
from IPython.core.display import display, HTML
def art_search(art):
'''
Function to retrieve the information about collections in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related info about the searched artworks.
Example:
-------------
>>>art_search('monet')
0 16568 Water Lilies Claude Monet\nFrench, 1840-1926 France 1906 1906 Oil on canvas [Painting and Sculpture of Europe, Essentials]
1 16571 Arrival of the Normandy Train, Gare Saint-Lazare Claude Monet\nFrench, 1840-1926 France 1877 1877 Oil on canvas [Painting and Sculpture of Europe]
'''
params_search = {'q': art}
r = requests.get("https://api.artic.edu/api/v1/artworks/search?fields=id,title,date_start,date_end,artist_display,place_of_origin,medium_display,category_titles", params = params_search)
try:
status = r.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
r1 = json.dumps(r.json(), indent = 2)
artsearch = json.loads(r1)
artworks = pd.DataFrame(artsearch['data'])
artworks_info = artworks[['id','title','artist_display','place_of_origin','date_start','date_end','medium_display','category_titles']]
return artworks_info
def tour_search(tour):
'''
Function to retrieve the information about tour in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related info about the searched tour.
Example:
-------------
>>>tour_search('monet')
0 4714 Monet and Chicago http://aic-mobile-tours.artic.edu/sites/defaul... <p>Monet and Chicago presents the city’s uniqu... <p>Monet and Chicago is the first exhibition t... [Cliff Walk at Pourville, Caricature of a Man ... [Claude Monet, Claude Monet, Claude Monet, Cla...
1 4520 Manet and Modern Beauty http://aic-mobile-tours.artic.edu/sites/defaul... <p>Dive deep into the life and mind of one the... <p>Manet is undoubtedly one of the most fascin... [] []
'''
params_search_tour = {'q': tour}
rt = requests.get("https://api.artic.edu/api/v1/tours/search?fields=id,image,title,description,intro,artwork_titles,artist_titles", params = params_search_tour)
try:
status = rt.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rt1 = json.dumps(rt.json(), indent = 2)
toursearch = json.loads(rt1)
ntour = pd.DataFrame(toursearch['data'])
tour_info = ntour[['id','title','image','description','intro','artwork_titles','artist_titles']]
return tour_info
def pic_search(pic, artist):
'''
Function to retrieve the images of artworks collected in the Art institute of Chicago
Parameters:
-------------
pic: the title of the artwork
artist: the full name of the artist
Returns:
-------------
Status code: str
if the API request went through
Image: jpg
        The image of the searched artwork
Error Message:
Error messsage if the search is invalid
Example:
-------------
>>>pic_search('Water Lillies', 'Claude Monet')
'''
params_search_pic = {'q': pic}
rp = requests.get("https://api.artic.edu/api/v1/artworks/search?fields=id,title,artist_display,image_id", params = params_search_pic)
linkhead = 'https://www.artic.edu/iiif/2/'
linktail = '/full/843,/0/default.jpg'
try:
status = rp.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rp1 = json.dumps(rp.json(), indent = 2)
picsearch = json.loads(rp1)
npic = pd.DataFrame(picsearch['data'])
pic_info = npic[['id','title','artist_display','image_id']]
df_len = len(pic_info)
for i in range(df_len):
if pic_info.iloc[i]['title'] == pic and (artist in pic_info.iloc[i]['artist_display']): # match title and artist with user input
get_image_id = pic_info.iloc[i]['image_id']
image_link = linkhead + get_image_id + linktail
response = requests.get(image_link)
img = Image.open(BytesIO(response.content))
return img
print("Invalid Search! Please find related information below :)")
return pic_info
def product_search(product_art, product_category):
'''
Function to retrieve the information about products sold in the Art institute of Chicago
Parameters:
-------------
    product_art: the title of the artwork the product relates to
    product_category: the product category to match, for example: 'Mug'
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe include related info about the products and images of the products
Example:
-------------
>>>product_search('Rainy Day', 'Mug')
>>>0 245410 Gustave Caillebotte Paris Street; Rainy Day Mug $9.95...
'''
params_search_product = {'q': product_art}
rpro = requests.get("https://api.artic.edu/api/v1/products?search", params = params_search_product)
try:
status = rpro.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rpro1 = json.dumps(rpro.json(), indent = 2)
productsearch = json.loads(rpro1)
nproduct = pd.DataFrame(productsearch['data'])
df_len1 = len(nproduct)
for i in range(df_len1):
if product_art in nproduct.iloc[i]['title'] and (product_category in nproduct.iloc[i]['description']): # match title and artist with user input
product_info = nproduct[['id','title','image_url','price_display','description']]
def path_to_image_html(path):
return '<img src="'+ path + '" width="60" >'
image_cols = ['image_url']
format_dict={}
for image_cols in image_cols:
format_dict[image_cols] = path_to_image_html
html = display(HTML(product_info.to_html(escape = False,formatters = format_dict)))
return html
    return "Invalid Search! Please try other artworks or categories:)"
def product_show(product_art_show):
'''
Function to retrieve the information about top10 products sold in the Art institute of Chicago
Parameters:
-------------
Type in any random word
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe include related info about the top 10 products and images of the products
Example:
-------------
>>>product_search('')
>>>0 250620 The Age of French Impressionism—Softcover $30...
'''
params_show_product = {'q': product_art_show}
rproshow = requests.get("https://api.artic.edu/api/v1/products?limit=10", params = params_show_product)
try:
status = rproshow.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rproshow1 = json.dumps(rproshow.json(), indent = 2)
productshow = json.loads(rproshow1)
nproductshow = pd.DataFrame(productshow['data'])
product_show_info = nproductshow[['id','title','image_url','price_display','description']]
def path_to_image_html(path):
return '<img src="'+ path + '" width="60" >'
image_cols1 = ['image_url']
format_dict={}
for image_cols1 in image_cols1:
format_dict[image_cols1] = path_to_image_html
html1 = display(HTML(product_show_info.to_html(escape = False,formatters = format_dict)))
return html1 | 37.609959 | 281 | 0.629523 | [
"MIT"
] | nicolewang97/AICAPI_YW3760 | src/aicapi_yw3760/aicapi_yw3760.py | 9,068 | Python |
import argparse
import sys
from typing import List, Sequence
from exabel_data_sdk import ExabelClient
from exabel_data_sdk.scripts.base_script import BaseScript
class ListTimeSeries(BaseScript):
"""
Lists all time series.
"""
def __init__(self, argv: Sequence[str], description: str):
super().__init__(argv, description)
self.parser.add_argument(
"--signal",
required=False,
type=str,
help="The resource name of a signal, for example 'signals/ns.signalIdentifier'",
)
self.parser.add_argument(
"--entity",
required=False,
type=str,
help=(
"The resource name of an entity, "
"for example 'entityTypes/company/entities/identifier'"
),
)
def run_script(self, client: ExabelClient, args: argparse.Namespace) -> None:
if (args.signal is None) == (args.entity is None):
raise ValueError("Specify either signal or entity, but not both.")
page_token = None
all_time_series: List[str] = []
while True:
if args.signal is not None:
result = client.time_series_api.get_signal_time_series(
args.signal, page_size=1000, page_token=page_token
)
else:
result = client.time_series_api.get_entity_time_series(
args.entity, page_size=1000, page_token=page_token
)
all_time_series.extend(result.results)
page_token = result.next_page_token
if len(all_time_series) == result.total_size:
break
if not all_time_series:
print("No time series.")
for time_series in all_time_series:
print(time_series)
if __name__ == "__main__":
ListTimeSeries(sys.argv, "Lists time series.").run()
| 32.216667 | 92 | 0.590274 | [
"MIT"
] | Exabel/python-sdk | exabel_data_sdk/scripts/list_time_series.py | 1,933 | Python |
from typing import List, Dict
import os
import json
import argparse
import sys
from string import Template
from common import get_files, get_links_from_file, get_id_files_dict, get_id_title_dict
FORCE_GRAPH_TEMPLATE_NAME = "force_graph.html"
OUTPUT_FILE_NAME = "output.html"
def generate_force_graph(id_files_dict: Dict, id_title_dict: Dict, dirname: str = "", highlight: List = None) -> None:
if not highlight:
highlight = []
# Create nodes
# Dict(id, group)
nodes = [
{"id": title, "group": 2 if uid in highlight else 1}
for uid, title in id_title_dict.items()
]
# Create links
# Dict(source, target, value)
links = []
for uid, file in id_files_dict.items():
file_links = get_links_from_file(file, dirname=dirname)
link_list = [
{"source": id_title_dict[uid], "target": id_title_dict[link], "value": 2}
for link in file_links
if id_title_dict.get(link, None)
]
links.extend(link_list)
# Create Output and open it
data = json.dumps({"nodes": nodes, "links": links})
with open(FORCE_GRAPH_TEMPLATE_NAME, "r") as f:
template = f.read()
s = Template(template)
with open(OUTPUT_FILE_NAME, "w") as out:
out.write(s.substitute(data=data))
os.system("open {}".format(OUTPUT_FILE_NAME))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"directory", help="Source Directory (Default to current directory)"
)
parser.add_argument("--highlight", nargs='*', help="Highlight zettel ID")
args = parser.parse_args()
dirname = args.directory
if not os.path.isdir(dirname):
print("Invalid directory, please check you directory")
exit(1)
# Handle the file
files = get_files(dirname)
if not files:
print("No markdown file found.")
exit(1)
# Create title and files map
id_files_dict = get_id_files_dict(files)
id_title_dict = get_id_title_dict(files)
if not id_files_dict or not id_title_dict:
print("No valid Zettel was found.")
exit(1)
highlight = []
if args.highlight is not None:
highlight = args.highlight if args.highlight else []
if not highlight:
highlight = [line.strip() for line in sys.stdin]
generate_force_graph(id_files_dict, id_title_dict, dirname, highlight=highlight)
| 29.481928 | 118 | 0.651819 | [
"MIT"
] | joashxu/zetteltools | zettvis.py | 2,447 | Python |
import os
import sys
import math
import copy
from binary_tree import BinaryTreeNode, BinaryTree, BinarySearchTree
from graph import GraphNode, Graph
# 4.6 find the next node (in-order) of a given node in a Binary Tree
# -> go back to the root and do an in-order traversal until the current node is met; return the next one
def get_next_node(node):
root = get_root_node(node)
is_next = [False]
next_node = get_next_node_in_order_of_node(node, root, is_next)
return next_node
def get_next_node_in_order_of_node(node, visit_node, is_next):
if is_next[0]:
return visit_node
if visit_node == None:
return None
node_next = get_next_node_in_order_of_node(node, visit_node.left, is_next)
if node_next != None:
return node_next
if is_next[0]:
return visit_node
if visit_node == node:
is_next[0] = True
node_next = get_next_node_in_order_of_node(node, visit_node.right, is_next)
if node_next != None:
return node_next
return None
def get_root_node(node):
root = node
while node.parent != None:
node = node.parent
return node
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# node = tree.root.left.right
# next_node = get_next_node(node)
# if next_node != None:
# print(next_node.value)
# else:
# print("None")
# 4.7 build projects
class Project:
name = ""
dependencies = list() # list of dependency projects
state = 0 # 0: waiting, 1: built
def __init__(self, name):
self.name = name
self.state = 0
self.dependencies = list()
def build_projects(projects):
build_queue = list()
while True:
has_new_build = False
for project in projects:
if project.state == 0:
if build_project(project) == True:
build_queue.append(project.name)
project.state = 1
has_new_build = True
if has_new_build == False:
break
is_built_all = True
for project in projects:
if project.state == 0:
is_built_all = False
break
if is_built_all:
return build_queue
else:
return False
def build_project(project):
is_dependencies_built = True
for dep in project.dependencies:
if dep.state != 1:
is_dependencies_built = False
break
if is_dependencies_built:
project.state = 1
return True
else:
return False
# a = Project("a")
# b = Project("b")
# c = Project("c")
# d = Project("d")
# e = Project("e")
# f = Project("f")
# d.dependencies.append(a)
# b.dependencies.append(f)
# d.dependencies.append(b)
# a.dependencies.append(f)
# c.dependencies.append(d)
# t = build_projects([a,b,c,d,e,f])
# print(t)
# 4.8 find first common ancestor
# -> collect the ancestor chain of node 1 and compare it against node 2's ancestors
def get_common_ancestor(node1, node2):
if node1 == node2:
return node1
node1_parents = list()
parent1 = node1
while parent1 != None:
node1_parents.append(parent1)
parent1 = parent1.parent
node2_parents = list()
parent2 = node2
while parent2 != None:
node2_parents.append(parent2)
parent2 = parent2.parent
common_ancestor = None
for p1 in node1_parents:
for p2 in node2_parents:
if p1 == p2:
common_ancestor = p1
break
if common_ancestor != None:
break
return common_ancestor
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# n1 = tree.root.left.left
# n2 = tree.root.right.left
# common = get_common_ancestor(n1, n2)
# print(common.value)
# 4.9 print all possible arrays that could have created the binary search tree
def dump_permutation_of_source_array(tree):
if tree.root != None:
_dump_permutation_of_source_array([tree.root], [])
else:
print("tree is empty")
def _dump_permutation_of_source_array(candidate_nodes, visited_nodes):
if len(candidate_nodes) == 0:
dump_nodes(visited_nodes)
return
n = len(candidate_nodes)
for i in range(0, n):
_visited_nodes = copy.deepcopy(visited_nodes)
_candidate_nodes = copy.deepcopy(candidate_nodes)
_visited_nodes.append(_candidate_nodes[i])
_candidate_nodes.remove(_candidate_nodes[i])
node = candidate_nodes[i]
if node.left != None:
_candidate_nodes.insert(0, node.left)
if node.right != None:
_candidate_nodes.insert(0, node.right)
_dump_permutation_of_source_array(_candidate_nodes, _visited_nodes)
def dump_nodes(nodes):
values = []
for node in nodes:
values.append(node.value)
print("source:", values)
# Test
# values = [2,1,3,4]
# values1 = [10,5,15,4,6,14,16]
# tree = BinarySearchTree()
# for v in values1:
# tree.append(v)
# dump_permutation_of_source_array(tree) | 22.042453 | 88 | 0.682217 | [
"MIT"
] | tranquan/coding-dojo | cracking-the-coding-interview/1-chapter4_1.py | 4,673 | Python |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
"""
Built-in value transformers.
"""
import datetime as dt
from typing import Any, Sequence
from datadog_checks.base import AgentCheck
from datadog_checks.base.types import ServiceCheckStatus
from datadog_checks.base.utils.db.utils import normalize_datetime
def length(value):
# type: (Sequence) -> int
return len(value)
def to_time_elapsed(datetime):
# type: (dt.datetime) -> float
datetime = normalize_datetime(datetime)
elapsed = dt.datetime.now(datetime.tzinfo) - datetime
return elapsed.total_seconds()
def ok_warning(value):
# type: (Any) -> ServiceCheckStatus
return AgentCheck.OK if value else AgentCheck.WARNING
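# Hedged usage examples (addition, not part of the original module):
#   length([1, 2, 3]) -> 3
#   ok_warning(True) -> AgentCheck.OK, ok_warning(0) -> AgentCheck.WARNING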
| 25.833333 | 65 | 0.748387 | [
"BSD-3-Clause"
] | 0gajun/integrations-core | rethinkdb/datadog_checks/rethinkdb/document_db/transformers.py | 775 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Performance benchmark tests for MXNet NDArray Unary Operations.
1. Operators are automatically fetched from MXNet operator registry.
2. Default Inputs are generated. See rules/default_params.py. You can override the default values.
Below 54 unary Operators are covered:
['BlockGrad', 'Flatten', 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'argmax_channel', 'cbrt', 'ceil', 'cos', 'cosh', 'degrees', 'erf', 'erfinv', 'exp', 'expm1', 'fix', 'flatten',
'floor', 'gamma', 'gammaln', 'identity', 'log', 'log10', 'log1p', 'log2', 'logical_not', 'make_loss', 'negative',
'ones_like', 'radians', 'rcbrt', 'reciprocal', 'relu', 'rint', 'round', 'rsqrt', 'shuffle', 'sigmoid', 'sign',
'sin', 'sinh', 'size_array', 'softsign', 'sqrt', 'square', 'stop_gradient', 'tan', 'tanh', 'trunc', 'zeros_like']
"""
import mxnet as mx
from benchmark.opperf.utils.op_registry_utils import get_all_unary_operators
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
def run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype)for all the unary
operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# Fetch all Unary Operators
mx_unary_broadcast_ops = get_all_unary_operators()
# Run benchmarks
mx_unary_op_results = run_op_benchmarks(mx_unary_broadcast_ops, dtype, ctx, warmup, runs)
return mx_unary_op_results
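# Hedged usage sketch (addition): the __main__ guard below is not part of the
# original file; it simply runs the CPU float32 benchmarks and prints the results.
if __name__ == "__main__":
    print(run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype="float32"))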
| 41.825397 | 113 | 0.718406 | [
"Apache-2.0"
] | Angzz/DeformableV2 | benchmark/opperf/nd_operations/unary_operators.py | 2,635 | Python |
from http import HTTPStatus
from src.app.post.enum import PostStatus
from src.app.post.model import PostModel
from src.common.authorization import Authorizer
from src.common.decorator import api_response
from src.common.exceptions import (ExceptionHandler, ItemNotFoundException)
class GetService(object):
def __init__(self, path_param, user_group):
self.path_param = path_param
self.user_group = user_group
@api_response()
def execute(self):
try:
if Authorizer.is_admin(user_group=self.user_group):
return self._get_post_object()
return self._find_filtered_result()
except ItemNotFoundException as ex:
ExceptionHandler.handel_exception(exception=ex)
return HTTPStatus.NOT_FOUND
except PostModel.DoesNotExist as ex:
ExceptionHandler.handel_exception(exception=ex)
return HTTPStatus.NOT_FOUND
def _get_post_object(self, query_filter=None):
for item in PostModel.query(self.path_param, filter_condition=query_filter):
return item
raise ItemNotFoundException
def _find_filtered_result(self):
return self._get_post_object(
query_filter=PostModel.status.is_in(PostStatus.PUBLISHED.value))
| 35.722222 | 84 | 0.716952 | [
"MIT"
] | Thiqah-Lab/aws-serverless-skeleton | src/app/post/get.py | 1,286 | Python |
from utils.compute import get_landmark_3d, get_vector_intersection
from utils.visualize import HumanPoseVisualizer
from utils.OakRunner import OakRunner
from utils.pose import getKeypoints
from utils.draw import displayFPS
from pathlib import Path
import depthai as dai
import numpy as np
import cv2
fps_limit = 3
frame_width, frame_height = 456, 256
pairs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],
[1, 0], [0, 14], [14, 16], [0, 15], [15, 17]]
colors = [[255, 100, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [0, 255, 0],
[100, 200, 255], [255, 0, 255], [0, 255, 0], [100, 200, 255], [255, 0, 255], [255, 0, 0], [0, 0, 255],
[0, 200, 200], [0, 0, 255], [0, 200, 200], [0, 0, 0]]
threshold = 0.3
nb_points = 18
def init(runner, device):
calibration = device.readCalibration()
left_intrinsics = np.array(calibration.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, 1280, 720))
right_intrinsics = np.array(calibration.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720))
runner.custom_arguments["focal_length_left"] = left_intrinsics[0,0]
runner.custom_arguments["focal_length_right"] = right_intrinsics[0,0]
runner.custom_arguments["size_left"] = left_intrinsics[0,2]
runner.custom_arguments["size_right"] = right_intrinsics[0,2]
runner.custom_arguments["visualizer"] = HumanPoseVisualizer(300, 300, [runner.left_camera_location, runner.right_camera_location], colors=colors, pairs=pairs)
runner.custom_arguments["visualizer"].start()
def process(runner):
spatial_vectors = dict()
for side in ["left", "right"]:
frame = runner.output_queues[side+"_cam"].get().getCvFrame()
nn_current_output = runner.output_queues["nn_"+side].get()
heatmaps = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L2')).reshape((1, 19, 32, 57)).astype('float32')
pafs = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L1')).reshape((1, 38, 32, 57)).astype('float32')
outputs = np.concatenate((heatmaps, pafs), axis=1)
spatial_vectors[side] = []
landmarks = []
for i in range(nb_points):
probMap = outputs[0, i, :, :]
probMap = cv2.resize(probMap, (frame_width, frame_height))
keypoints = getKeypoints(probMap, threshold)
if(len(keypoints) > 0 and len(keypoints[0]) > 1):
spatial_vectors[side].append(np.array(get_landmark_3d((keypoints[0][0]/frame_width, keypoints[0][1]/frame_height), focal_length=runner.custom_arguments["focal_length_"+side], size=runner.custom_arguments["size_"+side])))
landmarks.append([keypoints[0][0], keypoints[0][1]])
cv2.circle(frame, (keypoints[0][0], keypoints[0][1]), 5, (colors[i][2], colors[i][1], colors[i][0]), -1, cv2.LINE_AA) # draw keypoint
else:
spatial_vectors[side].append(keypoints) # insert empty array if the keypoint is not detected with enough confidence
landmarks.append(keypoints)
for pair in pairs:
if(np.alltrue([len(landmarks[i])==2 for i in pair])):
color = [0, 0, 0]
for i in range(3):
color[i] += colors[pair[0]][i]/2
color[i] += colors[pair[1]][i]/2
cv2.line(frame, (landmarks[pair[0]][0], landmarks[pair[0]][1]), (landmarks[pair[1]][0], landmarks[pair[1]][1]), (color[2], color[1], color[0]), 3, cv2.LINE_AA)
displayFPS(frame, runner.getFPS())
cv2.imshow(side, frame)
    # Determine depth to accurately locate landmarks in space
landmark_spatial_locations = []
for i in range(nb_points):
landmark_spatial_locations.append(np.array(get_vector_intersection(spatial_vectors["left"][i], runner.left_camera_location, spatial_vectors["right"][i], runner.right_camera_location)))
runner.custom_arguments["visualizer"].setLandmarks(landmark_spatial_locations)
runner = OakRunner()
for side in ["left", "right"]:
if(side == "left"):
runner.setLeftCamera(frame_width, frame_height)
runner.getLeftCamera().setFps(fps_limit)
manip = runner.getLeftCameraManip()
else:
runner.setRightCamera(frame_width, frame_height)
runner.getRightCamera().setFps(fps_limit)
manip = runner.getRightCameraManip()
manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR (but still grayscaled)
runner.addNeuralNetworkModel(stream_name="nn_"+side, path=str(Path(__file__).parent) + "/../_models/pose_estimation.blob", handle_mono_depth=False)
manip.out.link(runner.neural_networks["nn_"+side].input) # link transformed video stream to neural network entry
runner.run(process=process, init=init) | 50.092784 | 236 | 0.659189 | [
"MIT"
] | Ikomia-dev/ikomia-oakd | _examples/pose_estimation.py | 4,859 | Python |
import sys
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_scalar
from dask.dataframe import methods
from dask.dataframe.core import DataFrame, Series, apply_concat_apply, map_partitions
from dask.dataframe.utils import has_known_categories
from dask.utils import M
###############################################################
# Dummies
###############################################################
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=np.uint8,
**kwargs,
):
"""
Convert categorical variable into dummy/indicator variables.
Data must have category dtype to infer result's ``columns``.
Parameters
----------
data : Series, or DataFrame
For Series, the dtype must be categorical.
For DataFrame, at least one column must be categorical.
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.18.2
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.18.2
Returns
-------
dummies : DataFrame
Examples
--------
Dask's version only works with Categorical data, as this is the only way to
know the output shape without computing all the data.
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)
>>> dd.get_dummies(s)
Traceback (most recent call last):
...
NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...
With categorical data:
>>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)
>>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE
Dask DataFrame Structure:
a b c
npartitions=2
0 uint8 uint8 uint8
2 ... ... ...
3 ... ... ...
Dask Name: get_dummies, 4 tasks
>>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
See Also
--------
pandas.get_dummies
"""
if isinstance(data, (pd.Series, pd.DataFrame)):
return pd.get_dummies(
data,
prefix=prefix,
prefix_sep=prefix_sep,
dummy_na=dummy_na,
columns=columns,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
**kwargs,
)
not_cat_msg = (
"`get_dummies` with non-categorical dtypes is not "
"supported. Please use `df.categorize()` beforehand to "
"convert to categorical dtype."
)
unknown_cat_msg = (
"`get_dummies` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known "
"categories"
)
if isinstance(data, Series):
if not methods.is_categorical_dtype(data):
raise NotImplementedError(not_cat_msg)
if not has_known_categories(data):
raise NotImplementedError(unknown_cat_msg)
elif isinstance(data, DataFrame):
if columns is None:
if (data.dtypes == "object").any():
raise NotImplementedError(not_cat_msg)
columns = data._meta.select_dtypes(include=["category"]).columns
else:
if not all(methods.is_categorical_dtype(data[c]) for c in columns):
raise NotImplementedError(not_cat_msg)
if not all(has_known_categories(data[c]) for c in columns):
raise NotImplementedError(unknown_cat_msg)
package_name = data._meta.__class__.__module__.split(".")[0]
dummies = sys.modules[package_name].get_dummies
return map_partitions(
dummies,
data,
prefix=prefix,
prefix_sep=prefix_sep,
dummy_na=dummy_na,
columns=columns,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
**kwargs,
)
###############################################################
# Pivot table
###############################################################
def pivot_table(df, index=None, columns=None, values=None, aggfunc="mean"):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, and ``aggfunc`` must be all scalar.
``values`` can be scalar or list-like.
Parameters
----------
df : DataFrame
index : scalar
column to be index
columns : scalar
column to be columns
values : scalar or list(scalar)
column(s) to aggregate
aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'
Returns
-------
table : DataFrame
See Also
--------
pandas.DataFrame.pivot_table
"""
if not is_scalar(index) or index is None:
raise ValueError("'index' must be the name of an existing column")
if not is_scalar(columns) or columns is None:
raise ValueError("'columns' must be the name of an existing column")
if not methods.is_categorical_dtype(df[columns]):
raise ValueError("'columns' must be category dtype")
if not has_known_categories(df[columns]):
raise ValueError(
"'columns' must have known categories. Please use "
"`df[columns].cat.as_known()` beforehand to ensure "
"known categories"
)
if not (
is_list_like(values)
and all([is_scalar(v) for v in values])
or is_scalar(values)
):
raise ValueError("'values' must refer to an existing column or columns")
available_aggfuncs = ["mean", "sum", "count", "first", "last"]
if not is_scalar(aggfunc) or aggfunc not in available_aggfuncs:
raise ValueError(
"aggfunc must be either " + ", ".join(f"'{x}'" for x in available_aggfuncs)
)
# _emulate can't work for empty data
# the result must have CategoricalIndex columns
columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)
if is_scalar(values):
new_columns = columns_contents
else:
new_columns = pd.MultiIndex.from_product(
(sorted(values), columns_contents), names=[None, columns]
)
if aggfunc in ["first", "last"]:
# Infer datatype as non-numeric values are allowed
if is_scalar(values):
meta = pd.DataFrame(
columns=new_columns,
dtype=df[values].dtype,
index=pd.Index(df._meta[index]),
)
else:
meta = pd.DataFrame(
columns=new_columns,
index=pd.Index(df._meta[index]),
)
for value_col in values:
meta[value_col] = meta[value_col].astype(df[values].dtypes[value_col])
else:
# Use float64 as other aggregate functions require numerical data
meta = pd.DataFrame(
columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index])
)
kwargs = {"index": index, "columns": columns, "values": values}
if aggfunc in ["sum", "mean"]:
pv_sum = apply_concat_apply(
[df],
chunk=methods.pivot_sum,
aggregate=methods.pivot_agg,
meta=meta,
token="pivot_table_sum",
chunk_kwargs=kwargs,
)
if aggfunc in ["count", "mean"]:
pv_count = apply_concat_apply(
[df],
chunk=methods.pivot_count,
aggregate=methods.pivot_agg,
meta=meta,
token="pivot_table_count",
chunk_kwargs=kwargs,
)
if aggfunc == "sum":
return pv_sum
elif aggfunc == "count":
return pv_count
elif aggfunc == "mean":
return pv_sum / pv_count
elif aggfunc == "first":
return apply_concat_apply(
[df],
chunk=methods.pivot_first,
aggregate=methods.pivot_agg_first,
meta=meta,
token="pivot_table_first",
chunk_kwargs=kwargs,
)
elif aggfunc == "last":
return apply_concat_apply(
[df],
chunk=methods.pivot_last,
aggregate=methods.pivot_agg_last,
meta=meta,
token="pivot_table_last",
chunk_kwargs=kwargs,
)
else:
raise ValueError
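# Hedged usage sketch (addition, not part of the original module), assuming
# `import pandas as pd` and `import dask.dataframe as dd`; the column names are
# invented for illustration:
#
#   >>> pdf = pd.DataFrame({"store": list("AABB"), "item": list("xyxy"), "sales": [1, 2, 3, 4]})
#   >>> ddf = dd.from_pandas(pdf, npartitions=2).categorize(columns=["item"])
#   >>> dd.pivot_table(ddf, index="store", columns="item", values="sales", aggfunc="sum").compute()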
###############################################################
# Melt
###############################################################
def melt(
frame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
"""
Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one or more columns are identifier variables
(``id_vars``), while all other columns, considered measured variables (``value_vars``), are "unpivoted" to the row
axis, leaving just two non-identifier columns, 'variable' and 'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
pandas.DataFrame.melt
"""
from dask.dataframe.core import no_default
return frame.map_partitions(
M.melt,
meta=no_default,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
token="melt",
)
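# Hedged usage sketch (addition), assuming `import pandas as pd` and
# `import dask.dataframe as dd`; the column names are invented for illustration:
#
#   >>> ddf = dd.from_pandas(pd.DataFrame({"id": [1, 2], "a": [3, 4], "b": [5, 6]}), npartitions=1)
#   >>> dd.melt(ddf, id_vars="id", value_vars=["a", "b"]).compute()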
| 31.416438 | 118 | 0.588297 | [
"BSD-3-Clause"
] | Kirito1397/dask | dask/dataframe/reshape.py | 11,467 | Python |
import json
from sqlalchemy.orm import subqueryload
from werkzeug.exceptions import BadRequest, NotFound, PreconditionFailed
from rdr_service import clock
from rdr_service.code_constants import PPI_EXTRA_SYSTEM
from rdr_service.dao.base_dao import BaseDao, UpdatableDao
from rdr_service.lib_fhir.fhirclient_1_0_6.models import questionnaire
from rdr_service.model.code import CodeType
from rdr_service.model.questionnaire import (
Questionnaire,
QuestionnaireConcept,
QuestionnaireHistory,
QuestionnaireQuestion,
)
_SEMANTIC_DESCRIPTION_EXTENSION = "http://all-of-us.org/fhir/forms/semantic-description"
_IRB_MAPPING_EXTENSION = "http://all-of-us.org/fhir/forms/irb-mapping"
class QuestionnaireDao(UpdatableDao):
def __init__(self):
super(QuestionnaireDao, self).__init__(Questionnaire)
def get_id(self, obj):
return obj.questionnaireId
def get_with_children(self, questionnaire_id):
with self.session() as session:
query = session.query(Questionnaire).options(
subqueryload(Questionnaire.concepts), subqueryload(Questionnaire.questions)
)
return query.get(questionnaire_id)
def has_dup_semantic_version(self, session, questionnaire_id, semantic_version):
record = session.query(QuestionnaireHistory)\
.filter(QuestionnaireHistory.questionnaireId == questionnaire_id,
QuestionnaireHistory.semanticVersion == semantic_version)\
.first()
return True if record else False
def get_latest_questionnaire_with_concept(self, codeId):
"""Find the questionnaire most recently modified that has the specified concept code."""
with self.session() as session:
return (
session.query(Questionnaire)
.join(Questionnaire.concepts)
.filter(QuestionnaireConcept.codeId == codeId)
.order_by(Questionnaire.lastModified.desc())
.options(subqueryload(Questionnaire.questions))
.first()
)
def _make_history(self, questionnaire, concepts, questions):
# pylint: disable=redefined-outer-name
history = QuestionnaireHistory()
history.fromdict(questionnaire.asdict(), allow_pk=True)
for concept in concepts:
new_concept = QuestionnaireConcept()
new_concept.fromdict(concept.asdict())
new_concept.questionnaireId = questionnaire.questionnaireId
new_concept.questionnaireVersion = questionnaire.version
history.concepts.append(new_concept)
for question in questions:
new_question = QuestionnaireQuestion()
new_question.fromdict(question.asdict())
new_question.questionnaireId = questionnaire.questionnaireId
new_question.questionnaireVersion = questionnaire.version
history.questions.append(new_question)
return history
def insert_with_session(self, session, questionnaire):
# pylint: disable=redefined-outer-name
questionnaire.created = clock.CLOCK.now()
questionnaire.lastModified = clock.CLOCK.now()
questionnaire.version = 1
# SQLAlchemy emits warnings unnecessarily when these collections aren't empty.
# We don't want these to be cascaded now anyway, so point them at nothing, but save
# the concepts and questions for use in history.
concepts = list(questionnaire.concepts)
questions = list(questionnaire.questions)
questionnaire.concepts = []
questionnaire.questions = []
super(QuestionnaireDao, self).insert_with_session(session, questionnaire)
# This is needed to assign an ID to the questionnaire, as the client doesn't need to provide
# one.
session.flush()
# Set the ID in the resource JSON
resource_json = json.loads(questionnaire.resource)
resource_json["id"] = str(questionnaire.questionnaireId)
questionnaire.semanticVersion = resource_json['version']
questionnaire.resource = json.dumps(resource_json)
history = self._make_history(questionnaire, concepts, questions)
history.questionnaireId = questionnaire.questionnaireId
QuestionnaireHistoryDao().insert_with_session(session, history)
return questionnaire
def _do_update(self, session, obj, existing_obj):
# If the provider link changes, update the HPO ID on the participant and its summary.
obj.lastModified = clock.CLOCK.now()
obj.version = existing_obj.version + 1
obj.created = existing_obj.created
resource_json = json.loads(obj.resource)
resource_json["id"] = str(obj.questionnaireId)
obj.semanticVersion = resource_json['version']
obj.resource = json.dumps(resource_json)
super(QuestionnaireDao, self)._do_update(session, obj, existing_obj)
def update_with_session(self, session, questionnaire):
# pylint: disable=redefined-outer-name
super(QuestionnaireDao, self).update_with_session(session, questionnaire)
QuestionnaireHistoryDao().insert_with_session(
session, self._make_history(questionnaire, questionnaire.concepts, questionnaire.questions)
)
@classmethod
def from_client_json(cls, resource_json, id_=None, expected_version=None, client_id=None):
# pylint: disable=unused-argument
# Parse the questionnaire to make sure it's valid, but preserve the original JSON
# when saving.
fhir_q = questionnaire.Questionnaire(resource_json)
if not fhir_q.group:
raise BadRequest("No top-level group found in questionnaire")
if 'version' not in resource_json:
raise BadRequest('No version info found in questionnaire')
external_id = None
if fhir_q.identifier and len(fhir_q.identifier) > 0:
external_id = fhir_q.identifier[0].value
semantic_desc = None
irb_mapping = None
if fhir_q.extension:
for ext in fhir_q.extension:
if ext.url == _SEMANTIC_DESCRIPTION_EXTENSION:
semantic_desc = ext.valueString
if ext.url == _IRB_MAPPING_EXTENSION:
irb_mapping = ext.valueString
q = Questionnaire(
resource=json.dumps(resource_json),
questionnaireId=id_,
semanticVersion=expected_version,
externalId=external_id,
semanticDesc=semantic_desc,
irbMapping=irb_mapping
)
# Assemble a map of (system, value) -> (display, code_type, parent_id) for passing into CodeDao.
# Also assemble a list of (system, code) for concepts and (system, code, linkId) for questions,
# which we'll use later when assembling the child objects.
code_map, concepts, questions = cls._extract_codes(fhir_q.group)
from rdr_service.dao.code_dao import CodeDao
# Get or insert codes, and retrieve their database IDs.
code_id_map = CodeDao().get_internal_id_code_map(code_map)
# Now add the child objects, using the IDs in code_id_map
cls._add_concepts(q, code_id_map, concepts)
cls._add_questions(q, code_id_map, questions)
return q
def _validate_update(self, session, obj, existing_obj):
"""Validates that an update is OK before performing it. (Not applied on insert.)
By default, validates that the object already exists, and if an expected semanticVersion ID is provided,
that it matches.
"""
if not existing_obj:
            raise NotFound('%s with id %s does not exist' % (self.model_type.__name__, self.get_id(obj)))
if self.validate_version_match and existing_obj.semanticVersion != obj.semanticVersion:
raise PreconditionFailed('Expected semanticVersion was %s; stored semanticVersion was %s' %
(obj.semanticVersion, existing_obj.semanticVersion))
resource_json = json.loads(obj.resource)
exist_id = str(obj.questionnaireId)
new_semantic_version = resource_json['version']
if self.has_dup_semantic_version(session, exist_id, new_semantic_version):
            raise BadRequest('This semantic version already exists for this questionnaire id.')
self._validate_model(session, obj)
@classmethod
def _add_concepts(cls, q, code_id_map, concepts):
for system, code in concepts:
q.concepts.append(
QuestionnaireConcept(
questionnaireId=q.questionnaireId,
questionnaireVersion=q.version,
codeId=code_id_map.get(system, code),
)
)
@classmethod
def _add_questions(cls, q, code_id_map, questions):
for system, code, linkId, repeats in questions:
q.questions.append(
QuestionnaireQuestion(
questionnaireId=q.questionnaireId,
questionnaireVersion=q.version,
linkId=linkId,
codeId=code_id_map.get(system, code),
repeats=repeats if repeats else False,
)
)
@classmethod
def _extract_codes(cls, group):
code_map = {}
concepts = []
questions = []
if group.concept:
for concept in group.concept:
if concept.system and concept.code and concept.system != PPI_EXTRA_SYSTEM:
code_map[(concept.system, concept.code)] = (concept.display, CodeType.MODULE, None)
concepts.append((concept.system, concept.code))
cls._populate_questions(group, code_map, questions)
return (code_map, concepts, questions)
@classmethod
def _populate_questions(cls, group, code_map, questions):
"""Recursively populate questions under this group."""
if group.question:
for question in group.question:
# Capture any questions that have a link ID and single concept with a system and code
if question.linkId and question.concept and len(question.concept) == 1:
concept = question.concept[0]
if concept.system and concept.code and concept.system != PPI_EXTRA_SYSTEM:
code_map[(concept.system, concept.code)] = (concept.display, CodeType.QUESTION, None)
questions.append((concept.system, concept.code, question.linkId, question.repeats))
if question.group:
for sub_group in question.group:
cls._populate_questions(sub_group, code_map, questions)
if question.option:
for option in question.option:
code_map[(option.system, option.code)] = (option.display, CodeType.ANSWER, None)
if group.group:
for sub_group in group.group:
cls._populate_questions(sub_group, code_map, questions)
class QuestionnaireHistoryDao(BaseDao):
"""Maintains version history for questionnaires.
All previous versions of a questionnaire are maintained (with the same questionnaireId value and
a new version value for each update.)
Old versions of questionnaires and their questions can still be referenced by questionnaire
responses, and are used when generating metrics / participant summaries, and in general
determining what answers participants gave to questions.
Concepts and questions live under a QuestionnaireHistory entry, such that when the questionnaire
gets updated new concepts and questions are created and existing ones are left as they were.
Do not use this DAO for write operations directly; instead use QuestionnaireDao.
"""
def __init__(self):
super(QuestionnaireHistoryDao, self).__init__(QuestionnaireHistory)
def get_id(self, obj):
return [obj.questionnaireId, obj.version]
def get_with_children_with_session(self, session, questionnaire_id_and_semantic_version):
query = session.query(QuestionnaireHistory) \
.options(subqueryload(QuestionnaireHistory.concepts), subqueryload(QuestionnaireHistory.questions)) \
.filter(QuestionnaireHistory.questionnaireId == questionnaire_id_and_semantic_version[0],
QuestionnaireHistory.semanticVersion == questionnaire_id_and_semantic_version[1])
return query.first()
def get_with_children(self, questionnaire_id_and_semantic_version):
with self.session() as session:
return self.get_with_children_with_session(session, questionnaire_id_and_semantic_version)
class QuestionnaireConceptDao(BaseDao):
def __init__(self):
super(QuestionnaireConceptDao, self).__init__(QuestionnaireConcept)
def get_id(self, obj):
return obj.questionnaireConceptId
class QuestionnaireQuestionDao(BaseDao):
def __init__(self):
super(QuestionnaireQuestionDao, self).__init__(QuestionnaireQuestion)
def get_id(self, obj):
return obj.questionnaireQuestionId
def get_all_with_session(self, session, ids):
if not ids:
return []
return (
session.query(QuestionnaireQuestion).filter(QuestionnaireQuestion.questionnaireQuestionId.in_(ids)).all()
)
| 44.664452 | 117 | 0.675841 | [
"BSD-3-Clause"
] | all-of-us/raw-data-repository | rdr_service/dao/questionnaire_dao.py | 13,444 | Python |
import json
import os
from urllib import request
from flask import current_app
from elastichq.model import ClusterDTO
from elastichq.vendor.elasticsearch.exceptions import NotFoundError
from .ConnectionService import ConnectionService
from ..globals import CACHE_REGION, LOG
class HQService:
def get_status(self):
version_str = ""
try:
fp = request.urlopen("http://www.elastichq.org/currversion.json", timeout=10)
mbyte = fp.read()
version_str = mbyte.decode("utf-8")
fp.close()
except Exception as ex:
LOG.error("error retrieving version information", ex)
stable_version = (json.loads(version_str)).get("version", None)
from elastichq.service import ClusterService
clusters = ClusterService().get_clusters(create_if_missing=False)
schema = ClusterDTO(many=True)
result = schema.dump(clusters)
status = {
"name": "ElasticHQ",
"installed_version": current_app.config.get('API_VERSION'),
"current_stable_version": stable_version,
"tagline": "You know, for Elasticsearch",
"clusters": result.data,
"default_url": os.environ.get('HQ_DEFAULT_URL', current_app.config.get('DEFAULT_URL'))
}
return status
@CACHE_REGION.cache_on_arguments()
def get_settings(self, cluster_name):
try:
connection = ConnectionService().get_connection(cluster_name)
settings_doc = connection.get_source(index=current_app.config.get('HQ_CLUSTER_SETTINGS')[
'index_name'],
id=current_app.config.get('HQ_CLUSTER_SETTINGS')[
'doc_id'],
doc_type=current_app.config.get('HQ_CLUSTER_SETTINGS')[
'doc_type'])
return settings_doc
except NotFoundError as nfe:
if current_app.config.get('HQ_CLUSTER_SETTINGS')['store_metrics']:
self.save_settings(cluster_name)
return current_app.config.get('HQ_CLUSTER_SETTINGS')
def save_settings(self, cluster_name, body=None):
try:
if body is None:
body = current_app.config.get('HQ_CLUSTER_SETTINGS')
connection = ConnectionService().get_connection(cluster_name)
connection.index(index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
doc_type=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'],
id=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
body=body, refresh=True)
except NotFoundError as nfe:
LOG.error("Unable to save index. Is action.auto_create_index disabled in the ES configuration file?", nfe)
def update_settings(self, cluster_name, body=None):
if body is None:
body = current_app.config.get('HQ_CLUSTER_SETTINGS')
current_settings = self.get_settings(cluster_name)
new_settings = {
'doc_id': current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
'index_name': current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
'version': 1,
'doc_type': current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'],
'store_metrics': body.get('store_metrics', current_settings.get('store_metrics')),
'websocket_interval': body.get('websocket_interval',
current_settings.get('websocket_interval')),
'historic_poll_interval': body.get('historic_poll_interval',
current_settings.get('historic_poll_interval')),
'historic_days_to_store': body.get('historic_days_to_store',
current_settings.get('historic_days_to_store')),
'show_dot_indices': body.get('show_dot_indices',
current_settings.get('show_dot_indices'))
}
connection = ConnectionService().get_connection(cluster_name)
connection.update(index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
doc_type=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'],
id=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
body={"doc": new_settings}, refresh=True)
self.get_settings.invalidate(self, cluster_name) # alter cache
return new_settings
def delete_settings(self, cluster_name):
connection = ConnectionService().get_connection(cluster_name)
self.get_settings.invalidate(self, cluster_name) # alter cache
return connection.indices.delete(index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'])
| 46.027523 | 118 | 0.612318 | [
"Apache-2.0"
] | AdvancedThreatAnalytics/elasticsearch-HQ | elastichq/service/HQService.py | 5,017 | Python |
import unittest
import os
from web3 import Web3
proxy_url = os.environ.get('PROXY_URL', 'http://localhost:9090/solana')
proxy = Web3(Web3.HTTPProvider(proxy_url))
eth_account = proxy.eth.account.create('web3_clientVersion')
proxy.eth.default_account = eth_account.address
neon_revision = os.environ.get('NEON_REVISION', 'env var NEON_REVISION is not set')
class Test_web3_clientVersion(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('\n\nhttps://github.com/neonlabsorg/proxy-model.py/issues/205')
print('eth_account.address:', eth_account.address)
print('eth_account.key:', eth_account.key.hex())
def test_web3_clientVersion(self):
print('check tag Neon/v in web3_clientVersion')
web3_clientVersion = proxy.clientVersion
print('web3_clientVersion:', web3_clientVersion)
self.assertTrue(web3_clientVersion.startswith('Neon/v'))
print('check for neon_revision:', neon_revision)
self.assertTrue(web3_clientVersion.endswith(neon_revision))
if __name__ == '__main__':
unittest.main()
| 35.064516 | 83 | 0.734131 | [
"BSD-3-Clause"
] | neonlabsorg/proxy-model.py | proxy/testing/test_web3_clientVersion.py | 1,087 | Python |
def palindrome(str):
end = len(str)
middle = end >> 1
for i in range(middle):
end -= 1
if(str[i] != str[end]):
return False
return True
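# Example: palindrome("level") -> True, palindrome("hello") -> False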
while True:
word = input('Enter word: ')
if word == 'done' : break
    if palindrome(word):
print('Palindrome')
else:
print('No Palindrome')
| 22.235294 | 32 | 0.531746 | [
"MIT"
] | jmbohan/python | mid/mid_3.py | 378 | Python |
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class NoodlesAndCompanySpider(scrapy.Spider):
name = "noodles_and_company"
item_attributes = { 'brand': "Noodles and Company" }
allowed_domains = ["locations.noodles.com"]
start_urls = (
'https://locations.noodles.com/',
)
def store_hours(self, store_hours):
day_groups = []
this_day_group = None
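        # Walk the days in order, merging consecutive days that share identical hours into a single range.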
for day_info in store_hours:
day = day_info['day'][:2].title()
hour_intervals = []
for interval in day_info['intervals']:
f_time = str(interval['start']).zfill(4)
t_time = str(interval['end']).zfill(4)
hour_intervals.append('{}:{}-{}:{}'.format(
f_time[0:2],
f_time[2:4],
t_time[0:2],
t_time[2:4],
))
hours = ','.join(hour_intervals)
if not this_day_group:
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] != hours:
day_groups.append(this_day_group)
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
opening_hours = '24/7'
else:
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
opening_hours += '{hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def parse(self, response):
for state_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re(r'(^[^\/]+$)'):
yield scrapy.Request(
response.urljoin(state_url),
callback=self.parse_state,
)
for location_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re(r'(^[^\/]+\/[^\/]+\/.+$)'):
yield scrapy.Request(
response.urljoin(location_url),
callback=self.parse_location,
)
def parse_state(self, response):
# For counties that have multiple locations, go to a county page listing, and go to each individual location from there.
for county_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re('(^[^\/]+\/[^\/]+$)'):
yield scrapy.Request(
response.urljoin(county_url),
callback=self.parse_county,
)
# For counties that have only one location, go directly to that location page.
for location_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re('(^[^\/]+\/[^\/]+\/.+$)'):
yield scrapy.Request(
response.urljoin(location_url),
callback=self.parse_location,
)
def parse_county(self, response):
for location_block in response.xpath('//div[@class="c-location-grid-item"]'):
location_url = location_block.xpath('.//a[@class="c-location-grid-item-link"]/@href').extract_first()
yield scrapy.Request(
response.urljoin(location_url),
callback=self.parse_location,
)
def parse_location(self, response):
properties = {
'lon': float(response.xpath('//span/meta[@itemprop="longitude"]/@content').extract_first()),
'lat': float(response.xpath('//span/meta[@itemprop="latitude"]/@content').extract_first()),
'addr_full': response.xpath('//span[@class="c-address-street-1"]/text()').extract_first().strip(),
'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(),
'phone': response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
'name': response.xpath('//span[@class="location-name-geo"]/text()').extract_first(),
'ref': response.url,
'website': response.url,
}
hours_elem = response.xpath('//div[@class="c-location-hours-details-wrapper js-location-hours"]/@data-days')
opening_hours = None
if hours_elem:
hours = json.loads(hours_elem.extract_first())
opening_hours = self.store_hours(hours)
if opening_hours:
properties['opening_hours'] = opening_hours
yield GeojsonPointItem(**properties)
| 42.291339 | 131 | 0.548501 | [
"MIT"
] | Darknez07/alltheplaces | locations/spiders/noodles_and_company.py | 5,371 | Python |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import scipy as sp
from PIL import Image
import six
import networkx
for m in (np, sp, Image, six, networkx):
    if m is not None:
if m is Image:
# Pillow 6.0.0 and above have removed the 'VERSION' attribute
# https://bitbucket.org/rptlab/reportlab/issues/176/incompatibility-with-pillow-600
try:
im_ver = Image.__version__
except AttributeError:
im_ver = Image.VERSION
print('PIL'.rjust(10), ' ', im_ver)
else:
print(m.__name__.rjust(10), ' ', m.__version__)
| 28.782609 | 95 | 0.611782 | [
"BSD-3-Clause"
] | JDWarner/scikit-fuzzy | tools/build_versions.py | 662 | Python |
from .base import SimIRExpr
from ... import s_options as o
from ...s_action import SimActionData
class SimIRExpr_RdTmp(SimIRExpr):
def _execute(self):
if (o.SUPER_FASTPATH in self.state.options
and self._expr.tmp not in self.state.scratch.temps):
self.expr = self.state.se.BVV(0, self.size_bits())
else:
self.expr = self.state.scratch.tmp_expr(self._expr.tmp)
# finish it and save the tmp reference
self._post_process()
if o.TRACK_TMP_ACTIONS in self.state.options:
r = SimActionData(self.state, SimActionData.TMP, SimActionData.READ, tmp=self._expr.tmp, size=self.size_bits(), data=self.expr)
self.actions.append(r)
| 40.388889 | 139 | 0.664374 | [
"BSD-2-Clause"
] | praetorian-code/simuvex | simuvex/vex/expressions/rdtmp.py | 727 | Python |
# coding: utf-8
# # Load and preprocess 2012 data
#
# We will, over time, look over other years. Our current goal is to explore the features of a single year.
#
# ---
# In[1]:
get_ipython().magic('pylab --no-import-all inline')
import warnings
import pandas as pd
# ## Load the data.
#
# ---
#
# If this fails, be sure that you've saved your own data in the prescribed location, then retry.
# In[2]:
file = "../data/interim/2012data.dta"
df_rawest = pd.read_stata(file)
# In[7]:
df_rawest.weight_full.isnull()
# In[8]:
good_columns = [#'campfin_limcorp', # "Should gov be able to limit corporate contributions"
'pid_x', # Your own party identification
'abortpre_4point', # Abortion
'trad_adjust', # Moral Relativism
'trad_lifestyle', # "Newer" lifetyles
'trad_tolerant', # Moral tolerance
'trad_famval', # Traditional Families
'gayrt_discstd_x', # Gay Job Discrimination
'gayrt_milstd_x', # Gay Military Service
'inspre_self', # National health insurance
'guarpr_self', # Guaranteed Job
'spsrvpr_ssself', # Services/Spending
'aa_work_x', # Affirmative Action ( Should this be aapost_hire_x? )
'resent_workway',
'resent_slavery',
'resent_deserve',
'resent_try',
]
df_raw = df_rawest[good_columns]
# ## Clean the data
# ---
# In[9]:
def convert_to_int(s):
"""Turn ANES data entry into an integer.
>>> convert_to_int("1. Govt should provide many fewer services")
1
>>> convert_to_int("2")
2
"""
try:
return int(s.partition('.')[0])
except ValueError:
warnings.warn("Couldn't convert: "+s)
return np.nan
except AttributeError:
return s
def negative_to_nan(value):
"""Convert negative values to missing.
ANES codes various non-answers as negative numbers.
For instance, if a question does not pertain to the
respondent.
"""
return value if value >= 0 else np.nan
def lib1_cons2_neutral3(x):
"""Rearrange questions where 3 is neutral."""
return -3 + x if x != 1 else x
def liblow_conshigh(x):
"""Reorder questions where the liberal response is low."""
return -x
def dem_edu_special_treatment(x):
"""Eliminate negative numbers and {95. Other}"""
return np.nan if x == 95 or x <0 else x
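# Cleaning pipeline: parse coded answers to ints, turn negative (non-answer) codes into NaN,
# then flip the sign of selected scales so related items point in a consistent direction.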
df = df_raw.applymap(convert_to_int)
df = df.applymap(negative_to_nan)
df.abortpre_4point = df.abortpre_4point.apply(lambda x: np.nan if x not in {1, 2, 3, 4} else -x)
df.loc[:, 'trad_lifestyle'] = df.trad_lifestyle.apply(lambda x: -x) # 1: moral relativism, 5: no relativism
df.loc[:, 'trad_famval'] = df.trad_famval.apply(lambda x: -x) # Tolerance. 1: tolerance, 7: not
df.loc[:, 'spsrvpr_ssself'] = df.spsrvpr_ssself.apply(lambda x: -x)
df.loc[:, 'resent_workway'] = df.resent_workway.apply(lambda x: -x)
df.loc[:, 'resent_try'] = df.resent_try.apply(lambda x: -x)
df.rename(inplace=True, columns=dict(zip(
good_columns,
["PartyID",
"Abortion",
"MoralRelativism",
"NewerLifestyles",
"MoralTolerance",
"TraditionalFamilies",
"GayJobDiscrimination",
"GayMilitaryService",
"NationalHealthInsurance",
"StandardOfLiving",
"ServicesVsSpending",
"AffirmativeAction",
"RacialWorkWayUp",
"RacialGenerational",
"RacialDeserve",
"RacialTryHarder",
]
)))
# In[10]:
print("Variables now available: df")
# In[11]:
df_rawest.pid_x.value_counts()
# In[12]:
df.PartyID.value_counts()
# In[13]:
df.describe()
# In[14]:
df.head()
# In[21]:
df.to_csv("../data/processed/2012.csv")
# In[15]:
df_rawest.weight_full.to_csv("../data/processed/2012_weights.csv")
# In[16]:
df_rawest.shape
# In[ ]:
| 20.027027 | 108 | 0.654521 | [
"MIT"
] | aryamccarthy/ANES | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | 3,705 | Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading machine types."""
from googlecloudsdk.calliope import base
class MachineTypes(base.Group):
"""Read Google Compute Engine virtual machine types."""
MachineTypes.detailed_help = {
'brief': 'Read Google Compute Engine virtual machine types',
}
| 33.5 | 74 | 0.758898 | [
"Apache-2.0"
] | KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/surface/compute/machine_types/__init__.py | 871 | Python |
"""
WSGI config for rara_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rara_api.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 | [
"MIT"
] | lcbiplove/rara-api | rara_api/wsgi.py | 393 | Python |
import random
from string import Template
import tinydb
from flask import Flask, redirect, url_for, abort
app = Flask(__name__)
db = tinydb.TinyDB('./db.json')
HTML = """<!DOCTYPE html>
<html>
<head>
<title>Dulux Swatch</title>
<link rel="StyleSheet" href="/static/main.css" type="text/css">
</head>
<body style="background-color: #${rgb}">
<div>
${name}
<br/>
(id: <a href="https://www.dulux.co.uk/en/products/colour-tester#?selectedColor=${colorId}">${colorId}</a>)
</div>
</body>
</html>
"""
@app.route('/id/<colour_id>')
def get_colour_by_id(colour_id):
obj = db.get(tinydb.Query().colorId == colour_id)
if not obj:
abort(404)
return Template(HTML).substitute(obj)
@app.route('/name/<colour_name>')
def get_colour_by_name(colour_name):
    obj = db.get(tinydb.Query().uriFriendlyName == colour_name)
if not obj:
abort(404)
return Template(HTML).substitute(obj)
@app.route('/')
@app.route('/random')
def get_random_colour():
return redirect(url_for('get_colour_by_id', colour_id=random.choice(db.all())['colorId']))
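# To run locally (illustrative): point FLASK_APP at this module and use `flask run`,
# with a `db.json` file present in the working directory.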
| 24.173913 | 114 | 0.65018 | [
"MIT"
] | leohemsted/duluxswatch | duluxswatch/app.py | 1,112 | Python |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from codecs import open
from setuptools import setup
VERSION = "2.14.0"
# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
try:
with open('azure/cli/core/__init__.py', 'r', encoding='utf-8') as f:
content = f.read()
except OSError:
pass
else:
import re
import sys
m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
if not m:
print('Could not find __version__ in azure/cli/core/__init__.py')
sys.exit(1)
if m.group(1) != VERSION:
print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
sys.exit(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'adal~=1.2.3',
'argcomplete~=1.8',
'azure-cli-telemetry==1.0.6',
'colorama~=0.4.1',
'humanfriendly>=4.7,<9.0',
'jmespath',
'knack==0.7.2',
'msal~=1.0.0',
'msal-extensions~=0.1.3',
'msrest>=0.4.4',
'msrestazure>=0.6.3',
'paramiko>=2.0.8,<3.0.0',
'PyJWT',
'pyopenssl>=17.1.0', # https://github.com/pyca/pyopenssl/pull/612
'requests~=2.22',
'six~=1.12',
'pkginfo>=1.5.0.1',
'azure-mgmt-resource==10.2.0',
'azure-mgmt-core==1.2.0'
]
TESTS_REQUIRE = [
'mock'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-core',
version=VERSION,
description='Microsoft Azure Command-Line Tools Core Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
packages=[
'azure.cli.core',
'azure.cli.core.commands',
'azure.cli.core.extension',
'azure.cli.core.profiles',
],
install_requires=DEPENDENCIES,
extras_require={
":python_version<'3.4'": ['enum34'],
":python_version<'2.7.9'": ['pyopenssl', 'ndg-httpsclient', 'pyasn1'],
':python_version<"3.0"': ['futures'],
"test": TESTS_REQUIRE,
},
tests_require=TESTS_REQUIRE,
package_data={'azure.cli.core': ['auth_landing_pages/*.html']}
)
| 30.048544 | 94 | 0.581583 | [
"MIT"
] | ALT-F1/azure-cli | src/azure-cli-core/setup.py | 3,095 | Python |
# -*- coding: utf-8 -*-
#
import re
from collections import OrderedDict
from copy import deepcopy
from ._http import HTTPStatus
#copied from sanic router
REGEX_TYPES = {
'string': (str, r'[^/]+'),
'int': (int, r'\d+'),
'number': (float, r'[0-9\\.]+'),
'alpha': (str, r'[A-Za-z]+'),
}
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
__all__ = ('merge', 'camel_to_dash', 'default_id', 'not_none', 'not_none_sorted', 'unpack')
def merge(first, second):
"""
Recursively merges two dictionaries.
Second dictionary values will take precedence over those from the first one.
Nested dictionaries are merged too.
:param dict first: The first dictionary
:param dict second: The second dictionary
:return: the resulting merged dictionary
:rtype: dict
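    For example:
        >>> merge({'a': {'b': 1}}, {'a': {'c': 2}})
        {'a': {'b': 1, 'c': 2}}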
"""
if not isinstance(second, dict):
return second
result = deepcopy(first)
for key, value in second.items():
if key in result and isinstance(result[key], dict):
result[key] = merge(result[key], value)
else:
result[key] = deepcopy(value)
return result
def camel_to_dash(value):
'''
Transform a CamelCase string into a low_dashed one
:param str value: a CamelCase string to transform
:return: the low_dashed string
:rtype: str
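    For example:
        >>> camel_to_dash('CamelCase')
        'camel_case'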
'''
first_cap = FIRST_CAP_RE.sub(r'\1_\2', value)
return ALL_CAP_RE.sub(r'\1_\2', first_cap).lower()
def default_id(resource, method):
'''Default operation ID generator'''
return '{0}_{1}'.format(method, camel_to_dash(resource))
def not_none(data):
'''
Remove all keys where value is None
:param dict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: dict
'''
return dict((k, v) for k, v in data.items() if v is not None)
def not_none_sorted(data):
'''
Remove all keys where value is None
:param OrderedDict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: OrderedDict
'''
return OrderedDict((k, v) for k, v in sorted(data.items()) if v is not None)
def unpack(response, default_code=HTTPStatus.OK):
'''
Unpack a Flask standard response.
Flask response can be:
- a single value
- a 2-tuple ``(value, code)``
- a 3-tuple ``(value, code, headers)``
.. warning::
When using this function, you must ensure that the tuple is not the response data.
To do so, prefer returning list instead of tuple for listings.
:param response: A Flask style response
:param int default_code: The HTTP code to use as default if none is provided
:return: a 3-tuple ``(data, code, headers)``
:rtype: tuple
:raise ValueError: if the response does not have one of the expected format
'''
if not isinstance(response, tuple):
# data only
return response, default_code, {}
elif len(response) == 1:
# data only as tuple
return response[0], default_code, {}
elif len(response) == 2:
# data and code
data, code = response
return data, code, {}
elif len(response) == 3:
# data, code and headers
data, code, headers = response
return data, code or default_code, headers
else:
raise ValueError('Too many response values')
def get_accept_mimetypes(request):
accept_types = request.headers.get('accept', None)
if accept_types is None:
return {}
split_types = str(accept_types).split(',')
# keep the order they appear!
return OrderedDict([((s, 1,), s,) for s in split_types])
def best_match_accept_mimetype(request, representations, default=None):
if representations is None or len(representations) < 1:
return default
try:
accept_mimetypes = get_accept_mimetypes(request)
if accept_mimetypes is None or len(accept_mimetypes) < 1:
return default
# find exact matches, in the order they appear in the `Accept:` header
for accept_type, qual in accept_mimetypes:
if accept_type in representations:
return accept_type
# match special types, like "application/json;charset=utf8" where the first half matches.
for accept_type, qual in accept_mimetypes:
type_part = str(accept_type).split(';', 1)[0]
if type_part in representations:
return type_part
        # if none of those match, fall back to wildcard matching
for accept_type, qual in accept_mimetypes:
if accept_type == "*" or accept_type == "*/*" or accept_type == "*.*":
return default
except (AttributeError, KeyError):
return default
def parse_rule(parameter_string):
"""Parse a parameter string into its constituent name, type, and
pattern
For example:
`parse_parameter_string('<param_one:[A-z]>')` ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern)
"""
# We could receive NAME or NAME:PATTERN
if str(parameter_string).startswith('/'):
parameter_string = parameter_string[1:]
parameter_string = str(parameter_string).strip('<>')
name = parameter_string
pattern = 'string'
if ':' in parameter_string:
name, pattern = parameter_string.split(':', 1)
default = (str, pattern)
# Pull from pre-configured types
_type, pattern = REGEX_TYPES.get(pattern, default)
return name, _type, pattern
| 31.707182 | 97 | 0.646628 | [
"MIT"
] | oliverpain/sanic-restplus | sanic_restplus/utils.py | 5,739 | Python |
import asyncio
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon.tl.functions.users import GetFullUserRequest
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, LESS_SPAMMY
from userbot.utils import admin_cmd
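# Note: names such as `bot`, `borg`, `command`, `Var` and `Config` are not imported here;
# this plugin appears to rely on the userbot plugin loader injecting them into the module
# namespace at load time (assumption based on how they are used below).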
PM_WARNS = {}
PREV_REPLY_MESSAGE = {}
CACHE = {}
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "**No name set yet nibba, check pinned message in** @XtraTgBot"
USER_BOT_WARN_ZERO = "`You were spamming my peru master's inbox, henceforth your retarded lame ass has been blocked by my master's userbot.` **Now GTFO, i'm playing minecraft** "
USER_BOT_NO_WARN = ("[Please wait for my boss's permission](tg://user?id=742506768)\n\n"
f"{DEFAULTUSER}'s` inbox.\n\n"
"Leave your name, phone number, address and 10k$ and hopefully you'll get a reply within 2 light years.`\n\n"
"** Send** `/start` ** so that we can decide why you're here.**")
if Var.PRIVATE_GROUP_ID is not None:
@command(pattern="^.approve ?(.*)")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
if chat.id in PM_WARNS:
del PM_WARNS[chat.id]
if chat.id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat.id].delete()
del PREV_REPLY_MESSAGE[chat.id]
pmpermit_sql.approve(chat.id, reason)
await event.edit("Approved Nibba [{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.delete()
@bot.on(events.NewMessage(outgoing=True))
async def you_dm_niqq(event):
if event.fwd_from:
return
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
if not chat.id in PM_WARNS:
pmpermit_sql.approve(chat.id, "outgoing")
bruh = "__Added user to approved pms cuz outgoing message >~<__"
rko = await borg.send_message(event.chat_id, bruh)
await asyncio.sleep(3)
await rko.delete()
@command(pattern="^.block ?(.*)")
async def block_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit("\n Now You Can't Message Me..[{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat.id))
@command(pattern="^.listapproved")
    async def list_approved_p_m(event):
if event.fwd_from:
return
approved_users = pmpermit_sql.get_all_approved()
APPROVED_PMs = "Current Approved PMs\n"
if len(approved_users) > 0:
for a_user in approved_users:
if a_user.reason:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\n"
else:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id})\n"
else:
APPROVED_PMs = "no Approved PMs (yet)"
if len(APPROVED_PMs) > 4095:
with io.BytesIO(str.encode(APPROVED_PMs)) as out_file:
out_file.name = "approved.pms.text"
await event.client.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="Current Approved PMs",
reply_to=event
)
await event.delete()
else:
await event.edit(APPROVED_PMs)
@bot.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
if event.from_id == bot.uid:
return
if Var.PRIVATE_GROUP_ID is None:
return
if not event.is_private:
return
message_text = event.message.message
chat_id = event.from_id
current_message_text = message_text.lower()
if USER_BOT_NO_WARN == message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return
if event.from_id in CACHE:
sender = CACHE[event.from_id]
else:
sender = await bot.get_entity(event.from_id)
CACHE[event.from_id] = sender
if chat_id == bot.uid:
# don't log Saved Messages
return
if sender.bot:
# don't log bots
return
if sender.verified:
# don't log verified accounts
return
if any([x in event.raw_text for x in ("/start", "1", "2", "3", "4", "5")]):
return
if not pmpermit_sql.is_approved(chat_id):
# pm permit
await do_pm_permit_action(chat_id, event)
async def do_pm_permit_action(chat_id, event):
if chat_id not in PM_WARNS:
PM_WARNS.update({chat_id: 0})
if PM_WARNS[chat_id] == Config.MAX_FLOOD_IN_P_M_s:
r = await event.reply(USER_BOT_WARN_ZERO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
the_message = ""
the_message += "#BLOCKED_PMs\n\n"
the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
the_message += f"Message Count: {PM_WARNS[chat_id]}\n"
# the_message += f"Media: {message_media}"
try:
await event.client.send_message(
entity=Var.PRIVATE_GROUP_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
return
except:
return
r = await event.reply(USER_BOT_NO_WARN)
PM_WARNS[chat_id] += 1
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
| 37.315789 | 178 | 0.573625 | [
"Apache-2.0"
] | Maxpayne7000/X-tra-Telegram | userbot/plugins/pmpermit.py | 7,096 | Python |
''' Frustum PointNets v1 Model.
'''
from __future__ import print_function
import sys
import os
import tensorflow as tf
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT
from model_util import point_cloud_masking, get_center_regression_net
from model_util import placeholder_inputs, parse_output_to_tensors, get_loss
def get_instance_seg_v1_net(point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' 3D instance segmentation PointNet v1 network.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
end_points: dict
Output:
logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
end_points: dict
'''
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
net = tf.expand_dims(point_cloud, 2)
    # (32, 2048, 1, 4) block 1 in the paper's network figure
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
# (32, 2048, 1, 64)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
point_feat = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
    # (32, 2048, 1, 64) block 2 in the paper's figure, concatenated into block 5
net = tf_util.conv2d(point_feat, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
# (32, 2048, 1, 128)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
    # (32, 2048, 1, 1024) block 3 in the paper's figure
global_feat = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
    # (32, 1, 1, 1024) block 4 in the paper's figure, concatenated into block 5
global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
# (32, 1, 1, 1027)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    # (32, 2048, 1, 1027) tf.tile() replicates the data to expand it
concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    # (32, 2048, 1, 1091) block 5 in the paper's figure, 2048*(1024+64+3)
net = tf_util.conv2d(concat_feat, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6', bn_decay=bn_decay)
# (32, 2048, 1, 512)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9', bn_decay=bn_decay)
# (32, 2048, 1, 128)
net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)
logits = tf_util.conv2d(net, 2, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10')
# (32, 2048, 1, 2)
logits = tf.squeeze(logits, [2]) # BxNxC
    # (32, 2048, 2) block 6 in the paper's figure
# to 182
return logits, end_points
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' 3D Box Estimation PointNet v1 network.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in object coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
including box centers, heading bin class scores and residuals,
and size cluster scores and residuals
'''
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
    # (32, 512, 1, 3) block 1
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg4', bn_decay=bn_decay)
    # (32, 512, 1, 512) block 2
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool2')
# (32, 1, 1, 512)
net = tf.squeeze(net, axis=[1,2])
# (32, 512)
net = tf.concat([net, one_hot_vec], axis=1)
    # (32, 512+3) block 3
net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
is_training=is_training, bn_decay=bn_decay)
# (32, 256)
# The first 3 numbers: box center coordinates (cx,cy,cz),
# the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
# next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
output = tf_util.fully_connected(net,
3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
return output, end_points
# (3+4*8+2*12)
    # (3+4NS+2NH) block 4
# to 202
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
    ''' Frustum PointNets model. The model predicts 3D object masks and
    amodal bounding boxes for objects in frustum point clouds.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
Output:
end_points: dict (map from name strings to TF tensors)
'''
end_points = {}
# 3D Instance Segmentation PointNet
# logits: TF tensor in shape(B, N, 2), scores for bkg / clutter and object
# end_points: dict
# to 18
logits, end_points = get_instance_seg_v1_net(\
point_cloud, one_hot_vec,
is_training, bn_decay, end_points)
# (32, 2048, 2), {}
end_points['mask_logits'] = logits
# Masking
# select masked points and translate to masked points' centroid
object_point_cloud_xyz, mask_xyz_mean, end_points = \
point_cloud_masking(point_cloud, logits, end_points)
# to model_util.py 217
# (32, 512, 3) (32, 3) end_points['mask'] = mask
# T-Net and coordinate translation
# to model_util.py 295
center_delta, end_points = get_center_regression_net(\
object_point_cloud_xyz, one_hot_vec,
is_training, bn_decay, end_points)
# (32,3) end_points['mask'] = mask
stage1_center = center_delta + mask_xyz_mean # Bx3
# (32,3)
end_points['stage1_center'] = stage1_center
# Get object point cloud in object coordinate
object_point_cloud_xyz_new = \
object_point_cloud_xyz - tf.expand_dims(center_delta, 1)
# (32, 512, 3) - (32, 1, 3)
# in object coordinate
    # Amodal Box Estimation PointNet
# to 105
output, end_points = get_3d_box_estimation_v1_net(\
object_point_cloud_xyz_new, one_hot_vec,
is_training, bn_decay, end_points)
# Parse output to 3D box parameters
end_points = parse_output_to_tensors(output, end_points)
end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3
# (32, 3)
return end_points
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,4))
outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True))
for key in outputs:
print((key, outputs[key]))
loss = get_loss(tf.zeros((32,1024),dtype=tf.int32),
tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32),
tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32),
tf.zeros((32,3)), outputs)
print(loss)
| 41.308017 | 101 | 0.591624 | [
"Apache-2.0"
] | BPMJG/annotated-F-pointnet | models/frustum_pointnets_v1.py | 9,914 | Python |
from configparser import SafeConfigParser
import logging
import os
class Config:
def __init__(self, configFile):
if os.path.isfile(configFile):
self.Config = SafeConfigParser()
self.Config.read(configFile)
logging.info(self.Config.sections())
else:
print("Config file not found at: ")
print(configFile)
quit()
def ConfigSectionMap(self, section):
dict1 = {}
options = self.Config.options(section)
for option in options:
try:
dict1[option] = self.Config.get(section, option)
if dict1[option] == -1:
                    logging.debug("skip: %s" % option)
except:
logging.error("exception on %s!" % option)
dict1[option] = None
return dict1
| 29.517241 | 64 | 0.546729 | [
"BSD-3-Clause"
] | ponder-lab/gitcproc | src/util/Config.py | 856 | Python |
import asyncio
import discord
from discord import Member, Role, TextChannel, DMChannel
from discord.ext import commands
from typing import Union
from profanity_check import predict
class ProfanityFilter:
"""
A simple filter that checks for profanity in a message and
then deletes it. Many profanity detection libraries use a hard-coded
list of bad words to detect and filter profanity, however this
plugin utilises a library that uses a linear support vector machine
(SVM) model trained on 200k human-labeled samples of clean and profane
text strings. ([`profanity-check`](https://github.com/vzhou842/profanity-check)).
Artificial intelligence in a discord bot? Heck yeah!
"""
def __init__(self, bot):
self.bot = bot
self.coll = bot.plugin_db.get_partition(self)
self.enabled = True
self.whitelist = set()
asyncio.create_task(self._set_config())
async def _set_config(self):
config = await self.coll.find_one({'_id': 'config'})
self.enabled = config.get('enabled', True)
self.whitelist = set(config.get('whitelist', []))
@commands.group(invoke_without_command=True)
@commands.is_owner()
async def profanity(self, ctx, mode: bool):
"""Disable or enable the profanity filter.
Usage: `profanity enable` / `profanity disable`
"""
self.enabled = mode
await self.coll.update_one(
{'_id': 'config'},
{'$set': {'enabled': self.enabled}},
upsert=True
)
await ctx.send(('Enabled' if mode else 'Disabled') + ' the profanity filter.')
@commands.is_owner()
@profanity.command()
async def whitelist(ctx, target: Union[Member, Role, TextChannel]):
"""Whitelist a user, role or channel from the profanity filter.
Usage: `profanity whitelist @dude`
"""
self = ctx.bot.get_cog('ProfanityFilter') # wtf where did self dissapear
if target.id in self.whitelist:
self.whitelist.remove(target.id)
removed = True
else:
self.whitelist.add(target.id)
removed = False
await self.coll.update_one(
{'_id': 'config'},
{'$set': {'whitelist': list(self.whitelist)}},
upsert=True
)
await ctx.send(
f"{'Un-w' if removed else 'W'}hitelisted "
f"{target.mention} from the profanity filter."
)
async def on_message(self, message):
if not self.enabled:
return
channel = message.channel
author = message.author
if isinstance(author, discord.User): # private channel
return
ids = {author.id, channel.id} | {r.id for r in author.roles}
if self.whitelist.intersection(ids): # anything intersects
return
profane = bool(predict([message.content])[0])
if not profane:
return
await message.delete()
temp = await channel.send(
f'{author.mention} your message has '
'been deleted for containing profanity.'
)
await asyncio.sleep(5)
await temp.delete()
def setup(bot):
bot.add_cog(ProfanityFilter(bot)) | 29.734513 | 86 | 0.59881 | [
"MIT"
] | officialpiyush/modmail-plugins-2 | profanity-filter/profanity-filter.py | 3,360 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: [email protected]
Description: Very simple reminder
'''
from core.people.person import Profile, Session
from core.utils.utils import text2int
import re
from crontab import CronTab
from getpass import getuser
from core.config.settings import logger, ROBOT_DIR
class Reaction:
"""remind me every ... reaction"""
response = ''
request = ''
def __str__(self):
return 'Remind me every ... reaction'
@classmethod
def __init__(self, *args, **kwargs):
""" original request string """
#get request object
self.req_obj = kwargs.pop('req_obj')
#request word sequence
self.request = self.req_obj.get('request', '')
#request received from (julius, jabber any other resources)
self.req_from = self.req_obj.get('from', '')
self.response = ''
@classmethod
def run(self):
"""default method"""
sess = Session()
sender = self.req_obj.get('sender', '')
if sender:
            #extract sender email
email = sender.split('/')[0]
#find user profile by primary email
profile = sess.query(Profile).filter(Profile.email == email).one()
cron = CronTab(getuser())
DAYS = {'sunday': 'SUN'
, 'monday': 'MON'
, 'tuesday': 'TUE'
, 'wednesday': 'WED'
, 'thursday': 'THU'
, 'friday': 'FRI'
, 'saturday': 'SAT'}
req = self.request.replace('remind me every', '', 1)
#r = re.compile(re.escape('remind me every'), re.IGNORECASE)
#req = r.sub('', request)
m = re.search('\s+?(by|with|to|of)\s+message\s+?(.+)', req)
if m and m.group(2):
msg = m.group(2)
else:
m = re.search('\s+?(by|with|to|of)\s+?(.+)', req)
if m and m.group(2):
msg = m.group(2)
else:
            msg = 'This is a reminder. Unfortunately I could not parse your message, \
but I guess you can remember what you wanted to do.'
job = cron.new(command='/usr/bin/python %s/core/cron/cronjob.py --uuid=%s \
--cmd="send jabber message" --arguments="%s"' % (ROBOT_DIR, profile.uuid, msg.replace('"', '')))
skip_other = False
if req.strip().startswith('month'):
job.minute.on(0)
job.hour.on(0)
job.dom.on(1)
skip_other = True
if req.strip().startswith('week'):
job.minute.on(0)
job.hour.on(0)
job.dow.on(0)
skip_other = True
if req.strip().startswith('year'):
job.dom.on(0)
job.month.on(0)
skip_other = True
dow = False
for dw, cron_day in DAYS.items():
if req.strip().lower().startswith(dw):
dow = True
break
if dow:
job.dow.on(cron_day.upper())
#req = req.replace(dw, '', 1) - ignore case problem
r = re.split(r'^' + dw, req.strip(), flags=re.IGNORECASE)
if r and len(r) == 2:
req = r.pop()
if req.strip().startswith('at '):
################################################
# every monday/tuesday/wednesday at 00:00
################################################
time = re.search("[^0-9](\d{1,2})\so'clock", req)
if time and time.group(1):
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if not skip_other:
################################################
# every monday/tuesday/wednesday at 00:00
################################################
time = re.search('[^0-9](\d{1,2}):(\d{2})[^0-9]', req)
if time and time.group(1) and time.group(2):
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
################################################
# every monday/tuesday/wednesday hourly
################################################
if not skip_other and req.strip().startswith('hourly'):
#hourly
job.minute.on(0)
skip_other = True
################################################
# every monday/tuesday/wednesday from 00:00 to 00:00
################################################
elif not skip_other:
#@todo
#time = re.search('\s?from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())
time = re.search('\s?from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+', req)
#@todo
if time and time.group(1):
job.hour.during(time.group(1), time.group(3))
#todo every minute, every 5 minutes
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
################################################
# every monday/tuesday/wednesday
################################################
elif not skip_other:
job.minute.on(0)
#by default 10:00
job.hour.on(10)
skip_other = True
if not skip_other and req.strip().startswith('day'):
#cut day word
req = req.replace('day', '', 1)
if req.strip().startswith('at '):
################################################
# every day at 00:00
################################################
time = re.search("[^0-9](\d{1,2})\so'clock", req)
if time and time.group(1):
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if not skip_other:
################################################
# every day at 00:00
################################################
time = re.search('[^0-9](\d{1,2}):(\d{2})[^0-9]', req)
if time and time.group(1) and time.group(2):
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
################################################
# every day hourly
################################################
if not skip_other and req.strip().startswith('hourly'):
#hourly
job.minute.on(0)
skip_other = True
################################################
# every day every 5 hours
################################################
if not skip_other and req.strip().startswith('every'):
req = req.replace('every', '', 1)
hour = re.search('\s?(\d+)\s+(hour|hours|hs|h)', req)
if hour and hour.group(1):
job.hour.every(hour.group(1))
skip_other = True
else:
#if hour presents in human word : one, two etc.
hour = re.search('^\s?([a-zA-Z]+?)\s(hours|hour)', req)
if hour and hour.group(1):
h = text2int(hour.group(1))
job.hour.every(h)
job.minute.on(0)
skip_other = True
################################################
# every day from 00:00 to 00:00
################################################
elif not skip_other and req.strip().startswith('from'):
#@todo
time = re.search('^from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+', req.strip())
#@todo
if time and time.group(1):
job.hour.during(time.group(1), time.group(3))
#todo every minute, every 5 minutes
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
################################################
# every day
################################################
elif not skip_other:
job.minute.on(0)
#by default 10:00
job.hour.on(10)
skip_other = True
print(job)
else:
pass
if not skip_other and req.strip().startswith('with message'):
job.minute.on(0)
#by default 10:00
job.hour.on(10)
skip_other = True
if not skip_other and req.strip().startswith('hour'):
#every hour
job.minute.on(0)
skip_other = True
if not skip_other and req.strip().startswith('minute'):
#every minute
job.minute.every(1)
skip_other = True
if not skip_other:
################################################
# hours
################################################
hour = re.search('^(\d+)\s+(hour|hours|hs|h)', req.strip())
if hour and hour.group(1):
job.hour.every(hour.group(1))
skip_other = True
else:
#if hour presents in human word : one, two etc.
hour = re.search('^([a-zA-Z]+?)\s(hours|hour)', req.strip())
if hour and hour.group(1):
h = text2int(hour.group(1))
job.hour.every(h)
job.minute.on(0)
skip_other = True
if not skip_other:
#######################################################################################################
# days
#######################################################################################################
day = re.search('^(\d+)\s+(days|day|d)', req.strip())
if day and day.group(1):
#remove the matched part of the string which describes number of days: ex. 10 days
req = req.replace(day.group(0), '', 1)
################################################
# days at 00:00
################################################
if req.strip().startswith('at '):
req = req.replace('at', '', 1)
################################################
# days at 8 o'clock
################################################
time = re.search("^(\d{1,2})\so'clock", req.strip())
if time and time.group(1):
job.dow.every(day.group(1))
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
################################################
# days hourly
################################################
if not skip_other and req.strip().startswith('hourly'):
#hourly
job.minute.on(0)
job.dow.every(day.group(1))
skip_other = True
################################################
# days at 00:00
################################################
if not skip_other:
time = re.search('^(\d{1,2}):(\d{2})[^0-9]', req.strip())
if time and time.group(1) and time.group(2):
job.dom.every(day.group(1))
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
################################################
# 10 days from 00:00 to 00:00
################################################
if not skip_other and req.strip().startswith('from'):
#@todo
req = req.replace('from', '', 1)
time = re.search('^(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())
if time and time.group(1):
job.hour.during(time.group(1), time.group(3))
job.dom.every(day.group(1))
#todo every 5 minutes
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
#################################################
# every two days
#################################################
elif not skip_other:
day = re.search('^\s?([a-zA-Z]+?)\s(days|day)', req)
if day and day.group(1):
d = text2int(day.group(1))
req = req.replace(day.group(0), '', 1)
################################################
# ten days from 00:00 to 00:00
################################################
if not skip_other and req.strip().startswith('from'):
time = re.search('^from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())
if time and time.group(1):
job.hour.during(time.group(1), time.group(3))
job.dom.every(d)
#todo every 5 minutes
# remove from .. to and check for "every" 5 minutes
req = req.replace(day.group(0), '', 1)
req = req.replace(time.group(0), '', 1)
if req.strip().startswith('every'):
mins = re.search('^every\s(\d{1,2})[^0-9]+?(min|minute|minutes)', req.strip())
if mins and mins.group(0):
job.minute.during(time.group(2), time.group(4)).every(mins.group(1))
skip_other = True
#check once again but now we expect minutes as word not number
else:
mins = re.search('^every\s([^0-9\s]+)\s?(min|minute|minutes)', req.strip())
#if exists
if mins and mins.group(1):
m = text2int(mins.group(1))
job.minute.during(time.group(2), time.group(4)).every(m)
skip_other = True
else:
raise
# if not starts with "every"
else:
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
else:
job.dom.every(d)
job.minute.on(0)
#by default 10:00
job.hour.on(10)
#print(job)
skip_other = True
else:
print(req)
raise
#job.minute.on(0)
#job.hour.on(10) #by default 10:00
#skip_other=True
#job.dow.every(day.group(1))
#skip_other = True
if not skip_other:
#######################################################################################################
# minutes
#######################################################################################################
min = re.search('\s?(\d+)\s+(minutes|min|minute|m)', req)
if min and min.group(1):
job.minute.every(min.group(1))
else:
#if day presents in human word : one, two etc.
min = re.search('^\s?([a-zA-Z]+?)\s(minutes|min|mins)', req)
if min and min.group(1):
m = text2int(min.group(1))
job.minute.every(m)
cron.write()
logger.info('adding cronjob %s' % cron.render())
response = 'ok, cronjob added %s' % job.render()
if self.req_from == 'jabber':
todo = {'text': response, 'jmsg': response, 'type': 'response'}
self.response = todo
if self.req_from == 'julius':
from core.broadcast import say, bang
bang()
todo = {'say': response, 'text': response, 'type': 'response'}
self.response = say(self.request.replace('say', '').upper())
return self.response
#n = Reaction(*{'reserved':''}, **{'req_obj':{'from':'jabber', 'request':'remind me every 2 minutes with "hey don\'t forget about pizza"', 'sender': '[email protected]'}})
#n.run()
| 39.460177 | 178 | 0.365665 | [
"MIT"
] | vsilent/smarty-bot | core/brain/remind/me/every/reaction.py | 17,836 | Python |
import cv2
class SimplePreprocessor:
def __init__(self, width, height, inter=cv2.INTER_AREA):
# store the target image width, height, and interpolation
# method used when resizing
self.width = width
self.height = height
self.inter = inter
def preprocess(self, image):
# resize the image to a fixed size, ignoring the aspect
# ratio
return cv2.resize(image, (self.width, self.height), interpolation=self.inter) | 34.5 | 85 | 0.660455 | [
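# Example usage (illustrative): resize an image loaded with cv2.imread to 32x32 pixels.
# sp = SimplePreprocessor(32, 32)
# resized = sp.preprocess(cv2.imread("example.jpg"))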
"MIT"
] | Akshat4112/Machine-Learning-Case-Studies | 22. Neural Networks from Scratch/preprocessing/simplepreprocessor.py | 483 | Python |
"""Define endpoints related to user reports."""
import logging
from typing import Any, Dict
from .helpers.report import Report
_LOGGER: logging.Logger = logging.getLogger(__name__)
class UserReport(Report):
"""Define a user report object."""
async def status_by_coordinates(
self, latitude: float, longitude: float
) -> Dict[str, Any]:
"""Get symptom data for the location nearest to the user's lat/lon."""
return await self.nearest_by_coordinates(latitude, longitude)
async def status_by_zip(self, zip_code: str) -> Dict[str, Any]:
"""Get symptom data for the provided ZIP code."""
try:
location = next(
(d for d in await self.user_reports() if d["zip"] == zip_code)
)
except StopIteration:
return {}
return await self.status_by_coordinates(
float(location["latitude"]), float(location["longitude"])
)
| 30.774194 | 78 | 0.634172 | [
"MIT"
] | bachya/pyflunearyou | pyflunearyou/user.py | 954 | Python |
import re
from .reports import BaseReport
from .utils import get_pacer_doc_id_from_doc1_url, reverse_goDLS_function
from ..lib.log_tools import make_default_logger
from ..lib.string_utils import force_unicode
logger = make_default_logger()
class AttachmentPage(BaseReport):
"""An object for querying and parsing the attachment page report. """
PATH = 'doc1/'
def __init__(self, court_id, pacer_session=None):
super(AttachmentPage, self).__init__(court_id, pacer_session)
if self.court_id.endswith('b'):
# Note that parsing bankruptcy attachment pages does not reveal the
# document number, only the attachment numbers.
self.is_bankruptcy = True
else:
self.is_bankruptcy = False
def query(self, document_number):
"""Query the "attachment page" endpoint and set the results to self.response.
:param document_number: The internal PACER document ID for the item.
:return: a request response object
"""
assert self.session is not None, \
"session attribute of DocketReport cannot be None."
        # coerce the fourth digit of the document number to 0 to ensure we get
# the attachment page.
document_number = document_number[:3] + "0" + document_number[4:]
url = self.url + document_number
logger.info(u'Querying the attachment page endpoint at URL: %s' % url)
self.response = self.session.get(url)
self.parse()
@property
def data(self):
"""Get data back from the query for the matching document entry.
:return: If lookup fails, an empty dict. Else, a dict containing the
following fields:
- document_number: The document number we're working with.
- page_count: The number of pages of the item
- pacer_doc_id: The doc ID for the main document.
- attachments: A list of attached items with the following fields:
- attachment_number: The attachment number.
- description: A description of the item.
- page_count: The number of pages.
- pacer_doc_id: The document ID for the attachment (a str).
See the JSON objects in the tests for more examples.
"""
rows = self.tree.xpath('//tr[.//a]')
if not rows:
logger.info("No documents found on attachment page.")
return {}
first_row = rows.pop(0)
result = {
'document_number': self._get_document_number(first_row),
'page_count': self._get_page_count_from_tr(first_row),
'pacer_doc_id': self._get_pacer_doc_id(first_row),
'pacer_case_id': self._get_pacer_case_id(),
'attachments': []
}
for row in rows:
result['attachments'].append({
'attachment_number': self._get_attachment_number(row),
'description': self._get_description_from_tr(row),
'page_count': self._get_page_count_from_tr(row),
'pacer_doc_id': self._get_pacer_doc_id(row)
})
return result
def _get_document_number(self, row):
"""Return the document number for an item.
In district court attachment pages, this is easy to extract with an
XPath. In bankruptcy cases, it's simply not there.
"""
if self.is_bankruptcy:
return None
else:
return int(row.xpath('.//a/text()')[0].strip())
def _get_attachment_number(self, row):
"""Return the attachment number for an item.
In district courts, this can be easily extracted. In bankruptcy courts,
you must extract it, then subtract 1 from the value since these are
tallied and include the main document.
"""
number = int(row.xpath('.//a/text()')[0].strip())
if self.is_bankruptcy:
return number - 1
else:
return number
def _get_description_from_tr(self, row):
"""Get the description from the row"""
if not self.is_bankruptcy:
index = 2
else:
index = 3
description_text_nodes = row.xpath('./td[%s]//text()' % index)
if len(description_text_nodes) == 0:
# No text in the cell.
return u''
else:
description = description_text_nodes[0].strip()
return force_unicode(description)
@staticmethod
def _get_page_count_from_tr(tr):
"""Take a row from the attachment table and return the page count as an
int extracted from the cell specified by index.
"""
pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
if len(pg_cnt_str_nodes) == 0:
# It's a restricted document without page count information.
return None
else:
for pg_cnt_str_node in pg_cnt_str_nodes:
try:
pg_cnt_str = pg_cnt_str_node.strip()
return int(pg_cnt_str.split()[0])
except ValueError:
# Happens when the description field contains the word "page"
# and gets caught by the xpath. Just press on.
continue
@staticmethod
def _get_pacer_doc_id(row):
"""Take in a row from the attachment table and return the pacer_doc_id
for the item in that row. Return None if the ID cannot be found.
"""
try:
url = row.xpath(u'.//a')[0]
except IndexError:
# Item exists, but cannot download document. Perhaps it's sealed
# or otherwise unavailable in PACER. This is carried over from the
# docket report and may not be needed here, but it's a good
# precaution.
return None
else:
doc1_url = url.xpath('./@href')[0]
return get_pacer_doc_id_from_doc1_url(doc1_url)
def _get_pacer_case_id(self):
"""Get the pacer_case_id value by inspecting the HTML
:returns str: The pacer_case_id value
"""
# Start by inspecting all the links
urls = self.tree.xpath('//a')
for url in urls:
try:
onclick = url.xpath('./@onclick')[0]
except IndexError:
continue
else:
if 'goDLS' not in onclick:
# Some other onclick we don't care about.
continue
go_dls_parts = reverse_goDLS_function(onclick)
return go_dls_parts['caseid']
# If that fails, try inspecting the input elements
input_els = self.tree.xpath('//input')
for input_el in input_els:
try:
onclick = input_el.xpath('./@onclick')[0]
except IndexError:
continue
else:
m = re.search(r'[?&]caseid=(\d+)', onclick, flags=re.I)
if m:
return m.group(1)
| 38.112903 | 85 | 0.589223 | [
"BSD-2-Clause"
] | johnhawkinson/juriscraper | juriscraper/pacer/attachment_page.py | 7,089 | Python |
# System
import json
# SBaaS
from .stage02_physiology_pairWiseTest_query import stage02_physiology_pairWiseTest_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from ddt_python.ddt_container import ddt_container
class stage02_physiology_pairWiseTest_io(stage02_physiology_pairWiseTest_query,
sbaas_template_io):
def import_data_stage02_physiology_pairWiseTest_add(self, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_data_stage02_physiology_pairWiseTest(data.data);
data.clear_data();
def export_dataStage02PhysiologyPairWiseTest_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)'''
#get the data for the analysis
data_O = [];
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTest(analysis_id_I);
# make the data parameters
data1_keys = ['analysis_id','simulation_id_1','simulation_id_2',
'rxn_id','flux_units','test_description'
];
data1_nestkeys = ['analysis_id'];
data1_keymap = {'ydata':'pvalue_negLog10',
'xdata':'fold_change',
'serieslabel':'',
'featureslabel':'rxn_id'};
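        # the keymap puts the geometric fold change on x and -log10(p-value) on y,
        # matching the svg axis labels defined further below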
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 50, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":'Fold Change [geometric]',
"svgy1axislabel":'Probability [-log10(P)]'};
svgtileparameters_O = {'tileheader':'Volcano plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tableclass":"table table-condensed table-hover",
"tablefilters":None,
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'pairWiseTest','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
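        # Map the filter menu and both tiles to the single data object (index 0).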
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
filtermenuobject_O = None;
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());

    def export_dataStage02PhysiologyPairWiseTestMetabolites_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)'''
#get the data for the analysis
data_O = [];
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestMetabolites(analysis_id_I);
# make the data parameters
data1_keys = ['analysis_id','simulation_id_1','simulation_id_2',
'met_id','flux_units','test_description'
];
data1_nestkeys = ['analysis_id'];
data1_keymap = {'ydata':'pvalue_negLog10',
'xdata':'fold_change',
'serieslabel':'',
'featureslabel':'met_id'};
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 50, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":'Fold Change [geometric]',
"svgy1axislabel":'Probability [-log10(P)]'};
svgtileparameters_O = {'tileheader':'Volcano plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tableclass":"table table-condensed table-hover",
"tablefilters":None,
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'pairWiseTest','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
filtermenuobject_O = None;
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());

    def export_dataStage02PhysiologyPairWiseTestSubsystems_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)'''
#get the data for the analysis
data_O = [];
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestSubsystems(analysis_id_I);
# make the data parameters
data1_keys = ['analysis_id','simulation_id_1','simulation_id_2',
'subsystem_id','flux_units','test_description'
];
data1_nestkeys = ['analysis_id'];
data1_keymap = {'ydata':'pvalue_negLog10',
'xdata':'fold_change',
'serieslabel':'',
'featureslabel':'subsystem_id'};
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 50, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":'Fold Change [geometric]',
"svgy1axislabel":'Probability [-log10(P)]'};
svgtileparameters_O = {'tileheader':'Volcano plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tableclass":"table table-condensed table-hover",
"tablefilters":None,
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'pairWiseTest','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
filtermenuobject_O = None;
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
            file.write(ddtutilities.get_allObjects());
| 61.165803 | 248 | 0.61313 | ["MIT"] | dmccloskey/SBaaS_COBRA | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | 11,805 | Python