# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
from traits.testing.unittest_tools import UnittestTools
from force_bdss.api import KPISpecification
from force_bdss.tests.probe_classes.mco import ProbeMCOFactory
from force_wfmanager.ui.setup.mco.base_mco_options_model_view import (
BaseMCOOptionsModelView
)
class TestBaseMCOOptionsModelView(unittest.TestCase, UnittestTools):
def setUp(self):
self.mco_options_model_view = BaseMCOOptionsModelView()
def test_mco_options_model_view_init(self):
self.assertIsNone(self.mco_options_model_view.model)
self.assertTrue(self.mco_options_model_view.valid)
def test__check_model_name(self):
self.mco_options_model_view.available_variables = (
[('T1', 'PRESSURE'), ('T2', 'PRESSURE')]
)
self.assertEqual(['T1', 'T2'],
self.mco_options_model_view._combobox_values)
self.mco_options_model_view.model = KPISpecification(name='T1')
self.mco_options_model_view.available_variables.remove(
self.mco_options_model_view.available_variables[-1]
)
self.assertTrue(self.mco_options_model_view.valid)
self.mco_options_model_view.available_variables.remove(
self.mco_options_model_view.available_variables[0]
)
self.assertEqual('', self.mco_options_model_view.model.name)
error_message = self.mco_options_model_view.model.verify()
self.assertIn(
'KPI is not named',
error_message[0].local_error
)
def test_verify_mco_options(self):
factory = ProbeMCOFactory({'id': '0', 'name': 'plugin'})
parameter_factory = factory.parameter_factories[0]
model = parameter_factory.create_model()
self.mco_options_model_view.model = model
with self.assertTraitChanges(
self.mco_options_model_view, 'verify_workflow_event', 1):
model.test_trait = 10
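# Minimal runner sketch (an addition, not part of the original test module):
# the tests above can be executed directly with the standard-library runner,
# assuming force_bdss and force_wfmanager are installed in the environment.
if __name__ == '__main__':
    unittest.main()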
|
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
Implementation of flags command
"""
import os
from .base import Base
from ..context import bugzilla_instance
from ..bugzilla import BugzillaError
from .. import ui
class Command(Base):
"""Flags operations: add/remove/set"""
def register(self, subparsers):
"""Register parser for flags-related commands"""
parser_ls = subparsers.add_parser('flags')
parser_ls.set_defaults(func=self.run_ls)
parser_ls.add_argument('bug_id', type=int, help='bug ID')
parser_add = subparsers.add_parser('addflag')
parser_add.set_defaults(func=self.run_add)
parser_add.add_argument('bug_id', type=int, help='bug ID')
parser_add.add_argument('name', type=str, help='flag name')
parser_add.add_argument('requestee', type=str, nargs='?', help='requestee')
parser_rm = subparsers.add_parser('rmflags')
parser_rm.set_defaults(func=self.run_rm)
parser_rm.add_argument('bug_id', type=int, help='bug ID')
parser_rm.add_argument('name', type=str, nargs='+', help='flag name')
parser_set = subparsers.add_parser('setflag')
parser_set.set_defaults(func=self.run_set)
parser_set.add_argument('bug_id', type=int, help='bug ID')
parser_set.add_argument('name', type=str, help='flag name')
parser_set.add_argument('status', type=str, choices=['+', '-'], help='flag status')
def run_ls(self, args):
"""Implementation of the 'flags' command"""
bugzilla = bugzilla_instance()
try:
bug = bugzilla.bug(args.bug_id)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
rows = []
for f in bug.flags:
row = []
row.append(str(f.object_id))
row.append(f.name)
row.append(f.status)
row.append(f.requestee)
rows.append(row)
if rows:
column_formats = []
for i in range(len(rows[0]) - 1):
width = max([len(str(row[i])) for row in rows])
column_format = '{: >%d}' % width
column_formats.append(column_format)
row_format = ' '.join(column_formats)
row_format += ' {}'
for row in rows:
ui.output(row_format.format(*row))
def run_add(self, args):
"""Implementation of the 'addflag' command"""
bugzilla = bugzilla_instance()
try:
bugzilla.add_flag(args.bug_id, args.name, args.requestee)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
def run_rm(self, args):
"""Implementation of the 'rmflags' command"""
bugzilla = bugzilla_instance()
try:
bugzilla.rm_flags(args.bug_id, args.name)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
def run_set(self, args):
"""Implementation of the 'setflag' command"""
bugzilla = bugzilla_instance()
try:
bugzilla.update_flag(args.bug_id, args.name, args.status)
except BugzillaError as ex:
ui.fatal('Bugzilla error: {}'.format(ex.message))
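# Hedged usage sketch (an addition, not part of the original module): Command
# only registers subparsers, so a hypothetical top-level driver would wire it
# into argparse roughly like this. It assumes Base() needs no constructor
# arguments and that bugzilla_instance()/ui are configured elsewhere.
def _example_main(argv=None):
    import argparse
    parser = argparse.ArgumentParser(prog='bz')
    subparsers = parser.add_subparsers(dest='command')
    Command().register(subparsers)
    args = parser.parse_args(argv)
    args.func(args)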
|
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DateType,
BooleanType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ActivityDefinitionSchema:
"""
This resource allows for the definition of some activity to be performed,
independent of a particular patient, practitioner, or other performance
context.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
This resource allows for the definition of some activity to be performed,
independent of a particular patient, practitioner, or other performance
context.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a ActivityDefinition resource
url: An absolute URI that is used to identify this activity definition when it is
referenced in a specification, model, design or an instance. This SHALL be a
URL, SHOULD be globally unique, and SHOULD be an address at which this
activity definition is (or will be) published. The URL SHOULD include the
major version of the activity definition. For more information see [Technical
and Business Versions](resource.html#versions).
identifier: A formal identifier that is used to identify this activity definition when it
is represented in other formats, or referenced in a specification, model,
design or an instance.
version: The identifier that is used to identify this version of the activity
definition when it is referenced in a specification, model, design or
instance. This is an arbitrary value managed by the activity definition author
and is not expected to be globally unique. For example, it might be a
timestamp (e.g. yyyymmdd) if a managed version is not available. There is also
no expectation that versions can be placed in a lexicographical sequence. To
provide a version consistent with the Decision Support Service specification,
use the format Major.Minor.Revision (e.g. 1.0.0). For more information on
versioning knowledge assets, refer to the Decision Support Service
specification. Note that a version is required for non-experimental active
assets.
name: A natural language name identifying the activity definition. This name should
be usable as an identifier for the module by machine processing applications
such as code generation.
title: A short, descriptive, user-friendly title for the activity definition.
status: The status of this activity definition. Enables tracking the life-cycle of the
content.
experimental: A boolean value to indicate that this activity definition is authored for
testing purposes (or education/evaluation/marketing), and is not intended to
be used for genuine usage.
date: The date (and optionally time) when the activity definition was published.
The date must change if and when the business version changes and it must
change if the status code changes. In addition, it should change when the
substantive content of the activity definition changes.
publisher: The name of the individual or organization that published the activity
definition.
description: A free text natural language description of the activity definition from a
consumer's perspective.
        purpose: Explanation of why this activity definition is needed and why it has been
designed as it has.
usage: A detailed description of how the asset is used from a clinical perspective.
approvalDate: The date on which the resource content was approved by the publisher. Approval
happens once when the content is officially approved for usage.
lastReviewDate: The date on which the resource content was last reviewed. Review happens
periodically after approval, but doesn't change the original approval date.
effectivePeriod: The period during which the activity definition content was or is planned to
be in active use.
useContext: The content was developed with a focus and intent of supporting the contexts
that are listed. These terms may be used to assist with indexing and searching
for appropriate activity definition instances.
jurisdiction: A legal or geographic region in which the activity definition is intended to
be used.
topic: Descriptive topics related to the content of the activity. Topics provide a
high-level categorization of the activity that can be useful for filtering and
searching.
contributor: A contributor to the content of the asset, including authors, editors,
reviewers, and endorsers.
contact: Contact details to assist a user in finding and communicating with the
publisher.
copyright: A copyright statement relating to the activity definition and/or its contents.
Copyright statements are generally legal restrictions on the use and
publishing of the activity definition.
relatedArtifact: Related artifacts such as additional documentation, justification, or
bibliographic references.
library: A reference to a Library resource containing any formal logic used by the
asset.
kind: A description of the kind of resource the activity definition is representing.
For example, a MedicationRequest, a ProcedureRequest, or a
CommunicationRequest. Typically, but not always, this is a Request resource.
code: Detailed description of the type of activity; e.g. What lab test, what
procedure, what kind of encounter.
timingTiming: The period, timing or frequency upon which the described activity is to occur.
timingDateTime: The period, timing or frequency upon which the described activity is to occur.
timingPeriod: The period, timing or frequency upon which the described activity is to occur.
timingRange: The period, timing or frequency upon which the described activity is to occur.
location: Identifies the facility where the activity will occur; e.g. home, hospital,
specific clinic, etc.
participant: Indicates who should participate in performing the action described.
productReference: Identifies the food, drug or other product being consumed or supplied in the
activity.
productCodeableConcept: Identifies the food, drug or other product being consumed or supplied in the
activity.
quantity: Identifies the quantity expected to be consumed at once (per dose, per meal,
etc.).
dosage: Provides detailed dosage instructions in the same way that they are described
for MedicationRequest resources.
bodySite: Indicates the sites on the subject's body where the procedure should be
performed (I.e. the target sites).
transform: A reference to a StructureMap resource that defines a transform that can be
executed to produce the intent resource using the ActivityDefinition instance
as the input.
dynamicValue: Dynamic values that will be evaluated to produce values for elements of the
resulting resource. For example, if the dosage of a medication must be
computed based on the patient's weight, a dynamic value would be used to
specify an expression that calculated the weight, and the path on the intent
resource that would contain the result.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.usagecontext import (
UsageContextSchema,
)
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.contributor import ContributorSchema
from spark_fhir_schemas.stu3.complex_types.contactdetail import (
ContactDetailSchema,
)
from spark_fhir_schemas.stu3.complex_types.relatedartifact import (
RelatedArtifactSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.timing import TimingSchema
from spark_fhir_schemas.stu3.complex_types.range import RangeSchema
from spark_fhir_schemas.stu3.complex_types.activitydefinition_participant import (
ActivityDefinition_ParticipantSchema,
)
from spark_fhir_schemas.stu3.complex_types.quantity import QuantitySchema
from spark_fhir_schemas.stu3.complex_types.dosage import DosageSchema
from spark_fhir_schemas.stu3.complex_types.activitydefinition_dynamicvalue import (
ActivityDefinition_DynamicValueSchema,
)
if (
max_recursion_limit
and nesting_list.count("ActivityDefinition") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["ActivityDefinition"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a ActivityDefinition resource
StructField("resourceType", StringType(), True),
# An absolute URI that is used to identify this activity definition when it is
# referenced in a specification, model, design or an instance. This SHALL be a
# URL, SHOULD be globally unique, and SHOULD be an address at which this
# activity definition is (or will be) published. The URL SHOULD include the
# major version of the activity definition. For more information see [Technical
# and Business Versions](resource.html#versions).
StructField("url", StringType(), True),
# A formal identifier that is used to identify this activity definition when it
# is represented in other formats, or referenced in a specification, model,
# design or an instance.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The identifier that is used to identify this version of the activity
# definition when it is referenced in a specification, model, design or
# instance. This is an arbitrary value managed by the activity definition author
# and is not expected to be globally unique. For example, it might be a
# timestamp (e.g. yyyymmdd) if a managed version is not available. There is also
# no expectation that versions can be placed in a lexicographical sequence. To
# provide a version consistent with the Decision Support Service specification,
# use the format Major.Minor.Revision (e.g. 1.0.0). For more information on
# versioning knowledge assets, refer to the Decision Support Service
# specification. Note that a version is required for non-experimental active
# assets.
StructField("version", StringType(), True),
# A natural language name identifying the activity definition. This name should
# be usable as an identifier for the module by machine processing applications
# such as code generation.
StructField("name", StringType(), True),
# A short, descriptive, user-friendly title for the activity definition.
StructField("title", StringType(), True),
# The status of this activity definition. Enables tracking the life-cycle of the
# content.
StructField("status", StringType(), True),
# A boolean value to indicate that this activity definition is authored for
# testing purposes (or education/evaluation/marketing), and is not intended to
# be used for genuine usage.
StructField("experimental", BooleanType(), True),
# The date (and optionally time) when the activity definition was published.
# The date must change if and when the business version changes and it must
# change if the status code changes. In addition, it should change when the
# substantive content of the activity definition changes.
StructField("date", StringType(), True),
# The name of the individual or organization that published the activity
# definition.
StructField("publisher", StringType(), True),
# A free text natural language description of the activity definition from a
# consumer's perspective.
StructField("description", StringType(), True),
            # Explanation of why this activity definition is needed and why it has been
# designed as it has.
StructField("purpose", StringType(), True),
# A detailed description of how the asset is used from a clinical perspective.
StructField("usage", StringType(), True),
# The date on which the resource content was approved by the publisher. Approval
# happens once when the content is officially approved for usage.
StructField("approvalDate", DateType(), True),
# The date on which the resource content was last reviewed. Review happens
# periodically after approval, but doesn't change the original approval date.
StructField("lastReviewDate", DateType(), True),
# The period during which the activity definition content was or is planned to
# be in active use.
StructField(
"effectivePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The content was developed with a focus and intent of supporting the contexts
# that are listed. These terms may be used to assist with indexing and searching
# for appropriate activity definition instances.
StructField(
"useContext",
ArrayType(
UsageContextSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A legal or geographic region in which the activity definition is intended to
# be used.
StructField(
"jurisdiction",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Descriptive topics related to the content of the activity. Topics provide a
# high-level categorization of the activity that can be useful for filtering and
# searching.
StructField(
"topic",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A contributor to the content of the asset, including authors, editors,
# reviewers, and endorsers.
StructField(
"contributor",
ArrayType(
ContributorSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Contact details to assist a user in finding and communicating with the
# publisher.
StructField(
"contact",
ArrayType(
ContactDetailSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A copyright statement relating to the activity definition and/or its contents.
# Copyright statements are generally legal restrictions on the use and
# publishing of the activity definition.
StructField("copyright", StringType(), True),
# Related artifacts such as additional documentation, justification, or
# bibliographic references.
StructField(
"relatedArtifact",
ArrayType(
RelatedArtifactSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A reference to a Library resource containing any formal logic used by the
# asset.
StructField(
"library",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A description of the kind of resource the activity definition is representing.
# For example, a MedicationRequest, a ProcedureRequest, or a
# CommunicationRequest. Typically, but not always, this is a Request resource.
StructField("kind", StringType(), True),
# Detailed description of the type of activity; e.g. What lab test, what
# procedure, what kind of encounter.
StructField(
"code",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The period, timing or frequency upon which the described activity is to occur.
StructField(
"timingTiming",
TimingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The period, timing or frequency upon which the described activity is to occur.
StructField("timingDateTime", TimestampType(), True),
# The period, timing or frequency upon which the described activity is to occur.
StructField(
"timingPeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The period, timing or frequency upon which the described activity is to occur.
StructField(
"timingRange",
RangeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifies the facility where the activity will occur; e.g. home, hospital,
# specific clinic, etc.
StructField(
"location",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates who should participate in performing the action described.
StructField(
"participant",
ArrayType(
ActivityDefinition_ParticipantSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies the food, drug or other product being consumed or supplied in the
# activity.
StructField(
"productReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifies the food, drug or other product being consumed or supplied in the
# activity.
StructField(
"productCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifies the quantity expected to be consumed at once (per dose, per meal,
# etc.).
StructField(
"quantity",
QuantitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Provides detailed dosage instructions in the same way that they are described
# for MedicationRequest resources.
StructField(
"dosage",
ArrayType(
DosageSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Indicates the sites on the subject's body where the procedure should be
# performed (I.e. the target sites).
StructField(
"bodySite",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A reference to a StructureMap resource that defines a transform that can be
# executed to produce the intent resource using the ActivityDefinition instance
# as the input.
StructField(
"transform",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Dynamic values that will be evaluated to produce values for elements of the
# resulting resource. For example, if the dosage of a medication must be
# computed based on the patient's weight, a dynamic value would be used to
# specify an expression that calculated the weight, and the path on the intent
# resource that would contain the result.
StructField(
"dynamicValue",
ArrayType(
ActivityDefinition_DynamicValueSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
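# Hedged usage sketch (an addition, not part of the auto-generated file): one
# way to exercise the schema is to build an empty DataFrame from it. The local
# SparkSession configuration below is an assumption.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.master("local[1]").appName("schema_demo").getOrCreate()
    demo_df = spark.createDataFrame([], schema=ActivityDefinitionSchema.get_schema())
    demo_df.printSchema()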
|
# -*- coding: utf-8 -*-
from .models import ConferenceModerator
def list_conference_moderator(user):
qs = ConferenceModerator.objects.filter(moderator=user)
return qs.all()
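# Hedged usage sketch (an addition, not part of the original module): a view
# would typically call the helper with the authenticated user. The attribute
# name 'conference' on ConferenceModerator is an assumption.
def _example_moderated_conferences(user):
    return [m.conference for m in list_conference_moderator(user)]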
|
from __future__ import annotations
import matplotlib as mpl
# https://stackoverflow.com/a/26853961/353337
def _merge(dict1, dict2):
"""Merge two dicts, dict2 takes precedence."""
return {**dict1, **dict2}
def duftify(style: dict, bar: bool = False) -> dict:
try:
grid_color = style["grid.color"]
except KeyError:
# default grid color,
# <https://matplotlib.org/stable/tutorials/introductory/customizing.html>
grid_color = "#b0b0b0"
_stroke_width = 0.3
# make the xticks slightly wider to make them easier to see
_xtick_width = 0.4
# See <https://matplotlib.org/stable/tutorials/introductory/customizing.html> for all
# possible rcParams.
dufte_style = {
"font.size": 14,
"text.color": grid_color,
"axes.labelcolor": grid_color,
"axes.labelpad": 18,
"axes.spines.left": False,
"axes.spines.bottom": False,
"axes.spines.top": False,
"axes.spines.right": False,
"ytick.minor.left": False,
# Axes aren't used in this theme, but still set some properties in case the user
# decides to turn them on.
"axes.edgecolor": grid_color,
"axes.linewidth": _stroke_width,
# default is "line", i.e., below lines but above patches (bars)
"axes.axisbelow": True,
#
"ytick.right": False,
"ytick.color": grid_color,
"ytick.major.width": _stroke_width,
"xtick.minor.top": False,
"xtick.minor.bottom": False,
"xtick.color": grid_color,
"xtick.major.width": _xtick_width,
"axes.grid": True,
"axes.grid.axis": "y",
"grid.color": grid_color,
# Choose the line width such that it's very subtle, but still serves as a guide.
"grid.linewidth": _stroke_width,
"axes.xmargin": 0,
"axes.ymargin": 0,
"axes.titlepad": 40,
"axes.titlesize": 14,
}
if bar:
# hide xticks for bars; the label is enough
dufte_style["xtick.major.width"] = 0
# unhide the bar labels
dufte_style["xtick.major.pad"] = 13
dufte_style["font.size"] = 16
# default:
dufte_style["axes.xmargin"] = mpl.rcParams["axes.xmargin"]
# style_bar["ytick.major.size"] = 10
dufte_style["axes.titlelocation"] = "left"
dufte_style["axes.titlesize"] = 18
return _merge(style, dufte_style)
dufte = duftify({})
dufte_bar = duftify({}, bar=True)
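# Hedged usage sketch (an addition, not part of the original module): the dicts
# above are plain rcParams mappings, so they can be applied globally with
# plt.style.use(dufte) or temporarily as a context, as below.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    with plt.rc_context(dufte):
        plt.plot([0, 1, 2, 3], [0, 1, 4, 9], label="example")
        plt.legend()
        plt.savefig("dufte_demo.png")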
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate elev2D.th for a Bay-Delta SCHISM model using tides at
Point Reyes and Monterey.
2015-06-16: Customized
"""
import sys
import pandas as pd
from netCDF4 import Dataset
from schimpy.separate_species import separate_species
from schimpy.schism_mesh import read_mesh
from vtools.data.vtime import hours, days, seconds
from vtools.datastore.read_ts import read_noaa, read_ts
import numpy as np
from datetime import datetime
import struct, argparse, re
import time
################# command line application #####################
def create_arg_parser():
import textwrap
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(
"""
============== Example ==================
> gen_elev2D.py --outfile elev2D.nc --stime=2009-03-12 --etime=2010-01-01 9415020_gageheight.csv 9413450_gageheight.csv
"""),
description="""Script to create elev2D.th boundary condition from Point Reyes and Monterey NOAA file"""
)
parser.add_argument('--stime', default=None, required=False,
help="Start time in ISO-like format 2009-03-12T00:00:00. Time part and 'T' are optional.")
parser.add_argument('--etime', default=None,
required=False, help='End time.')
parser.add_argument('--hgrid', default='hgrid.gr3',
required=False, help='Name of hgrid file if not hgrid.gr3')
parser.add_argument('--outfile', default='elev2D.th.nc',
help='Name of output file: either elev2D.th or elev2D.th.nc')
parser.add_argument('--slr', default=0.0, type=float, required=False,
help='Scalar sea level rise increment')
parser.add_argument('pt_reyes', default=None,
help='Pt Reyes data file, must have a buffer of 16 days at either end of series')
parser.add_argument('monterey', default=None,
help='Monterey data file, must have a buffer of 16 days at either end of series')
return parser
class THWriter(object):
def __init__(self,path,size,starttime):
pass
#self.myfilehnandle =
def write_step(self,iter,time,vals):
pass
def write_all(self,times,vals):
# if you get to this point
pass
def __del__(self):
pass
# tear down/close things
class BinaryTHWriter(THWriter):
#super(THWriter, self).__init__(path)
def __init__(self,fpath_out,nloc,starttime):
self.outfile = open(fpath_out, 'wb')
#self.myfilehnandle =
self.tformat="f"
self.valformat="f"*nloc
def write_step(self,iter,time,vals):
print("Writing Output")
buf = struct.pack(self.tformat, time)
self.outfile.write(buf)
buf = struct.pack(self.valformat, *vals)
self.outfile.write(buf)
def write_all(self,times,vals):
# if you get to this point
pass
def __del__(self):
self.outfile.close()
# tear down/close things
class NetCDFTHWriter(THWriter):
def __init__(self,fpath_out,nloc,starttime,dt):
self.outfile = Dataset(fpath_out, "w", format="NETCDF4_CLASSIC")
fout = self.outfile
time = fout.createDimension("time", None)
nOpenBndNodes = fout.createDimension("nOpenBndNodes", nloc)
nLevels = fout.createDimension("nLevels", 1)
nComponents = fout.createDimension("nComponents", 1)
one = fout.createDimension("one", 1)
# create netCDF dimension variables and
self.times = fout.createVariable("time","f8", ("time",))
        #todo: what is timestep all about? Did we invent this? Why variable rather than attribute?
self.timestep = fout.createVariable("time_step","f4", ("one",))
self.timestep[0] = dt
        # create elevation time series data to be written to netCDF file
self.timeseries = fout.createVariable("time_series", "f4", ("time", "nOpenBndNodes", "nLevels", "nComponents"))
# variable attributes
self.times.long_name = "simulation time in seconds"
self.times.units = "seconds since " + str(starttime)
self.timeseries.long_name = "water surface elevation at ocean boundary"
self.timestep.long_name = "time step in seconds"
self.timeseries.units = "meters NAVD88"
# Global Attributes -- Metadata
fout.description = "Water Surface Elevation Boundary Conditions at Ocean Boundary "
fout.history = "Created " + str(datetime.now())
        fout.source = "gen_elev2D.py"
def write_step(self,iter,time,vals):
self.timeseries[iter,:,0,0]= vals
self.times[iter]=time
def write_all(self,times,vals):
# if you get to this point
pass
def __del__(self):
self.outfile.close()
def main():
parser = create_arg_parser()
args = parser.parse_args()
monterey_fpath = args.monterey
pt_reyes_fpath = args.pt_reyes
hgrid_fpath = args.hgrid
fpath_out = args.outfile
slr = args.slr
stime = args.stime
etime = args.etime
return gen_elev2D(hgrid_fpath,fpath_out,pt_reyes_fpath,monterey_fpath,stime,etime,slr)
def gen_elev2D(hgrid_fpath,outfile,pt_reyes_fpath,monterey_fpath,start,end,slr):
max_gap = 5
stime = start
etime = end
fpath_out = outfile
#todo: hardwire
nnode = 83
tbuf = days(16)
# convert start time string input to datetime
sdate = pd.Timestamp(stime)
    if etime is not None:
# convert start time string input to datetime
edate = pd.Timestamp(etime)
bufend = edate + tbuf
else:
edate = None
bufend = None
# UTM positions of Point Reyes, Monterey, SF
pos_pr = np.array([502195.03, 4205445.47])
pos_mt = np.array([599422.84, 4051630.37])
pos_sf = np.array([547094.79, 4184499.42])
var_subtidal = np.array([0.938, 0.905, 0.969]) # pr, mt, sf
var_semi = np.array([0.554, 0.493, 0.580])
# Assume 45 degree from north-west to south-east
tangent = np.array([1, -1])
tangent = tangent / np.linalg.norm(tangent) # Normalize
# Rotate 90 cw to get normal vec
normal = np.array([tangent[1], -tangent[0]])
print("tangent: {}".format(tangent))
print("normal: {}".format(normal))
mt_rel = pos_mt - pos_pr
x_mt = np.dot(tangent, mt_rel) # In pr-mt direction
    y_mt = np.dot(normal, mt_rel)  # Component normal to the pr-mt direction
# Grid
#todo: what is the difference between this and m = read_grid()??
mesh = read_mesh(hgrid_fpath)
ocean_boundary = mesh.boundaries[0] # First one is ocean
# Data
print("Reading Point Reyes...")
pt_reyes = read_noaa(pt_reyes_fpath, start=sdate - tbuf, end=bufend, force_regular=True)
pt_reyes.interpolate(limit=max_gap,inplace=True)
if pt_reyes.isna().any(axis=None):
raise ValueError("pt_reyes has gaps larger than fill limit")
ts_pr_subtidal, ts_pr_diurnal, ts_pr_semi, noise = separate_species(pt_reyes,noise_thresh_min=150)
del noise
print("Reading Monterey...")
monterey = read_noaa(monterey_fpath, start=sdate - tbuf, end=bufend, force_regular=True)
monterey.interpolate(limit=max_gap,inplace=True)
    if monterey.isna().any(axis=None):
raise ValueError("monterey has gaps larger than fill limit")
if pt_reyes.index.freq != monterey.index.freq:
raise ValueError(
"Point Reyes and Monterey time step must be the same in gen_elev2D.py")
ts_mt_subtidal, ts_mt_diurnal, ts_mt_semi, noise = separate_species(monterey,noise_thresh_min=150)
del noise
dt = monterey.index.freq/seconds(1)
print("Done Reading")
print("Interpolating and subsetting Point Reyes")
# interpolate_ts(ts_pr_subtidal.window(sdate,edate),step)
ts_pr_subtidal = ts_pr_subtidal.loc[sdate:edate]
ts_pr_diurnal = ts_pr_diurnal.loc[sdate:edate]
# interpolate_ts(ts_pr_semi.window(sdate,edate),step)
ts_pr_semi = ts_pr_semi.loc[sdate:edate]
print("Interpolating and subsetting Monterey")
# interpolate_ts(ts_mt_subtidal.window(sdate,edate),step)
ts_mt_subtidal = ts_mt_subtidal.loc[sdate:edate]
# interpolate_ts(ts_mt_diurnal.window(sdate,edate),step)
ts_mt_diurnal = ts_mt_diurnal.loc[sdate:edate]
# interpolate_ts(ts_mt_semi.window(sdate,edate),step)
ts_mt_semi = ts_mt_semi.loc[sdate:edate]
print("Creating writer") # requires dt be known for netcdf
if fpath_out.endswith("th"):
thwriter = BinaryTHWriter(fpath_out,nnode,None)
elif fpath_out.endswith("nc"):
thwriter = NetCDFTHWriter(fpath_out,nnode,sdate,dt)
else:
raise ValueError("File extension for output not recognized in file: {}".format(fpath_out))
# Grid
boundaries = mesh.nodes[ocean_boundary.nodes]
pos_rel = boundaries[:, :2] - pos_pr
# x, y in a new principal axes
x = np.dot(pos_rel, tangent.reshape((2, -1)))
y = np.dot(pos_rel, normal.reshape((2, -1)))
theta_x = x / x_mt
theta_x_comp = 1. - theta_x
theta_y = y / y_mt
theta_y_comp = 1. - theta_y
var_y = (theta_y_comp * var_semi[0] + theta_y * var_semi[1])
    # Note: the assignments below are successive tuning trials kept for history;
    # only the final values (adj_subtidal_mt = 0.09, scaling_diurnal_mt = 0.94,
    # scaling_diurnal_pr = 0.94, scaling_semidiurnal_mt = 1.0) take effect.
    # adj_subtidal_mt = 0.08 # Adjustment in Monterey subtidal signal
    # scaling_diurnal_mt = 0.95 # Scaling of Monterey diurnal signal (for K1/Q1)
    # Used this up to v75
adj_subtidal_mt = 0. # Adjustment in Monterey subtidal signal
scaling_diurnal_mt = 1. # Scaling of Monterey diurnal signal (for K1/Q1)
# New trial for LSC2 with v75
adj_subtidal_mt = -0.07 # Adjustment in Monterey subtidal signal
scaling_diurnal_mt = 0.95 # Scaling of Monterey diurnal signal (for K1/Q1)
scaling_semidiurnal_mt = 1.03
adj_subtidal_mt = -0.14 # Adjustment in Monterey subtidal signal
scaling_diurnal_mt = 0.90 # Scaling of Monterey diurnal signal (for K1/Q1)
scaling_semidiurnal_mt = 1.07
adj_subtidal_mt = 0.10 # Adjustment in Monterey subtidal signal
scaling_diurnal_mt = 0.90 # Scaling of Monterey diurnal signal (for K1/Q1)
scaling_semidiurnal_mt = 1.03
adj_subtidal_mt = 0.10 # Adjustment in Monterey subtidal signal
scaling_diurnal_mt = 0.97 # Scaling of Monterey diurnal signal (for K1/Q1)
# Scaling of Point Reyes diurnal signal (for K1/Q1)
scaling_diurnal_pr = 0.97
scaling_semidiurnal_mt = 1.025 # Scaling at Monterey semi-diurnal signal
adj_subtidal_mt = 0.09 # Adjustment in Monterey subtidal signal
scaling_diurnal_mt = 0.94 # Scaling of Monterey diurnal signal (for K1/Q1)
# Scaling of Point Reyes diurnal signal (for K1/Q1)
scaling_diurnal_pr = 0.94
scaling_semidiurnal_mt = 1.0 # Scaling at Monterey semi-diurnal signal
if ts_pr_semi.isna().any(axis=None):
print(ts_pr_semi[ts_pr_semi.isna()])
raise ValueError('Above times are missing in Point Reyes data')
for i in range(len(ts_pr_semi)):
t = float(dt * i)
# semi-diurnal
# Scaling
pr = ts_pr_semi.iloc[i,0]
mt = ts_mt_semi.iloc[i,0] * scaling_semidiurnal_mt
if np.isnan(pr) or np.isnan(mt):
raise ValueError("One of values is numpy.nan.")
eta_pr_side = var_y / var_semi[0] * pr
eta_mt_side = var_y / var_semi[1] * mt
eta = eta_pr_side * theta_x_comp + eta_mt_side * theta_x
# diurnal
# Interpolate in x-direction only to get a better phase
pr = ts_pr_diurnal.iloc[i,0] * scaling_diurnal_pr
mt = ts_mt_diurnal.iloc[i,0] * scaling_diurnal_mt
#if i < 5:
# print("yu")
# print(pr)
# print(mt)
if np.isnan(pr) or np.isnan(mt):
raise ValueError("One of values is numpy.nan.")
eta += pr * theta_x_comp + mt * theta_x
# Subtidal
# No phase change in x-direction. Simply interpolate in
# y-direction.
pr = ts_pr_subtidal.iloc[i,0]
mt = ts_mt_subtidal.iloc[i,0] + adj_subtidal_mt
if np.isnan(pr) or np.isnan(mt):
raise ValueError("One of values is numpy.nan.")
eta += pr * theta_y_comp + mt * theta_y + slr
# write data to netCDF file
thwriter.write_step(i,t,eta)
# Delete class
del thwriter
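# Hedged sketch (an addition, not part of the original script): BinaryTHWriter
# packs each step as one float32 time followed by nloc float32 elevations, so a
# binary elev2D.th written above can be read back roughly like this. The default
# nloc=83 mirrors the hardwired nnode and is an assumption for other grids.
def read_binary_th(fpath, nloc=83):
    record = struct.Struct("f" + "f" * nloc)
    steps = []
    with open(fpath, "rb") as fin:
        while True:
            buf = fin.read(record.size)
            if len(buf) < record.size:
                break
            values = record.unpack(buf)
            steps.append((values[0], np.array(values[1:])))
    return steps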
if __name__ == "__main__":
    main()
|
from sklearn.model_selection import train_test_split
from lifelines import CoxPHFitter
import matplotlib.pyplot as plt
import streamlit as st
class Model:
def __init__(self, data):
self.data = data
def coxPH(self):
train_features = ['gender_Female', 'Partner_Yes', 'Dependents_Yes', 'PhoneService_Yes', 'MultipleLines_Yes',
'InternetService_DSL', 'InternetService_Fiber optic', 'OnlineSecurity_Yes',
'DeviceProtection_Yes',
'TechSupport_Yes', 'StreamingTV_Yes', 'StreamingMovies_Yes', 'Contract_One year',
'Contract_Two year',
'PaperlessBilling_Yes', 'PaymentMethod_Bank transfer (automatic)',
'PaymentMethod_Credit card (automatic)',
'PaymentMethod_Electronic check', 'MonthlyCharges', 'TotalCharges', 'tenure', 'Churn_Yes']
cph_train, cph_test = train_test_split(self.data[train_features], test_size=0.2, random_state=100)
cph = CoxPHFitter()
cph.fit(cph_train, 'tenure', 'Churn_Yes')
self.visualize(cph)
return cph_train, cph_test, cph
def visualize(self, model):
plt.clf()
model.print_summary()
        st.write('''
        **Coefficient chart (feature significance):** This shows the relative effect of each feature on
        customer churn. A feature with a positive coefficient increases the probability of churn, while a
        feature with a negative coefficient reduces it.
        ''')
model.plot()
st.pyplot(plt)
        st.write('''
        **Survival curves** for customers whose TotalCharges are 4000, 2500, 2000 and 0.
        Customers with higher TotalCharges clearly have higher survival chances.
        ''')
model.plot_partial_effects_on_outcome('TotalCharges', [0, 2000, 2500, 4000], cmap='coolwarm').set_xlabel('tenure period')
st.pyplot(plt)
st.write('### Approach')
st.write("""Survival analysis models are used to predict churn.
It helps you predict the survival chances of the customer at any given point of time.
Here we have used one type of survival analysis model called as CoXPH for predicting churn""")
link = '[Read more](https://en.wikipedia.org/wiki/Survival_analysis)'
st.markdown(link, unsafe_allow_html=True)
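# Hedged usage sketch (an addition, not part of the original module): Model
# expects a one-hot encoded telco-churn style DataFrame that contains every
# column in train_features. The file name 'churn.csv' and the dummy-encoding
# step below are assumptions.
if __name__ == "__main__":
    import pandas as pd
    raw = pd.read_csv("churn.csv")
    encoded = pd.get_dummies(raw, drop_first=True)
    cph_train, cph_test, cph = Model(encoded).coxPH()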
|
num = (int(input('Enter a number: ')),
       int(input('Enter another number: ')),
       int(input('Enter one more number: ')),
       int(input('Enter the last number: ')))
print(f'You entered the numbers {num}')
print(f'The number 9 appeared {num.count(9)} times')
if 3 in num:
    print(f'The value 3 first appeared at position {num.index(3) + 1}')
else:
    print('The value 3 was not entered')
for n in num:
    if n % 2 == 0:
        print(f'The number {n} is even')
|
import graph_preprocessing as gp
import time
import random
import json
import os
from numba import jit, cuda
# graph = {
# 1: [8, 3, 7],
# 2: [7],
# 3: [1, 5],
# 4: [5, 8],
# 5: [4,3],
# 6: [7],
# 7: [1, 2, 6],
# 8: [1, 4]
# }
# hospital = [8,6, 2, 7]
start = time.time()
graph_reader = gp.Graph_Reader()
graph = graph_reader.readGraph()
# hospital = graph_reader.readHospital()
hospital = {}
node = [5]
allPaths = {}
visitedNodePath = {}
number_hospitals = 250
for h in range(number_hospitals):
numb = random.randint(0, len(graph) - 1)
    while numb in hospital or numb not in graph:
numb = random.randint(0, len(graph) - 1)
hospital[numb] = 1
print("List of Hospital:")
print(hospital)
print("Done Reading Graphs: " + str(time.time() - start))
nodesWithoutHospital = len(graph) - number_hospitals
def write_data_json_file(output_directory, file_name, writePath):
# Load in existing file record
if os.path.isfile('./' + output_directory + file_name):
with open(output_directory + file_name) as json_file:
f_data = json.load(json_file)
if len(f_data) != 0:
data = f_data
data = writePath
with open(output_directory + file_name, 'w') as outfile:
json.dump(data, outfile)
def backtrack(parent, startNode, end):
path = [end]
while path[-1] != startNode:
currentNode = parent[path[-1]]
visitedNodePath[path[-1]] = True
path.append(currentNode)
newPath = path.copy()
newPath.reverse()
allPaths[currentNode] = newPath
#print(allPaths[currentNode])
#print("Run" + str(currentNode) + " " + str(time.time() - start))
print("Completion: " + str(round(len(allPaths) / nodesWithoutHospital * 100, 6)) + "%")
path.reverse()
return path
def BFS_Selected(startNode, hospitalNode):
visitedNode = {}
queue = []
parent = {}
queue.append(startNode)
visitedNode[startNode] = True
while queue:
currentNode = queue.pop(0)
if currentNode == hospitalNode:
return backtrack(parent, startNode, currentNode)
for i in range(len(graph[currentNode])):
if graph[currentNode][i] not in visitedNode:
if graph[currentNode][i] in allPaths:
return allPaths[graph[currentNode][i]]
parent[graph[currentNode][i]] = currentNode
queue.append(graph[currentNode][i])
visitedNode[graph[currentNode][i]] = True
def BFS_Shortest(startNode):
visitedNode = {}
queue = []
parent = {}
queue.append(startNode)
visitedNode[startNode] = True
while queue:
currentNode = queue.pop(0)
if currentNode in hospital:
return backtrack(parent, startNode, currentNode)
for i in range(len(graph[currentNode])):
if graph[currentNode][i] not in visitedNode:
parent[graph[currentNode][i]] = currentNode
queue.append(graph[currentNode][i])
visitedNode[graph[currentNode][i]] = True
# @jit
def BFS_ShortestCuda(startNode):
visitedNode = {}
queue = []
parent = {}
queue.append(startNode)
visitedNode[startNode] = True
while queue:
currentNode = queue.pop(0)
if currentNode in hospital:
return backtrack(parent, startNode, currentNode)
for i in range(len(graph[currentNode])):
if graph[currentNode][i] not in visitedNode:
parent[graph[currentNode][i]] = currentNode
queue.append(graph[currentNode][i])
visitedNode[graph[currentNode][i]] = True
def analyseGraphEdges():
edgeCounts = {}
for key in graph:
edgeCounts[key] = len(graph[key])
write_data_json_file("output/", "edgesCount.json", edgeCounts)
topEdges = 4
highKeys = []
for key in graph:
if len(graph[key]) >= topEdges and key not in hospital:
highKeys.append(key)
for key in highKeys:
if key not in allPaths:
BFS_ShortestCuda(key)
for key in graph:
if key not in allPaths and key not in hospital:
BFS_ShortestCuda(key)
write_data_json_file("output/", "AppendingBFS_output.json", {"hospitals": hospital, "paths": allPaths})
print("Nodes Number:" + str(len(graph)))
print("Run Finished: " + str(time.time() - start))
|
# This program demonstrates usage of the 'with' statement from Lesson 6
# Use a raw string so the backslashes in the Windows path are not treated as escapes.
with open(r"E:\SelfStudy\Python\PyMegaCourse\example3.txt", "a+") as file:
    # In "a+" mode the file position starts at the end, so read() returns ''
    # unless the file is first rewound with file.seek(0).
    #file.seek(0)
    content = file.read()
    file.write("\nTest 8")
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for the control service REST API.
"""
import socket
from contextlib import closing
from uuid import uuid4
from pyrsistent import thaw, pmap
from twisted.trial.unittest import TestCase
from twisted.internet.defer import gatherResults
from treq import get, json_content, content
from eliot import Message
from ..testtools import (
REALISTIC_BLOCKDEVICE_SIZE, loop_until, random_name, find_free_port,
)
from .testtools import (
MONGO_IMAGE, require_mongo, get_mongo_client,
require_cluster, require_moving_backend,
)
# A command that will run an "HTTP" server in a Busybox container. The server
# responds "hi" to any request.
BUSYBOX_HTTP = [
u"sh", u"-c",
u"""\
echo -n '#!/bin/sh
echo -n "HTTP/1.1 200 OK\r\n\r\nhi"
' > /tmp/script.sh;
chmod +x /tmp/script.sh;
nc -ll -p 8080 -e /tmp/script.sh
"""
]
def verify_socket(host, port):
"""
    Wait until the destination can be connected to.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:return Deferred: Firing when connection is possible.
"""
def can_connect():
with closing(socket.socket()) as s:
conn = s.connect_ex((host, port))
Message.new(
message_type="acceptance:verify_socket",
host=host,
port=port,
result=conn,
).write()
return conn == 0
dl = loop_until(can_connect)
return dl
class ContainerAPITests(TestCase):
"""
Tests for the container API.
"""
def _create_container(self, cluster):
"""
Create a container listening on port 8080.
:return: ``Deferred`` firing with a tuple of ``Cluster`` instance
and container dictionary once the container is up and running.
"""
data = {
u"name": random_name(self),
u"image": "clusterhq/flask:latest",
u"ports": [{u"internal": 80, u"external": 8080}],
u'restart_policy': {u'name': u'never'},
u"node_uuid": cluster.nodes[0].uuid,
}
d = cluster.create_container(data)
def check_result(result):
cluster, response = result
self.addCleanup(cluster.remove_container, data[u"name"])
self.assertEqual(response, data)
dl = verify_socket(cluster.nodes[0].public_address, 8080)
dl.addCallback(lambda _: response)
return dl
d.addCallback(check_result)
return d
@require_cluster(1)
def test_create_container_with_ports(self, cluster):
"""
Create a container including port mappings on a single-node cluster.
"""
return self._create_container(cluster)
@require_cluster(1)
def test_create_container_with_environment(self, cluster):
"""
Create a container including environment variables on a single-node
cluster.
"""
data = {
u"name": random_name(self),
u"image": "clusterhq/flaskenv:latest",
u"ports": [{u"internal": 8080, u"external": 8081}],
u"environment": {u"ACCEPTANCE_ENV_LABEL": 'acceptance test ok'},
u'restart_policy': {u'name': u'never'},
}
data[u"node_uuid"] = cluster.nodes[0].uuid
d = cluster.create_container(data)
def check_result((cluster, response)):
self.addCleanup(cluster.remove_container, data[u"name"])
self.assertEqual(response, data)
return cluster
def query_environment(host, port):
"""
The running container, clusterhq/flaskenv, is a simple Flask app
that returns a JSON dump of the container's environment, so we
make an HTTP request and parse the response.
"""
req = get(
"http://{host}:{port}".format(host=host, port=port),
persistent=False
).addCallback(json_content)
return req
d.addCallback(check_result)
def checked(cluster):
host = cluster.nodes[0].public_address
d = verify_socket(host, 8081)
d.addCallback(lambda _: query_environment(host, 8081))
return d
d.addCallback(checked)
d.addCallback(
lambda response:
self.assertDictContainsSubset(data[u"environment"], response)
)
return d
@require_moving_backend
@require_mongo
@require_cluster(2)
def test_move_container_with_dataset(self, cluster):
"""
Create a mongodb container with an attached dataset, issue API call
to move the container. Wait until we can connect to the running
container on the new host and verify the data has moved with it.
"""
creating_dataset = create_dataset(self, cluster)
def created_dataset(result):
cluster, dataset = result
mongodb = {
u"name": random_name(self),
u"node_uuid": cluster.nodes[0].uuid,
u"image": MONGO_IMAGE,
u"ports": [{u"internal": 27017, u"external": 27017}],
u'restart_policy': {u'name': u'never'},
u"volumes": [{u"dataset_id": dataset[u"dataset_id"],
u"mountpoint": u"/data/db"}],
}
created = cluster.create_container(mongodb)
created.addCallback(lambda _: self.addCleanup(
cluster.remove_container, mongodb[u"name"]))
created.addCallback(
lambda _: get_mongo_client(cluster.nodes[0].public_address))
def got_mongo_client(client):
database = client.example
database.posts.insert({u"the data": u"it moves"})
return database.posts.find_one()
created.addCallback(got_mongo_client)
def inserted(record):
moved = cluster.move_container(
mongodb[u"name"], cluster.nodes[1].uuid
)
def destroy_and_recreate(_, record):
"""
After moving our container via the API, we then remove the
container on the new host and recreate it, pointing to the
same dataset, but with the new container instance exposing
a different external port. This technique ensures that the
test does not pass by mere accident without the container
having moved; by recreating the container on its new host
after moving, we can be sure that if we can still connect
and read the data, the dataset was successfully moved along
with the container.
"""
removed = cluster.remove_container(mongodb[u"name"])
mongodb2 = mongodb.copy()
mongodb2[u"ports"] = [
{u"internal": 27017, u"external": 27018}
]
mongodb2[u"node_uuid"] = cluster.nodes[1].uuid
removed.addCallback(
lambda _: cluster.create_container(mongodb2))
removed.addCallback(lambda _: record)
return removed
moved.addCallback(destroy_and_recreate, record)
return moved
created.addCallback(inserted)
def moved(record):
d = get_mongo_client(cluster.nodes[1].public_address, 27018)
d.addCallback(lambda client: client.example.posts.find_one())
d.addCallback(self.assertEqual, record)
return d
created.addCallback(moved)
return created
creating_dataset.addCallback(created_dataset)
return creating_dataset
@require_mongo
@require_cluster(1)
def test_create_container_with_dataset(self, cluster):
"""
        Create a mongodb container with an attached dataset, insert some data,
        shut it down, create a new container with the same dataset, and make
        sure the data is still there.
"""
creating_dataset = create_dataset(self, cluster)
def created_dataset(result):
cluster, dataset = result
mongodb = {
u"name": random_name(self),
u"node_uuid": cluster.nodes[0].uuid,
u"image": MONGO_IMAGE,
u"ports": [{u"internal": 27017, u"external": 27017}],
u'restart_policy': {u'name': u'never'},
u"volumes": [{u"dataset_id": dataset[u"dataset_id"],
u"mountpoint": u"/data/db"}],
}
created = cluster.create_container(mongodb)
created.addCallback(lambda _: self.addCleanup(
cluster.remove_container, mongodb[u"name"]))
created.addCallback(
lambda _: get_mongo_client(cluster.nodes[0].public_address))
def got_mongo_client(client):
database = client.example
database.posts.insert({u"the data": u"it moves"})
return database.posts.find_one()
created.addCallback(got_mongo_client)
def inserted(record):
removed = cluster.remove_container(mongodb[u"name"])
mongodb2 = mongodb.copy()
mongodb2[u"ports"] = [{u"internal": 27017, u"external": 27018}]
removed.addCallback(
lambda _: cluster.create_container(mongodb2))
removed.addCallback(lambda _: record)
return removed
created.addCallback(inserted)
def restarted(record):
d = get_mongo_client(cluster.nodes[0].public_address, 27018)
d.addCallback(lambda client: client.example.posts.find_one())
d.addCallback(self.assertEqual, record)
return d
created.addCallback(restarted)
return created
creating_dataset.addCallback(created_dataset)
return creating_dataset
@require_cluster(1)
def test_current(self, cluster):
"""
The current container endpoint includes a currently running container.
"""
creating = self._create_container(cluster)
def created(data):
data[u"running"] = True
def in_current():
current = cluster.current_containers()
current.addCallback(lambda result: data in result)
return current
return loop_until(in_current)
creating.addCallback(created)
return creating
def assert_busybox_http(self, host, port):
"""
        Assert that an HTTP server returning a response with body ``b"hi"`` is
        running at the given host and port.
This can be coupled with code that only conditionally starts up
the HTTP server via Flocker in order to check if that particular
setup succeeded.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
"""
def query(host, port):
req = get(
"http://{host}:{port}".format(host=host, port=port),
persistent=False
).addCallback(content)
return req
d = verify_socket(host, port)
d.addCallback(lambda _: query(host, port))
d.addCallback(self.assertEqual, b"hi")
return d
@require_cluster(1)
def test_non_root_container_can_access_dataset(self, cluster):
"""
A container running as a user that is not root can write to a
dataset attached as a volume.
"""
_, port = find_free_port()
node = cluster.nodes[0]
container = {
u"name": random_name(self),
u"node_uuid": node.uuid,
u"image": u"busybox",
u"ports": [{u"internal": 8080, u"external": port}],
u'restart_policy': {u'name': u'never'},
u"volumes": [{u"dataset_id": None,
u"mountpoint": u"/data"}],
u"command_line": [
# Run as non-root user:
u"su", u"-", u"nobody", u"-c", u"sh", u"-c",
# Write something to volume we attached, and then
# expose what we wrote as a web server; for info on nc options
# you can do `docker run busybox man nc`.
u"""\
echo -n '#!/bin/sh
echo -n "HTTP/1.1 200 OK\r\n\r\nhi"
' > /data/script.sh;
chmod +x /data/script.sh;
nc -ll -p 8080 -e /data/script.sh
"""]}
creating_dataset = create_dataset(self, cluster)
def created_dataset(result):
cluster, dataset = result
container[u"volumes"][0][u"dataset_id"] = dataset[u"dataset_id"]
return cluster.create_container(container)
creating_dataset.addCallback(created_dataset)
creating_dataset.addCallback(lambda _: self.addCleanup(
cluster.remove_container, container[u"name"]))
creating_dataset.addCallback(
lambda _: self.assert_busybox_http(node.public_address, port))
return creating_dataset
@require_cluster(2)
def test_linking(self, cluster):
"""
A link from an origin container to a destination container allows the
origin container to establish connections to the destination container
        when the containers are running on different machines, using an address
        obtained from ``<ALIAS>_PORT_<PORT>_TCP_{ADDR,PORT}``-style environment
        variables set in the origin container's environment.
"""
_, destination_port = find_free_port()
_, origin_port = find_free_port()
[destination, origin] = cluster.nodes
busybox = pmap({
u"image": u"busybox",
})
destination_container = busybox.update({
u"name": random_name(self),
u"node_uuid": destination.uuid,
u"ports": [{u"internal": 8080, u"external": destination_port}],
u"command_line": BUSYBOX_HTTP,
})
self.addCleanup(
cluster.remove_container, destination_container[u"name"]
)
origin_container = busybox.update({
u"name": random_name(self),
u"node_uuid": origin.uuid,
u"links": [{u"alias": "DEST", u"local_port": 80,
u"remote_port": destination_port}],
u"ports": [{u"internal": 9000, u"external": origin_port}],
u"command_line": [
u"sh", u"-c", u"""\
echo -n '#!/bin/sh
nc $DEST_PORT_80_TCP_ADDR $DEST_PORT_80_TCP_PORT
' > /tmp/script.sh;
chmod +x /tmp/script.sh;
nc -ll -p 9000 -e /tmp/script.sh
"""]})
self.addCleanup(
cluster.remove_container, origin_container[u"name"]
)
running = gatherResults([
cluster.create_container(thaw(destination_container)),
cluster.create_container(thaw(origin_container)),
# Wait for the link target container to be accepting connections.
verify_socket(destination.public_address, destination_port),
# Wait for the link source container to be accepting connections.
verify_socket(origin.public_address, origin_port),
])
running.addCallback(
lambda _: self.assert_busybox_http(
origin.public_address, origin_port))
return running
def create_dataset(test_case, cluster,
maximum_size=REALISTIC_BLOCKDEVICE_SIZE):
"""
Create a dataset on a cluster (on its first node, specifically).
:param TestCase test_case: The test the API is running on.
:param Cluster cluster: The test ``Cluster``.
:param int maximum_size: The size of the dataset to create on the test
cluster.
:return: ``Deferred`` firing with a tuple of (``Cluster``
instance, dataset dictionary) once the dataset is present in
actual cluster state.
"""
# Configure a dataset on node1
requested_dataset = {
u"primary": cluster.nodes[0].uuid,
u"dataset_id": unicode(uuid4()),
u"maximum_size": maximum_size,
u"metadata": {u"name": u"my_volume"},
}
configuring_dataset = cluster.create_dataset(requested_dataset)
# Wait for the dataset to be created
waiting_for_create = configuring_dataset.addCallback(
lambda (cluster, dataset): cluster.wait_for_dataset(dataset)
)
return waiting_for_create
class DatasetAPITests(TestCase):
"""
Tests for the dataset API.
"""
@require_cluster(1)
def test_dataset_creation(self, cluster):
"""
A dataset can be created on a specific node.
"""
return create_dataset(self, cluster)
@require_moving_backend
@require_cluster(2)
def test_dataset_move(self, cluster):
"""
A dataset can be moved from one node to another.
"""
waiting_for_create = create_dataset(self, cluster)
# Once created, request to move the dataset to node2
def move_dataset((cluster, dataset)):
moved_dataset = {
u'primary': cluster.nodes[1].uuid
}
return cluster.update_dataset(dataset['dataset_id'], moved_dataset)
dataset_moving = waiting_for_create.addCallback(move_dataset)
# Wait for the dataset to be moved
waiting_for_move = dataset_moving.addCallback(
lambda (cluster, dataset): cluster.wait_for_dataset(dataset)
)
return waiting_for_move
@require_cluster(1)
def test_dataset_deletion(self, cluster):
"""
A dataset can be deleted, resulting in its removal from the node.
"""
created = create_dataset(self, cluster)
def delete_dataset(result):
cluster, dataset = result
deleted = cluster.delete_dataset(dataset["dataset_id"])
def not_exists():
request = cluster.datasets_state()
request.addCallback(
lambda actual_datasets: dataset["dataset_id"] not in
(d["dataset_id"] for d in actual_datasets))
return request
deleted.addCallback(lambda _: loop_until(not_exists))
return deleted
created.addCallback(delete_dataset)
return created
|
#!/usr/bin/env python
##########################################################################
# frontends/swig_python/RemoteThrill.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import marshal
import rpyc
class RemoteDIA():
def __init__(self, dias):
self._dias = dias
def AllGather(self):
# make async objects
anetrefs = [rpyc.async(dia.AllGather) for dia in self._dias]
# issue async requests
asyncs = [ref() for ref in anetrefs]
for a in asyncs:
a.wait()
# return values of workers as list
return [a.value for a in asyncs]
def Size(self):
# make async objects
anetrefs = [rpyc.async(dia.Size) for dia in self._dias]
# issue async requests
asyncs = [ref() for ref in anetrefs]
for a in asyncs:
a.wait()
# return values of workers as list
return [a.value for a in asyncs]
def Map(self, map_function):
# make async objects
anetrefs = [rpyc.async(dia.Map) for dia in self._dias]
# issue async requests
_map_function = marshal.dumps(map_function.__code__)
asyncs = [ref(_map_function) for ref in anetrefs]
for a in asyncs:
a.wait()
# return RemoteDIA
return RemoteDIA([a.value for a in asyncs])
def ReduceBy(self, key_extractor, reduce_function):
# make async objects
anetrefs = [rpyc.async(dia.ReduceBy) for dia in self._dias]
# issue async requests
_key_extractor = marshal.dumps(key_extractor.__code__)
_reduce_function = marshal.dumps(reduce_function.__code__)
asyncs = [ref(_key_extractor, _reduce_function) for ref in anetrefs]
for a in asyncs:
a.wait()
# return RemoteDIA
return RemoteDIA([a.value for a in asyncs])
class RemoteThrill():
def __init__(self, rpyc_hosts, thrill_hosts):
# connect to rpyc servers
self._conn = [rpyc.connect(*hp) for hp in rpyc_hosts]
# set up background serving threads
self._bgthr = [rpyc.BgServingThread(conn) for conn in self._conn]
# make async objects to create Thrill contexts
anetrefs = [rpyc.async(conn.root.Create) for conn in self._conn]
# issue async requests
asyncs = [ref(rank, thrill_hosts) for rank, ref in enumerate(anetrefs)]
for a in asyncs:
a.wait()
# get created Thrill contexts
self._ctx = [a.value for a in asyncs]
def Distribute(self, array):
# make async objects
anetrefs = [rpyc.async(ctx.Distribute) for ctx in self._ctx]
# issue async requests
asyncs = [ref(array) for ref in anetrefs]
for a in asyncs:
a.wait()
# return RemoteDIA
return RemoteDIA([a.value for a in asyncs])
def Generate(self, generator_function, size):
# make async objects
anetrefs = [rpyc.async(ctx.Generate) for ctx in self._ctx]
# issue async requests
_generator_function = marshal.dumps(generator_function.__code__)
asyncs = [ref(_generator_function, size) for ref in anetrefs]
for a in asyncs:
a.wait()
# return RemoteDIA
return RemoteDIA([a.value for a in asyncs])
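# Illustrative sketch (hypothetical hosts and ports): drive several Thrill workers
# through their rpyc servers and run a small distributed pipeline. Note that the
# ``rpyc.async`` helper used above predates Python 3.7, where ``async`` became a
# keyword; recent rpyc releases expose the same call as ``rpyc.async_``.
#
#   rt = RemoteThrill([("node1", 18861), ("node2", 18861)],
#                     ["node1:10000", "node2:10000"])
#   dia = rt.Distribute(list(range(100)))
#   doubled = dia.Map(lambda x: 2 * x)
#   print(doubled.AllGather())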
##########################################################################
|
import skimage.io as io
import skimage.transform as skt
import skimage.morphology as skm
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_milk.png', **options):
super().__init__('ミルク', body=body, pantie_position=[741, 224], **options)
self.mask = io.imread('./mask/mask_milk.png')
self.sign_position = [754, 113]
try:
self.add_sign = self.options['add_sign']
except:
self.add_sign = self.ask(question='Add immoral sign?', default=False)
if self.add_sign:
try:
self.sign = Image.open(self.options['fsign'])
except:
self.sign = Image.open('./material/anna_sign.png')
self.sign = self.sign.resize((int(self.sign.width * 0.6), int(self.sign.height * 0.6)))
def convert(self, image):
pantie = np.array(image)
mask = io.imread('./mask/mask_milk.png')
pantie = np.bitwise_and(pantie, mask)
[r, c, d] = pantie.shape
front = pantie[:160 + 30, :200, :]
back = pantie[:300, 200:, ]
patch = pantie[-100:-5, 546:, :][::-1, ::-1, :]
# Front and front patch pre-processing
front = resize(front, (2, 2))
patch = resize(patch, (1.0, 1.15))
[fr, fc, _] = front.shape
[pr, pf, _] = patch.shape
patch_pad = np.zeros((fr, fc, d))
patch_pad[-pr:, :pf, :] = patch
patch_pad = perspective_transform(patch_pad, np.matrix('1, 0, 0; 0, 1, 0; -0.002,0,1'))
patch_pad = patch_pad[-pr - 40:, :pf - 20, :][:, ::-1, :]
[pr, pf, _] = patch_pad.shape
# Alpha blending and transform between front and front patch
remain = front[-pr:, :pf, :] * np.float32(skm.dilation(patch_pad[:, :, -1] == 0))[:, :, np.newaxis]
nonzeromask = np.logical_or(skm.dilation(patch_pad[:, :, -1] == 1), remain[:, :, -1] == 1)
patch_pad = remain + patch_pad
normalizer = patch_pad[:, :, -1][:, :, np.newaxis]
normalizer[normalizer == 0] = 1
patch_pad = patch_pad / normalizer
patch_pad[:, :, -1] = np.float32(nonzeromask)
front[-pr:, :pf, :] = patch_pad
front = perspective_transform(front, np.matrix('1, 0, 0; 0, 1, 0; -0.001,0,1'))
front = front[:, :-120, :]
front = affine_transform(front, 30, 0, inv=True)
# Back transform
back = resize(back, (1.3, 1.3))[:, ::-1, :]
back = perspective_transform(back, np.matrix('1, 0, 0; 0, 1, 0; 0.0002,0,1'))[:, ::-1, :]
back = affine_transform(back, 70, 150, inv=False)
back = back[:, 138:, :]
[fr, fc, _] = front.shape
[br, bc, _] = back.shape
pantie = np.zeros((np.max([fr, br]), fc + bc - 2, d))
shiftr = 35
row_point = np.clip(shiftr + fr, 0, np.max([fr, br]))
pantie[shiftr:row_point, :fc, :] = front[:-(shiftr + fr - row_point), :, :]
pantie[:bc, fc - 1:, :] = back[:, 1:, :]
# io.imshow(pantie)
# Finalize
pantie = np.uint8(pantie * 255)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
image = self.convert(image)
if transparent:
patched = Image.new("RGBA", self.body_size)
else:
patched = self.body.copy()
if self.add_sign:
self.paste(patched, self.sign, self.sign_position)
patched = self.paste(patched, image, self.pantie_position)
return patched
|
"""93. Restore IP Addresses"""
class Solution(object):
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
        ## Practice:
        """ DFS backtracking: at each step try prefixes of the remaining string
        (effectively 1-3 digits) as the next IP segment. """
self.res = []
self.dfs(0, "", s)
return self.res
def dfs(self, cnt, path, s):
if cnt > 4: ## !! don't forget
return
if cnt == 4 and not s:
self.res.append(path[:-1])
return
for i in range(1, len(s)+1):
            if s[:i] == "0" or (s[0] != '0' and 0 < int(s[:i]) < 256):
self.dfs(cnt + 1, path + s[:i] + '.', s[i:])
##
res = []
self.backtrack(s, 0, "", res)
return res
def backtrack(self, s, idx, path, res):
if idx > 4:
return
if idx == 4 and not s:
res.append(path[:-1])
return # don't forget to end this track
for i in range(1, len(s)+1):
# each integer is between 0 and 255
# cannot have leading zeros
if s[:i] == '0' or (s[0] != '0' and 0 < int(s[:i])< 256):
self.backtrack(s[i:], idx+1, path+s[:i]+".", res)
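# A quick local check of the DFS solution above (illustrative; result order may
# differ from Leetcode's reference output).
if __name__ == "__main__":
    print(Solution().restoreIpAddresses("25525511135"))
    # e.g. ['255.255.11.135', '255.255.111.35']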
|
'''
- Leetcode problem: 25
- Difficulty: Hard
- Brief problem description:
Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.
k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.
Example:
Given this linked list: 1->2->3->4->5
For k = 2, you should return: 2->1->4->3->5
For k = 3, you should return: 3->2->1->4->5
Note:
Only constant extra memory is allowed.
You may not alter the values in the list's nodes, only nodes itself may be changed.
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
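# ListNode defined locally (mirroring the commented-out Leetcode stub above) so
# that the annotations below resolve when this file is run outside the harness.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next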
class Solution:
def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
if head == None or head.next == None:
return head
pre_tail = None
result_head = None
while self.checkLengthValid(head, k):
new_head, new_tail = self.reverseByN(head, k)
if result_head == None:
result_head = new_head
if pre_tail != None:
pre_tail.next = new_head
pre_tail = new_tail
head = new_tail.next
return result_head
# return tuple (head_node, tail_node)
def reverseByN(self, head, n):
pre = head
cur = head.next
i = 0
while i < n - 1:
pre.next = cur.next
cur.next = head
cur, head = pre.next, cur
i += 1
return head, pre
def checkLengthValid(self, head, n):
for i in range(n):
if head == None:
return False
head = head.next
return True
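# A minimal local check of the solution above (illustrative values only).
if __name__ == "__main__":
    head = None
    for v in reversed([1, 2, 3, 4, 5]):
        head = ListNode(v, head)
    node = Solution().reverseKGroup(head, 2)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [2, 1, 4, 3, 5]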
|
import json
import os
from transfacils import models
from transfacils.helpers.get_trans_api_data import line_data_generator, \
get_line_data
def initialize_lines_db(filename: str = "initial_data.json"):
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(base_dir, "fixtures/", filename), "w") as file:
data_list = initialize_line_data()
json.dump(data_list, file, sort_keys=True, ensure_ascii=False,
indent=4)
def initialize_line_data():
data_dicts = []
line = get_line_data()
line_dict = {"model": "transfacils.route", "pk": line["line_cd"],
"fields": {"name": line["line_name"],
"kind": models.Route.TRAIN}}
data_dicts.append(line_dict)
for station in line_data_generator():
data_dict = {"model": "transfacils.station",
"pk": station["station_cd"],
"fields": {"name": station["station_name"],
"route": station["line_cd"],
"group_cd": station["station_g_cd"],
"next_station": station["next"],
"previous_station": station["previous"]}}
data_dicts.append(data_dict)
return data_dicts
|
"""
Performance-related tests to make sure we don't use more memory than we should.
For now this is just for SpectralCube, not DaskSpectralCube.
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import pytest
import tempfile
import sys
try:
import tracemalloc
tracemallocOK = True
except ImportError:
tracemallocOK = False
# The comparison of Quantities in test_memory_usage
# fail with older versions of numpy
from distutils.version import LooseVersion
NPY_VERSION_CHECK = LooseVersion(np.version.version) >= "1.13"
from .test_moments import moment_cube
from .helpers import assert_allclose
from ..spectral_cube import SpectralCube
from . import utilities
from astropy import convolution, units as u
WINDOWS = sys.platform == "win32"
def find_base_nbytes(obj):
# from http://stackoverflow.com/questions/34637875/size-of-numpy-strided-array-broadcast-array-in-memory
if obj.base is not None:
return find_base_nbytes(obj.base)
return obj.nbytes
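# A small local check of the helper above (NumPy only; sizes are illustrative):
# a broadcast view reports the full broadcast nbytes, but its base buffer is the
# original, much smaller allocation.
def test_find_base_nbytes_broadcast():
    a = np.zeros(100)                    # 800 bytes of real storage
    b = np.broadcast_to(a, (1000, 100))  # b.nbytes reports 800000
    assert find_base_nbytes(b) == a.nbytes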
def test_pix_size():
mc_hdu = moment_cube()
sc = SpectralCube.read(mc_hdu)
s,y,x = sc._pix_size()
# float64 by default
bytes_per_pix = 8
assert find_base_nbytes(s) == sc.shape[0]*bytes_per_pix
assert find_base_nbytes(y) == sc.shape[1]*sc.shape[2]*bytes_per_pix
assert find_base_nbytes(x) == sc.shape[1]*sc.shape[2]*bytes_per_pix
def test_compare_pix_size_approaches():
mc_hdu = moment_cube()
sc = SpectralCube.read(mc_hdu)
sa,ya,xa = sc._pix_size()
s,y,x = (sc._pix_size_slice(ii) for ii in range(3))
assert_allclose(sa, s)
assert_allclose(ya, y)
assert_allclose(xa, x)
def test_pix_cen():
mc_hdu = moment_cube()
sc = SpectralCube.read(mc_hdu)
s,y,x = sc._pix_cen()
# float64 by default
bytes_per_pix = 8
assert find_base_nbytes(s) == sc.shape[0]*bytes_per_pix
assert find_base_nbytes(y) == sc.shape[1]*sc.shape[2]*bytes_per_pix
assert find_base_nbytes(x) == sc.shape[1]*sc.shape[2]*bytes_per_pix
@pytest.mark.skipif('True')
def test_parallel_performance_smoothing():
import timeit
setup = 'cube,_ = utilities.generate_gaussian_cube(shape=(300,64,64))'
stmt = 'result = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(20.0), num_cores={0}, use_memmap=False)'
rslt = {}
for ncores in (1,2,3,4):
time = timeit.timeit(stmt=stmt.format(ncores), setup=setup, number=5, globals=globals())
rslt[ncores] = time
print()
print("memmap=False")
print(rslt)
setup = 'cube,_ = utilities.generate_gaussian_cube(shape=(300,64,64))'
stmt = 'result = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(20.0), num_cores={0}, use_memmap=True)'
rslt = {}
for ncores in (1,2,3,4):
time = timeit.timeit(stmt=stmt.format(ncores), setup=setup, number=5, globals=globals())
rslt[ncores] = time
stmt = 'result = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(20.0), num_cores={0}, use_memmap=True, parallel=False)'
rslt[0] = timeit.timeit(stmt=stmt.format(1), setup=setup, number=5, globals=globals())
print()
print("memmap=True")
print(rslt)
if False:
for shape in [(300,64,64), (600,64,64), (900,64,64),
(300,128,128), (300,256,256), (900,256,256)]:
setup = 'cube,_ = utilities.generate_gaussian_cube(shape={0})'.format(shape)
stmt = 'result = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(20.0), num_cores={0}, use_memmap=True)'
rslt = {}
for ncores in (1,2,3,4):
time = timeit.timeit(stmt=stmt.format(ncores), setup=setup, number=5, globals=globals())
rslt[ncores] = time
stmt = 'result = cube.spectral_smooth(kernel=convolution.Gaussian1DKernel(20.0), num_cores={0}, use_memmap=True, parallel=False)'
rslt[0] = timeit.timeit(stmt=stmt.format(1), setup=setup, number=5, globals=globals())
print()
print("memmap=True shape={0}".format(shape))
print(rslt)
# python 2.7 doesn't have tracemalloc
@pytest.mark.skipif('not tracemallocOK or (sys.version_info.major==3 and sys.version_info.minor<6) or not NPY_VERSION_CHECK or WINDOWS')
def test_memory_usage():
"""
Make sure that using memmaps happens where expected, for the most part, and
that memory doesn't get overused.
"""
ntf = tempfile.NamedTemporaryFile()
tracemalloc.start()
snap1 = tracemalloc.take_snapshot()
# create a 64 MB cube
cube,_ = utilities.generate_gaussian_cube(shape=[200,200,200])
sz = _.dtype.itemsize
snap1b = tracemalloc.take_snapshot()
diff = snap1b.compare_to(snap1, 'lineno')
diffvals = np.array([dd.size_diff for dd in diff])
# at this point, the generated cube should still exist in memory
assert diffvals.max()*u.B >= 200**3*sz*u.B
del _
snap2 = tracemalloc.take_snapshot()
diff = snap2.compare_to(snap1b, 'lineno')
assert diff[0].size_diff*u.B < -0.3*u.MB
cube.write(ntf.name, format='fits')
# writing the cube should not occupy any more memory
snap3 = tracemalloc.take_snapshot()
diff = snap3.compare_to(snap2, 'lineno')
assert sum([dd.size_diff for dd in diff])*u.B < 100*u.kB
del cube
# deleting the cube should remove the 64 MB from memory
snap4 = tracemalloc.take_snapshot()
diff = snap4.compare_to(snap3, 'lineno')
assert diff[0].size_diff*u.B < -200**3*sz*u.B
cube = SpectralCube.read(ntf.name, format='fits')
# reading the cube from filename on disk should result in no increase in
# memory use
snap5 = tracemalloc.take_snapshot()
diff = snap5.compare_to(snap4, 'lineno')
assert diff[0].size_diff*u.B < 1*u.MB
mask = cube.mask.include()
snap6 = tracemalloc.take_snapshot()
diff = snap6.compare_to(snap5, 'lineno')
assert diff[0].size_diff*u.B >= mask.size*u.B
filled_data = cube._get_filled_data(use_memmap=True)
snap7 = tracemalloc.take_snapshot()
diff = snap7.compare_to(snap6, 'lineno')
assert diff[0].size_diff*u.B < 100*u.kB
filled_data = cube._get_filled_data(use_memmap=False)
snap8 = tracemalloc.take_snapshot()
diff = snap8.compare_to(snap7, 'lineno')
assert diff[0].size_diff*u.B > 10*u.MB
del filled_data
# cube is <1e8 bytes, so this is use_memmap=False
filled_data = cube.filled_data[:]
snap9 = tracemalloc.take_snapshot()
diff = snap9.compare_to(snap6, 'lineno')
assert diff[0].size_diff*u.B > 10*u.MB
# python 2.7 doesn't have tracemalloc
@pytest.mark.skipif('not tracemallocOK or (sys.version_info.major==3 and sys.version_info.minor<6) or not NPY_VERSION_CHECK')
def test_memory_usage_coordinates():
"""
Watch out for high memory usage on huge spatial files
"""
ntf = tempfile.NamedTemporaryFile()
tracemalloc.start()
snap1 = tracemalloc.take_snapshot()
size = 200
# create a "flat" cube
cube,_ = utilities.generate_gaussian_cube(shape=[1,size,size])
sz = _.dtype.itemsize
snap1b = tracemalloc.take_snapshot()
diff = snap1b.compare_to(snap1, 'lineno')
diffvals = np.array([dd.size_diff for dd in diff])
# at this point, the generated cube should still exist in memory
assert diffvals.max()*u.B >= size**2*sz*u.B
del _
snap2 = tracemalloc.take_snapshot()
diff = snap2.compare_to(snap1b, 'lineno')
assert diff[0].size_diff*u.B < -0.3*u.MB
print(cube)
# printing the cube should not occupy any more memory
# (it will allocate a few bytes for the cache, but should *not*
    # load the full size x size coordinate arrays for RA, Dec)
snap3 = tracemalloc.take_snapshot()
diff = snap3.compare_to(snap2, 'lineno')
assert sum([dd.size_diff for dd in diff])*u.B < 100*u.kB
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Covid Plots
#
# I wanted to test out using jupyter notebook to show off some plotly graphs. So here goes.
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from covid_alberta import *
# %%
abC19scraper = albertaC19_webscraper.albertaC19_webscraper()
abTotal, abRegion, abTesting = abC19scraper.scrape_all(return_dataframes=True)
# %%
abTesting['total_tests'] = 0
abTesting['total_tests'] = abTesting.sum(axis=1)
region_cum = alberta_stats.calculate_cumulatives(abRegion, combine_df=True)
region_dt = alberta_stats.calculate_doublingtimes_region(region_cum, combine_df=False)
total_dt = alberta_stats.calculate_doublingtimes_region(abTotal, col_suffix='cum_cases', combine_df=False)
all_data = abTotal.join([total_dt, region_cum, region_dt, abTesting['total_tests']])
all_data.rename(columns={'new_cases': 'Ab_cases',
'cum_cases':'Ab_cumCases',
'dtime':'Ab_dtime',
'dtime_rw':'Ab_dtime_rw'}, inplace=True)
# %%
# Set up the defaults and the data
ln_width = 2
days_to_trim = 1
mode = 'lines'
trace_formats = {'Ab_cases': {'mode': mode,
'line': {'color': 'green', 'width':ln_width},
'name': 'Alberta Daily'},
'Calgary_cumCases': {'mode': mode,
'line': {'color': 'orange', 'width':ln_width},
'name': 'Calgary Cumulative'},
'Edmont_cumCases': {'mode': mode,
'line': {'color': 'blue', 'width':ln_width},
'name': 'Edmonton Cumulative'}
}
plot_data = all_data[:-days_to_trim]
updated = plot_data.index[-1].strftime("%B %d")
# Create the plot
data = list()
date_fmt = "%m/%d"
for key in trace_formats.keys():
data.append(go.Scatter(x=plot_data.index.strftime(date_fmt), y=plot_data[key],
mode=trace_formats[key]['mode'], line=trace_formats[key]['line'],
name=trace_formats[key]['name'],
)
)
data.append(go.Bar(x=plot_data.index.strftime(date_fmt), y=plot_data['total_tests'],
name='C19 Tests/day', yaxis='y2', marker={'color':'darkgrey'}))
layout = go.Layout(title=f'{updated} - Alberta Covid-19: Case Counts and Number of Tests',
xaxis=dict(domain=[0.01, 0.95], title='Date', titlefont={'size': 12},
rangemode='nonnegative', tick0=0, dtick=2, tickangle=45,
tickfont={'color':'black', 'size':10}),
yaxis=dict(title='Case Count', titlefont=dict(color='black'),
tickfont={'color':'black', 'size':11}, overlaying='y2', side='right',
rangemode='nonnegative', tick0=0, dtick=100),
yaxis2=dict(domain=[0.1, 0.95], title='New Tests per Day', titlefont={'size': 12, 'color':'black'},
tickfont={'color':'black', 'size':11}, showgrid=False,
anchor='x', side='left', rangemode='nonnegative', ticks='inside'),
legend_orientation="h", hovermode='x')
fig = go.Figure(data=data, layout=layout)
fig.show()
fig.write_html('images/Alberta_dailyCases.html')
# %%
# Set up the defaults and the data
ln_width = 2
days_to_trim = 1
mode = 'lines'
trace_formats = {'Alberta': {'x_data': 'Ab_cumCases',
'y_data': 'Ab_dtime_rw',
'mode': mode,
'line': {'color': 'green', 'width':ln_width}},
'Calgary': {'x_data': 'Calgary_cumCases',
'y_data': 'Calgary_dtime_rw',
'mode': mode,
'line': {'color': 'orange', 'width':ln_width}},
'Edmonton': {'x_data': 'Edmont_cumCases',
'y_data': 'Edmont_dtime_rw',
'mode': mode,
'line': {'color': 'blue', 'width':ln_width}}
}
plot_data = all_data[:-days_to_trim]
updated = plot_data.index[-1].strftime("%B %d")
# Create the plot
fig = go.Figure()
annotations = list()
for key in trace_formats.keys():
fig.add_trace(go.Scatter(x=plot_data[trace_formats[key]['x_data']], y=plot_data[trace_formats[key]['y_data']],
mode=trace_formats[key]['mode'], line=trace_formats[key]['line'],
name=key, hovertemplate='dt: %{y: 0.2f}'
),
)
last_x = plot_data[trace_formats[key]['x_data']][-1]
last_y = plot_data[trace_formats[key]['y_data']][-1]
last_day = plot_data.index[-1].strftime("%B %d")
annotations.append(dict(x=last_x, y=last_y, xref='x', yref='y', text=last_day,
showarrow=True, ax=0, ay=-10))
fig.update_layout(dict(title=f'{updated} - Doubling Time: 6 day rolling window', titlefont={'size':20},
xaxis=dict(title='Cumulative Case Count', titlefont={'size': 10},
rangemode='nonnegative', tick0=0, dtick=200,
tickfont={'color':'black', 'size':10}),
yaxis=dict(title='Doubling Time (Days)', titlefont={'size': 10},
tickfont={'color':'black', 'size':10}, side='left',
rangemode='nonnegative', tick0=0, dtick=1),
legend_orientation="v", hovermode='x', annotations=annotations ))
fig.show()
fig.write_html('images/Alberta_doublingTime_RW.html')
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGenericArtifactsContentResult',
'AwaitableGetGenericArtifactsContentResult',
'get_generic_artifacts_content',
]
@pulumi.output_type
class GetGenericArtifactsContentResult:
"""
A collection of values returned by getGenericArtifactsContent.
"""
def __init__(__self__, artifact_id=None, id=None):
if artifact_id and not isinstance(artifact_id, str):
raise TypeError("Expected argument 'artifact_id' to be a str")
pulumi.set(__self__, "artifact_id", artifact_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
@property
@pulumi.getter(name="artifactId")
def artifact_id(self) -> str:
return pulumi.get(self, "artifact_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
class AwaitableGetGenericArtifactsContentResult(GetGenericArtifactsContentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGenericArtifactsContentResult(
artifact_id=self.artifact_id,
id=self.id)
def get_generic_artifacts_content(artifact_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGenericArtifactsContentResult:
"""
This data source provides details about a specific Generic Artifacts Content resource in Oracle Cloud Infrastructure Generic Artifacts Content service.
Gets the specified artifact's content.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_generic_artifacts_content = oci.genericartifactscontent.get_generic_artifacts_content(artifact_id=oci_generic_artifacts_content_artifact["test_artifact"]["id"])
```
:param str artifact_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the artifact. Example: `ocid1.genericartifact.oc1..exampleuniqueID`
"""
__args__ = dict()
__args__['artifactId'] = artifact_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:genericartifactscontent/getGenericArtifactsContent:getGenericArtifactsContent', __args__, opts=opts, typ=GetGenericArtifactsContentResult).value
return AwaitableGetGenericArtifactsContentResult(
artifact_id=__ret__.artifact_id,
id=__ret__.id)
|
import logging
from logging import config as config_log
from colorlog import ColoredFormatter
from DeviceManager.conf import CONFIG
from DeviceManager.utils import HTTPRequestError
class Log:
def __init__(self, LOG_LEVEL = CONFIG.log_level,
LOG_FORMAT = "[%(log_color)s%(asctime)-8s%(reset)s] |%(log_color)s%(module)-8s%(reset)s| %(log_color)s%(levelname)s%(reset)s: %(log_color)s%(message)s%(reset)s", DISABLED = False):
#Disable all others modules logs
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
}
dateFormat = '%d/%m/%y - %H:%M:%S'
config_log.dictConfig(LOGGING)
self.formatter = ColoredFormatter(LOG_FORMAT, dateFormat)
self.log = logging.getLogger('device-manager.' + __name__)
self.log.setLevel(LOG_LEVEL)
self.log.disabled = DISABLED
self.level = LOG_LEVEL
if not getattr(self.log, 'handler_set', None):
self.stream = logging.StreamHandler()
self.stream.setLevel(LOG_LEVEL)
self.stream.setFormatter(self.formatter)
self.log.setLevel(LOG_LEVEL)
self.log.addHandler(self.stream)
self.log.handler_set = True
def update_log_level(self, LEVEL):
levelToName = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
try:
self.log = logging.getLogger('device-manager.' + __name__)
for hdlr in self.log.handlers[:]:
self.log.removeHandler(hdlr)
self.stream = logging.StreamHandler()
self.stream.setLevel(LEVEL)
self.stream.setFormatter(self.formatter)
self.log.setLevel(LEVEL)
self.log.addHandler(self.stream)
self.log.handler_set = True
self.level = LEVEL
except ValueError:
raise HTTPRequestError(400, "Unknown level: {} valid are {}".format(LEVEL, levelToName))
def get_log_level(self):
return self.level
def color_log(self):
return self.log
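# A minimal usage sketch (assumes the DeviceManager config is importable; the
# level strings are the standard logging level names):
#
#   LOG = Log(LOG_LEVEL="DEBUG")
#   logger = LOG.color_log()
#   logger.info("device-manager started")
#   LOG.update_log_level("WARNING")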
|
import os
import re
import time
from contextlib import closing
import psycopg2
from confluent_kafka import Consumer
DSN_TEMPLATE = os.environ.get("CDC_POSTGRES_DSN_TEMPLATE", "postgres:///{database}")
DATABASE_NAME = "test_db"
def _wait_for_slot() -> None:
with closing(
psycopg2.connect(DSN_TEMPLATE.format(database="postgres"))
) as connection:
for i in range(1, 10):
print("Waiting for slot")
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM pg_replication_slots;")
for row in cursor:
print("Found slot")
return
time.sleep(1)
def test_producer() -> None:
_wait_for_slot()
with closing(
        psycopg2.connect(DSN_TEMPLATE.format(database=DATABASE_NAME))
) as connection:
with connection.cursor() as cursor:
cursor.execute(
"CREATE TABLE test_producer (a integer, b text, c timestamptz, primary key(a))"
)
cursor.execute(
"INSERT INTO test_producer (a, b, c) VALUES (%s, %s, %s)",
[1, "test", "2019-06-16 06:21:39+00"],
)
connection.commit()
conf = {
"bootstrap.servers": "kafka:9092",
"auto.offset.reset": "smallest",
"group.id": "test",
}
consumer = Consumer(conf)
consumer.subscribe(["cdc"])
def assert_message(regex: str) -> None:
message = consumer.poll(timeout=60)
assert message is not None
value = message.value().decode("utf-8")
result = re.search(regex, value)
assert (
result is not None
), f"Unexpected message: {value}. Expected regex {regex}"
assert_message(r"BEGIN \d+")
assert_message(
r"INSERT\: a\[integer\]\:1 b\[text\]\:'test' c\[timestamp with time zone\]\:'2019\-06\-16 06\:21\:39\+00'"
)
assert_message(r"COMMIT .+")
|
from yapsy.IPlugin import IPlugin
from yapsy.PluginManager import PluginManagerSingleton
from flask.ext.admin import BaseView, expose
from flask import render_template
import logging, inspect
class PluginObj(IPlugin, BaseView):
def __init__(self, **kwargs):
IPlugin.__init__(self)
BaseView.__init__(self, **kwargs)
self.manager = PluginManagerSingleton.get()
self.logger = logging.getLogger(self.__class__.__name__)
self.enabled = False
self.viewable = False
self.widgetized = False
self.use_filestore = False
self.use_sqllog = False
#set defaults for template paths
# ais/plugins/name/widget.html
# ais/plugins/name/index.html
path_items = inspect.getfile(self.__class__).split('/')[-3:-1]
self.path = str.join('/',path_items)
self.view_template = self.path+'/index.html'
self.widget_template = self.path+'/widget.html'
self.url="/"+self.__class__.__name__.lower()+"/"
self.filestore = None
try:
getattr(self.manager, 'app')
except Exception:
pass
else:
self.app = self.manager.app
self.logger.debug("%s Init finished", self.__class__.__name__)
def is_accessible(self):
'''
Makes viewable plugins appear and disappear as enabled/disabled
'''
return self.enabled
def activate(self):
super(PluginObj, self).activate()
self.logger.debug("Plugin: %s activated" % self.__class__.__name__)
def deactivate(self):
super(PluginObj, self).deactivate()
self.logger.debug("Plugin: %s deactivated" % self.__class__.__name__)
def widget_view(self):
template = self.widget_template
return render_template(template, data=self)
def get_configs(self):
'''
get_configs returns a list of Config objects that
the plugin wants to seed the db with.
'''
return list()
@expose('/')
def plugin_view(self):
template = self.view_template
return self.render(template, data=self) |
# Uses python3
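# Greedy scheme: hand out prizes 1, 2, 3, ... while the remaining budget W is more
# than twice the current candidate; otherwise give the whole remainder as the last
# prize, which is guaranteed to be larger than every prize issued so far, keeping
# all prizes distinct.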
n = int(input())
if n == 1:
print(1)
print(1)
quit()
W = n
prizes = []
for i in range(1, n):
if W>2*i:
prizes.append(i)
W -= i
else:
prizes.append(W)
break
print(len(prizes))
print(' '.join([str(i) for i in prizes])) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
class Menuconfig():
@classmethod
def _promt(cls, questions, **kwargs):
prompt = importlib.import_module('hb_internal.cts.prompt')
common = importlib.import_module('hb_internal.cts.common')
return prompt.prompt(questions,
style=common.get_style('answer'),
**kwargs)
def list_promt(self, name, message, choices, **kwargs):
questions = self.get_questions('list', name, message, choices)
return self._promt(questions, **kwargs)
def checkbox_promt(self, name, message, choices, **kwargs):
questions = self.get_questions('checkbox', name, message, choices)
return self._promt(questions, **kwargs)
@classmethod
def get_questions(cls, promt_type, name, message, choices):
questions = [{
'type': promt_type,
'qmark': 'OHOS',
'name': name,
'message': message,
'choices': choices
}]
return questions
if __name__ == "__main__":
pass
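    # Illustrative only (choice names below are hypothetical): build the question
    # structure without invoking the interactive prompt.
    print(Menuconfig.get_questions('list', 'product', 'Which product?',
                                   ['ipcamera', 'wifiiot']))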
|
import torch
import numpy as np
import open3d as o3d
class FPFH_RANSAC(torch.nn.Module):
def __init__(self, voxel_size, max_dist, max_iter, max_val):
super().__init__()
self.voxel_size = voxel_size
self.max_iter = max_iter
self.max_val = max_val
self.max_dist = max_dist
def preprocess_point_cloud(self, p):
'''p: numpy array [npts, 3]. returns downsampled pointcloud and its fpfh features'''
voxel_size = self.voxel_size
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(p[:,:3])
pcd_down = pcd.voxel_down_sample(voxel_size)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0,
max_nn=30))
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0,
max_nn=100))
return (pcd_down, pcd_fpfh)
def forward(self, pb):
'''pb [batch, 2, npts, 4]. returns the registration result from FPFH RANSAC model'''
assert pb.size(0) == 1, 'batch size must be 1'
#compute fpfh features
p0, fp0 = self.preprocess_point_cloud(pb[0,0].cpu().numpy())
p1, fp1 = self.preprocess_point_cloud(pb[0,1].cpu().numpy())
#compute registration
res = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(p0, p1, fp0, fp1, self.max_dist, ransac_n=3, criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(self.max_iter, self.max_val))
Rt = res.transformation.astype(np.float32)
R, t = Rt[:3,:3], Rt[:3, -1]
R = torch.from_numpy(R).unsqueeze(0).cuda()
t = torch.from_numpy(t).view(1,1,3).cuda()
return R,t
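# Illustrative usage (hypothetical shapes; requires open3d and a CUDA device,
# since ``forward`` moves its outputs to the GPU):
#
#   model = FPFH_RANSAC(voxel_size=0.05, max_dist=0.15,
#                       max_iter=100000, max_val=0.999)
#   pair = torch.rand(1, 2, 2048, 4)  # [batch=1, two clouds, npts, xyz + extra]
#   R, t = model(pair)                # R: [1, 3, 3], t: [1, 1, 3]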
|
from __future__ import print_function
import os
import sys
from documentcloud import DocumentCloud
print("Enter your Document Cloud Credentials")
sys.stdout.write("Username: ")
username = raw_input().strip()
sys.stdout.write("Password: ")
password = raw_input().strip()
base_uri = "https://sourceafrica.net/api/"
client = DocumentCloud(username, password, base_uri)
import pdb; pdb.set_trace()
print(client.projects.all())
|
import os
import sys
import resource
import subprocess
import argparse
import logging
import itertools
import collections
import json
import numpy as np
import pdb
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def make_expts(b_fq_toc, b_compbined_alleles, b_r1, b_r2, bulk_toc, G_ij, seed):
    BULK_FQ = b_r1 + b_r2  # combine the r1 and r2 lists
with open(bulk_toc, 'w') as TOC:
for bulk_sample in range(len(BULK_FQ)//2):
dist = G_ij[bulk_sample]
experiment = {'total_reads': 100000, 'rng_seed': seed,
'mix_path_r1': BULK_FQ[bulk_sample],
'mix_path_r2': BULK_FQ[len(BULK_FQ)//2 + bulk_sample]}
TOC.write('Bulk sample {} consists of:\n'.format(bulk_sample + 1))
b_fq_fnames = open(b_fq_toc, 'r').readlines()
logger.info("mixing fastqs")
for num in range(len(b_fq_fnames) // 2):
proto_r1 = b_fq_fnames[2*num].strip()
proto_r2 = b_fq_fnames[2*num+1].strip()
experiment['sc{}'.format(num+1)] = { 'fraction': dist[num],
'path_r1': proto_r1,
'path_r2': proto_r2 }
TOC.write(' {} of mutant reference {}\n'.format(dist[num - 1], num))
experiment = json.dumps(experiment)
mix_fastq(experiment)
def mix_fastq(jsonconfig):
    # Load experiment configuration from the json string
mix_config = json.loads(jsonconfig)
# Set the rng
np.random.seed(mix_config['rng_seed'])
# Remove output files if they exist
remove_file(mix_config['mix_path_r1'])
remove_file(mix_config['mix_path_r2'])
# read-sample-write each single cell
for i in range(1, len(mix_config) - 3):
logger.info("Processing single cell {}...".format(i))
cell_readcount = int(np.floor(mix_config['total_reads'] * mix_config['sc{}'.format(i)]['fraction']))
etl_mix(source_path_r1 = mix_config['sc{}'.format(i)]['path_r1'],
source_path_r2 = mix_config['sc{}'.format(i)]['path_r2'],
readcount = cell_readcount,
mix_path_r1 = mix_config['mix_path_r1'],
mix_path_r2 = mix_config['mix_path_r2'])
def etl_mix(source_path_r1, source_path_r2, readcount, mix_path_r1, mix_path_r2):
"""Load reads from source fq.gz file, sample readcount reads, and append to mix fq."""
logger.info("Loading reads...")
read1 = load_reads(source_path_r1)
read2 = load_reads(source_path_r2)
# Check that readcount is less than total number of reads available
if(readcount > len(read1)):
sys.exit(1)
if(readcount > len(read2)):
sys.exit(1)
# Sample reads
logger.info("Sampling reads...")
sample_idx = np.random.choice(len(read1), size=readcount, replace=False)
# Write reads
write_reads(read1, sample_idx, mix_path_r1)
write_reads(read2, sample_idx, mix_path_r2)
def load_reads(source_path):
"""Returns reads from source path fq.gz file."""
logger.info("Uncompressing and loading {}...".format(source_path))
readlines = subprocess.check_output(["gunzip", "-c", source_path]).splitlines()
# Populate a list of lists for the read data
logger.info("Compiling data structure...")
read_list = []
for i in range(0, len(readlines), 4):
read_id = readlines[i].decode('utf-8')
read_seq = readlines[i+1].decode('utf-8')
read_id2 = readlines[i+2].decode('utf-8')
read_qc = readlines[i+3].decode('utf-8')
read_list.append([read_id, read_seq, read_id2, read_qc])
logger.debug("Using {} Mb of memory.".format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
return read_list
def write_reads(reads, sample_idx, mix_path):
# Write R1 reads to file
logger.info("Writing mixture fq...")
with open(mix_path, 'a') as fh:
for idx in sample_idx:
fh.write("\n".join(reads[idx]))
fh.write("\n")
def remove_file(filename):
"""Deletes a file if it exists."""
try:
os.remove(filename)
except OSError:
pass
|
## Script (Python) "guard_cancelled_object"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
workflow = context.portal_workflow
# Note: Also used by profiles/default/types/AnalysisRequest.xml
# Can't do anything to the object if it's cancelled
if workflow.getInfoFor(context, 'cancellation_state', "active") == "cancelled":
return False
return True
|
import torch
# "long" and "short" denote longer and shorter samples
class PixelShuffle1D(torch.nn.Module):
"""
1D pixel shuffler. https://arxiv.org/pdf/1609.05158.pdf
Upscales sample length, downscales channel length
"short" is input, "long" is output
"""
def __init__(self, upscale_factor):
super(PixelShuffle1D, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, x):
batch_size = x.shape[0]
short_channel_len = x.shape[1]
short_width = x.shape[2]
long_channel_len = short_channel_len // self.upscale_factor
long_width = self.upscale_factor * short_width
x = x.contiguous().view([batch_size, self.upscale_factor, long_channel_len, short_width])
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(batch_size, long_channel_len, long_width)
return x
class PixelUnshuffle1D(torch.nn.Module):
"""
Inverse of 1D pixel shuffler
Upscales channel length, downscales sample length
"long" is input, "short" is output
"""
def __init__(self, downscale_factor):
super(PixelUnshuffle1D, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, x):
batch_size = x.shape[0]
long_channel_len = x.shape[1]
long_width = x.shape[2]
short_channel_len = long_channel_len * self.downscale_factor
short_width = long_width // self.downscale_factor
x = x.contiguous().view([batch_size, long_channel_len, short_width, self.downscale_factor])
x = x.permute(0, 3, 1, 2).contiguous()
x = x.view([batch_size, short_channel_len, short_width])
return x |
import random
import emoji
def nick_generator():
nicks = [
'Васёк',
'Геракл',
'Фетида',
'Зевс',
'Врач',
'Вкусняшка',
'Л0ЛЬК0'
]
return random.choice(nicks)
def switch_chars(txt):
symbols = 'qwertyuiopasdfghjklzxcvbnm'
symbols += 'йёцукенгшщзхъфывапролджэячсмитьбю'
symbols += '1234567890'
try:
for symbol in txt:
if symbol.lower() not in symbols:
txt = txt.replace(symbol, "")
except :
pass
return txt
def switch_chars_donat(txt):
symbols = 'qwertyuiopasdfghjklzxcvbnm'
symbols += 'йёцукенгшщзхъфывапролджэячсмитьбю'
symbols += '1234567890'
symbols += '|°•-_='
try:
_space = False
for symbol in txt:
if symbol in ' ' and not _space:
_space = True
elif symbol.lower() not in symbols:
txt = txt.replace(symbol, "")
else:
_space = False
except :
pass
return txt
def check_emoji(txt):
if txt in str(emoji.UNICODE_EMOJI):
return True
else:
return False
def new_guild_password(length):
output = ""
for x in range(length):
output += random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890')
return output
def get_new_exp(lvl):
new_exp = [10, 20, 75, 125, 250, 500, 850]
if int(lvl) < 7:
return (new_exp[lvl])
else:
return (99999999) |
import datetime
import json
import numpy as np
import pandas as pd
import requests
import xarray as xr
from utils import divide_chunks, get_indices_not_done, \
get_site_codes, append_to_csv_column_wise, load_s3_zarr_store,\
convert_df_to_dataset
def get_all_streamflow_data(output_file, sites_file, huc2=None,
num_sites_per_chunk=5, start_date="1970-01-01",
end_date='2019-01-01', time_scale='H',
output_format='zarr', num_site_chunks_write=6,
s3=False):
"""
gets all streamflow data for a date range for a given huc2. Calls are
chunked by station
:param output_file: [str] path to the csv file or zarr store where the data
will be stored
:param sites_file: [str] path to file that contains the nwis site
information
:param huc2: [str] zero-padded huc 2 (e.g., "02")
    :param num_sites_per_chunk: [int] the number of sites that will be pulled
    in each web service call
:param start_date: [str] the start date of when you want the data for
(e.g., "1980-01-01")
:param end_date: [str] the end date of when you want the data for
(e.g., "1990-01-01")
:param time_scale: [str] Pandas like time string for the time scale at which
the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
:param output_format: [str] the format of the output file. 'csv' or 'zarr'
    :param num_site_chunks_write: [int] the number of site chunks to accumulate
    before writing out to the output file
    :param s3: [bool] whether the output store is on S3
:return: None
"""
product = get_product_from_time_scale(time_scale)
site_codes = get_site_codes(sites_file, huc2)
not_done_sites = get_indices_not_done(output_file, site_codes, 'site_code',
output_format, is_column=False,
s3=s3)
site_codes_chunked = divide_chunks(not_done_sites, num_sites_per_chunk)
# loop through site_code_chunks
chunk_dfs = []
i = 0
for site_chunk in site_codes_chunked:
last_chunk = False
if site_chunk[-1] == not_done_sites[-1]:
last_chunk = True
streamflow_df_sites = None
# catch if there is a problem on the server retrieving the data
try:
streamflow_df_sites = get_streamflow_data(site_chunk,
start_date,
end_date,
product,
time_scale)
except json.decoder.JSONDecodeError:
continue
if streamflow_df_sites is not None:
chunk_dfs.append(streamflow_df_sites)
# add the number of stations for which we got data
i += streamflow_df_sites.shape[1]
if not i % (num_site_chunks_write * num_sites_per_chunk) or \
last_chunk:
print('writing out', flush=True)
write_out_chunks(chunk_dfs, output_file, output_format)
chunk_dfs = []
def write_out_chunks(chunks_dfs, out_file, out_format):
all_chunks_df = pd.concat(chunks_dfs, axis=1)
# write the data out to the output file
if out_format == 'zarr':
zarr_store = load_s3_zarr_store(out_file)
append_to_zarr(all_chunks_df, zarr_store)
elif out_format == 'csv':
append_to_csv_column_wise(all_chunks_df, out_file)
else:
raise ValueError("output_format should be 'csv' or 'zarr'")
def get_product_from_time_scale(time_scale):
"""
    get the USGS NWIS product that is appropriate for the time scale
:param time_scale: str - Pandas like time string for the time scale at which
the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
:return:
"""
iv_scales = ['15T', 'T', 'H']
dv_scale = ['D']
if time_scale in iv_scales:
return 'iv'
elif time_scale in dv_scale:
return 'dv'
else:
raise ValueError("time scale must be '15T', 'T', 'H', or 'D'")
def append_to_zarr(streamflow_df, output_zarr):
# chunks
time_chunk = len(streamflow_df.index)
site_code_chunk = len(streamflow_df.columns)
ds = convert_df_to_dataset(streamflow_df, 'site_code', 'datetime',
'streamflow', {'datetime': time_chunk,
'site_code': site_code_chunk})
ds.to_zarr(output_zarr, append_dim='site_code', mode='a')
def get_streamflow_data(sites, start_date, end_date, product, time_scale):
response = call_nwis_service(sites, start_date, end_date, product)
data = json.loads(response.text)
streamflow_df = nwis_json_to_df(data, start_date, end_date,
time_scale)
return streamflow_df
def call_nwis_service(sites, start_date, end_date, product):
"""
gets the data for a list of sites from a start date to an end date
"""
    base_url = "http://waterservices.usgs.gov/nwis/{}/?format=json&sites={}&" \
               "startDT={}&endDT={}&parameterCd=00060&siteStatus=all"
url = base_url.format(product, ",".join(sites), start_date, end_date)
request_start_time = datetime.datetime.now()
print(f"starting request for sites {sites} at {request_start_time}, "
f"for period {start_date} to {end_date}", flush=True)
r = None
while not r:
try:
r = requests.get(url)
except:
print('there was some problem. trying again', flush=True)
request_end_time = datetime.datetime.now()
request_time = request_end_time - request_start_time
print(f"took {request_time} to get data for huc {sites}", flush=True)
return r
def format_dates(datetime_txt):
# convert datetime
datetime_ser = pd.to_datetime(datetime_txt, utc=True)
# remove the time zone info since we are now in utc
datetime_ser = datetime_ser.dt.tz_localize(None)
return datetime_ser
def resample_reindex(df, start_date, end_date, time_scale):
# resample to get mean at correct time scale
df_resamp = df.resample(time_scale).mean()
# get new index
date_index = pd.date_range(start=start_date, end=end_date,
freq=time_scale)
# make so the index goes from start to end regardless of actual data
# presence
df_reindexed = df_resamp.reindex(date_index)
return df_reindexed
def delete_non_approved_data(df):
"""
disregard the data that do not have the "approved" tag in the qualifier
column
:param df: dataframe with qualifiers
:return: dataframe with just the values that are approved
"""
# first I have to get the actual qualifiers. originally, these are lists
    # in a column in the df (e.g., [A, [91]])
# todo: what does the number mean (i.e., [91])
qualifiers_list = df['qualifiers'].to_list()
qualifiers = [q[0] for q in qualifiers_list]
# check qualifier's list
if qualifiers[0] not in ['A', 'P']:
print("we have a weird qualifier. it is ", qualifiers[0])
qualifier_ser = pd.Series(qualifiers, index=df.index)
approved_indices = (qualifier_ser == 'A')
approved_df = df[approved_indices]
return approved_df
def format_df(ts_df, site_code, start_date, end_date, time_scale,
only_approved=True):
"""
format unformatted dataframe. this includes setting a datetime index,
resampling, reindexing to the start and end date,
renaming the column to the site code, removing the qualifier column and
optionally screening out any data points that are not approved
:param ts_df: (dataframe) unformatted time series dataframe from nwis json
data
:param site_code: (str) the site_code of the site (taken from json data)
:param start_date: (str) start date of call
:param end_date: (str) end date of call
:param time_scale: (str) time scale in which you want to resample and at
which your new index will be. should be a code (i.e., 'H' for hourly)
:param only_approved: (bool) whether or not to screen out non-approved data
points
:return: formatted dataframe
"""
# convert datetime
ts_df['dateTime'] = format_dates(ts_df['dateTime'])
ts_df.set_index('dateTime', inplace=True)
if only_approved:
# get rid of any points that were not approved
ts_df = delete_non_approved_data(ts_df)
# delete qualifiers column
del ts_df['qualifiers']
# rename the column from 'value' to the site_code
ts_df = ts_df.rename(columns={'value': site_code})
# make the values numeric
ts_df[site_code] = pd.to_numeric(ts_df[site_code])
ts_df = resample_reindex(ts_df, start_date, end_date, time_scale)
return ts_df
def nwis_json_to_df(json_data, start_date, end_date, time_scale='H'):
"""
combine time series in json produced by nwis web from multiple sites into
one pandas df. the df is also resampled to a time scale and reindexed so
the dataframes are from the start date to the end date regardless of
whether there is data available or not
"""
df_collection = []
time_series = json_data['value']['timeSeries']
for ts in time_series:
site_code = ts['sourceInfo']['siteCode'][0]['value']
print('processing the data for site ', site_code, flush=True)
# this is where the actual data is
ts_data = ts['values'][0]['value']
if ts_data:
ts_df = pd.DataFrame(ts_data)
ts_df_formatted = format_df(ts_df, site_code, start_date, end_date,
time_scale)
df_collection.append(ts_df_formatted)
if df_collection:
df_combined = pd.concat(df_collection, axis=1)
df_combined = df_combined.replace(-999999, np.nan)
return df_combined
else:
return None
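# Hypothetical usage sketch (not part of the original module): assumes an NWIS
# instantaneous-values JSON response has already been fetched and saved to
# 'nwis_response.json'; the file name, dates and hourly time scale below are
# placeholders for illustration only.
if __name__ == '__main__':
    import json
    with open('nwis_response.json') as f:
        example_json = json.load(f)
    example_df = nwis_json_to_df(example_json, start_date='2020-01-01',
                                 end_date='2020-12-31', time_scale='H')
    if example_df is not None:
        print(example_df.head())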
|
import numpy as np
from mygrad import log, log1p, log2, log10
from tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory
@fwdprop_test_factory(
mygrad_func=log, true_func=np.log, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log_fwd():
pass
@backprop_test_factory(
mygrad_func=log, true_func=np.log, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log_backward():
pass
@fwdprop_test_factory(
mygrad_func=log2, true_func=np.log2, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log2_fwd():
pass
@backprop_test_factory(
mygrad_func=log2, true_func=np.log2, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log2_backward():
pass
@fwdprop_test_factory(
mygrad_func=log10, true_func=np.log10, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log10_fwd():
pass
@backprop_test_factory(
mygrad_func=log10, true_func=np.log10, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log10_backward():
pass
@fwdprop_test_factory(
mygrad_func=log1p, true_func=np.log1p, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log1p_fwd():
pass
@backprop_test_factory(
mygrad_func=log1p, true_func=np.log1p, index_to_bnds={0: (1e-5, 100)}, num_arrays=1
)
def test_log1p_backward():
pass
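# Illustrative sketch (added here, not part of the generated test suite): what the
# factory-built tests above effectively verify for `log` -- forward agreement with
# numpy and the analytic gradient d/dx log(x) = 1/x via backpropagation.
if __name__ == "__main__":
    import mygrad as mg

    x = mg.Tensor([0.5, 1.0, 2.0])
    y = log(x)
    assert np.allclose(y.data, np.log(x.data))  # forward pass matches numpy
    y.backward()                                # backprop a gradient of ones
    assert np.allclose(x.grad, 1 / x.data)      # d log(x)/dx = 1/x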
|
from pyspark.sql import SparkSession
spark = SparkSession\
.builder\
.appName("SparkApp")\
.master("spark://VN-L2041.hcg.homecredit.net:7077")\
.getOrCreate()
df1 = spark.range(2, 10000000, 2)
df2 = spark.range(2, 10000000, 4)
step1 = df1.repartition(5)
step12 = df2.repartition(6)
step2 = step1.selectExpr("id * 5 as id")
step3 = step2.join(step12, ["id"])
step4 = step3.selectExpr("sum(id)")
result = step4.collect() # 2500000000000
print(result)
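# Sanity check of the expected result (reasoning added here, not in the original):
# step2 holds multiples of 10 (even ids times 5), df2 holds numbers of the form
# 4k + 2, so the join keeps the odd multiples of 10: 10, 30, ..., 9999990.
# That is 500000 values with mean 5000000, giving 500000 * 5000000 = 2.5e12,
# which matches the 2500000000000 noted above.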
|
import asyncio
import logging
import time
import math
from aiohttp import ClientConnectionError
from asyncio.queues import QueueEmpty
from .utils import Throttler
from botocore.exceptions import ClientError
from .base import Base
from . import exceptions
from .processors import JsonProcessor
log = logging.getLogger(__name__)
class Producer(Base):
def __init__(
self,
stream_name,
endpoint_url=None,
region_name=None,
buffer_time=0.5,
put_rate_limit_per_shard=1000,
put_bandwidth_limit_per_shard=1024,
after_flush_fun=None,
batch_size=500,
max_queue_size=10000,
processor=None,
skip_describe_stream=False,
retry_limit=None,
expo_backoff=None,
expo_backoff_limit=120,
create_stream=False,
create_stream_shards=1,
):
super(Producer, self).__init__(
stream_name,
endpoint_url=endpoint_url,
region_name=region_name,
retry_limit=retry_limit,
expo_backoff=expo_backoff,
expo_backoff_limit=expo_backoff_limit,
skip_describe_stream=skip_describe_stream,
create_stream=create_stream,
create_stream_shards=create_stream_shards,
)
self.buffer_time = buffer_time
self.processor = processor if processor else JsonProcessor()
self.queue = asyncio.Queue(maxsize=max_queue_size)
self.batch_size = batch_size
# A single shard can ingest up to 1 MiB of data per second (including partition keys)
# or 1,000 records per second for writes
self.put_rate_limit_per_shard = put_rate_limit_per_shard
self.put_rate_throttle = None
self.put_bandwidth_limit_per_shard = put_bandwidth_limit_per_shard
self.put_bandwidth_throttle = None
if put_bandwidth_limit_per_shard > 1024:
log.warning(
(
"Put bandwidth {}kb exceeds 1024kb. Expect throughput errors..".format(
put_bandwidth_limit_per_shard
)
)
)
self.set_put_rate_throttle()
self.flush_task = asyncio.create_task(self._flush())
self.is_flushing = False
self.after_flush_fun = after_flush_fun
# keep track of these (used by unit test only)
self.throughput_exceeded_count = 0
# overflow buffer
self.overflow = []
self.flush_total_records = 0
self.flush_total_size = 0
def set_put_rate_throttle(self):
self.put_rate_throttle = Throttler(
rate_limit=self.put_rate_limit_per_shard
* (len(self.shards) if self.shards else 1),
period=1,
)
self.put_bandwidth_throttle = Throttler(
# kb per second. Go below a bit to avoid hitting the threshold
size_limit=self.put_bandwidth_limit_per_shard
* (len(self.shards) if self.shards else 1),
period=1,
)
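    # Sizing note (explanatory comment, added): with N shards the record throttle
    # allows N * put_rate_limit_per_shard records per second and the bandwidth
    # throttle N * put_bandwidth_limit_per_shard KB per second, mirroring the
    # per-shard Kinesis limits quoted above.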
async def put(self, data):
        # Surface any exception raised inside the flush task to the caller;
        # otherwise an exception in the flush task would fail silently
if self.flush_task.done():
raise self.flush_task.exception()
if not self.stream_status == self.ACTIVE:
await self.get_conn()
elif self.queue.qsize() >= self.batch_size:
await self.flush()
for output in self.processor.add_item(data):
await self.queue.put(output)
async def close(self):
log.debug(f"Closing Connection.. (stream status:{self.stream_status})")
if not self.stream_status == self.RECONNECT:
# Cancel Flush Task
self.flush_task.cancel()
# final flush (probably not required but no harm)
await self.flush()
await self.client.close()
async def _flush(self):
while True:
if self.stream_status == self.ACTIVE:
if not self.is_flushing:
await self.flush()
await asyncio.sleep(self.buffer_time)
async def flush(self):
if self.is_flushing:
log.debug("Flush already in progress, ignoring..")
return
self.is_flushing = True
if self.processor.has_items():
for output in self.processor.get_items():
await self.queue.put(output)
while True:
self.flush_total_records = 0
self.flush_total_size = 0
if self.queue.qsize() > 0 or len(self.overflow) > 0:
log.debug(
"flush queue={} overflow={}".format(
self.queue.qsize(), len(self.overflow)
)
)
items = await self.get_batch()
if not items:
break
else:
result = await self._push_kinesis(items)
await self.process_result(result, items)
self.is_flushing = False
async def process_result(self, result, items):
if result["FailedRecordCount"]:
errors = list(
set(
[
r.get("ErrorCode")
for r in result["Records"]
if r.get("ErrorCode")
]
)
)
if not errors:
raise exceptions.UnknownException(
"Failed to put records but no errorCodes return in results"
)
if "ProvisionedThroughputExceededException" in errors:
log.warning(
"Throughput exceeded ({} records failed, added back..), pausing for 0.25s..".format(
result["FailedRecordCount"]
)
)
self.throughput_exceeded_count += 1
for i, record in enumerate(result["Records"]):
if "ErrorCode" in record:
self.overflow.append(items[i])
# log.debug("items={} overflow={}".format(len(items), len(overflow)))
await asyncio.sleep(0.25)
elif "InternalFailure" in errors:
log.warning("Received InternalFailure from Kinesis")
await self.get_conn()
for i, record in enumerate(result["Records"]):
if "ErrorCode" in record:
self.overflow.append(items[i])
else:
raise exceptions.UnknownException(
"Failed to put records due to: {}".format(", ".join(errors))
)
else:
if self.after_flush_fun:
await self.after_flush_fun(items)
async def get_batch(self):
items = []
flush_max_size = 0
for num in range(self.queue.qsize() + len(self.overflow)):
async with self.put_rate_throttle:
if self.overflow:
item = self.overflow.pop()
else:
try:
item = self.queue.get_nowait()
except QueueEmpty:
break
size_kb = math.ceil(item[0] / 1024)
flush_max_size += size_kb
if flush_max_size > 1024:
self.overflow.append(item)
elif num <= self.batch_size:
async with self.put_bandwidth_throttle(size=self.flush_total_size):
items.append(item)
self.flush_total_size += size_kb
self.flush_total_records += item[1]
else:
self.overflow.append(item)
return items
async def _push_kinesis(self, items):
log.debug(
"doing flush with {} record ({} items) @ {} kb".format(
len(items), self.flush_total_records, self.flush_total_size
)
)
while True:
try:
# todo: custom partition key
results = await self.client.put_records(
Records=[
{
"Data": item.data,
"PartitionKey": "{0}{1}".format(
time.perf_counter(), time.time()
),
}
for item in items
],
StreamName=self.stream_name,
)
log.info(
"flush complete with {} record ({} items) @ {} kb".format(
len(items), self.flush_total_records, self.flush_total_size
)
)
return results
except ClientError as err:
code = err.response["Error"]["Code"]
if code == "ValidationException":
if (
"must have length less than or equal"
in err.response["Error"]["Message"]
):
log.warning(
"Batch size {} exceeded the limit. retrying with less".format(
len(items)
)
)
existing_batch_size = self.batch_size
self.batch_size -= round(self.batch_size / 10)
# Must be small batch of big items, take at least one out..
if existing_batch_size == self.batch_size:
self.batch_size -= 1
self.overflow.extend(items)
self.flush_total_records = 0
self.flush_max_size = 0
self.flush_total_size = 0
items = await self.get_batch()
else:
log.warning(
f'Unknown ValidationException error code {err.response["Error"]["Code"]}'
)
log.exception(err)
await self.get_conn()
# raise err
elif code == "ResourceNotFoundException":
raise exceptions.StreamDoesNotExist(
"Stream '{}' does not exist".format(self.stream_name)
) from None
else:
log.warning(
f'Unknown Client error code {err.response["Error"]["Code"]}'
)
log.exception(err)
await self.get_conn()
# raise err
except ClientConnectionError as err:
await self.get_conn()
except asyncio.CancelledError:
return
except Exception as e:
log.exception(e)
log.critical("Unknown Exception Caught")
await self.get_conn()
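# Minimal usage sketch (an illustration, not part of this module): assumes a
# Kinesis stream named "test-stream" is reachable with the configured credentials.
# Producer() must be created inside a running event loop because __init__
# schedules the background flush task with asyncio.create_task().
#
# async def main():
#     producer = Producer(stream_name="test-stream", batch_size=500)
#     try:
#         for i in range(1000):
#             await producer.put({"msg": i})   # aggregated by the default JsonProcessor
#     finally:
#         await producer.close()               # cancels the flush task and does a final flush
#
# asyncio.run(main())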
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 10:57:50 2019
@author: ngritti
"""
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5.QtWidgets import (QApplication, QVBoxLayout, QDialog,
QGridLayout, QLabel, QPushButton,
QWidget, QSizePolicy, QSpinBox, QDoubleSpinBox)
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import sys, warnings, os, time
from skimage.io import imread
import scipy.ndimage as ndi
from matplotlib.figure import Figure
import matplotlib as mpl
from matplotlib.path import Path as MplPath
import matplotlib.patches as mpatches
import utils_postprocessing, utils_image
warnings.filterwarnings("ignore")
'''
# https://stackoverflow.com/questions/52581727/how-to-return-value-from-the-qwidget
class spotWindow(QDialog):
def __init__(self, val, parent=None):
super(spotWindow, self).__init__(parent)
self.val = val
self.initUI()
def initUI(self):
endButton = QPushButton('OK')
endButton.clicked.connect(self.on_clicked)
lay = QVBoxLayout(self)
lay.addWidget(endButton)
self.setWindowTitle(str(self.val))
@QtCore.pyqtSlot()
def on_clicked(self):
self.val += 1
self.accept()
app = QApplication(sys.argv)
# in the outside code, use
ex = spotWindow(0)
ex.show()
if ex.exec_() == QtWidgets.QDialog.Accepted:
print(ex.val)
else:
print('Bad exit')
'''
class spotWindow(QDialog):
def __init__(self, input_folder, params, parent=None):
super(spotWindow, self).__init__(parent)
self.input_folder = input_folder
self.params = params
# load the first image to use for parameter definition and find out the number of channels
_, cond = os.path.split(input_folder)
save_folder = os.path.join(input_folder,'result_segmentation')
props = utils_postprocessing.load_morpho_params(save_folder, cond)
props = {key:props[key][0] for key in props}
mask_file = props['mask_file']
path_to_mask = os.path.join(input_folder,mask_file)
        self.mask = imread(path_to_mask)[props['slice']].astype(float)
input_file = props['input_file']
path_to_file = os.path.join(input_folder,input_file)
self.img = imread(path_to_file).astype(float)
if len(self.img.shape) == 2:
self.img = np.expand_dims(self.img,0)
self.img = np.array([img[props['slice']] for img in self.img])
self.n_channels = self.img.shape[0]
# if params are none, set them to default values
params_default = [0.8,2,0,(2,self.img.shape[1]*self.img.shape[2])]
for i,p in enumerate(self.params):
            # if there is no channel indexing, create one of length n_channels
            if p is None:
                self.params[i] = [None for _ in range(self.n_channels)]
                p = self.params[i]
            # for every element in the channel indexing, if it is None, set it to default
            for ch in range(len(p)):
                if (p[ch] is None) or (p[ch] == (None, None)):
                    self.params[i][ch] = params_default[i]
# create window
self.initUI()
self.updateParamsAndFigure()
def initUI(self):
self.figure = Figure(figsize=(10, 2.5), dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.figure.clear()
axs = self.figure.subplots(nrows=1, ncols=4)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
for i in [0,1,3]:
axs[i].axis('off')
axs[2].set_xlabel('Fluo')
axs[2].ticklabel_format(axis="x", style="sci", scilimits=(2,2))
axs[2].set_ylabel('Counts')
axs[2].ticklabel_format(axis="y", style="sci", scilimits=(0,2))
self.canvas.draw()
self.channel = QSpinBox()
self.channel.setMaximum(self.n_channels-1)
self.channel.valueChanged.connect(self.updateChannel)
self.channel.setAlignment(Qt.AlignRight)
self.enhancement = QDoubleSpinBox()
self.enhancement.setMinimum(0)
self.enhancement.setMaximum(1)
self.enhancement.setSingleStep(0.05)
self.enhancement.setValue(self.params[0][self.channel.value()])
self.enhancement.setAlignment(Qt.AlignRight)
self.nClasses = QSpinBox()
self.nClasses.setMinimum(2)
self.nClasses.setValue(self.params[1][self.channel.value()])
self.nClasses.valueChanged.connect(self.updatenThrChoice)
self.nClasses.setAlignment(Qt.AlignRight)
self.nThr = QSpinBox()
self.nThr.setValue(self.params[2][self.channel.value()])
self.nThr.setAlignment(Qt.AlignRight)
self.minSize = QSpinBox()
self.minSize.setMaximum(self.img.shape[1]*self.img.shape[2])
self.minSize.setValue(self.params[3][self.channel.value()][0])
self.minSize.setAlignment(Qt.AlignRight)
self.maxSize = QSpinBox()
self.maxSize.setMaximum(self.img.shape[1]*self.img.shape[2])
self.maxSize.setValue(self.img.shape[1]*self.img.shape[2])
self.maxSize.setAlignment(Qt.AlignRight)
applyButton = QPushButton('Apply params')
applyButton.clicked.connect(self.updateParamsAndFigure)
endButton = QPushButton('UPDATE AND RETURN PARAMS')
endButton.clicked.connect(self.on_clicked)
lay = QGridLayout(self)
lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
lay.addWidget(self.canvas,1,0,1,2)
lay.addWidget(QLabel('Current channel'),2,0,1,1)
lay.addWidget(self.channel,2,1,1,1)
lay.addWidget(QLabel('Enhancement'),3,0,1,1)
lay.addWidget(self.enhancement,3,1,1,1)
lay.addWidget(QLabel('Expected classes for thresholding'),4,0,1,1)
lay.addWidget(self.nClasses,4,1,1,1)
lay.addWidget(QLabel('Selected threshold'),5,0,1,1)
lay.addWidget(self.nThr,5,1,1,1)
lay.addWidget(QLabel('Minimum spot size'),6,0,1,1)
lay.addWidget(self.minSize,6,1,1,1)
lay.addWidget(QLabel('Maximum spot size'),7,0,1,1)
lay.addWidget(self.maxSize,7,1,1,1)
lay.addWidget(applyButton,8,0,1,2)
lay.addWidget(endButton,9,0,1,2)
self.setWindowTitle(self.input_folder)
QApplication.setStyle('Macintosh')
def updatenThrChoice(self):
self.nThr.setMaximum(self.nClasses.value()-2)
def updateChannel(self):
ch = self.channel.value()
self.enhancement.setValue(self.params[0][ch])
self.nClasses.setValue(self.params[1][ch])
self.nThr.setValue(self.params[2][ch])
self.minSize.setValue(self.params[3][ch][0])
self.maxSize.setValue(self.params[3][ch][1])
self.updateParamsAndFigure()
def updateParamsAndFigure(self):
from matplotlib import rc
from matplotlib.backends.backend_pdf import PdfPages
rc('font', size=8)
rc('font', family='Arial')
# rc('font', serif='Times')
rc('pdf', fonttype=42)
# rc('text', usetex=True)
self.nThr.setMaximum(self.nClasses.value()-2)
ch = self.channel.value()
enhancement = self.enhancement.value()
nclasses = self.nClasses.value()
nThr = self.nThr.value()
sizelims = (self.minSize.value(),self.maxSize.value())
dict_, enhanced, thrs, objects = utils_image.detect_peaks(self.img[ch], self.mask,
enhancement=enhancement, nclasses=nclasses, nThr=nThr, sizelims=sizelims)
### update the values
self.params[0][ch] = enhancement
self.params[1][ch] = nclasses
self.params[2][ch] = nThr
self.params[3][ch] = sizelims
### update the plot
self.figure.clear()
axs = self.figure.subplots(nrows=1, ncols=4)
self.figure.subplots_adjust(top=0.9,right=1.,left=0.,bottom=0.2)#,wspace=0.01)#,hspace=0.01)
for i in [0,1,3]:
axs[i].axis('off')
axs[2].set_xlabel('Fluo')
axs[2].ticklabel_format(axis="x", style="sci", scilimits=(2,2))
axs[2].set_ylabel('Counts')
axs[2].ticklabel_format(axis="y", style="sci", scilimits=(0,2))
axs[0].set_title('Input image')
axs[1].set_title('Enhanced image')
axs[2].set_title('Histogram')
axs[3].set_title('Segmented spots: %d'%len(dict_['centroid']))
axs[2].set_yscale('log')
axs[0].imshow(self.img[ch], cmap='magma',vmin = np.percentile(self.img[ch],0.3), vmax = np.percentile(self.img[ch],99.7))
axs[1].imshow(enhanced, cmap='magma',vmin = np.percentile(enhanced,0.3), vmax = np.percentile(enhanced,99.7))
n,_,_ = axs[2].hist(enhanced[self.mask>0],bins=100)
for thr in thrs:
axs[2].plot([thr,thr],[0,np.max(n)],'-r')
axs[2].plot([thrs[nThr]],[np.max(n)],'*r',ms=10)
axs[3].imshow(objects, cmap='gray')
for coords, area in zip(dict_['centroid'],dict_['area']):
# draw circle around segmented coins
circle = mpatches.Circle((coords[1],coords[0]),radius=np.sqrt(area/np.pi),
fc=(1,0,0,0.5), ec = (1,0,0,1), linewidth=2)
axs[3].add_patch(circle)
# axs[3].plot(coords[1],coords[0],'+r',ms=5,alpha=.8)
self.canvas.draw()
@QtCore.pyqtSlot()
def on_clicked(self):
self.accept()
|
"""
* Primitives 3D.
*
* Placing mathematically 3D objects in synthetic space.
* The lights() method reveals their imagined dimension.
* The box() and sphere() functions each have one parameter
* which is used to specify their size. These shapes are
* positioned using the translate() function.
"""
size(640, 360, P3D)
background(0)
lights()
noStroke()
pushMatrix()
translate(130, height/2, 0)
rotateY(1.25)
rotateX(-0.4)
box(100)
popMatrix()
noFill()
stroke(255)
pushMatrix()
translate(500, height*0.35, -200)
sphere(280)
popMatrix()
|
class Goal(object):
def __init__(self, goalTuple):
super().__init__()
self._GoalID = goalTuple[0]
self._ProjectID = goalTuple[1]
self._ProjectTargetWeight = goalTuple[2]
def getGoalID(self):
return self._GoalID
def getProjectID(self):
return self._ProjectID
def getProjectTargetWeight(self):
return self._ProjectTargetWeight
|
from exchanges import helpers
from exchanges import bitfinex
from exchanges import bitstamp
from exchanges import okcoin
from exchanges import cex
from exchanges import btce
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell computer where to put CSV
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up, the w means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime', 'bitstamp_price', 'bitstamp_bid', 'bitstamp_ask', 'bitfinex_price', 'bitfinex_bid', 'bitfinex_ask', 'okcoin_price', 'okcoin_bid', 'okcoin_ask', 'cex_price', 'cex_bid', 'cex_ask', 'btc-e_price', 'btc-e_bid', 'btc-e_ask']
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
#set a counter telling us how many times we've gone through the loop, this is the first time, so we'll set it at 1
i = 1
#loop for a fixed number of polls; 200 iterations is an arbitrary cap
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
    print(i)
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
row.append(bitstamp.get_current_price())
row.append(bitstamp.get_current_bid())
row.append(bitstamp.get_current_ask())
row.append(bitfinex.get_current_price())
row.append(bitfinex.get_current_bid())
row.append(bitfinex.get_current_ask())
row.append(okcoin.get_current_price())
row.append(okcoin.get_current_bid())
row.append(okcoin.get_current_ask())
row.append(cex.get_current_price())
row.append(cex.get_current_bid())
row.append(cex.get_current_ask())
    row.append(btce.get_current_price())
    row.append(btce.get_current_bid())
    row.append(btce.get_current_ask())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5) |
import numpy
def bad_reward(dead, water_amount):
if water_amount > 0 and not dead:
return 1
return 0
def good_reward(old_moisture, new_moisture):
# gaussian function
old_moisture_reward = numpy.exp(-numpy.power(old_moisture - 0.5, 2.) / (2 * numpy.power(0.25, 2.)))
new_moisture_reward = numpy.exp(-numpy.power(new_moisture - 0.5, 2.) / (2 * numpy.power(0.25, 2.)))
    return new_moisture_reward - old_moisture_reward
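# Worked example (added for illustration): moving moisture from 0.2 toward the
# 0.5 optimum is rewarded, since exp(-(0.4-0.5)**2 / (2*0.25**2)) ~= 0.923 while
# exp(-(0.2-0.5)**2 / (2*0.25**2)) ~= 0.487, so good_reward(0.2, 0.4) ~= +0.44;
# moving the same distance away from 0.5 gives the same magnitude, negated.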
|
while True:
    user = input('Encrypt(1) Decrypt(2) Cancel(0):')
    if user == '0': break
    mensagem = input("your message: ").lower().replace(' ','')
    senha = input("your key: ").lower().replace(' ','')
    crip = ''
    alpha = 'abcdefghijklmnopqrstuvwxyz'
    while len(mensagem) > len(senha): senha+=senha # ** in case the message is longer than the key **
    def criptografar(mensagem, senha):
        global crip
        for l in range(len(mensagem)):
            crip = crip + alpha[((alpha.find(mensagem[l]) + alpha.find(senha[l]))%26)]
        return crip
    def descriptografar(mensagem, senha):
        global crip
        for l in range(len(mensagem)):
            crip = crip + alpha[((alpha.find(mensagem[l]) - alpha.find(senha[l]))%26)]
        return crip
    if user == '1': print('Encrypted message:', criptografar(mensagem, senha))
    elif user == '2': print('Decrypted message:', descriptografar(mensagem, senha))
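# Worked example (illustration only): with mensagem 'hello' and senha 'key',
# criptografar shifts each letter by the matching key letter (mod 26):
#   h+k -> r, e+e -> i, l+y -> j, l+k -> v, o+e -> s  =>  'rijvs'
# and descriptografar('rijvs', 'key') undoes the shifts to give back 'hello'.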
|
# Generated by Django 3.1.4 on 2021-01-30 22:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("person", "0024_date_migration_information_target_group"),
]
operations = [
migrations.RemoveField(
model_name="speakerinformation",
name="exclude_unconfirmed",
),
migrations.RemoveField(
model_name="speakerinformation",
name="include_submitters",
),
]
|
# -*- coding: utf-8 -*-
u"""
Created on 2015-7-13
@author: cheng.li
"""
from PyFin.tests.DateUtilities.testCalendar import TestCalendar
from PyFin.tests.DateUtilities.testDate import TestDate
from PyFin.tests.DateUtilities.testPeriod import TestPeriod
from PyFin.tests.DateUtilities.testSchedule import TestSchedule
from PyFin.tests.DateUtilities.testDayCounter import TestDayCounter
__all__ = ["TestCalendar",
"TestDate",
"TestPeriod",
"TestSchedule",
"TestDayCounter"]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 4 18:14:29 2016
@author: becker
"""
import numpy as np
import scipy.linalg as linalg
import scipy.sparse as sparse
try:
from simfempy.meshes.simplexmesh import SimplexMesh
except ModuleNotFoundError:
from simfempy.meshes.simplexmesh import SimplexMesh
import simfempy.fems.bdrydata
#=================================================================#
class FemP1(object):
def __init__(self, mesh=None):
if mesh is not None:
self.setMesh(mesh)
self.dirichlet_al = 10
def setMesh(self, mesh, bdrycond=None):
self.mesh = mesh
self.nloc = self.mesh.dimension+1
simps = self.mesh.simplices
self.cols = np.tile(simps, self.nloc).reshape(-1)
self.rows = np.repeat(simps, self.nloc).reshape(-1)
self.cellgrads = self.computeCellGrads()
self.massmatrix = self.computeMassMatrix()
if bdrycond:
self.robinmassmatrix = self.computeBdryMassMatrix(bdrycond, type="Robin")
def computeCellGrads(self):
ncells, normals, cellsOfFaces, facesOfCells, dV = self.mesh.ncells, self.mesh.normals, self.mesh.cellsOfFaces, self.mesh.facesOfCells, self.mesh.dV
scale = -1/self.mesh.dimension
# print("dV", np.where(dV<0.0001))
# print("dV", dV[dV<0.00001])
return scale*(normals[facesOfCells].T * self.mesh.sigma.T / dV.T).T
def computeMassMatrix(self, lumped=False):
nnodes = self.mesh.nnodes
        scalemass = 1 / self.nloc / (self.nloc+1)
massloc = np.tile(scalemass, (self.nloc,self.nloc))
massloc.reshape((self.nloc*self.nloc))[::self.nloc+1] *= 2
mass = np.einsum('n,kl->nkl', self.mesh.dV, massloc).ravel()
return sparse.coo_matrix((mass, (self.rows, self.cols)), shape=(nnodes, nnodes)).tocsr()
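    # Note on the local mass matrix used above (explanatory comment, added):
    # for P1 elements on a simplex K with d+1 = nloc vertices, the exact local
    # mass matrix is M_kl = |K| * (1 + delta_kl) / ((d+1)*(d+2)), which is what
    # the scalemass/massloc construction assembles before it is scattered with
    # rows/cols into the global sparse matrix.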
def computeBdryMassMatrix(self, bdrycond, type, lumped=False):
# TODO: find a way to get linear solution exactly with lumped=True
nnodes = self.mesh.nnodes
rows = np.empty(shape=(0), dtype=int)
cols = np.empty(shape=(0), dtype=int)
mat = np.empty(shape=(0), dtype=float)
if lumped:
for color, faces in self.mesh.bdrylabels.items():
if bdrycond.type[color] != type: continue
scalemass = bdrycond.param[color]/ self.mesh.dimension
normalsS = self.mesh.normals[faces]
dS = linalg.norm(normalsS, axis=1)
nodes = self.mesh.faces[faces]
rows = np.append(rows, nodes)
cols = np.append(cols, nodes)
mass = np.repeat(scalemass*dS, self.mesh.dimension)
mat = np.append(mat, mass)
return sparse.coo_matrix((mat, (rows, cols)), shape=(nnodes, nnodes)).tocsr()
else:
for color, faces in self.mesh.bdrylabels.items():
if bdrycond.type[color] != type: continue
scalemass = bdrycond.param[color] / (1+self.mesh.dimension)/self.mesh.dimension
normalsS = self.mesh.normals[faces]
dS = linalg.norm(normalsS, axis=1)
nodes = self.mesh.faces[faces]
nloc = self.nloc-1
rows = np.append(rows, np.repeat(nodes, nloc).reshape(-1))
cols = np.append(cols, np.tile(nodes, nloc).reshape(-1))
massloc = np.tile(scalemass, (nloc, nloc))
massloc.reshape((nloc*nloc))[::nloc+1] *= 2
mat = np.append(mat, np.einsum('n,kl->nkl', dS, massloc).reshape(-1))
return sparse.coo_matrix((mat, (rows, cols)), shape=(nnodes, nnodes)).tocsr()
def prepareBoundary(self, colorsdir, colorsflux=[]):
bdrydata = simfempy.fems.bdrydata.BdryData()
bdrydata.nodesdir={}
bdrydata.nodedirall = np.empty(shape=(0), dtype=int)
for color in colorsdir:
facesdir = self.mesh.bdrylabels[color]
bdrydata.nodesdir[color] = np.unique(self.mesh.faces[facesdir].flat[:])
bdrydata.nodedirall = np.unique(np.union1d(bdrydata.nodedirall, bdrydata.nodesdir[color]))
bdrydata.nodesinner = np.setdiff1d(np.arange(self.mesh.nnodes, dtype=int),bdrydata.nodedirall)
bdrydata.nodesdirflux={}
for color in colorsflux:
facesdir = self.mesh.bdrylabels[color]
bdrydata.nodesdirflux[color] = np.unique(self.mesh.faces[facesdir].ravel())
return bdrydata
def matrixDiffusion(self, k, bdrycond, method, bdrydata):
# alpha = problemdata.bdrycond.param[color]
# print(f"??? {alpha=}")
nnodes = self.mesh.nnodes
matxx = np.einsum('nk,nl->nkl', self.cellgrads[:, :, 0], self.cellgrads[:, :, 0])
matyy = np.einsum('nk,nl->nkl', self.cellgrads[:, :, 1], self.cellgrads[:, :, 1])
matzz = np.einsum('nk,nl->nkl', self.cellgrads[:, :, 2], self.cellgrads[:, :, 2])
mat = ( (matxx+matyy+matzz).T*self.mesh.dV*k).T.ravel()
A = sparse.coo_matrix((mat, (self.rows, self.cols)), shape=(nnodes, nnodes)).tocsr()
A += self.robinmassmatrix
return self.matrixDirichlet(A, bdrycond, method, bdrydata)
def formDiffusion(self, du, u, k):
graduh = np.einsum('nij,ni->nj', self.cellgrads, u[self.mesh.simplices])
graduh = np.einsum('ni,n->ni', graduh, self.mesh.dV*k)
# du += np.einsum('nj,nij->ni', graduh, self.cellgrads)
raise ValueError(f"graduh {graduh.shape} {du.shape}")
return du
def computeRhs(self, u, problemdata, kheatcell, method, bdrydata):
rhs = problemdata.rhs
rhscell = problemdata.rhscell
rhspoint = problemdata.rhspoint
bdrycond = problemdata.bdrycond
normals = self.mesh.normals
b = np.zeros(self.mesh.nnodes)
if rhs:
x, y, z = self.mesh.points.T
b += self.massmatrix * rhs(x, y, z)
if rhscell:
scale = 1/(self.mesh.dimension+1)
for label,fct in rhscell.items():
if fct is None: continue
cells = self.mesh.cellsoflabel[label]
xc, yc, zc = self.mesh.pointsc[cells].T
bC = scale*fct(xc, yc, zc)*self.mesh.dV[cells]
# print("bC", bC)
np.add.at(b, self.mesh.simplices[cells].T, bC)
if rhspoint:
for label,fct in rhspoint.items():
if fct is None: continue
points = self.mesh.verticesoflabel[label]
xc, yc, zc = self.mesh.points[points].T
# print("xc, yc, zc, f", xc, yc, zc, fct(xc, yc, zc))
b[points] += fct(xc, yc, zc)
help = np.zeros(self.mesh.nnodes)
for color, faces in self.mesh.bdrylabels.items():
if bdrycond.type[color] != "Robin": continue
if not color in bdrycond.fct or bdrycond.fct[color] is None: continue
nodes = np.unique(self.mesh.faces[faces].reshape(-1))
x, y, z = self.mesh.points[nodes].T
# print(f"normals {normals.shape}")
# raise ValueError(f"normals = {np.mean(normals, axis=0)}")
# nx, ny, nz = normals[faces].T
nx, ny, nz = np.mean(normals[faces], axis=0)
help[nodes] = bdrycond.fct[color](x, y, z, nx, ny, nz)
b += self.robinmassmatrix*help
scale = 1 / self.mesh.dimension
for color, faces in self.mesh.bdrylabels.items():
if bdrycond.type[color] != "Neumann": continue
if not color in bdrycond.fct or bdrycond.fct[color] is None: continue
normalsS = normals[faces]
dS = linalg.norm(normalsS,axis=1)
normalsS = normalsS/dS[:,np.newaxis]
assert(dS.shape[0] == len(faces))
x1, y1, z1 = self.mesh.pointsf[faces].T
nx, ny, nz = normalsS.T
bS = scale * bdrycond.fct[color](x1, y1, z1, nx, ny, nz) * dS
np.add.at(b, self.mesh.faces[faces].T, bS)
if bdrycond.hasExactSolution():
for color, faces in self.mesh.bdrylabels.items():
if bdrycond.type[color] != "Robin": continue
normalsS = normals[faces]
dS = linalg.norm(normalsS, axis=1)
normalsS = normalsS / dS[:, np.newaxis]
assert (dS.shape[0] == len(faces))
x1, y1, z1 = self.mesh.pointsf[faces].T
nx, ny, nz = normalsS.T
bS = scale * bdrycond.fctexact["Neumann"](x1, y1, z1, nx, ny, nz) * dS
np.add.at(b, self.mesh.faces[faces].T, bS)
return self.vectorDirichlet(b, u, bdrycond, method, bdrydata)
def matrixDirichlet(self, A, bdrycond, method, bdrydata):
nodesdir, nodedirall, nodesinner, nodesdirflux = bdrydata.nodesdir, bdrydata.nodedirall, bdrydata.nodesinner, bdrydata.nodesdirflux
nnodes = self.mesh.nnodes
for color, nodes in nodesdirflux.items():
nb = nodes.shape[0]
help = sparse.dok_matrix((nb, nnodes))
for i in range(nb): help[i, nodes[i]] = 1
bdrydata.Asaved[color] = help.dot(A)
bdrydata.A_inner_dir = A[nodesinner, :][:, nodedirall]
if method == 'strong':
help = np.ones((nnodes))
help[nodedirall] = 0
help = sparse.dia_matrix((help, 0), shape=(nnodes, nnodes))
A = help.dot(A.dot(help))
help = np.zeros((nnodes))
help[nodedirall] = 1.0
help = sparse.dia_matrix((help, 0), shape=(nnodes, nnodes))
A += help
else:
bdrydata.A_dir_dir = self.dirichlet_al*A[nodedirall, :][:, nodedirall]
help = np.ones(nnodes)
help[nodedirall] = 0
help = sparse.dia_matrix((help, 0), shape=(nnodes, nnodes))
help2 = np.zeros(nnodes)
help2[nodedirall] = np.sqrt(self.dirichlet_al)
help2 = sparse.dia_matrix((help2, 0), shape=(nnodes, nnodes))
A = help.dot(A.dot(help)) + help2.dot(A.dot(help2))
return A, bdrydata
def vectorDirichlet(self, b, u, bdrycond, method, bdrydata):
nodesdir, nodedirall, nodesinner, nodesdirflux = bdrydata.nodesdir, bdrydata.nodedirall, bdrydata.nodesinner, bdrydata.nodesdirflux
if u is None: u = np.zeros_like(b)
elif u.shape != b.shape : raise ValueError("u.shape != b.shape {} != {}".format(u.shape, b.shape))
x, y, z = self.mesh.points.T
for color, nodes in nodesdirflux.items():
bdrydata.bsaved[color] = b[nodes]
if method == 'strong':
for color, nodes in nodesdir.items():
if color in bdrycond.fct:
dirichlet = bdrycond.fct[color](x[nodes], y[nodes], z[nodes])
b[nodes] = dirichlet
else:
b[nodes] = 0
u[nodes] = b[nodes]
b[nodesinner] -= bdrydata.A_inner_dir * b[nodedirall]
else:
for color, nodes in nodesdir.items():
dirichlet = bdrycond.fct[color]
if dirichlet:
u[nodes] = dirichlet(x[nodes], y[nodes], z[nodes])
else:
u[nodes] = 0
b[nodes] = 0
b[nodesinner] -= bdrydata.A_inner_dir * u[nodedirall]
b[nodedirall] += bdrydata.A_dir_dir * u[nodedirall]
return b, u, bdrydata
def vectorDirichletZero(self, du, bdrydata):
nodesdir = bdrydata.nodesdir
for color, nodes in nodesdir.items():
du[nodes] = 0
return du
def tonode(self, u):
return u
# def grad(self, ic):
# normals = self.mesh.normals[self.mesh.facesOfCells[ic,:]]
# grads = 0.5*normals/self.mesh.dV[ic]
# chsg = (ic == self.mesh.cellsOfFaces[self.mesh.facesOfCells[ic,:],0])
# # print("### chsg", chsg, "normals", normals)
# grads[chsg] *= -1.
# return grads
def computeErrorL2(self, solexact, uh):
x, y, z = self.mesh.points.T
en = solexact(x, y, z) - uh
xc, yc, zc = self.mesh.pointsc.T
ec = solexact(xc, yc, zc) - np.mean(uh[self.mesh.simplices], axis=1)
return np.sqrt( np.dot(en, self.massmatrix*en) ), np.sqrt(np.sum(ec**2* self.mesh.dV)), en
def computeErrorFluxL2(self, solexact, diffcell, uh):
xc, yc, zc = self.mesh.pointsc.T
graduh = np.einsum('nij,ni->nj', self.cellgrads, uh[self.mesh.simplices])
errv = 0
for i in range(self.mesh.dimension):
solxi = solexact.d(i, xc, yc, zc)
errv += np.sum( diffcell*(solxi-graduh[:,i])**2* self.mesh.dV)
return np.sqrt(errv)
def computeBdryMean(self, u, colors):
# colors = [int(x) for x in data.split(',')]
mean, omega = np.zeros(len(colors)), np.zeros(len(colors))
for i,color in enumerate(colors):
faces = self.mesh.bdrylabels[color]
normalsS = self.mesh.normals[faces]
dS = linalg.norm(normalsS, axis=1)
omega[i] = np.sum(dS)
mean[i] = np.sum(dS*np.mean(u[self.mesh.faces[faces]],axis=1))
return mean/omega
    def computeFluxOnRobin(self, u, faces, dS, uR, cR):
uhmean = np.sum(dS * np.mean(u[self.mesh.faces[faces]], axis=1))
xf, yf, zf = self.mesh.pointsf[faces].T
nx, ny, nz = np.mean(self.mesh.normals[faces], axis=0)
if uR: uRmean = np.sum(dS * uR(xf, yf, zf, nx, ny, nz))
else: uRmean=0
return cR*(uRmean-uhmean)
def computeBdryDn(self, u, colors, bdrydata, bdrycond):
# colors = [int(x) for x in data.split(',')]
flux, omega = np.zeros(len(colors)), np.zeros(len(colors))
for i,color in enumerate(colors):
faces = self.mesh.bdrylabels[color]
normalsS = self.mesh.normals[faces]
dS = linalg.norm(normalsS, axis=1)
omega[i] = np.sum(dS)
if bdrycond.type[color] == "Robin":
                flux[i] = self.computeFluxOnRobin(u, faces, dS, bdrycond.fct[color], bdrycond.param[color])
elif bdrycond.type[color] == "Dirichlet":
bs, As = bdrydata.bsaved[color], bdrydata.Asaved[color]
flux[i] = np.sum(As * u - bs)
else:
raise NotImplementedError(f"computeBdryDn for condition '{bdrycond.type[color]}' color={color}")
return flux
def computeBdryFct(self, u, colors):
# colors = [int(x) for x in data.split(',')]
nodes = np.empty(shape=(0), dtype=int)
for color in colors:
faces = self.mesh.bdrylabels[color]
nodes = np.unique(np.union1d(nodes, self.mesh.faces[faces].ravel()))
return self.mesh.points[nodes], u[nodes]
def computePointValues(self, u, colors):
# colors = [int(x) for x in data.split(',')]
up = np.empty(len(colors))
for i,color in enumerate(colors):
nodes = self.mesh.verticesoflabel[color]
up[i] = u[nodes]
return up
def computeMeanValues(self, u, colors):
# colors = [int(x) for x in data.split(',')]
up = np.empty(len(colors))
for i, color in enumerate(colors):
up[i] = self.computeMeanValue(u,color)
return up
def computeMeanValue(self, u, color):
cells = self.mesh.cellsoflabel[color]
# print("umean", np.mean(u[self.mesh.simplices[cells]],axis=1))
return np.sum(np.mean(u[self.mesh.simplices[cells]],axis=1)*self.mesh.dV[cells])
# ------------------------------------- #
if __name__ == '__main__':
trimesh = SimplexMesh(geomname="backwardfacingstep", hmean=0.3)
fem = FemP1(trimesh)
fem.testgrad()
import plotmesh
import matplotlib.pyplot as plt
plotmesh.meshWithBoundaries(trimesh)
plt.show()
|
import lc_data_access
import time
lc_access = lc_data_access.LC_Access()
# TODO:
# > Deal with the async aspect if there are too many calls.
# > Cleanup this file for testing.
# > link to the problem on the !get command.
# > don't count duplicates.
# > update recap command to say something if there aren't any users.
start_time = time.time()
# Get a user
# print(lc_access.get_user_most_recent('testuser'))
# print(lc_access.get_users_str())
# print()
# # Users recents:
# print("Testing User's Recents:")
# print(lc_access.users_recents())
# print()
# # Remove a user
# print(lc_access.get_users_str())
# print(lc_access.remove_user("testuser"))
# print("testuser removed, here's who's left:")
# print(lc_access.get_users_str())
# print()
# Weekly Recap
print(lc_access.weekly_recap())
end_time = time.time()
print("Took {} seconds.", end_time - start_time)
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class DeleteChatMessagesByDate(BaseObject):
"""
Deletes all messages between the specified dates in a chat. Supported only for private chats and basic groups. Messages sent in the last 30 seconds will not be deleted
:param chat_id: Chat identifier
:type chat_id: :class:`int`
:param min_date: The minimum date of the messages to delete
:type min_date: :class:`int`
:param max_date: The maximum date of the messages to delete
:type max_date: :class:`int`
:param revoke: Pass true to delete chat messages for all users; private chats only
:type revoke: :class:`bool`
"""
ID: str = Field("deleteChatMessagesByDate", alias="@type")
chat_id: int
min_date: int
max_date: int
revoke: bool
@staticmethod
def read(q: dict) -> DeleteChatMessagesByDate:
return DeleteChatMessagesByDate.construct(**q)
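# Usage sketch (illustrative, not part of the generated file): constructing the
# request object directly; the ID/@type field is filled in by the Field default.
#
# request = DeleteChatMessagesByDate(
#     chat_id=123456789,      # placeholder chat id
#     min_date=1609459200,    # unix timestamps bounding the range to delete
#     max_date=1612137600,
#     revoke=True,
# )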
|
import re
import socketserver
# custom request handler
class MyHandler(socketserver.BaseRequestHandler):
    def handle(self):
        """Custom handle method"""
        data = self.request.recv(2048)
        if data:
            header, other = data.decode("utf-8").split("\r\n", 1)
            self.set_request_headers(other)  # assemble the request headers into a dict
# GET /xxx HTTP/1.1
ret = re.match("[^ ]+ ([^ ]+)", header)
if ret:
file_name = ret.group(1)
if file_name == "/":
file_name = "/index.html" # 代表主页
# 是否是动态页面
if file_name.endswith(".py"):
# .py去除掉
self.response_body = self.__dynamic_handler(file_name[:-3])
else: # 静态页面
self.response_body = self.__static_handler(file_name)
# 根据有没有内容来设置返回的状态码
if self.response_body:
self.start_response("200 ok") # 有这个文件
else:
self.response_404() # 404页面
            # respond to the browser
self.response()
    # unchanged (same as before)
def set_request_headers(self, headers):
"""设置请求头为指定格式"""
self.request_headers = dict()
for line in headers.splitlines():
            # guard against values that themselves contain ':', e.g. Host:https://www.baidu.com
            item = line.split(":", 1)  # key-value array
            if len(item) == 2:  # the last line is empty ([:])
self.request_headers[item[0]] = item[1]
    # unchanged (same as before)
def start_response(self, status, header_dict={}):
"""设置响应头"""
self.response_headers = f"HTTP/1.1 {status}\r\n"
for key, value in header_dict.items():
self.response_headers += f"{key}:{value}\r\n"
        # headers and body are separated by two \r\n
self.response_headers += "Server:MyServer\r\n\r\n"
    # unchanged (same as before)
def response(self):
"""响应浏览器"""
self.request.send(
self.response_headers.encode("utf-8") + self.response_body)
    # unchanged (same as before)
def response_404(self):
"""返回404页面"""
self.start_response("404 Not Found") # 没有这个文件
self.response_body = self.__static_handler("/404.html")
    # unchanged (same as before)
def __static_handler(self, file_name):
"""返回文件内容"""
file_name = f"./root{file_name}"
print(file_name)
import os
if os.path.exists(file_name):
# 二进制方式打开文件(图片、文本都适用)
with open(file_name, "rb") as f:
return f.read()
else:
return None
def __dynamic_handler(self, name):
"""动态页面"""
self.request_headers["path"] = name # 把请求方法传递过去
from dynamic.frame import WebFrame
# 根据WSGI协议来
return WebFrame().application(self.request_headers,
self.start_response)
# unchanged (same as before)
def main():
with socketserver.ThreadingTCPServer(("", 8080), MyHandler) as server:
        server.allow_reuse_address = True  # avoid "address already in use" errors
server.serve_forever()
if __name__ == "__main__":
main()
|
#!/share/software/user/open/python/3.6.1/bin/python3
from src.ModelDriver import *
## MODIFY THESE PARAMS FOR SPECIFIC RUN ###
X_train = "/oak/stanford/groups/aboettig/Aparna/NNreviews/TestRobustness/jitterData/train_5.23.18_JitterRad-40.0_jitterPerc-0.5_xyz.txt"
Y_train = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/train_5.23.18_unbalanced_unaugmented_rna_2.txt"
X_dev = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/dev_5.23.18_unbalanced_unaugmented_xyz.txt"
Y_dev = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/dev_5.23.18_unbalanced_unaugmented_rna_2.txt"
version = 0
specific_info = "RF_AbdA_train_5.23.18_JitterRad-40.0_jitterPerc-0.5"
architecture = "rf"
num_estimators = 900
min_sample_split = 15
max_depth = None
max_leaf_nodes = 3
random_state = 0
class_weight = "balanced"
n_jobs = -1
tag = specific_info
## END OF PARAMS TO MODIFY ##
PARAMETERS = {
"X_train" : X_train,
"Y_train" : Y_train,
"X_dev" : X_dev,
"Y_dev" : Y_dev,
"architecture" : architecture,
"num_estimators" : num_estimators,
"min_sample_split" : min_sample_split,
"max_depth" : max_depth,
"max_leaf_nodes" : max_leaf_nodes,
"random_state" : random_state,
"class_weight" : class_weight,
"n_jobs" : n_jobs,
"tag" : tag,
"print_cost" : True
}
modelDriver = ModelDriver(PARAMETERS)
modelDriver.load()
modelDriver.init_model()
modelDriver.run_model()
|
# -*- coding: utf-8 -*-
'''
Provide authentication using a Slack token
Slack auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
slack:
fred:
- .*
- '@runner'
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.http
log = logging.getLogger(__name__)
__virtualname__ = 'slack'
def __virtual__():
return __virtualname__
def auth(username, password):
'''
Slack authentication
'''
    url = 'https://slack.com/api/auth.test?token={}'.format(password)
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', status=True, decode=True)
if result['status'] == 200:
log.debug('eauth REST call returned 200: %s', result)
if result['dict'] is not None:
if result['dict']['ok'] == False:
log.debug('eauth Slack call failed (reject by server): %s', result)
return False
if result['dict']['user'] != username:
log.debug('eauth Slack call failed (user mismatch): %s', result)
return False
return True
else:
log.debug('eauth Slack call failed (non-200 response): %s', result)
return False
|
"""Refresh cog."""
from discord.ext import commands, tasks
from bot import google
class Refresh(commands.Cog, command_attrs={"hidden": True}):
"""All the miscellaneous commands."""
def __init__(self, bot):
self.bot = bot
self.refresh_loop.start()
@staticmethod
async def run_refresh():
"""Reload all the data on the worksheets."""
await google.connector.rules.refresh()
await google.connector.whatis.refresh()
@commands.command()
async def refresh(self, ctx):
"""Refresh data for Info and Tourney."""
        async with ctx.typing():
await self.run_refresh()
await ctx.send("♻ **Refreshed!**")
@tasks.loop(hours=1)
async def refresh_loop(self):
"""Loop that refreshes worksheets."""
await self.run_refresh()
def setup(bot):
bot.add_cog(Refresh(bot))
|
from cyaron import *
CASES = 10
for t in range(1, CASES + 1):
io = IO(f"{t}.in")
# ==============================
n = 100
k = randint(0, int(1e12)) if t <= 5 else int(1e12)
io.input_writeln(n, k)
for i in range(n):
io.input_writeln(
*(randint(-1000 if t <= 7 else 0, 1000) for j in range(n))
)
# ==============================
io.close()
|
""" Convert img-list to tensorflow TFRecord format """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import pandas as pd
import argparse
import cv2
import json
from natsort import natsorted
from tqdm import tqdm
from .. import pipeline as pipe
from .. import utils
from collections import OrderedDict
np.random.seed(21)
def run(view_angles,
extra_radius_buffer_px,
num_channels,
stride,
crop_size,
view_planes,
num_negative_examples_per_nodule_free_patient_per_view_plane,
HU_tissue_range):
# check dataset
if not pipe.dataset_name == 'LUNA16':
raise ValueError('gen_nodule_seg_data only valid for dataset LUNA16')
sys.exit()
# define view_planes
view_planes = [x if x in ['y', 'x', 'z'] else 'ERROR' for x in natsorted(view_planes)]
if len(view_planes) == 0:
pipe.log.error("No view_plane is determined!!! Continue with default 'zyx'.")
view_planes = ['x','y','z'] # default
if len(crop_size) != 2:
raise ValueError('Wrong crop_size. Use format HeightxWidth')
sys.exit()
gen_nodule_masks_json = pipe.load_json('out.json', 'gen_nodule_masks')
resample_lungs_json = pipe.load_json('out.json', 'resample_lungs')
patients_lst = OrderedDict(pipe.patients_by_split)
for lst_type in patients_lst.keys():
if len(patients_lst[lst_type]) == 0:
continue
pipe.log.info('processing lst {} with len {}'.format(lst_type, len(patients_lst[lst_type])))
generate_data_lsts(
HU_tissue_range,
gen_nodule_masks_json,
resample_lungs_json,
lst_type,
patients_lst[lst_type],
num_channels,
stride,
crop_size,
view_planes,
num_negative_examples_per_nodule_free_patient_per_view_plane)
def get_slice_from_zyx_array(array, slice_start, slice_end, axis):
slice_start = max(0, slice_start)
slice_end = min(slice_end, array.shape[axis])
if slice_start>slice_end:
print ('ERROR with slice_start, slice_end!!!')
sys.exit()
if axis == 0:
return np.swapaxes(array[slice_start:slice_end].copy(),0,2) # z,y,x -> x,y,z # order equal to gen_prob_maps ordering
elif axis == 1:
return np.swapaxes(array[:, slice_start:slice_end].copy(), 1, 2) #z,y,x, -> z,x,y
elif axis == 2:
return array[:, :, slice_start:slice_end].copy() #z,y,x
def ensure_point_lst_within_array(lst, array_shape):
return [int(np.clip(x, 0, array_shape)) for x in lst]
def generate_data_lsts(HU_tissue_range,
gen_nodule_masks_json,
resample_lungs_json,
lst_type, patient_lst,
num_channels, stride,
crop_size,
view_planes,
num_negatives_per_patient,
use_shell_slices=False):
# initialize some vars
num_data = 0
patient_slice_num = 0
all_data = []
stacked_data = np.zeros(list(crop_size)+[num_channels]+[3], dtype=np.uint8)
lst_out_path = pipe.get_step_dir() + lst_type + '_nodule_seg_data.lst'
with open(lst_out_path, 'w') as out_lst:
for pa_cnt, patient in enumerate(tqdm(patient_lst)):
patient_slice_num = 0
try:
patient_json = gen_nodule_masks_json[patient]
except:
pipe.log.error('could not load gen_nodule_mask patient_json for patient {}!!!'.format(patient))
sys.exit()
try:
scan = pipe.load_array(resample_lungs_json[patient]['basename'], 'resample_lungs')
except:
pipe.log.error('could not load resample_lungs_array of patient {}. continue with next patient.'.format(patient))
continue
try:
mask = pipe.load_array(gen_nodule_masks_json[patient]['basename'], 'gen_nodule_masks')
except:
pipe.log.error('could not load mask-array of patient {}. Continue with next patient'.format(patient))
continue
            #normalize and zero_center scan and lab. Also check the dtype
if scan.dtype == np.int16:
scan = ((scan/(float(HU_tissue_range[1]-HU_tissue_range[0])))*255).astype(np.uint8)
elif scan.dtype == np.float32:
scan = (scan*255).astype(np.uint8)
elif scan.dtype == np.uint8:
scan = scan
else:
pipe.log.error('dtype of scan for patient {} is NOT one of these [uint8, float32, int16]. Continue with next patient'.format(patient))
if mask.dtype == np.uint8:
mask = mask
else:
pipe.log.error('dtype of mask for patient {} is NOT uint8. Continue with next patient'.format(patient))
# combine scan and mask to data
data = np.zeros(list(scan.shape)+[3], dtype=np.uint8)
data[:, :, :, 0] = scan
data[:, :, :, 1:3] = mask
# initialize some lists
images_nodule_free = []
nodules_extract_coords_lst = []
num_nodule_pakets_lst = []
nodules_center_coords_lst = []
nodule_id_lst = []
images = []
# get patient infos
if patient_json['nodule_patient']:
nodules = patient_json['nodules']
for nodule in nodules:
#skip nodules < 3 #adjustment for retraining LUNA (checkpoints for this setting already included pretrained in the April 7 model release)
if nodule['nodule_priority'] < 3:
continue
#nodule_bounding_box_coords_zyx_px = nodule["nodule_box_ymin/ymax_xmin/xmax_zmin/zmax_px"]
factor = 1.0
nodule_center_zyx_px = nodule['center_zyx_px']
nodule_max_diameter_zyx_px = nodule['max_diameter_zyx_px']
nodule_bounding_box_coords_zyx_px = nodule["nodule_box_zmin/zmax_ymin/ymax_xmin/xmax_px"]
# ensure points within array
nodule_bounding_box_coords_zyx_px = [ensure_point_lst_within_array([nodule_bounding_box_coords_zyx_px[x]], data.shape[x//2])[0] for x in range(6)]
# ensure that bounding box has at least num_channel size
nodule_bounding_box_coords_zyx_px = [int(nodule_bounding_box_coords_zyx_px[v]) if v%2==0
else max(int(nodule_bounding_box_coords_zyx_px[v]+1), int(nodule_bounding_box_coords_zyx_px[v-1])+num_channels) for v in range(6)]
# get center_box_coords
nodule_center_box_coords_zyx_px = nodule["nodule_center_box_zmin/zmax_px_ymin/ymax_xmin/xmax"]
# ensure points within array
nodule_center_box_coords_zyx_px = [ensure_point_lst_within_array([nodule_center_box_coords_zyx_px[x]], data.shape[x//2])[0] for x in range(6)]
# draw center
# loop over view_planes
for view_plane in view_planes:
# get affected layers from scan and homogenize plan orientation (num_channels always in last dimension)
if view_plane =='z':
center_coords = [nodule_center_box_coords_zyx_px[0], nodule_center_box_coords_zyx_px[1]]
shell_coords = [nodule_bounding_box_coords_zyx_px[0], nodule_bounding_box_coords_zyx_px[1]]
nodule_box_coords = [nodule_bounding_box_coords_zyx_px[4], nodule_bounding_box_coords_zyx_px[5], # y on first axis
nodule_bounding_box_coords_zyx_px[2], nodule_bounding_box_coords_zyx_px[3]] # x on second axis
axis = 0
elif view_plane =='y':
center_coords = [nodule_center_box_coords_zyx_px[2], nodule_center_box_coords_zyx_px[3]]
shell_coords = [nodule_bounding_box_coords_zyx_px[2], nodule_bounding_box_coords_zyx_px[3]]
nodule_box_coords = [nodule_bounding_box_coords_zyx_px[0], nodule_bounding_box_coords_zyx_px[1], # z on first axis
nodule_bounding_box_coords_zyx_px[4], nodule_bounding_box_coords_zyx_px[5]] # x on second axis
axis = 1
elif view_plane =='x':
center_coords = [nodule_center_box_coords_zyx_px[4], nodule_center_box_coords_zyx_px[5]]
shell_coords = [nodule_bounding_box_coords_zyx_px[4], nodule_bounding_box_coords_zyx_px[5]]
nodule_box_coords = [nodule_bounding_box_coords_zyx_px[0], nodule_bounding_box_coords_zyx_px[1], # z on first axis
nodule_bounding_box_coords_zyx_px[2], nodule_bounding_box_coords_zyx_px[3]] # y on second axis
axis = 2
if use_shell_slices:
shell_slices = get_slice_from_zyx_array(data, shell_coords[0], shell_coords[1], axis=axis)
slices = shell_slices
else:
center_slices = get_slice_from_zyx_array(data, center_coords[0], center_coords[1], axis=axis)
slices = center_slices
num_layers = slices.shape[2]
# for nodules with many layers split several parts from
nodules_pakets = []
if num_layers == num_channels:
nodules_pakets.append(list(range(num_channels)))
elif num_layers > num_channels:
rand_offset = np.random.randint(0,int((num_layers-num_channels) % (stride))+1)
for paket in range(int((num_layers-num_channels)/(stride))+1):
nodules_pakets.append(list(np.arange(rand_offset+paket*stride, rand_offset+paket*stride+num_channels)))
# make nodule_pakets where label is central layer of paket
for nodules_paket in nodules_pakets:
images.append(slices[:,:,min(nodules_paket):min(nodules_paket)+num_channels])
nodules_extract_coords_lst.append(nodule_box_coords)
num_nodule_pakets_lst.append(len(nodules_pakets))
nodule_id_lst.append(nodule['nodule_id'])
# get some negative samples for every view_plane
rand_layers_z = np.random.permutation(range(scan.shape[0]))
rand_layers_y = np.random.permutation(range(scan.shape[1]))
rand_layers_x = np.random.permutation(range(scan.shape[2]))
rand_layer_cnt = 0
while len(images_nodule_free) < num_negatives_per_patient*len(view_planes):
if 'z' in view_planes:
idx0 = min(rand_layers_z[rand_layer_cnt], scan.shape[0]-num_channels)
idx1 = idx0 + num_channels
idx2 = np.random.randint(0, max(1,scan.shape[1]-crop_size[0]))
idx3 = idx2 + crop_size[0]
idx4 = np.random.randint(0, max(1,scan.shape[2]-crop_size[1]))
idx5 = idx4 + crop_size[1]
# introduce some random black parts
if np.random.randint(0,10)==0:
rand_black_padding = np.random.randint(0,4)
if rand_black_padding:
idx2 += np.random.randint(1,crop_size[0]//3)
elif rand_black_padding ==1:
idx3 -= np.random.randint(1,crop_size[0]//3)
elif rand_black_padding==2:
idx3 += np.random.randint(1,crop_size[1]//3)
elif rand_black_padding==3:
idx4 -= np.random.randint(1,crop_size[1]//3)
if np.sum(mask[idx0:idx1, idx2:idx3, idx4:idx5]) < 1:
images_nodule_free.append(np.swapaxes(data[idx0:idx1, idx2:idx3, idx4:idx5].copy(), 0, 2))
# cv2.imwrite('test_imgs/'+'y'+'_'+str(num_data)+'_'+str(len(images_nodule_free))+'_shitto.jpg', data[idx0+(idx1-idx0)//2, idx2:idx3, idx4:idx5,0])
if 'y' in view_planes:
idx0 = np.random.randint(0, max(1,scan.shape[0]-crop_size[0]))
idx1 = idx0 + crop_size[0]
idx2 = min(rand_layers_y[rand_layer_cnt], scan.shape[1]-num_channels)
idx3 = idx2 + num_channels
idx4 = np.random.randint(0, max(1,scan.shape[2]-crop_size[1]))
idx5 = idx4 + crop_size[1]
# introduce some random black parts
if np.random.randint(0,10)==0:
rand_black_padding = np.random.randint(0,4)
if rand_black_padding:
idx0 += np.random.randint(1,crop_size[0]//3)
elif rand_black_padding ==1:
idx1 -= np.random.randint(1,crop_size[0]//3)
elif rand_black_padding==2:
idx3 += np.random.randint(1,crop_size[1]//3)
elif rand_black_padding==3:
idx4 -= np.random.randint(1,crop_size[1]//3)
if np.sum(mask[idx0:idx1, idx2:idx3, idx4:idx5]) < 1:
images_nodule_free.append(np.swapaxes(data[idx0:idx1, idx2:idx3, idx4:idx5].copy(), 1,2))
# cv2.imwrite('test_imgs/'+'x'+'_'+str(num_data)+'_'+str(len(images_nodule_free))+'_shitto.jpg', data[idx0:idx1, idx2+(idx3-idx2)//2, idx4:idx5,0])
if 'x' in view_planes:
idx0 = np.random.randint(0, max(1,scan.shape[0]-crop_size[0]))
idx1 = idx0 + crop_size[0]
idx2 = np.random.randint(0, max(1,scan.shape[1]-crop_size[1]))
idx3 = idx2 + crop_size[1]
idx4 = min(rand_layers_x[rand_layer_cnt], scan.shape[2]-num_channels)
idx5 = idx4 + num_channels
# introduce some random black parts
if np.random.randint(0,10)==0:
rand_black_padding = np.random.randint(0,4)
if rand_black_padding:
idx0 += np.random.randint(1,crop_size[0]//3)
elif rand_black_padding ==1:
idx1 -= np.random.randint(1,crop_size[0]//3)
elif rand_black_padding==2:
idx2 += np.random.randint(1,crop_size[1]//3)
elif rand_black_padding==3:
idx3 -= np.random.randint(1,crop_size[1]//3)
if np.sum(mask[idx0:idx1, idx2:idx3, idx4:idx5]) < 1:
images_nodule_free.append(data[idx0:idx1, idx2:idx3, idx4:idx5].copy())
# cv2.imwrite('test_imgs/'+'z'+'_'+str(num_data)+'_'+str(len(images_nodule_free))+'_shitto.jpg', data[idx0:idx1, idx2:idx3, idx4+(idx5-idx4)//2,0])
rand_layer_cnt += 1
if rand_layer_cnt == min(len(rand_layers_z), len(rand_layers_y), len(rand_layers_x)): break
# loop over all images and labels sprang out from nodule
cropped_images_lsts = {'nodules': [images, nodules_extract_coords_lst, num_nodule_pakets_lst, nodule_id_lst], 'nodule_free': [images_nodule_free, [None]*len(images_nodule_free), [1]*len(images_nodule_free), [-1]*len(images_nodule_free)]}
for cropped_images_lst_key in cropped_images_lsts.keys():
zipped_lsts = cropped_images_lsts[cropped_images_lst_key]
# loop over all pakets in images,..
for img_cnt in range(len(zipped_lsts[0])):
org_img = zipped_lsts[0][img_cnt][:,:,:,:1]
org_lab = zipped_lsts[0][img_cnt][:,:,:,1:3]
nodule_box_coords_in_extract = zipped_lsts[1][img_cnt]
num_nodule_pakets = zipped_lsts[2][img_cnt]
nodule_id = zipped_lsts[3][img_cnt]
img = np.zeros(crop_size+[num_channels]+[1], dtype=np.uint8)
lab = np.zeros(crop_size+[1]+[2], dtype=np.uint8) # first channel ist label to predict, second channel for drawing center
# crop or pad org_img
img_coords = []
org_coords = []
if patient_json['nodule_patient']:
# crop random around nodule or pad black
# ensure that nodule is within crop
for idx in range(2):
if crop_size[idx] < org_img.shape[idx]:
if patient_json['nodule_patient']:
img_coords.append(0)
img_coords.append(crop_size[idx])
if (nodule_box_coords_in_extract[idx*2+1]-nodule_box_coords_in_extract[idx*2])<crop_size[idx]:
start_random = max(0,min(org_img.shape[idx]-crop_size[idx], nodule_box_coords_in_extract[idx*2+1]-(crop_size[idx])))
end_random = max(start_random+1, min(nodule_box_coords_in_extract[idx*2], org_img.shape[idx]-crop_size[idx]))
org_coords.append(np.random.randint(start_random, end_random))
org_coords.append(org_coords[-1]+crop_size[idx])
else:
org_coords.append(np.random.randint(0, org_img.shape[idx]-crop_size[idx]))
org_coords.append(org_coords[-1]+crop_size[idx])
else:
img_coords.append(0)
img_coords.append(crop_size[idx])
org_coords.append(np.random.randint(0, org_img.shape[idx]-crop_size[idx]))
org_coords.append(org_coords[-1]+crop_size[idx])
elif crop_size[idx] >= org_img.shape[idx]:
img_coords.append((crop_size[idx]-org_img.shape[idx])/2)
img_coords.append(img_coords[-1]+org_img.shape[idx])
org_coords.append(0)
org_coords.append(org_img.shape[idx])
else:
# crop or pad negative_img
for idx in range(2):
if org_img.shape[idx] >= img.shape[idx]:
# start
img_coords.append(0)
org_coords.append((org_img.shape[idx]-img.shape[idx])//2)
# end
img_coords.append(img.shape[idx])
org_coords.append(org_coords[-1]+img.shape[idx])
else:
# start
img_coords.append((img.shape[idx]-org_img.shape[idx])//2)
org_coords.append(0)
# end
img_coords.append(img_coords[-1]+org_img.shape[idx])
org_coords.append(org_img.shape[idx])
img_coords = [int(x) for x in img_coords]
org_coords = [int(x) for x in org_coords]
img[img_coords[0]:img_coords[1], img_coords[2]:img_coords[3], :] = org_img[org_coords[0]:org_coords[1], org_coords[2]:org_coords[3]].copy()
lab[img_coords[0]:img_coords[1], img_coords[2]:img_coords[3], 0] = org_lab[org_coords[0]:org_coords[1], org_coords[2]:org_coords[3], org_lab.shape[2]//2].copy()
# stack img and lab and include in all_data lst
stacked_data[:,:,:img.shape[-2],:1] = img.copy()
stacked_data[:,:,:lab.shape[-2],1:3] = lab.copy()
all_data.append(stacked_data.copy())
# write info to out_lst
out_lst.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(num_data, patient, nodule_id, patient_slice_num, 1 if cropped_images_lst_key=='nodules' else 0, num_nodule_pakets))
num_data += 1
patient_slice_num += 1
# if 0 and cropped_images_lst_key=='nodules':
# randy = np.random.randint(0,10000)
# cv2.imwrite('test_imgs/'+str(randy)+'_img.jpg', stacked_data[:,:,stacked_data.shape[2]//2,0])
# cv2.imwrite('test_imgs/'+str(randy)+'_lab.jpg', stacked_data[:,:,stacked_data.shape[2]//2,1])
# cv2.imwrite('test_imgs/'+str(randy)+'_center.jpg', stacked_data[:,:,stacked_data.shape[2]//2,2])
# if num_data == 3000 and 1:
# out_path = pipe.save_array(basename=lst_type+'.npy' , array=np.array(all_data, dtype=np.uint8), step_name='gen_nodule_seg_data')
# out_lst.close()
# sys.exit()
pipe.log.info('{} num data: {}'.format(lst_type, num_data))
print ('saving npy-construct')
out_path = pipe.save_array(basename=lst_type+'.npy' , array=np.array(all_data, dtype=np.uint8), step_name='gen_nodule_seg_data')
# load list and save again with headers
out_lst = pd.read_csv(lst_out_path, header=None, sep='\t')
out_lst = out_lst.rename(columns={0:'ongoing_num',1:'id', 2:'nodule_id', 3:'patient_slice_num', 4:'is_nodule', 5:'num_nodule_pakets'})
# calculate num_nodule_compensation_factor based on biggest nodule
out_lst['compensation_factor'] = np.max(out_lst['num_nodule_pakets'].values.tolist())/out_lst['num_nodule_pakets']
# compensation_factor for negatives (non-nodule crops) is set to 1
out_lst.loc[out_lst['is_nodule']==0, 'compensation_factor'] = 1
print ('maximal compensation_factor', np.max(out_lst['compensation_factor'].values))
# save
out_lst.to_csv('/'.join(lst_out_path.split('/')[:-1])+ '/' + lst_out_path.split('/')[-1].split('.')[0]+'_DF.csv', index=False,sep='\t')
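# Possible downstream use of 'compensation_factor' (an assumption, not shown in
# this script): treat it as a per-sample weight so that nodules split into many
# packets are not over-represented when batches are drawn, e.g.:
#   weights = out_lst['compensation_factor'] / out_lst['compensation_factor'].sum()
#   batch = out_lst.sample(n=64, replace=True, weights=weights, random_state=0)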
|
from rjgtoys.cli import Command
class GreeterBase(Command):
DEFAULT_NAME = "you"
def _arg_name(self, p):
p.add_argument(
'--name',
type=str,
help="Name of the person to greet",
default=self.DEFAULT_NAME
)
def run(self, args):
print(f"Hello {args.name}!")
|
from rest_framework import exceptions, serializers
from api.v2.serializers.summaries import StatusTypeSummarySerializer
from core.models import StatusType
class StatusTypeRelatedField(serializers.RelatedField):
def get_queryset(self):
return StatusType.objects.all()
def to_representation(self, status_type):
serializer = StatusTypeSummarySerializer(
status_type,
context=self.context)
return serializer.data
def to_internal_value(self, data):
queryset = self.get_queryset()
if isinstance(data, dict):
identity = data.get("id", None)
else:
identity = data
try:
return queryset.get(id=identity)
except (StatusType.DoesNotExist, ValueError, TypeError):
raise exceptions.ValidationError(
"StatusType with id '%s' does not exist."
% identity
)
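# Usage sketch (hypothetical serializer, not part of this module; DRF's
# RelatedField still expects a queryset kwarg even though get_queryset() is
# overridden above):
#
#     class InstanceStatusSerializer(serializers.Serializer):
#         status_type = StatusTypeRelatedField(queryset=StatusType.objects.all())
#
# On input the field accepts either {"id": <pk>} or a bare <pk>; on output it
# returns the StatusTypeSummarySerializer representation.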
|
from test_all_fixers import lib3to2FixerTestCase
class Test_printfunction(lib3to2FixerTestCase):
fixer = u"printfunction"
def test_generic(self):
b = u"""print()"""
a = u"""from __future__ import print_function\nprint()"""
self.check(b, a)
def test_literal(self):
b = u"""print('spam')"""
a = u"""from __future__ import print_function\nprint('spam')"""
self.check(b, a)
def test_not_builtin_unchanged(self):
s = u"this.shouldnt.be.changed.because.it.isnt.builtin.print()"
self.unchanged(s)
# XXX: Quoting this differently than triple-quotes, because with newline
# XXX: setting, I can't quite get the triple-quoted versions to line up.
def test_arbitrary_printing(self):
b = u"import dinosaur.skull\nimport sys\nprint"\
u"(skull.jaw, skull.jaw.biteforce, file=sys.stderr)"
a = u"from __future__ import print_function\n"\
u"import dinosaur.skull\nimport sys\nprint"\
u"(skull.jaw, skull.jaw.biteforce, file=sys.stderr)"
self.check(b, a)
def test_long_arglist(self):
b = u"print(spam, spam, spam, spam, spam, baked_beans, spam, spam,"\
u"spam, spam, sep=', spam, ', end=wonderful_spam)\nprint()"
a = u"from __future__ import print_function\n"\
u"print(spam, spam, spam, spam, spam, baked_beans, spam, spam,"\
u"spam, spam, sep=', spam, ', end=wonderful_spam)\nprint()"
self.check(b, a)
|
from django import forms
from utils.api.client import MarketAccessAPIClient
from utils.forms import (
ChoiceFieldWithHelpText,
ClearableMixin,
DayMonthYearField,
MultipleChoiceFieldWithHelpText,
YesNoBooleanField,
YesNoDontKnowBooleanField,
)
from .mixins import APIFormMixin
class UpdateCommercialValueForm(APIFormMixin, forms.Form):
commercial_value = forms.IntegerField(
min_value=0,
max_value=1000000000000,
localize=True,
label="What is the value of the barrier to the affected business(es) in GBP?",
error_messages={
"required": "Enter a value",
"min_value": "Enter a valid number",
"max_value": "Enter a valid number",
},
)
commercial_value_explanation = forms.CharField(
widget=forms.Textarea,
error_messages={"required": "Enter a value description and timescale"},
)
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(id=self.id, **self.cleaned_data)
class UpdateBarrierTitleForm(APIFormMixin, forms.Form):
title = forms.CharField(
label="Suggest a title for this barrier",
help_text=(
"Include both the title or service name and the country being "
"exported to, for example, Import quotas for steel rods in India."
),
max_length=255,
error_messages={
"max_length": "Title should be %(limit_value)d characters or fewer",
"required": "Enter a title for this barrier",
},
)
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(id=self.id, title=self.cleaned_data["title"])
class UpdateBarrierProductForm(APIFormMixin, forms.Form):
product = forms.CharField(
label="What product or service is being exported?",
max_length=255,
error_messages={
"max_length": "Product or service should be %(limit_value)d characters or fewer",
"required": "Enter a product or service",
},
)
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(id=self.id, product=self.cleaned_data["product"])
class UpdateBarrierSummaryForm(APIFormMixin, forms.Form):
summary = forms.CharField(
label="Give us a summary of the barrier and how you found out about it",
widget=forms.Textarea,
error_messages={"required": "Enter a brief description for this barrier"},
)
is_summary_sensitive = YesNoBooleanField(
label="Does the summary contain OFFICIAL-SENSITIVE information?",
error_messages={
"required": (
"Indicate if summary contains OFFICIAL-SENSITIVE information or not"
)
},
)
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id,
summary=self.cleaned_data["summary"],
is_summary_sensitive=self.cleaned_data["is_summary_sensitive"],
)
class UpdateBarrierSourceForm(APIFormMixin, forms.Form):
CHOICES = [
("COMPANY", "Company"),
("TRADE", "Trade association"),
("GOVT", "Government entity"),
("OTHER", "Other "),
]
source = forms.ChoiceField(
label="How did you find out about the barrier?",
choices=CHOICES,
widget=forms.RadioSelect,
error_messages={"required": "Select how you became aware of the barrier"},
)
other_source = forms.CharField(
label="Please specify",
required=False,
max_length=255,
error_messages={
"max_length": "Other source should be %(limit_value)d characters or fewer",
},
)
def clean(self):
cleaned_data = super().clean()
source = cleaned_data.get("source")
other_source = cleaned_data.get("other_source")
if source == "OTHER":
if not other_source and "other_source" not in self.errors:
self.add_error(
"other_source", "Enter how you became aware of the barrier"
)
else:
cleaned_data["other_source"] = ""
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id,
source=self.cleaned_data["source"],
other_source=self.cleaned_data["other_source"],
)
class UpdateBarrierPriorityForm(APIFormMixin, forms.Form):
CHOICES = [
("UNKNOWN", "<strong>Unknown</strong> priority"),
("HIGH", "<strong>High</strong> priority"),
("MEDIUM", "<strong>Medium</strong> priority"),
("LOW", "<strong>Low</strong> priority"),
]
priority = forms.ChoiceField(
label="What is the priority of the barrier?",
choices=CHOICES,
widget=forms.RadioSelect,
error_messages={"required": "Select a barrier priority"},
)
priority_summary = forms.CharField(
label="Why did the priority change? (optional)",
widget=forms.Textarea,
required=False,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
initial_priority = kwargs.get("initial", {}).get("priority")
if initial_priority == "UNKNOWN":
self.fields[
"priority_summary"
].label = "Why did you choose this priority? (optional)"
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id,
priority=self.cleaned_data["priority"],
priority_summary=self.cleaned_data["priority_summary"],
)
class UpdateBarrierTermForm(APIFormMixin, forms.Form):
CHOICES = [
(
1,
"A procedural, short-term barrier",
"for example, goods stuck at the border or documentation issue",
),
(
2,
"A long-term strategic barrier",
"for example, a change of regulation",
),
]
term = ChoiceFieldWithHelpText(
label="What is the scope of the barrier?",
choices=CHOICES,
widget=forms.RadioSelect,
error_messages={"required": "Select a barrier scope"},
)
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(id=self.id, term=self.cleaned_data["term"])
class UpdateBarrierEndDateForm(ClearableMixin, APIFormMixin, forms.Form):
end_date = DayMonthYearField(
label="End date",
help_text=(
"For example, 30 11 2020. If you don't know the day, please enter 1 for "
"the first of the month."
),
error_messages={"required": "Enter the end date"},
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
end_date = kwargs.get("initial", {}).get("end_date")
if end_date:
self.fields["end_date"].label = "Change end date"
def clean_end_date(self):
return self.cleaned_data["end_date"].isoformat()
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id,
end_date=self.cleaned_data.get("end_date"),
)
class UpdateBarrierTagsForm(APIFormMixin, forms.Form):
tags = MultipleChoiceFieldWithHelpText(
label="Is this issue caused by or related to any of the following?",
choices=[],
required=False,
)
def __init__(self, tags, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["tags"].choices = tags
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(id=self.id, tags=self.cleaned_data["tags"])
class UpdateTradeDirectionForm(APIFormMixin, forms.Form):
trade_direction = forms.ChoiceField(
label="Which trade direction does this barrier affect?",
choices=[],
widget=forms.RadioSelect,
error_messages={"required": "Select a trade direction"},
)
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id, trade_direction=self.cleaned_data["trade_direction"]
)
def __init__(self, trade_direction_choices, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["trade_direction"].choices = trade_direction_choices
class CausedByTradingBlocForm(forms.Form):
caused_by_trading_bloc = YesNoDontKnowBooleanField(
label="",
error_messages={
"required": ("Indicate if the barrier was caused by the trading bloc")
},
)
def __init__(self, trading_bloc, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["caused_by_trading_bloc"].label = (
f"Was this barrier caused by a regulation introduced by "
f"{trading_bloc['short_name']}?"
)
self.fields["caused_by_trading_bloc"].help_text = self.get_help_text(
trading_bloc.get("code")
)
def get_help_text(self, trading_bloc_code):
help_text = {
"TB00016": (
"Yes should be selected if the barrier is a local application of an EU "
"regulation. If it is an EU-wide barrier, the country location should "
"be changed to EU in the location screen."
)
}
return help_text.get(trading_bloc_code, "")
class UpdateCausedByTradingBlocForm(APIFormMixin, CausedByTradingBlocForm):
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id,
caused_by_trading_bloc=self.cleaned_data["caused_by_trading_bloc"],
)
class UpdateEconomicAssessmentEligibilityForm(APIFormMixin, forms.Form):
economic_assessment_eligibility = YesNoBooleanField(
label="Is the barrier eligible for an initial economic assessment?",
error_messages={
"required": (
"Select yes if the barrier is eligible for an initial economic assessment"
)
},
)
economic_assessment_eligibility_summary = forms.CharField(
label="Why is this barrier not eligible for an initial economic assessment?",
help_text="Please explain why this barrier is not eligible",
max_length=1500,
widget=forms.Textarea,
required=False,
)
def clean(self):
cleaned_data = super().clean()
economic_assessment_eligibility = cleaned_data.get(
"economic_assessment_eligibility"
)
economic_assessment_eligibility_summary = cleaned_data.get(
"economic_assessment_eligibility_summary"
)
if economic_assessment_eligibility is False:
if not economic_assessment_eligibility_summary:
self.add_error(
"economic_assessment_eligibility_summary",
"Enter why this barrier is not eligible",
)
else:
cleaned_data["economic_assessment_eligibility_summary"] = ""
def save(self):
client = MarketAccessAPIClient(self.token)
client.barriers.patch(
id=self.id,
economic_assessment_eligibility=self.cleaned_data[
"economic_assessment_eligibility"
],
economic_assessment_eligibility_summary=self.cleaned_data[
"economic_assessment_eligibility_summary"
],
)
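# Usage sketch for the shared pattern above (hedged: APIFormMixin is assumed to
# accept the token/id keyword arguments used in save(), and to tolerate
# validation-only use):
#
#     form = UpdateBarrierSourceForm(data={"source": "OTHER", "other_source": ""})
#     form.is_valid()   # False: clean() adds an error asking how the barrier was found
#
#     form = UpdateBarrierSourceForm(data={"source": "COMPANY", "other_source": "x"})
#     form.is_valid()   # True: clean() blanks other_source because source != "OTHER"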
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import os
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import Program
from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.executor import Executor
from paddle.fluid.parallel_executor import ParallelExecutor
from paddle.fluid.framework import Variable, Parameter
from .runtime_base import RuntimeBase
from ..base.private_helper_function import wait_server_ready
__all__ = []
def conv_indent(indent):
return "".join([" "] * indent)
PSERVER_SAVE_SUFFIX = ".shard"
def parse_table_class(varname, o_main_program):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_sparse_op
for op in o_main_program.global_block().ops:
if not is_distributed_sparse_op(op) and not is_sparse_op(op):
continue
param_name = op.input("W")[0]
if param_name == varname and (op.type == "lookup_table" or op.type == "lookup_table_v2"):
if op.has_attr('table_class') and op.attr("table_class") != "none":
return op.attr('table_class')
else:
return "CommonSparseTable"
class Accessor:
def __init__(self):
self.accessor_class = ""
self.optimizer = None
self.feature_dim = -1
self.embedding_dim = -1
def to_string(self, indent):
accessor_str = "{}accessor {{{}\n{}}}"
attrs = ""
attrs += "accessor_class: \"{}\" ".format(self.accessor_class)
attrs += "fea_dim: {} ".format(self.feature_dim)
attrs += "embedx_dim: {} ".format(self.embedding_dim)
attrs += "\n"
if self.optimizer is not None:
attrs += self.optimizer.to_string(indent)
return accessor_str.format(
conv_indent(indent), attrs, conv_indent(indent))
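# For reference, to_string(indent=2) emits a proto-text fragment shaped like:
#   accessor {accessor_class: "CommMergeAccessor" fea_dim: 10 embedx_dim: 8
#   }
# (the values above are illustrative only).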
class CommonAccessor:
def __init__(self):
self.accessor_class = ""
self.table_name = None
self.entry = None
self.attrs = []
self.params = []
self.dims = []
self.trainer_num = 0
self.sync = "false"
self.initializers = []
self.opt_input_map = {}
self.opt_attr_map = {}
self.opt_init_map = {}
self.define_optimize_map()
def define_optimize_map(self):
opt_input_map = {}
opt_input_map["sgd"] = [("Param", None), ("LearningRate", 1)]
opt_input_map["adam"] = [("Param", None), ("Moment1", None),
("Moment2", None), ("Beta1Pow", 1),
("Beta2Pow", 1), ("LearningRate", 1)]
opt_input_map["sum"] = [("Param", None)]
opt_input_map["naive_adagrad"] = [("Param", None), ("G2Sum", 1),
("LearningRate", 1)]
opt_attr_map = {}
opt_attr_map["sgd"] = []
opt_attr_map["sum"] = []
opt_attr_map["naive_adagrad"] = []
opt_attr_map["adam"] = [("beta1", "f"), ("beta2", "f"),
("epsilon", "f")]
opt_init_map = {}
opt_init_map["gaussian_random"] = ["seed", "mean", "std"]
opt_init_map["fill_constant"] = ["value"]
opt_init_map["uniform_random"] = ["seed", "min", "max"]
opt_init_map["truncated_gaussian_random"] = ["seed", "mean", "std"]
self.opt_attr_map = opt_attr_map
self.opt_input_map = opt_input_map
self.opt_init_map = opt_init_map
def parse_entry(self, varname, o_main_program):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_distributed_sparse_op
from paddle.fluid.incubate.fleet.parameter_server.ir.public import is_sparse_op
for op in o_main_program.global_block().ops:
if not is_distributed_sparse_op(op) and not is_sparse_op(op):
continue
param_name = op.input("W")[0]
if param_name == varname and op.type == "lookup_table":
self.entry = op.attr('entry')
break
if param_name == varname and op.type == "lookup_table_v2":
self.entry = "none"
break
def get_shard(self, total_dim, shard_num, pserver_id):
# remainder = total_dim % shard_num
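# Each pserver owns a contiguous block of rows: with e.g. total_dim=10 and
# shard_num=3, blocksize is 4, so pservers 0 and 1 hold 4 rows each and
# pserver 2 holds the remaining 2.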
blocksize = int(total_dim / shard_num + 1)
if blocksize * (pserver_id + 1) <= total_dim:
return blocksize
else:
if blocksize * pserver_id < total_dim:
return total_dim - blocksize * pserver_id
else:
return 0
def get_initializer_attr(self, value_name, o_startup_program):
l_in = "&"
attr_str = ""
origin_var_name = value_name
for op in o_startup_program.global_block().ops:
if op.type in self.opt_init_map.keys(
) and origin_var_name == op.output("Out")[0]:
init_attr = [op.type]
for attr in self.opt_init_map[op.type]:
init_attr.append(str(op.attr(attr)))
attr_str = l_in.join(init_attr)
break
return attr_str
def parse_by_optimizer(self, grad_name, is_sparse, total_dims,
compiled_strategy):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops
param_name = compiled_strategy.grad_name_to_param_name[grad_name]
main_program, startup_program = compiled_strategy.get_origin_programs()
pserver_id = compiled_strategy.get_role_id()
pserver_num = len(compiled_strategy.get_ps_endpoints())
optimizer_ops = _get_optimize_ops(main_program)
oop = None
for op in optimizer_ops:
if ("Param" in op.input_names) and (
op.input("Param")[0] == param_name):
oop = op
break
if oop is None:
raise ValueError("can not find optimizer for {}".format(grad_name))
params = []
dims = []
attrs = []
initializers = []
self.trainer_num = compiled_strategy.get_trainers()
if compiled_strategy.is_geo_mode():
param_varnames = self.opt_input_map["sum"]
attr_varnames = self.opt_attr_map["sum"]
self.accessor_class = "sum"
elif compiled_strategy.use_ps_gpu and is_sparse:
param_varnames = self.opt_input_map["naive_adagrad"]
attr_varnames = self.opt_attr_map["naive_adagrad"]
self.accessor_class = "sgd"
else:
param_varnames = self.opt_input_map[oop.type]
attr_varnames = self.opt_attr_map[oop.type]
self.accessor_class = oop.type
for (formal_name, shape) in param_varnames:
params.append(formal_name)
if formal_name == "G2Sum":
dims.append(1)
initializer = "fill_constant&0"
initializers.append(initializer)
else:
param = main_program.global_block().vars[oop.input(formal_name)[
0]]
if formal_name == "LearningRate" and param.name != "learning_rate_0":
warnings.warn("will support decay soon")
param = main_program.global_block().vars["learning_rate_0"]
if shape is None:
if is_sparse:
shape = total_dims
else:
shape = self.get_shard(total_dims, pserver_num,
pserver_id)
dims.append(shape)
initializer = self.get_initializer_attr(param.name,
startup_program)
initializers.append(initializer)
for (attr_varname, type_) in attr_varnames:
value = oop.attr(attr_varname)
attrs.append("&".join([attr_varname, type_, str(value)]))
self.params = params
self.dims = dims
self.initializers = initializers
self.attrs = attrs
def to_string(self, indent):
accessor_str = "{}common {{{}\n{}}}"
attrs = ""
attrs += "name: \"{}\" ".format(self.accessor_class)
if self.table_name:
attrs += "table_name: \"{}\" ".format(self.table_name)
if self.entry:
attrs += "entry: \"{}\" ".format(self.entry)
attrs += "trainer_num: {} ".format(self.trainer_num)
attrs += "sync: {} ".format(self.sync)
for param in self.params:
attrs += "params: \"{}\" ".format(param)
for dim in self.dims:
attrs += "dims: {} ".format(dim)
for initializer in self.initializers:
attrs += "initializers: \"{}\" ".format(initializer)
attrs += "\n"
return accessor_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class Tensor:
def __init__(self):
self.main_program_id = None
self.startup_program_id = None
self.feed_var_name = None
self.fetch_var_name = None
self.tensor_table_class = False
def to_string(self, indent):
program_str = "{}tensor {{{}\n{}}}"
attrs = ""
attrs += "feed_var_name: \"{}\" ".format(str(self.feed_var_name))
attrs += "fetch_var_name: \"{}\" ".format(str(self.fetch_var_name))
attrs += "startup_program_id: {} ".format(str(self.startup_program_id))
attrs += "main_program_id: {} ".format(str(self.main_program_id))
attrs += "tensor_table_class: \"{}\" ".format(
str(self.tensor_table_class))
attrs += "\n"
return program_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class Table:
def __init__(self):
self.id = -1
self.table_class = None
self.shard_num = -1
self.type = None
self.accessor = None
self.common = None
self.tensor = None
def to_string(self, indent):
table_str = "{}downpour_table_param {{{}\n{}}}"
attrs = ""
attrs += "table_id: {} ".format(self.id)
attrs += "table_class: \"{}\" ".format(self.table_class)
attrs += "shard_num: {} ".format(self.shard_num)
attrs += "type: {}".format(self.type)
attrs += "\n"
indent += 2
if self.accessor is not None:
attrs += self.accessor.to_string(indent)
attrs += "\n"
if self.tensor is not None:
attrs += self.tensor.to_string(indent)
attrs += "\n"
if self.common is not None:
attrs += self.common.to_string(indent)
attrs += "\n"
return table_str.format(conv_indent(indent), attrs, conv_indent(indent))
class Service:
def __init__(self):
self.server_class = "BrpcPsServer"
self.client_class = "BrpcPsClient"
self.service_class = "BrpcPsService"
self.start_server_port = 0
self.server_thread_num = 12
def to_string(self, indent):
service_str = "{}service_param {{{}\n{}}}"
attrs = ""
attrs += "server_class: \"{}\" ".format(self.server_class)
attrs += "client_class: \"{}\" ".format(self.client_class)
attrs += "service_class: \"{}\" ".format(self.service_class)
attrs += "start_server_port: {} ".format(self.start_server_port)
attrs += "server_thread_num: {} ".format(self.server_thread_num)
return service_str.format(
conv_indent(indent), attrs, conv_indent(indent))
class DownpourServer:
def __init__(self):
self.service = None
self.tables = []
def set_service_param(self, service):
self.service = service
def append_tables(self, table):
if not isinstance(table, Table):
raise ValueError("only support instance Table")
self.tables.append(table)
def to_string(self, indent):
server_str = "{}downpour_server_param {{{}\n{}}}"
table_strs = ""
indent += 2
table_strs += "\n"
table_strs += self.service.to_string(indent)
for table in self.tables:
table_strs += "\n"
table_strs += table.to_string(indent)
return server_str.format(
conv_indent(indent), table_strs, conv_indent(indent))
class Server:
def __init__(self):
self.servers = []
def add_server(self, server):
if not isinstance(server, DownpourServer):
raise ValueError("only support instance DownpourServer")
self.servers.append(server)
def __str__(self):
server_str = "server_param {{{}\n}}"
indent = 2
servers_str = ""
for server in self.servers:
servers_str += "\n"
servers_str += server.to_string(indent)
return server_str.format(servers_str)
class DownpourWorker:
def __init__(self):
self.tables = []
def append_tables(self, table):
if not isinstance(table, Table):
raise ValueError("only support instance Table")
self.tables.append(table)
def to_string(self, indent):
worker_str = "{}downpour_worker_param {{{}\n{}}}"
table_strs = ""
indent += 2
for table in self.tables:
table_strs += "\n"
table_strs += table.to_string(indent)
return worker_str.format(
conv_indent(indent), table_strs, conv_indent(indent))
class Worker:
def __init__(self):
self.workers = []
def add_worker(self, worker):
if not isinstance(worker, DownpourWorker):
raise ValueError("only support instance DownpourWorker")
self.workers.append(worker)
def __str__(self):
worker_str = "worker_param {{{}\n}}"
indent = 2
workers_str = ""
for worker in self.workers:
workers_str += "\n"
workers_str += worker.to_string(indent)
return worker_str.format(workers_str)
class TheOnePSRuntime(RuntimeBase):
def __init__(self):
super(TheOnePSRuntime, self).__init__()
self._communicator = None
self._server = None
self._worker = fluid.core.DistFleetWrapper()
self._server_sub_program = []
self._heter_client = None
def _set_basic_info(self, context):
self.context = context
self.role_maker = context["role_maker"]
self.origin_main_program = context["origin_main_program"]
self.origin_startup_program = context["origin_startup_program"]
self.async_strategy = self._get_distributed_strategy()
self.compiled_strategy = self.build_compiled_startegy()
def _get_distributed_strategy(self):
strategy = None
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
StrategyFactory
dist_strategy = self.context["valid_strategy"]
k_steps = dist_strategy.a_sync_configs["k_steps"]
if not dist_strategy.a_sync and k_steps == 0:
strategy = StrategyFactory.create_sync_strategy()
if dist_strategy.a_sync and k_steps == 0:
strategy = StrategyFactory.create_async_strategy()
if dist_strategy.a_sync and k_steps > 0:
strategy = StrategyFactory.create_geo_strategy(k_steps)
if not strategy:
raise ValueError("k_steps must be invalid value, please check")
if dist_strategy.a_sync_configs["use_ps_gpu"]:
strategy.use_ps_gpu = True
return strategy
def build_compiled_startegy(self):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import CompileTimeStrategy
compiled_config = CompileTimeStrategy(
self.origin_main_program, self.origin_main_program,
self.async_strategy, self.role_maker)
if self.async_strategy.use_ps_gpu:
compiled_config.use_ps_gpu = True
return compiled_config
def _init_worker(self):
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import \
SyncStrategy, GeoStrategy
is_sync = self.compiled_strategy.is_sync_mode()
worker = self._get_fleet_proto(is_server=False, is_sync=is_sync)
server = self._get_fleet_proto(is_server=True, is_sync=is_sync)
dist_strategy = self.context["valid_strategy"]
use_ps_gpu = dist_strategy.a_sync_configs["use_ps_gpu"]
if use_ps_gpu:
main_program = self.context['loss'].block.program
if not main_program._fleet_opt:
main_program._fleet_opt = {}
main_program._fleet_opt["use_ps_gpu"] = True
gpus_env = os.getenv("FLAGS_selected_gpus")
main_program._fleet_opt[
"worker_places"] = [int(s) for s in gpus_env.split(",")]
def sync_strategy_envs():
kwargs = {}
kwargs[
"pserver_endpoints"] = self.role_maker._get_pserver_endpoints()
kwargs["trainer_id"] = self.role_maker._worker_index()
return kwargs
proto_txt = str(worker) + "\n" + str(server)
debug = bool(int(os.getenv("PSERVER_DEBUG", "0")))
if debug:
print("worker: \n{}".format(proto_txt))
endpoints = self.compiled_strategy.get_ps_endpoints()
string_hosts = []
for idx, ep in enumerate(endpoints):
host, port = ep.split(":")
pshost = fluid.core.PSHost(host, int(port), idx)
string_hosts.append(pshost.serialize_to_string())
dense_map = self.compiled_strategy.get_the_one_recv_context(
split_dense_table=self.role_maker._is_heter_parameter_server_mode)
send_ctx = self.compiled_strategy.get_the_one_send_context(
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=self.role_maker._is_heter_parameter_server_mode,
ep_list=endpoints)
trainer_config = self.async_strategy.get_trainer_runtime_config()
debug = bool(int(os.getenv("PSERVER_DEBUG", "0")))
if debug:
print("worker: \n{}".format(proto_txt))
print("communicator send_ctx:")
for key in send_ctx:
print("{}: {}".format(key, send_ctx[key]))
for key in dense_map:
print("{}: {}".format(key, dense_map[key]))
kwargs = {}
kwargs['need_global_step'] = "0"
kwargs["trainer_id"] = self.role_maker._role_id()
kwargs["trainers"] = self.role_maker._worker_num()
#if self.role_maker._is_heter_worker():
# kwargs["trainer_id"] += kwargs["trainers"]
for table in server.servers[0].tables:
if table.table_class == "BarrierTable":
kwargs["barrier_table_id"] = table.id
break
if isinstance(self.async_strategy, SyncStrategy):
sync_kwargs = sync_strategy_envs()
kwargs.update(sync_kwargs)
from paddle.fluid.communicator import Communicator, HeterClient
self._communicator = Communicator(
trainer_config.mode, kwargs,
trainer_config.get_communicator_flags())
self._communicator.init_with_ctx(send_ctx, dense_map, proto_txt,
string_hosts, fluid.global_scope())
dist_strategy = self.context["valid_strategy"]
is_test = bool(int(os.getenv("TEST_MODE", "0")))
if self.role_maker._is_first_worker(
) and self.role_maker._is_heter_parameter_server_mode:
# for ps-heter mode load all parameters on first_worker
init_params = self.compiled_strategy.get_the_one_recv_context(
split_dense_table=True, use_origin_program=True)
else:
init_params = dense_map
import paddle.distributed.fleet as fleet
if not is_test:
self._communicator.init_params(init_params)
fleet.util.barrier()
self._communicator.pull_dense(init_params)
fleet.util.barrier()
if not self._communicator.is_running():
self._communicator.start()
else:
warnings.warn("communicator has been initialized, skip")
launch_barrier = dist_strategy.a_sync_configs["launch_barrier"]
launch_barrier_flag = int(os.getenv("FLAGS_LAUNCH_BARRIER", "1"))
if launch_barrier and launch_barrier_flag:
# for trainer wait server ready
wait_server_ready(self.role_maker._get_pserver_endpoints())
if self.role_maker._is_heter_parameter_server_mode and self.role_maker._get_next_trainers(
) != []:
wait_server_ready(self.role_maker._get_next_trainers())
if self.role_maker._is_heter_parameter_server_mode:
previous_trainers = []
if self.role_maker._get_previous_trainers() != []:
previous_trainers = self.role_maker._get_previous_trainers()
next_trainers = []
if self.role_maker._get_next_trainers() != []:
next_trainers = self.role_maker._get_next_trainers()
self._heter_client = HeterClient(next_trainers,
previous_trainers,
self.role_maker._role_id())
def _push_sparse_param(self,
var_name,
table_id=-1,
scope=fluid.global_scope()):
self._communicator.push_sparse_param(var_name, table_id, scope)
def _get_executor(self):
executor = fluid.Executor(fluid.CPUPlace())
if self.role_maker._is_heter_parameter_server_mode:
if self.role_maker._is_heter_worker():
heter_device_type = self.role_maker._heter_device_type().upper()
if heter_device_type not in ["GPU", "XPU", "CPU"]:
raise ValueError("Heter Worker Not Support Device {}".
format(device_type))
if heter_device_type == "GPU":
executor = Executor(
fluid.CUDAPlace(
int(os.getenv("FLAGS_selected_gpus", "0"))))
elif heter_device_type == "XPU":
executor = Executor(
fluid.XPUPlace(
int(os.getenv("FLAGS_selected_xpus", "0"))))
return executor
def _get_fleet_proto(self, is_server, is_sync):
def _build_merge_accessor(ctx):
accessor = Accessor()
accessor.accessor_class = "CommMergeAccessor"
accessor.optimizer = None
if ctx.is_sparse():
accessor.feature_dim = ctx.sections()[0]
accessor.embedding_dim = ctx.sections()[1]
else:
accessor.feature_dim = ctx.sections()[0]
accessor.embedding_dim = 1
return accessor
def _build_barrier_table(idx):
table = Table()
table.id = idx
table.type = "PS_OTHER_TABLE"
table.table_class = "BarrierTable"
table.shard_num = 256
accessor = Accessor()
accessor.accessor_class = "CommMergeAccessor"
accessor.optimizer = None
accessor.feature_dim = 0
accessor.embedding_dim = 0
table.accessor = accessor
common = CommonAccessor()
common.table_name = "barrier_table"
trainer_num = self.compiled_strategy.get_trainers()
if self.role_maker._is_heter_parameter_server_mode:
trainer_num += len(self.role_maker._get_heter_worker_endpoints(
))
common.trainer_num = trainer_num
common.attrs = ""
common.dims = []
common.params = []
table.common = common
return table
def _build_tensor_table(idx, tensor_dict):
table = Table()
table.id = idx
table.type = "PS_OTHER_TABLE"
table.table_class = tensor_dict["tensor_table_class"]
table.shard_num = 256
accessor = Accessor()
accessor.accessor_class = "CommMergeAccessor"
accessor.optimizer = None
accessor.feature_dim = 0
accessor.embedding_dim = 0
table.accessor = accessor
common = CommonAccessor()
common.table_name = tensor_dict["feed_var_name"]
common.trainer_num = self.compiled_strategy.get_trainers()
common.attrs = ""
common.dims = []
common.params = []
table.common = common
tensor = Tensor()
tensor.main_program_id = tensor_dict["main_program_id"]
tensor.startup_program_id = tensor_dict["startup_program_id"]
tensor.feed_var_name = tensor_dict["feed_var_name"]
tensor.fetch_var_name = tensor_dict["fetch_var_name"]
tensor.tensor_table_class = tensor_dict["tensor_table_class"]
table.tensor = tensor
return table
def _add_tensor_table(tables):
tensor_table_dict = self.compiled_strategy.get_tensor_table_dict()
program_idx = 0
for table_name in tensor_table_dict:
if tensor_table_dict[table_name]["startup_program"] != None:
tensor_table_dict[table_name][
"startup_program_id"] = program_idx
self._server_sub_program.append(tensor_table_dict[
table_name]["startup_program"].desc)
program_idx += 1
if tensor_table_dict[table_name]["main_program"] != None:
tensor_table_dict[table_name][
"main_program_id"] = program_idx
self._server_sub_program.append(tensor_table_dict[
table_name]["main_program"].desc)
program_idx += 1
# TODO: the table id for the lr_decay tensor table is currently hard-coded here
new_table = _build_tensor_table(
len(tables), tensor_table_dict[table_name])
tables.append(new_table)
return tables
def _get_tables():
send_ctx = self.compiled_strategy.get_the_one_send_context(
use_origin_program=True,
split_dense_table=self.role_maker.
_is_heter_parameter_server_mode)
tables = []
for idx, (name, ctx) in enumerate(send_ctx.items()):
if ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1:
continue
table = Table()
table.id = ctx.table_id()
common = CommonAccessor()
if ctx.is_sparse():
table.type = "PS_SPARSE_TABLE"
table.shard_num = 256
common.table_name = self.compiled_strategy.grad_name_to_param_name[
ctx.origin_varnames()[0]]
if self.compiled_strategy.is_geo_mode():
table.table_class = "SparseGeoTable"
else:
table.table_class = parse_table_class(
common.table_name, self.origin_main_program)
else:
table.type = "PS_DENSE_TABLE"
table.table_class = "CommonDenseTable"
table.shard_num = 256
common.table_name = "MergedDense"
common.parse_by_optimizer(ctx.origin_varnames()[0],
ctx.is_sparse(),
ctx.sections()[1] if ctx.is_sparse()
else ctx.sections()[0],
self.compiled_strategy)
if ctx.is_sparse():
common.parse_entry(common.table_name,
self.origin_main_program)
if is_sync:
common.sync = "true"
else:
common.sync = "false"
table.common = common
accessor = _build_merge_accessor(ctx)
table.accessor = accessor
tables.append(table)
tensor_table_dict = self.compiled_strategy.get_tensor_table_dict()
if len(tensor_table_dict) > 0:
tables = _add_tensor_table(tables)
else:
empty_program = Program()
self._server_sub_program.append(empty_program.desc)
barrier_table = _build_barrier_table(len(tables))
tables.append(barrier_table)
return tables
if is_server:
server = Server()
downpour_server = DownpourServer()
service = Service()
dist_strategy = self.context["valid_strategy"]
use_ps_gpu = dist_strategy.a_sync_configs["use_ps_gpu"]
if use_ps_gpu:
service.server_class = "PsLocalServer"
service.client_class = "PsLocalClient"
downpour_server.set_service_param(service)
tables = _get_tables()
downpour_server.tables = tables
server.add_server(downpour_server)
return server
else:
worker = Worker()
downpour_worker = DownpourWorker()
tables = _get_tables()
downpour_worker.tables = tables
worker.add_worker(downpour_worker)
return worker
def _init_server(self, dirname=None, var_names=None, **kwargs):
role_id = self.compiled_strategy.get_role_id()
endpoints = self.compiled_strategy.get_ps_endpoints()
is_sync = self.compiled_strategy.is_sync_mode()
trainers = self.compiled_strategy.get_trainers()
if self.role_maker._is_heter_parameter_server_mode:
trainers += len(self.role_maker._get_heter_worker_endpoints())
server = self._get_fleet_proto(is_server=True, is_sync=is_sync)
proto_txt = str(server)
debug = bool(int(os.getenv("PSERVER_DEBUG", "0")))
if debug:
print("server: \n{}".format(proto_txt))
string_hosts = []
for idx, ep in enumerate(endpoints):
host, port = ep.split(":")
pshost = fluid.core.PSHost(host, int(port), idx)
string_hosts.append(pshost.serialize_to_string())
self._server = fluid.core.DistFleetWrapper()
self._server.init_server(proto_txt, string_hosts, role_id, trainers,
self._server_sub_program)
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
dist_varnames = get_sparse_tablenames(self.origin_main_program, True)
sparse_varnames = get_sparse_tablenames(self.origin_main_program, False)
distributed_varnames = dist_varnames + sparse_varnames
if var_names is None:
load_varnames = distributed_varnames
else:
for var_name in var_names:
if var_name not in distributed_varnames:
raise ValueError(
"fleet.init_server can only load sparse variables from {}".
format(distributed_varnames))
load_varnames = var_names
if dirname is None or not load_varnames:
return
sparse_table_maps = {}
for table in server.servers[0].tables:
if table.type == "PS_SPARSE_TABLE" and table.common is not None:
sparse_table_maps[table.common.table_name] = table.id
dirname = os.path.normpath(dirname)
pserver_id = self.role_maker._role_id()
for var_name in load_varnames:
table_id = sparse_table_maps[var_name]
# path = os.path.join(dirname, var_name + PSERVER_SAVE_SUFFIX,
# "{}.block{}.txt".format(var_name, pserver_id))
# meta = os.path.join(dirname, var_name + PSERVER_SAVE_SUFFIX,
# "{}.block{}.meta".format(var_name, pserver_id))
self._server.load_sparse(dirname, "0", table_id)
def _run_server(self):
ep = self.compiled_strategy.get_ps_endpoint()
host, port = ep.split(":")
self._server.run_server(host, int(port))
def _init_heter_worker(self):
executor = self._get_executor()
startup_program = fluid.default_startup_program()
#real_startup_program = startup_program._heter_pipeline_opt[
# "startup_program"]
executor.run(startup_program)
self._init_worker()
def _run_heter_worker(self,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100,
fetch_handler=None):
executor = self._get_executor()
# dataset is not needed for heter worker
executor.train_from_dataset(
program=fluid.default_main_program(),
dataset=None,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
def _stop_worker(self):
self._communicator.stop()
if self.role_maker._is_heter_parameter_server_mode:
assert self._heter_client is not None, "heter client should not be None in heterps mode"
self._heter_client.stop()
#executor = self._get_executor()
#executor.close()
@staticmethod
def __exclude_vars(exclude_var_names=[]):
def is_valid(var):
if var.name in exclude_var_names:
return False
from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_varname_parts
origin_varname, _, _ = _get_varname_parts(var.name)
if origin_varname.endswith("@GRAD"):
return False
if origin_varname == "learning_rate_0":
return False
if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
var.desc.type() == core.VarDesc.VarType.READER:
return False
return var.persistable
return is_valid
def _save_sparse_params(self, executor, dirname, context, main_program,
mode):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
distributed_varnames = get_sparse_tablenames(
self.compiled_strategy.origin_main_program, True)
values = []
for id, names in context.items():
if names[0] not in distributed_varnames:
# only save sparse param to local
self._worker.recv_and_save_model(id, dirname)
# save sparse & distributed param on server
self._worker.save_one_model(id, dirname, mode)
values.extend(names)
return values
def _save_distributed_persistables(self,
executor,
dirname,
main_program,
mode=0):
denses = self.compiled_strategy.get_the_one_recv_context(
is_dense=True,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparse_varnames = self._save_sparse_params(executor, dirname, sparses,
main_program, mode)
recv_dense_varnames = []
for id, names in denses.items():
recv_dense_varnames.extend(names)
saved_varnames = sparse_varnames
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(saved_varnames),
main_program.list_vars()))
self._communicator.pull_dense(denses)
import paddle
for var in remaining_vars:
# if var.name not in recv_dense_varnames:
# continue
tensor = var.get_value()
paddle.save(
tensor, os.path.join(dirname, var.name), use_binary_format=True)
def _ps_inference_save_persistables(self,
executor,
dirname,
main_program=None,
mode=0,
**kwargs):
"""
This function filters out all variables with `persistable==True` from the
given `main_program` and then saves these variables to the folder `dirname`
or file `filename`.
The `dirname` is used to specify the folder where persistable variables
are going to be saved. If you would like to save variables in separate
files, set `filename` None; if you would like to save all variables in a
single file, use `filename` to specify the file name.
"""
if isinstance(executor, ParallelExecutor):
raise TypeError(
"in fleet.save() function, executor must be as Executor type, ParallelExecutor is not allowed"
)
if not isinstance(executor, Executor):
raise TypeError(
"in fleet.save() function, executor must be as Executor type")
if main_program is None:
main_program = self.compiled_strategy.get_origin_ps_main_program()
if isinstance(main_program, CompiledProgram):
raise TypeError(
"in fleet.save() function, main_program must be as Program type, CompiledProgram is not allowed"
)
# Todo(MrChengmo): Save optimizer status
self._save_distributed_persistables(executor, dirname, main_program,
mode)
def _ps_inference_save_inference_model(self,
executor,
dirname,
feeded_var_names,
target_vars,
main_program=None,
export_for_deployment=True,
mode=0):
"""
Prune the given `main_program` to build a new program especially for inference,
and then save it and all related parameters to given `dirname` by the `executor`.
"""
if isinstance(executor, ParallelExecutor):
raise TypeError(
"in fleet.save() function, executor must be as Executor type, ParallelExecutor is not allowed"
)
if not isinstance(executor, Executor):
raise TypeError(
"in fleet.save() function, executor must be as Executor type")
import paddle
program = self.origin_main_program if main_program is None else main_program
if isinstance(program, CompiledProgram):
raise TypeError(
"in fleet.save() function, main_program must be as Program type, CompiledProgram is not allowed"
)
feed_vars = [
program.global_block().var(name) for name in feeded_var_names
]
infer_program = paddle.static.normalize_program(program, feed_vars,
target_vars)
infer_program._copy_dist_param_info_from(program)
model_basename = "__model__"
model_basename = os.path.join(dirname, model_basename)
paddle.save(infer_program, model_basename)
self._ps_inference_save_persistables(executor, dirname, infer_program,
mode)
def _save_inference_model(self, *args, **kwargs):
self._ps_inference_save_inference_model(*args, **kwargs)
def _save_persistables(self, *args, **kwargs):
self._ps_inference_save_persistables(*args, **kwargs)
def _load_sparse_params(self, dirname, context, main_program, mode):
from paddle.fluid.incubate.fleet.parameter_server.ir.public import get_sparse_tablenames
distributed_varnames = get_sparse_tablenames(
self.compiled_strategy.origin_main_program, True)
values = []
for id, names in context.items():
if names[0] not in distributed_varnames:
# TODO: only load sparse param from local
warnings.warn("varname is not in distributed_varnames, pass")
# load sparse & distributed param on server
self._worker.load_one_table(id, dirname, mode)
values.extend(names)
return values
def _load_distributed_persistables(self, dirname, main_program=None,
mode=0):
if main_program is None:
main_program = self.compiled_strategy.get_origin_ps_main_program()
if isinstance(main_program, CompiledProgram):
raise TypeError(
"in fleet.save() function, main_program must be as Program type, CompiledProgram is not allowed"
)
denses = self.compiled_strategy.get_the_one_recv_context(
is_dense=True,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker._is_heter_parameter_server_mode,
use_origin_program=True)
sparse_varnames = self._load_sparse_params(dirname, sparses,
main_program, mode)
recv_dense_varnames = []
for id, names in denses.items():
recv_dense_varnames.extend(names)
loaded_varnames = sparse_varnames
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(loaded_varnames),
main_program.list_vars()))
import paddle
for var in remaining_vars:
if var.name not in recv_dense_varnames:
continue
tensor = paddle.load(os.path.join(dirname, var.name))
var.set_value(tensor)
self._communicator.init_params(denses)
def load_model(self, path, mode):
self._load_distributed_persistables(path, mode=mode)
def _shrink(self, threshold):
import paddle.distributed.fleet as fleet
fleet.util.barrier()
if self.role_maker._is_first_worker():
sparses = self.compiled_strategy.get_the_one_recv_context(
is_dense=False,
split_dense_table=self.role_maker.
_is_heter_parameter_server_mode,
use_origin_program=True)
for id, names in sparses.items():
self._worker.shrink_sparse_table(id, threshold)
fleet.util.barrier()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 13:21:23 2017
@author: James Jiang
"""
all_lines = [line.rstrip('\n') for line in open('Data.txt')]
all_lines_chars = []
for i in range(len(all_lines)):
chars = [j for j in all_lines[i]]
all_lines_chars.append(chars)
index_list = 0
index_all = 0
for i in range(len(all_lines_chars[0])):
if all_lines_chars[0][i] == '|':
index_list = i
mode = 'down'
total = 0
while True:
if all_lines_chars[index_all][index_list] == ' ':
break
if all_lines_chars[index_all][index_list] == '+':
k = 0
if (mode == 'down') or (mode == 'up'):
if index_list != 0:
if all_lines_chars[index_all][index_list - 1] != ' ':
mode = 'left'
k += 1
if index_list != len(all_lines_chars[index_all]) - 1:
if all_lines_chars[index_all][index_list + 1] != ' ':
mode = 'right'
k += 1
elif (mode == 'left') or (mode == 'right'):
if index_all != 0:
if all_lines_chars[index_all - 1][index_list] != ' ':
mode = 'up'
k += 1
if index_all != len(all_lines_chars) - 1:
if all_lines_chars[index_all + 1][index_list] != ' ':
mode = 'down'
k += 1
if k == 0:
break
if mode == 'down':
index_all += 1
elif mode == 'up':
index_all -= 1
elif mode == 'left':
index_list -= 1
elif mode == 'right':
index_list += 1
total += 1
print(total)
|
from django.db import models
from django.urls import reverse
import datetime
from .genre import Genre
from .category import Categorie
class Artist(models.Model):
name = models.CharField(max_length=100)
known_as = models.CharField(max_length=60)
genre = models.ManyToManyField(Genre)
categorie = models.ForeignKey(Categorie, on_delete=models.CASCADE, blank=True)
image = models.ImageField(upload_to='uploads/media/images/artist_image/%y/%m/%d')
slug = models.SlugField(unique=True)
class Meta:
ordering = ['known_as']
def __str__(self):
return self.known_as
def get_absolute_url(self):
return reverse("artist-detail", kwargs={"slug": self.slug, 'pk': self.pk}) |
import setuptools
from commandintegrator import __version__ as version
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="commandintegrator",
version=version,
author="Simon Olofsson",
author_email="[email protected]",
description="A framework and API for developing chatbots and other command-driven applications",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dotchetter/commandintegrator",
packages=[
"commandintegrator",
"commandintegrator.baseclasses",
"commandintegrator.core",
"commandintegrator.models",
"commandintegrator.tools"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"pytz",
"requests",
"urllib3"
],
data_files=[
('config', [
"commandintegrator\\language.json",
"commandintegrator\\commandintegrator.settings"
]
)
],
python_requires='>=3.8',
include_package_data=True
)
|
from thingsboard_gateway.connectors.converter import Converter, abstractmethod, log
class MPlcConverter(Converter):
@abstractmethod
def convert(self, config, data):
pass
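# Hedged example of a concrete converter (illustrative only; the payload shape
# and key names below are assumptions, not defined by this module):
#
#     class ExampleMPlcUplinkConverter(MPlcConverter):
#         def convert(self, config, data):
#             return {"deviceName": config.get("deviceName", "plc"),
#                     "telemetry": [{"values": data}]}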
|
# -*- coding: utf-8 -*-
from jsonlint.i18n import DummyTranslations
from jsonlint.validators import ValidationError, StopValidation
class DummyField(object):
_translations = DummyTranslations()
def __init__(self, data, errors=(), raw_data=None):
self.data = data
self.errors = list(errors)
self.raw_data = raw_data
def gettext(self, string):
return self._translations.gettext(string)
def ngettext(self, singular, plural, n):
return self._translations.ngettext(singular, plural, n)
def grab_error_message(callable, form, field):
try:
callable(form, field)
except ValidationError as e:
return e.args[0]
def grab_stop_message(callable, form, field):
try:
callable(form, field)
except StopValidation as e:
return e.args[0]
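# Usage sketch (illustrative only): grab_error_message runs a validator against
# a DummyField and returns the message it raised instead of propagating it.
#
#     def _too_short(form, field):
#         raise ValidationError('too short')
#
#     grab_error_message(_too_short, form=None, field=DummyField('ab'))  # -> 'too short'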
|
import datetime
import posixpath
import pytz
from pylons import request
from pylons import app_globals as g
from pylons import tmpl_context as c
from pylons.controllers.util import abort
from r2.config import feature
from r2.controllers import add_controller
from r2.controllers.reddit_base import RedditController
from r2.lib import websockets, ratelimit, utils
from r2.lib.errors import errors
from r2.lib.template_helpers import js_timestamp
from r2.lib.validator import (
json_validate,
validate,
validatedForm,
VAccountByName,
VAdmin,
VBoolean,
VLength,
VModhash,
VNotInTimeout,
VOneOf,
VUser,
)
from r2.models import Account
from . import events
from .validators import VRobinRoom
from .pages import (
RobinAdmin,
RobinAll,
RobinPage,
RobinChatPage,
RobinJoin,
RobinChat,
)
from .models import RobinRoom, VALID_VOTES
from .matchmaker import add_to_waitinglist
from .reaper import prompt_for_voting, reap_ripe_rooms, get_reap_time
@add_controller
class RobinController(RedditController):
def pre(self):
RedditController.pre(self)
if not feature.is_enabled("robin"):
self.abort404()
@validate(
VUser(),
VNotInTimeout(),
)
def GET_join(self):
room = RobinRoom.get_room_for_user(c.user)
if room:
return self.redirect("/robin")
return RobinPage(
title="robin",
content=RobinJoin(robin_heavy_load=g.live_config.get(
'robin_heavy_load')),
).render()
@validate(
VAdmin(),
)
def GET_all(self):
return RobinPage(
title="robin",
content=RobinAll(),
).render()
@validate(
VAdmin(),
)
def GET_admin(self):
return RobinPage(
title="robin",
content=RobinAdmin(),
).render()
@validate(
VUser(),
VNotInTimeout(),
)
def GET_chat(self):
room = RobinRoom.get_room_for_user(c.user)
if not room:
return self.redirect("/robin/join")
return self._get_chat_page(room)
@validate(
VAdmin(),
room=VRobinRoom("room_id", allow_admin=True),
)
def GET_force_room(self, room):
"""Allow admins to view a specific room"""
return self._get_chat_page(room)
@validate(
VAdmin(),
user=VAccountByName("user"),
)
def GET_user_room(self, user):
"""Redirect admins to a user's room"""
room = RobinRoom.get_room_for_user(user)
if not room:
self.abort404()
self.redirect("/robin/" + room.id)
def _get_chat_page(self, room):
path = posixpath.join("/robin", room.id, c.user._id36)
websocket_url = websockets.make_url(path, max_age=3600)
all_user_ids = room.get_all_participants()
all_present_ids = room.get_present_participants()
all_votes = room.get_all_votes()
users = Account._byID(all_user_ids, data=True, stale=True)
user_list = []
for user in users.itervalues():
vote = all_votes.get(user._id)
user_list.append({
"name": user.name,
"present": user._id in all_present_ids,
"vote": vote,
})
return RobinChatPage(
title="chat in %s" % room.name,
content=RobinChat(room=room),
extra_js_config={
"robin_room_is_continued": room.is_continued,
"robin_room_name": room.name,
"robin_room_id": room.id,
"robin_websocket_url": websocket_url,
"robin_user_list": user_list,
"robin_room_date": js_timestamp(room.date),
"robin_room_reap_time": js_timestamp(get_reap_time(room)),
},
).render()
def _has_exceeded_ratelimit(self, form, room):
# grab the ratelimit (as average events per second) for the room's
# current level, using the highest level configured that's not bigger
# than the room. e.g. if ratelimits are defined for levels 1, 2, and 4
# and the room is level 3, this will give us the ratelimit specified
# for 2.
desired_avg_per_sec = 1
by_level = g.live_config.get("robin_ratelimit_avg_per_sec", {})
for level, avg_per_sec in sorted(by_level.items(), key=lambda (x,y): int(x)):
if int(level) > room.level:
break
desired_avg_per_sec = avg_per_sec
# now figure out how many events per window that means
window_size = g.live_config.get("robin_ratelimit_window", 10)
allowed_events_per_window = int(desired_avg_per_sec * window_size)
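# e.g. with desired_avg_per_sec=2 and the default 10 second window this
# allows 20 events before further requests are rejected for the rest of
# the window.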
try:
# now figure out how much they've actually used
ratelimit_key = "robin/{}".format(c.user._id36)
time_slice = ratelimit.get_timeslice(window_size)
usage = ratelimit.get_usage(ratelimit_key, time_slice)
# ratelimit them if too much
if usage >= allowed_events_per_window:
g.stats.simple_event("robin.ratelimit.exceeded")
period_end = datetime.datetime.utcfromtimestamp(time_slice.end)
period_end_utc = period_end.replace(tzinfo=pytz.UTC)
until_reset = utils.timeuntil(period_end_utc)
c.errors.add(errors.RATELIMIT, {"time": until_reset},
field="ratelimit", code=429)
form.has_errors("ratelimit", errors.RATELIMIT)
return True
# or record the usage and move on
ratelimit.record_usage(ratelimit_key, time_slice)
except ratelimit.RatelimitError as exc:
g.log.warning("ratelimit error: %s", exc)
return False
@validatedForm(
VUser(),
VNotInTimeout(),
VModhash(),
room=VRobinRoom("room_id"),
message=VLength("message", max_length=140), # TODO: do we want md?
)
def POST_message(self, form, jquery, room, message):
if self._has_exceeded_ratelimit(form, room):
return
if form.has_errors("message", errors.NO_TEXT, errors.TOO_LONG):
return
websockets.send_broadcast(
namespace="/robin/" + room.id,
type="chat",
payload={
"from": c.user.name,
"body": message,
},
)
events.message(
room=room,
message=message,
sent_dt=datetime.datetime.utcnow(),
context=c,
request=request,
)
@validatedForm(
VUser(),
VNotInTimeout(),
VModhash(),
room=VRobinRoom("room_id"),
vote=VOneOf("vote", VALID_VOTES),
)
def POST_vote(self, form, jquery, room, vote):
if self._has_exceeded_ratelimit(form, room):
return
if not vote:
# TODO: error return?
return
g.stats.simple_event('robin.vote.%s' % vote)
room.set_vote(c.user, vote)
websockets.send_broadcast(
namespace="/robin/" + room.id,
type="vote",
payload={
"from": c.user.name,
"vote": vote,
},
)
events.vote(
room=room,
vote=vote,
sent_dt=datetime.datetime.utcnow(),
context=c,
request=request,
)
@validatedForm(
VUser(),
VNotInTimeout(),
VModhash(),
)
def POST_join_room(self, form, jquery):
if g.live_config.get('robin_heavy_load'):
request.environ["usable_error_content"] = (
"Robin is currently experience high load.")
abort(503)
room = RobinRoom.get_room_for_user(c.user)
if room:
# user is already in a room, they should get redirected by the
# frontend after polling /api/room_assignment.json
return
add_to_waitinglist(c.user)
@validatedForm(
VUser(),
VModhash(),
)
def POST_leave_room(self, form, jquery):
room = RobinRoom.get_room_for_user(c.user)
if not room:
return
room.remove_participants([c.user])
websockets.send_broadcast(
namespace="/robin/" + room.id,
type="users_abandoned",
payload={
"users": [c.user.name],
},
)
@json_validate(
VUser(),
VNotInTimeout(),
)
def GET_room_assignment(self, responder):
room = RobinRoom.get_room_for_user(c.user)
if room:
return {"roomId": room.id}
@validatedForm(
VAdmin(),
VModhash(),
)
def POST_admin_prompt(self, form, jquery):
prompt_for_voting()
@validatedForm(
VAdmin(),
VModhash(),
)
def POST_admin_reap(self, form, jquery):
reap_ripe_rooms()
@validatedForm(
VAdmin(),
VModhash(),
message=VLength("message", max_length=140),
)
def POST_admin_broadcast(self, form, jquery, message):
if form.has_errors("message", errors.NO_TEXT, errors.TOO_LONG):
return
websockets.send_broadcast(
namespace="/robin",
type="system_broadcast",
payload={
"body": message,
},
)
|
""" Download views for editorial app. """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.http import HttpResponse
from django.utils import timezone
from django.views.generic import TemplateView, UpdateView, DetailView, FormView, View
from django.views.decorators.csrf import csrf_exempt
from cStringIO import StringIO
from zipfile import ZipFile
import datetime
import json
from braces.views import LoginRequiredMixin, FormMessagesMixin
from editorial.views import CustomUserTest
# from editorial.forms import StoryDownloadForm
from editorial.models import (
Story,
Facet,
ImageAsset,
DocumentAsset,
AudioAsset,
VideoAsset
)
#----------------------------------------------------------------------#
# Download View
#----------------------------------------------------------------------#
class StoryDownloadTemplateView(CustomUserTest, TemplateView):
"""Display form for a story download."""
template_name = 'editorial/story/story_download_form.html'
def test_user(self, user):
"""User must be member of an org."""
if user.organization:
return True
raise PermissionDenied()
def get_context_data(self, pk):
story = Story.objects.get(id=pk)
story_images = story.get_story_images()
story_documents = story.get_story_documents()
story_audio = story.get_story_audio()
story_video = story.get_story_video()
return {
'story': story,
'story_images': story_images,
'story_documents': story_documents,
'story_audio': story_audio,
'story_video': story_video,
}
# ACCESS: Any org user, or user from an organization that is in collaborate_with
# should be able to download a story
# Contractors should not be able to download
class StoryDownloadProcessView(CustomUserTest, View):
"""Create the download for a story and its facets."""
def test_user(self, user):
"""User must be member of an org."""
if user.organization:
return True
raise PermissionDenied()
def post(self, request, pk):
""" Process download form to collect objects and create download file."""
# get the story and associated facets no matter what options are selected
story_id = request.POST.get('story')
story = get_object_or_404(Story, id=pk)
story_txt = story.get_story_download()
select_all_images = story.get_story_images()
select_all_documents = story.get_story_documents()
select_all_audio = story.get_story_audio()
select_all_video = story.get_story_video()
image_txt = ""
document_txt = ""
audio_txt = ""
video_txt = ""
# Set up zip file
fp = StringIO()
z = ZipFile(fp, mode="w")
# Always Zip up story meta
z.writestr("story_details.txt", story_txt)
# ------------------------------ #
# IF SELECT ALL #
# ------------------------------ #
# if select_all is chosen, then all items will be downloaded
story_sa_id = request.POST.get('select_all')
        if story_sa_id:
            story = get_object_or_404(Story, id=story_sa_id)
# Zip up all facets and assets including story metadata
for facet in story.facet_set.all():
z.writestr("{name}.txt".format(name=facet.name), facet.get_facet_download())
for image in select_all_images:
z.writestr("{image}.jpg".format(image=image.title), image.photo.read())
new_info = image.get_asset_download_info()
image_txt += new_info
for document in select_all_documents:
if document.asset_type == "PDF":
z.writestr("{document}.pdf".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "WORD DOC":
z.writestr("{document}.docx".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "TEXT":
z.writestr("{document}.txt".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "COMMA SEPARATED":
z.writestr("{document}.csv".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "EXCEL":
z.writestr("{document}.xls".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
for audiofile in select_all_audio:
if audiofile.asset_type == "MP3":
z.writestr("{audiofile}.mp3".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
if audiofile.asset_type == "WAV":
z.writestr("{audiofile}.wav".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
for video in select_all_video:
if video.asset_type == "YOUTUBE":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_youtube_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
if video.asset_type == "VIMEO":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_vimeo_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
# user can also select download all items associated with specific facets
# ------------------------------ #
# IF FACET ALL #
# ------------------------------ #
facet_sa_id = request.POST.getlist('facet_select_all')
if facet_sa_id:
for facet in facet_sa_id:
facet = get_object_or_404(Facet, id=facet)
# Zip up story meta, facet content and facet images
if facet:
z.writestr("{name}.txt".format(name=facet.name), facet.get_facet_download())
for image in facet.image_assets.all():
z.writestr("{image}.jpg".format(image=image.title), image.photo.read())
new_info = image.get_asset_download_info()
image_txt += new_info
for document in facet.document_assets.all():
if document.asset_type == "PDF":
z.writestr("{document}.pdf".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "WORD DOC":
z.writestr("{document}.docx".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "TEXT":
z.writestr("{document}.txt".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "COMMA SEPARATED":
z.writestr("{document}.csv".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "EXCEL":
z.writestr("{document}.xls".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
for audiofile in facet.audio_assets.all():
if audiofile.asset_type == "MP3":
z.writestr("{audiofile}.mp3".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
if audiofile.asset_type == "WAV":
z.writestr("{audiofile}.wav".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
for video in facet.video_assets.all():
if video.asset_type == "YOUTUBE":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_youtube_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
if video.asset_type == "VIMEO":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_vimeo_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
# if not select all OR facet select all, then user chooses the facet and the images
# ------------------------------ #
# IF FACET SPECIFIC #
# ------------------------------ #
facet_sp_id = request.POST.getlist('facet_specific_content')
if facet_sp_id:
for facet_id in facet_sp_id:
facet = get_object_or_404(Facet, id=facet_id)
z.writestr("{name}.txt".format(name=facet.name), facet.get_facet_download())
# ------------------------------ #
# IF SPECIFIC IMAGES #
# ------------------------------ #
# if not select all or by facet, then user chooses specific images
images = request.POST.getlist('images')
images = ImageAsset.objects.filter(pk__in=images)
if images:
for image in images:
z.writestr("{image}.jpg".format(image=image.title), image.photo.read())
new_info = image.get_asset_download_info()
image_txt += new_info
# ------------------------------ #
# IF SPECIFIC DOCUMENTS #
# ------------------------------ #
# if not select all or by facet, then user chooses specific documents
documents = request.POST.getlist('documents')
documents = DocumentAsset.objects.filter(pk__in=documents)
if documents:
for document in documents:
if document.asset_type == "PDF":
z.writestr("{document}.pdf".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "WORD DOC":
z.writestr("{document}.docx".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "TEXT":
z.writestr("{document}.txt".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "COMMA SEPARATED":
z.writestr("{document}.csv".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "EXCEL":
z.writestr("{document}.xls".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
# ------------------------------ #
# IF SPECIFIC AUDIO #
# ------------------------------ #
# if not select all or by facet, then user chooses specific audiofiles
audiofiles = request.POST.getlist('audiofiles')
audiofiles = AudioAsset.objects.filter(pk__in=audiofiles)
if audiofiles:
for audiofile in audiofiles:
if audiofile.asset_type == "MP3":
z.writestr("{audiofile}.mp3".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
if audiofile.asset_type == "WAV":
z.writestr("{audiofile}.wav".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
# ------------------------------ #
# IF SPECIFIC VIDEO #
# ------------------------------ #
# if not select all or by facet, then user chooses specific video files
videos = request.POST.getlist('videofiles')
videos = VideoAsset.objects.filter(pk__in=videos)
if videos:
for video in videos:
if video.asset_type == "YOUTUBE":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_youtube_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
if video.asset_type == "VIMEO":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_vimeo_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
# ------------------------------ #
# Create download #
# ------------------------------ #
#Take the final version of asset_txts and write it.
if image_txt:
z.writestr("image_details.txt", image_txt)
if document_txt:
z.writestr("document_details.txt", document_txt)
if audio_txt:
z.writestr("audio_details.txt", audio_txt)
if video_txt:
z.writestr("video_details.txt", video_txt)
z.close()
fp.seek(0)
response = HttpResponse(fp, content_type='application/zip')
fp.close()
return response
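
# A minimal sketch (not used by the views above) of how the repeated
# asset_type branches could be collapsed with a lookup table. The extension
# mapping below is an assumption inferred from the branches in this module.
DOCUMENT_EXTENSIONS = {
    "PDF": "pdf",
    "WORD DOC": "docx",
    "TEXT": "txt",
    "COMMA SEPARATED": "csv",
    "EXCEL": "xls",
}


def write_document_asset(zf, document):
    """Write one document asset into the open ZipFile and return its info text."""
    extension = DOCUMENT_EXTENSIONS.get(document.asset_type)
    if extension is None:
        return ""
    zf.writestr(
        "{title}.{ext}".format(title=document.title, ext=extension),
        document.document.read(),
    )
    return document.get_asset_download_info()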
|
from functools import reduce  # builtin in Python 2, but lives in functools in Python 3
from multiprocessing import Pool
class Collection:
"""
Helper to provide a natural Functional Programming approach in Python.
"""
def __init__(self, val):
self.val = val
def apply(self, f):
return Collection(f(self.val))
def filter(self, f):
return Collection(filter(f, self.val))
def reduce(self, f, initial):
return Collection(reduce(f, self.val, initial))
def map(self, f):
return Collection(map(f, self.val))
def mapGenerator(self, f):
return Collection((f(item) for item in self.val))
def flatMap(self, f):
return Collection([item for items in map(f, self.val) for item in items])
def parallelMap(self, f, maxConcurrency=4):
pool = Pool(processes=maxConcurrency)
return Collection(pool.map(f, self.val))
def forEach(self, f):
for item in self.val:
f(item)
# To allow chaining
return self
def comment(self, comment):
# Just allow chaining
return self
def getValue(self):
return self.val
def __str__(self):
return "Collection(%s)" % str(self.val)
|
import math
from lib.preset import Preset
from lib.color_fade import Rainbow
from lib.basic_tickers import fade, offset, speed
from lib.parameters import FloatParameter
class RadialRainbow(Preset):
"""
demonstrates scene attributes by assigning a color rainbow
to fixtures based on their radial position in the scene
"""
def setup(self):
self.add_parameter(FloatParameter('speed', 0.2))
self.add_parameter(FloatParameter('width', 1.0))
self._create_tickers()
def parameter_changed(self, parameter):
if str(parameter) == 'width':
self._create_tickers()
def _create_tickers(self):
self.clear_tickers()
fixtures = self.scene().fixtures()
midpoint_tuples = [(f.strand, f.address, f.midpoint()) for f in fixtures]
extents = self.scene().extents()
center = self.scene().center_point()
for strand, address, midpoint in midpoint_tuples:
dx, dy = (midpoint[0] - center[0], midpoint[1] - center[1])
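            # atan2 is in (-pi, pi]; shifting by pi and dividing by 2*pi maps the
            # fixture's angular position around the scene center onto a 0..1 range,
            # and the 'width' parameter then scales how much of the rainbow is used.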
angle = (math.pi + math.atan2(dy, dx)) / (2.0 * math.pi) * self.parameter('width').get()
self.add_ticker(speed(offset(fade((strand, address), Rainbow), angle), self.parameter('speed')))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A demonstration of simulating open system dynamics by sampling
over randomized pure state trajectories.
This simulates the dynamics of an edge-driven XXZ chain,
which has an analytic solution for lam = 1.0, eps = 2.0.
See: T. Prosen, Phys. Rev. Lett. 107, 137201 (2011)
For comparison, with short spin chains this script also
evolves the full density matrix corresponding to the sample
trajectories using Euler integration of the Lindblad master equation.
The starting state is a random state composed of N_samp random pure states.
@author: Ashley Milsted
"""
import math as ma
import scipy as sp
import evoMPS.tdvp_gen_diss as tdvp
import time
import copy
import multiprocessing as mp
"""
First, we set up some global variables to be used as parameters.
"""
N = 6 #The length of the finite spin chain.
bond_dim = 64 #The maximum bond dimension
Nmax_fullrho = 8 #Maximum chain length for computing the full density matrix
num_procs = mp.cpu_count() #Number of parallel processes to use
live_plot_mode = True #Attempt to show a live plot of expectation values during evolution
#If False, save results to a file instead!
plot_saved_data = False #Do not simulate. Instead load and plot saved data.
plot_res = -1 #Which result to plot. -1 means the last result.
#Set number of sample trajectories based on available cores if live plotting
if live_plot_mode:
if N <= Nmax_fullrho:
N_samp = num_procs - 1 #the density matrix computation requires a process too
else:
N_samp = num_procs
else:
N_samp = 20 #number of samples to compute when saving data to a file
#System parameters
lam = 1.0
eps = 2.0
dt = 0.001 #Time-step size for Euler integration
N_steps = 1000 #Number of steps to compute
res_every = 20 #Number of steps to wait between computation of results (expectation values)
random_seed = 1 #Seed used to generate the pseudo-random starting state.
#The same random number generator is used to simulate the Wiener processes needed to
#integrate the stochastic differential equation.
"""
Next, we define the operators used, including the Hamiltonian and the Lindblad operators.
"""
Sx = sp.array([[0., 1.],
[1., 0.]])
Sy = 1.j * sp.array([[0., -1.],
[1., 0.]])
Sz = sp.array([[1., 0.],
[0., -1.]])
Sp = 0.5 * (Sx + 1.j * Sy)
Sm = 0.5 * (Sx - 1.j * Sy)
def get_ham(N, lam):
h = (2. * sp.kron(Sp, Sm) + 2. * sp.kron(Sm, Sp)
+ lam * sp.kron(Sz, Sz)).reshape(2, 2, 2, 2)
return [None] + [h] * (N - 1)
def get_linds(N, eps):
#Lindblad operators must have same range as Hamiltonian terms. In this case they are nearest-neighbour.
Sp1 = (sp.kron(Sp, sp.eye(2))).reshape(2, 2, 2, 2)
Sm2 = (sp.kron(sp.eye(2), Sm)).reshape(2, 2, 2, 2)
L1 = (1, sp.sqrt(eps) * Sp1)
L2 = (N-1, sp.sqrt(eps) * Sm2)
return [L1, L2]
"""
The bond dimension for each site is given as a vector, length N + 1.
Here we set the bond dimension = bond_dim for all sites.
evoMPS will adjust it to the maximum useful value near the ends of the chain.
"""
D = [bond_dim] * (N + 1)
"""
The site Hilbert space dimension is also given as a vector, length N + 1.
Here, we set all sites to dimension = 2.
"""
q = [2] * (N + 1)
def get_full_state(s):
psi = sp.zeros(tuple([2]*N), dtype=sp.complex128)
for ind in sp.ndindex(psi.shape):
A = 1.0
for n in xrange(N, 0, -1):
A = s.A[n][ind[n-1]].dot(A)
psi[ind] = A[0,0]
psi = psi.ravel()
return psi
def get_full_op(op):
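    #Embed each local term op[n] (acting on sites n..n+k-1) into the full
    #2**N-dimensional space by padding with identities on the other sites,
    #then sum all of the embedded terms.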
fop = sp.zeros((2**N, 2**N), dtype=sp.complex128)
for n in xrange(1, min(len(op), N + 1)):
if op[n] is None:
continue
n_sites = len(op[n].shape) / 2
opn = op[n].reshape(2**n_sites, 2**n_sites)
fop_n = sp.kron(sp.eye(2**(n - 1)), sp.kron(opn, sp.eye(2**(N - n - n_sites + 1))))
assert fop.shape == fop_n.shape
fop += fop_n
return fop
def go(load_from=None, N_steps=100, resQ=None, pid=None, stateQ=None):
sp.random.seed(random_seed + pid)
s_start = tdvp.EvoMPS_TDVP_Generic_Dissipative(N, D, q, get_ham(N, lam), get_linds(N, eps))
print "Starting MPS sample", pid
if load_from is not None:
s_start.load_state(load_from)
else:
s_start.randomize()
s = s_start
#Send the state to the density matrix simulator
if N <= Nmax_fullrho and stateQ is not None:
psi = get_full_state(s)
stateQ.put([pid, psi])
eta = 1
Hexp = 0
for i in xrange(N_steps + 1):
s.update()
Szs = [s.expect_1s(Sz, n).real for n in xrange(1, s.N + 1)]
pHexp = Hexp
Hexp = s.H_expect
if i % res_every == 0:
if resQ is not None:
resQ.put([pid, i, Szs])
else:
print pid, i / res_every, Hexp.real, Hexp.real - pHexp.real, Szs
s.take_step_dissipative(dt)
def fullrho(qu, squ):
print "#Starting full density matrix simulation!"
#Build state from pure states received from the MPS processes
rho = sp.zeros((2**N, 2**N), dtype=sp.complex128)
psis = [None] * N_samp
for n in range(N_samp):
pnum, psi = squ.get()
psis[pnum] = psi
rho += sp.outer(psi, psi.conj())
squ.task_done()
rho /= sp.trace(rho)
Hfull = get_full_op(get_ham(N, lam))
linds = get_linds(N, eps)
linds = [(n, L.reshape(tuple([sp.prod(L.shape[:sp.ndim(L)/2])]*2))) for (n, L) in linds]
linds_full = [sp.kron(sp.eye(2**(n-1)), sp.kron(L, sp.eye(2**(N - n + 1) / L.shape[0]))) for (n, L) in linds]
for L in linds_full:
assert L.shape == Hfull.shape
Qfull = -1.j * Hfull - 0.5 * sp.sum([L.conj().T.dot(L) for L in linds_full], axis=0)
szs = [None] + [sp.kron(sp.kron(sp.eye(2**(n - 1)), Sz), sp.eye(2**(N - n))) for n in range(1, N + 1)]
for i in range(N_steps + 1):
rho /= sp.trace(rho)
esz = []
for n in xrange(1, N + 1):
esz.append(sp.trace(szs[n].dot(rho)).real)
if i % res_every == 0:
if qu is None:
print esz
else:
qu.put([-1, i, esz])
qu.put([-2, i, [sp.NaN] * N]) #this slot is reserved for a second "exact" result
#Do Euler steps, approximately integrating the Lindblad master equation
rho += dt * (Qfull.dot(rho) + rho.dot(Qfull.conj().T) +
sum([L.dot(rho).dot(L.conj().T) for L in linds_full]))
def plotter(q):
import matplotlib
matplotlib.use("wxagg")
import matplotlib.pyplot as plt
fig = plt.figure()
lns = [plt.plot([0]*N, ':')[0] for n in range(N_samp)]
av = plt.plot([0]*N, 'k-', linewidth=2.0)[0]
av_err1 = plt.plot([0]*N, 'k-', linewidth=1.0)[0]
av_err2 = plt.plot([0]*N, 'k-', linewidth=1.0)[0]
exa = plt.plot([0]*N, 'r-', linewidth=2.0)[0]
#exa_s = plt.plot([0]*N, 'm--', linewidth=2.0)[0]
plt.legend([exa, av], ["Density matrix", "Sample average"])
plt.xlabel(r"$n$")
plt.ylabel(r"$\langle \sigma^z_n \rangle$")
plt.ylim((-1, 1))
plt.xlim((0, N - 1))
plt.ion()
plt.show()
i_buf = 0
data_buf = [[None] * (N_samp + 2)]
if N <= Nmax_fullrho:
effbuflen = (N_samp + 2)
else:
effbuflen = N_samp
while True:
data = q.get()
if data is None:
break
num = data[0]
i = data[1]
ys = data[2]
i_off = (i - i_buf) / res_every
if i_off >= len(data_buf):
for j in range(len(data_buf), i_off + 1):
data_buf.append([None] * (N_samp + 2))
data_buf[i_off][num] = ys
if not None in data_buf[0][:effbuflen]:
print "Plotting results for step", i_buf, "buffer length", len(data_buf)
for j in range(N_samp):
lns[j].set_ydata(data_buf[0][j])
av_ys = sp.zeros_like(ys)
for da in data_buf[0][:-2]:
av_ys += da
av_ys /= N_samp
av.set_ydata(av_ys)
#Compute stdev and use it to display error
av_ys_var = 1./(N_samp - 1) / N_samp * sp.sum([(da - av_ys)**2 for da in data_buf[0][:-2]], axis=0)
av_ys_e1 = av_ys + sp.sqrt(av_ys_var)
av_ys_e2 = av_ys - sp.sqrt(av_ys_var)
av_err1.set_ydata(av_ys_e1)
av_err2.set_ydata(av_ys_e2)
exa.set_ydata(data_buf[0][-1])
#exa_s.set_ydata(data_buf[0][-2])
fig.canvas.draw()
data_buf.pop(0)
i_buf += res_every
plt.pause(0.01)
q.task_done()
plt.ioff()
plt.show()
def get_filename():
return 'sample_data_eps%g_lam%g_N%u_ns%u_ts%u_dt%g_resev%u_maxD%u.bin' % (eps, lam, N, N_samp,
N_steps, dt, res_every,
bond_dim)
def writer(q):
df = sp.memmap(get_filename(), dtype=sp.float64, mode='w+', shape=(N_samp + 2, N_steps / res_every + 1, N))
while True:
data = q.get()
if data is None:
break
num = data[0]
i = data[1]
ys = data[2]
df[num, i / res_every, :] = ys
if i == N_steps:
df.flush()
print "Sample", num, "finished. Data saved."
del df
def plot_saved():
import matplotlib
import matplotlib.pyplot as plt
data = sp.memmap(get_filename(), dtype=sp.float64, mode='r', shape=(N_samp + 2, N_steps / res_every + 1, N))
exa = data[-1]
data = data[:-2]
fins = sp.array([d[plot_res] for d in data if not sp.all(d[plot_res] == 0)])
nsam = len(fins)
print "Samples:", nsam
av = fins.sum(axis=0) / nsam
    av_var = 1./(nsam - 1) / nsam * sp.sum((fins - av)**2, axis=0)
av_e1 = av + sp.sqrt(av_var)
av_e2 = av - sp.sqrt(av_var)
plt.figure()
pav = plt.plot(av, 'k-')[0]
plt.plot(av_e1, 'k--')
plt.plot(av_e2, 'k--')
if not sp.all(exa[-1] == 0):
pexa = plt.plot(exa[-1], 'r-')[0]
plt.legend([pexa, pav], ["Density matrix", "Sample average"])
plt.ylim((-1, 1))
plt.xlabel(r"$n$")
plt.ylabel(r"$\langle \sigma^z_n \rangle$")
plt.show()
def f(args):
pid, resQ, stateQ = args
go(load_from=None, N_steps=N_steps, resQ=resQ, pid=pid, stateQ=stateQ)
if __name__ == "__main__":
if plot_saved_data:
plot_saved()
else:
mpm = mp.Manager()
qu = mpm.Queue()
state_q = mpm.Queue()
if live_plot_mode:
res_handler = plotter
else:
res_handler = writer
resp = mp.Process(target=res_handler, args=(qu,))
resp.start()
p = mp.Pool(num_procs)
if N <= Nmax_fullrho:
exa = p.apply_async(fullrho, args=(qu, state_q))
p.map(f, zip(range(N_samp), [qu] * N_samp, [state_q] * N_samp))
if N <= Nmax_fullrho:
exa.get()
qu.put(None)
resp.join()
|
import numpy as np
from seizures.features.FeatureExtractBase import FeatureExtractBase
from seizures.data.Instance import Instance
from numpy import unwrap, angle
from scipy.signal import hilbert
from matplotlib import pyplot as plt
class PLVFeatures(FeatureExtractBase):
"""
Class to extracts Phase Locking Value (PLV) between pairs of channels.
@author V&J
"""
def __init__(self):
pass
def extract(self, instance):
data = instance.eeg_data
n_ch, time = data.shape
        n_pairs = n_ch * (n_ch - 1) // 2
# initiate matrices
phases = np.zeros((n_ch,time))
delta_phase_pairwise = np.zeros((n_pairs,time))
plv = np.zeros((n_pairs,))
# extract phases for each channel
for c in range(n_ch):
phases[c,:] = unwrap(angle(hilbert(data[c,:])))
# compute phase differences
k = 0
for i in range(n_ch):
for j in range(i+1,n_ch):
delta_phase_pairwise[k,:] = phases[i,:]-phases[j,:]
k+=1
# compute PLV
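        # PLV_k = |(1/T) * sum_t exp(i * delta_phi_k(t))|: 1 means the two
        # channels stay perfectly phase-locked, 0 means no phase relationship.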
for k in range(n_pairs):
plv[k] = np.abs(np.sum(np.exp(1j*delta_phase_pairwise[k,:]))/time)
self.assert_features(plv)
# features = a 1d ndarray
return plv
def __str__(self):
return "PLV"
|
from week_zero.swapper import swapper
from week_zero.matrix import matrix1
from week_zero.design import design
from week_zero.funcy import monkey
from week_one.week_one import fibonacci
from week_one.week_one import for_loopy, while_loopy, recursive_loopy
from main import my_info
from week_two.factor import normal_factor
from week_two.Palindrome import runner1
from week_two.factorial import factorial_function
from week_three.gcd import gcdtest
main_menu = [
["About Me", my_info],
]
# Submenu list of [Prompt, Action]
# Works similarly to main_menu
sub_menu = [
["Swap", swapper],
["Factorial", factorial_function],
["Factor", normal_factor],
["Palindrome", runner1],
["Fibonacci", fibonacci],
["GCF", gcdtest]
]
week_two_sub_menu = [
["InfoDB For Loop", for_loopy],
["InfoDB While Loop", while_loopy],
["InfoDB Recursive Loop", recursive_loopy],
]
week_three_sub_menu = [
["Matrix", matrix1],
["Design", design],
["Animation", monkey],
]
# Menu banner is typically defined by menu owner
border = "=" * 25
banner = f"\n{border}\nPlease Select An Option\n{border}"
def menu():
print("")
title = "\033[96m Adi K Main Menu \n \033[0m" + banner
menu_list = main_menu.copy()
menu_list.append(["Number Functions", submenu])
menu_list.append(["Looping Fun", week2_submenu])
menu_list.append(["Function Fun", week3_submenu])
buildMenu(title, menu_list)
# def submenu
# using sub menu list above:
# sub_menu works similarly to menu()
def submenu():
title = "Function Submenu" + banner
buildMenu(title, sub_menu)
def week2_submenu():
title = "Function Submenu" + banner
buildMenu(title, week_two_sub_menu)
def week3_submenu():
title = "Function Submenu" + banner
buildMenu(title, week_three_sub_menu)
def buildMenu(banner, options):
# header for menu
print(banner)
# build a dictionary from options
prompts = {0: ["Exit", None]}
for op in options:
index = len(prompts)
prompts[index] = op
# print menu or dictionary
for key, value in prompts.items():
print(key, '->', value[0])
# get user choice
choice = input("Type your choice> ")
print(" ")
# validate choice and run
# execute selection
# convert to number
try:
choice = int(choice)
if choice == 0:
# stop
return
try:
# try as function
action = prompts.get(choice)[1]
action()
except TypeError:
try: # try as playground style
exec(open(action).read())
except FileNotFoundError:
print(f"File not found!: {action}")
# end function try
# end prompts try
except ValueError:
# not a number error
print(f"Not a number: {choice}")
except UnboundLocalError:
# traps all other errors
print(f"Invalid choice: {choice}")
# end validation try
buildMenu(banner, options) # recursion, start menu over again
if __name__ == "__main__":
menu()
|
# Alex Hancock, UCSC CGL
# Luigi Monitor
import json
import boto
from boto.s3.key import Key
from sqlalchemy import MetaData, Table, Column, String, Float, create_engine
unique_job = 'spawnFlop_SRR1988343_demo__consonance_jobs__0992701f6f'
sample_id = 'DTB-116_Baseline_1'
topfolder = 'UCSF_SU2C_WCDT_DTB-116_DTB-116_Baseline'
def getTouchfile(touchfile_name):
s3 = boto.connect_s3()
bucket_name = 'cgl-core-analysis-run-touch-files'
bucket = s3.get_bucket(bucket_name)
k = Key(bucket)
k.key = touchfile_name
contents = k.get_contents_as_string()
return contents
#
# Database initialization, creation if table doesn't exist
#
db = create_engine('postgresql:///monitor', echo=False)
conn = db.connect()
metadata = MetaData(db)
luigi = Table('luigi', metadata,
Column("luigi_job", String(100), primary_key=True),
Column("status", String(20)),
Column("submitter_specimen_id", String(100)),
Column("specimen_uuid", String(100)),
Column("workflow_name", String(100)),
Column("center_name", String(100)),
Column("submitter_donor_id", String(100)),
Column("consonance_job_uuid", String(100)),
Column("submitter_donor_primary_site", String(100)),
Column("project", String(100)),
Column("analysis_type", String(100)),
Column("program", String(100)),
Column("donor_uuid", String(100)),
Column("submitter_sample_id", String(100)),
Column("submitter_experimental_design", String(100)),
Column("submitter_specimen_type", String(100)),
Column("workflow_version", String(100)),
Column("sample_uuid", String(100)),
Column("start_time", Float),
Column("last_updated", Float)
)
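# Create the table if it does not exist yet (see the note above);
# checkfirst makes this a no-op when the table is already present.
metadata.create_all(checkfirst=True)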
#
# S3 Scraping below
#
filepath = 'consonance-jobs/RNASeq_3_1_x_Coordinator/3_1_3/' + topfolder
touchfile_name = filepath + '/' + \
sample_id + \
'_meta_data.json'
stringContents = getTouchfile(touchfile_name)
jsonMetadata = json.loads(stringContents)
ins_query = luigi.insert().values(luigi_job=unique_job,
status="DONE",
submitter_specimen_id=jsonMetadata[
'submitter_specimen_id'],
specimen_uuid=jsonMetadata['specimen_uuid'],
workflow_name=jsonMetadata['workflow_name'],
center_name=jsonMetadata['center_name'],
submitter_donor_id=jsonMetadata[
'submitter_donor_id'],
consonance_job_uuid=jsonMetadata[
'consonance_job_uuid'],
submitter_donor_primary_site=jsonMetadata[
'submitter_donor_primary_site'],
project=jsonMetadata['project'],
analysis_type=jsonMetadata['analysis_type'],
program=jsonMetadata['program'],
donor_uuid=jsonMetadata['donor_uuid'],
submitter_sample_id=jsonMetadata[
'submitter_sample_id'],
submitter_experimental_design=jsonMetadata[
'submitter_experimental_design'],
submitter_specimen_type=jsonMetadata[
'submitter_specimen_type'],
workflow_version=jsonMetadata[
'workflow_version'],
sample_uuid=jsonMetadata['sample_uuid'],
start_time=1487716334.29525,
last_updated=1487716634.38815)
exec_result = conn.execute(ins_query)
print exec_result
|
import sys
import SelectiveRepeat
import StopAndWait
import GoBackN
def print_usage():
print("""\nUsage: main.py option config-file-name\n
option:\n-st:\tstop and wait\n-sr:\tselective repeat\n-go:\tgo back N\n""")
if len(sys.argv) < 3:
print_usage()
elif len(sys.argv) == 3:
method = sys.argv[1]
file_name = sys.argv[2]
if method == '-sr':
SelectiveRepeat.start(file_name)
elif method == '-st':
StopAndWait.start(file_name)
elif method == '-go':
GoBackN.start(file_name)
else:
print_usage()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import os
import io
import re
try:
from setuptools import setup
from setuptools.command.install import install
setup
except ImportError:
from distutils.core import setup
setup
# Get the long description from the README
def readme():
with open('README.rst') as f:
return f.read()
# Read, version funcs taken from:
# https://github.com/ellisonbg/altair/blob/master/setup.py
def read(path, encoding='utf-8'):
path = os.path.join(os.path.dirname(__file__), path)
with io.open(path, encoding=encoding) as fp:
return fp.read()
def version(path):
"""
    Obtain the package version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
"""
version_file = read(path)
version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
VERSION = version('trappist/__init__.py')
# Setup!
setup(name = 'trappist',
version = VERSION,
description = 'Constraining the stellar evolution of TRAPPIST-1 using MCMC',
long_description = readme(),
classifiers = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Astronomy',
],
url = 'https://github.com/dflemin3/trappist',
author = 'David P. Fleming',
author_email = '[email protected]',
license = 'MIT',
packages = ['trappist'],
install_requires = [
'numpy',
'matplotlib >= 3.0.0',
'scipy',
'emcee == 3.0rc2',
'corner',
'vplot'
],
include_package_data = True,
zip_safe = False)
|
from openpnm.algorithms import TransientReactiveTransport, FickianDiffusion
from openpnm.utils import logging, Docorator
logger = logging.getLogger(__name__)
docstr = Docorator()
class TransientFickianDiffusion(TransientReactiveTransport, FickianDiffusion):
r"""
A class to simulate transient diffusion with reactions
"""
def __init__(self, settings={}, **kwargs):
super().__init__(**kwargs)
self.settings.update(settings)
|
from typing import Optional, Union
from datetime import date, time
from .reserve import Reserve, ReserveSetType
from .user import User
class Supboard(Reserve):
"""Supboard reservation data class
Attributes:
        is_complete:
            A boolean attribute indicating whether the reservation is complete.
        user:
            An instance of the User class.
        start:
            Reservation start datetime.
        start_date:
            Reservation start date only.
        start_time:
            Reservation start time only.
        end:
            Reservation end datetime.
        end_date:
            Reservation end date only.
        end_time:
            Reservation end time only.
        set_type:
            A string value indicating the type of set ("set", "hour").
        set_count:
            An integer count of sets.
        count:
            An integer count of supboard equipment to rent.
        id:
            An integer supboard reservation identifier.
        canceled:
            Optional. A boolean meaning that the reservation is canceled.
        cancel_telegram_id:
            Optional. An integer telegram identifier of the user who
            canceled the reservation.
"""
def __init__(self,
user: Optional[User] = None,
start_date: Optional[date] = None,
start_time: Optional[time] = None,
set_type_id: str = "set",
set_count: int = 1,
count: int = 1,
id: Union[int, None] = None,
canceled: Union[bool, None] = False,
cancel_telegram_id: Union[int, None] = None):
"""Wakeboard reservation data class
Args:
user:
Optional. A instance of User class object.
start_date:
Optional. Wakeboard reservation start date only.
start_time:
Optional. Wakeboard reservation start time only.
set_type:
A reservation set type instances.
set_count:
A integer value of set's count.
count:
An integer value of a supboard equipment rent need
id:
An integer wakeboard reservation identifier
canceled:
Optional. A boolean meaning that a reserevation is canceled
cancel_telegram_id:
Optional. An integer telegram identifier
of user canceled a reserevation
"""
super().__init__(user=user,
start_date=start_date, start_time=start_time,
set_type_id=set_type_id, set_count=set_count,
count=count, id=id,
canceled=canceled,
cancel_telegram_id=cancel_telegram_id)
if set_type_id == "hour":
self.set_type = ReserveSetType(set_type_id, 60)
else:
self.set_type = ReserveSetType(set_type_id, 30)
def __copy__(self):
return Supboard(self.user, self.start_date, self.start_time,
self.set_type.set_id, self.set_count,
self.count, self.id)
def __deepcopy__(self):
return Supboard(self.user.__deepcopy__(),
self.start_date, self.start_time,
self.set_type.set_id, self.set_count,
self.count, self.id)
def __eq__(self, other) -> bool:
        return (self.start_date == other.start_date
                and self.set_count == other.set_count
                and self.minutes == other.minutes
                and self.count == other.count)
if __name__ == "__main__":
test = Supboard()
|
name = "declutter" |
#!/usr/bin/python3
def only_diff_elements(set_1, set_2):
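    """Return a set of all elements present in only one of the two sets."""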
return (set_1 ^ set_2)
|
from emf.fields import enviro as env
for i in range(1,4):
fn1 = 'enviro-files/Envsmpl' + str(i) + '.o01'
fn2 = 'enviro-files/Envsmpl' + str(i) + '.o02'
env.compare_o01_o02(fn1, fn2, path='o0-comparison')
|
import requests
import logging
from shotgun_api3 import Shotgun
from sgsession import Session
import json
log = logging.getLogger(__name__)
SLACK_CHANNEL = SLACK_CHANNEL
SLACK_TOKEN = SLACK_TOKEN
def callback(event):
sg = Session()
print event['entity']
if event['project']['id'] != 74:
return "checking project id is not 74 " + str(event['project']['id'])
elif event['meta']['new_value'] != 'rev':
return "checking new value is not rev" + str(event['meta']['new_value'])
elif event['event_type'] != "Shotgun_Task_Change":
return "checking: event['event_type']" + str(event['event_type'])
else:
seq_link = ""
task_id = event['entity']['id']
task = sg.find_one('Task', [('id', 'is', task_id)], ['entity'])
shot_id = task['entity']['id']
if 'sg_sequence' in task['entity']:
seq_id = task['entity']['sg_sequence']['id']
print seq_id
seq_type = task['entity']['sg_sequence']['type']
print seq_type
            seq_name = task['entity']['sg_sequence']['name']
print seq_name
seq_link = '\n<https://markmedia.shotgunstudio.com/detail/Shot/' + str(shot_id) + '|Shot Link> \n<https://markmedia.shotgunstudio.com/detail/Shot/' + str(shot_id) +'#' + str(seq_type) + '_' + str(seq_id) + '_' + str(seq_name) + '|Sequence Link>'
print "checking else statement"
print event['project']['id'], event['meta']['new_value'], event['event_type']
print "\n"
requests.get(
'https://slack.com/api/chat.postMessage',
params={
'channel': SLACK_CHANNEL,
'fallback': 'This is ready for review: '+ str(task['entity']['name']) + '>' + str(event['entity']['name']),
'params': None,
'attachments': json.dumps([
{'fallback': 'This is ready for review: ' + str(task['entity']['name']) + ' > ' + str(event['entity']['name']),
'title': 'This is ready for review: ' + str(task['entity']['name']) + ' > ' + str(event['entity']['name']),
'text': '<https://markmedia.shotgunstudio.com/detail/Task/'+str(event['entity']['id']) + '/|'+ str(task['entity']['name']) + ' ' + str(event['entity']['name']) + ">\nProject: " + str(event['project']['name']) + seq_link
}
]),
'token': SLACK_TOKEN,
'username': 'elainew',
'as_user': True
}
)
__sgevents__ = {
'type': 'callback',
'callback': '%s:callback' % __file__.rstrip('c'),
}
'''
what change
- is it a task? (event object ? event_type > meta (attr name, new value = rev))
- sg_status_list on the task?
- is it interesting for stubbs? (is it rev? )
- if interesting, ping stubbs
'''
'''build/scripts/sgevents-dispatch -p plugins/examples/slack.py outputcopy.json
on rf40.mm -> python build/scripts/sgevents-daemon -p plugins/examples/slack.py
''' |
def response_decider():
"""
This will execute commands or reply base on user input
Returns:
Nothing
"""
# Begin Application
print(INTRO)
# User Input Analyzer
while True:
# Get user input
user_input = input("User: ")
if user_input.lower() == "":
print("Chakbot: Enter something ( - _ -\")")
elif user_input.lower() == "help" or user_input.lower() == "menu":
print(HELP_MENU)
elif user_input[:4].lower() == "help" and user_input[5:] in HELP_EX.keys():
print("{\r\n" + HELP_EX[user_input[5:]] + "\r\n}\r\n")
elif user_input == "//exit":
# Always check if user wants to terminate
sys.exit()
elif user_input[:9].lower() == "calculate":
# Determine if formula follows after calculate, if not then prompt
if len(user_input) <= 9:
# Chakbot will request formula
print('Chakbot: ' + str(calculate(input("Chakbot: Sure, what is your formula?\r\nFormula: "))))
else:
# Formula following calculate
print('Chakbot: ' + str(calculate(user_input[9:])))
elif user_input[:7].lower() == "convert":
if len(user_input) <= 7:
# Chakbot will request conversion
print('Chakbot: ' + str(convert(input("Chakbot: Sure, what do you want to convert?\r\nConversion: "))))
else:
# Conversion following convert
print('Chakbot: ' + str(convert(user_input[7:])))
elif user_input[:7].lower() == "encrypt":
if len(user_input) <= 7:
# Chakbot will request message to encrypt
print('Chakbot: ' + str(encrypt(input("Chakbot: Sure, what's the message?\r\nMessage: "))))
else:
# Conversion following encrypt
print('Chakbot: ' + str(encrypt(user_input[8:])))
elif user_input[:7].lower() == "decrypt":
if len(user_input) <= 7:
# Chakbot will request message and key to decrypt
message = input("Chakbot: Sure, what's the message?\r\nMessage: ")
print('Chakbot: ' + str(decrypt(message, input("Chakbot: What's the key?\r\nKey: "))))
else:
# Conversion following decrypt
print('Chakbot: ' + str(decrypt(user_input[8:user_input.rfind(' ')],
user_input[user_input.rfind(' ') + 1:])))
elif user_input[:5].lower() == "morse":
if len(user_input) <= 5:
# Chakbot will request message to translate to morse
print('Chakbot: ' + str(morse(input("Chakbot: Sure, what's the message?\r\nMessage: "))))
else:
                # Message following morse
print('Chakbot: ' + str(morse(user_input[6:])))
elif user_input[:5].lower() == "teach":
if len(user_input) <= 5:
                # Chakbot will request the category, request, and response
                categ = input("Chakbot: Sure, what's the category?\r\nCategory: ")
                request = input("Chakbot: Okay, what's the request?\r\nMessage: ")
                response = input("Chakbot: How should I respond to it?\r\nMessage: ")
                print('Chakbot: ' + str(teach(categ, request, response)))
            else:
                # Teach arguments following the command
categ = user_input[6:][:user_input[6:].find(' ')]
sec_1 = 6 + len(categ) + 2
request = user_input[sec_1: sec_1 + user_input[sec_1:].find("\"")]
sec_2 = sec_1 + len(request) + 3
response = user_input[sec_2:len(user_input) - 1]
print('Chakbot: ' + str(teach(categ, request, response)))
elif user_input[:4].lower() == "game":
if len(user_input) <= 4:
                # Chakbot will show the games menu and ask which game to play
                print(GAMES_MENU)
                print("Chakbot: " +
                      str(game_launcher(input("Chakbot: Which game would you like to play?\r\nInput: "))))
            else:
                # Game name following the command
print('Chakbot: ' + str(game_launcher(user_input[5:])))
else:
# Get reply from brain
reply = get_message(user_input)
# For if user wants to stop talking
if reply == 0:
option = input("Chakbot: Do you want to leave me? (y/n) <(T ^ T v)\r\nOption: ")
if option == 'y':
print(str("Chakbot: " + random.choice(CATEGORIES["r;salute"])))
sys.exit()
elif option == 'n':
print("Chakbot: Yay! We can continue talking ^( ^ o ^ )>")
else:
print("Chakbot: That's not a valid option, so you are staying with me.")
elif reply == 1:
# For asking dates
print("Chakbot: " + random.choice(CATEGORIES["r;askingdate"]) + " " + str(datetime.datetime.now()))
elif reply == 2:
# Placeholder for corresponding trivia
pass
else:
# For all responses
print("Chakbot: " + reply)
if __name__ == "__main__":
import sys
import datetime
from text_output import *
from learning import *
from cryptography import *
from extra_tools import *
from games import *
# First Initialization
read_brain()
response_decider()
|
import dgl
import torch
import torch.nn as nn
from data.molecules import MoleculeDataset
from data.QM9 import QM9Dataset
from data.SBMs import SBMsDataset
from data.TSP import TSPDataset
from data.superpixels import SuperPixDataset
from data.cora import CoraDataset
from models.networks import *
from utils.utils import *
class TransInput(nn.Module):
def __init__(self, trans_fn):
super().__init__()
self.trans = trans_fn
def forward(self, input):
if self.trans:
input['V'] = self.trans(input['V'])
return input
class TransOutput(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
if args.task == 'node_level':
channel_sequence = (args.node_dim, ) * args.nb_mlp_layer + (args.nb_classes, )
self.trans = MLP(channel_sequence)
elif args.task == 'link_level':
channel_sequence = (args.node_dim * 2, ) * args.nb_mlp_layer + (args.nb_classes, )
self.trans = MLP(channel_sequence)
elif args.task == 'graph_level':
channel_sequence = (args.node_dim, ) * args.nb_mlp_layer + (args.nb_classes, )
self.trans = MLP(channel_sequence)
else:
raise Exception('Unknown task!')
def forward(self, input):
G, V = input['G'], input['V']
if self.args.task == 'node_level':
output = self.trans(V)
elif self.args.task == 'link_level':
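            # link-level task: build an edge representation by concatenating the
            # features of the two endpoint nodes, then classify each edge.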
def _edge_feat(edges):
e = torch.cat([edges.src['V'], edges.dst['V']], dim=1)
return {'e': e}
G.ndata['V'] = V
G.apply_edges(_edge_feat)
output = self.trans(G.edata['e'])
elif self.args.task == 'graph_level':
G.ndata['V'] = V
readout = dgl.mean_nodes(G, 'V')
output = self.trans(readout)
else:
raise Exception('Unknown task!')
return output
def get_trans_input(args):
if args.data in ['ZINC']:
trans_input = nn.Embedding(args.in_dim_V, args.node_dim)
elif args.data in ['TSP']:
trans_input = nn.Linear(args.in_dim_V, args.node_dim)
elif args.data in ['SBM_CLUSTER', 'SBM_PATTERN']:
trans_input = nn.Embedding(args.in_dim_V, args.node_dim)
elif args.data in ['CIFAR10', 'MNIST', 'Cora']:
trans_input = nn.Linear(args.in_dim_V, args.node_dim)
elif args.data in ['QM9']:
trans_input = nn.Linear(args.in_dim_V, args.node_dim)
else:
raise Exception('Unknown dataset!')
return trans_input
def get_loss_fn(args):
if args.data in ['ZINC', 'QM9']:
loss_fn = MoleculesCriterion()
elif args.data in ['TSP']:
loss_fn = TSPCriterion()
elif args.data in ['SBM_CLUSTER', 'SBM_PATTERN']:
loss_fn = SBMsCriterion(args.nb_classes)
elif args.data in ['CIFAR10', 'MNIST']:
loss_fn = SuperPixCriterion()
elif args.data in ['Cora']:
loss_fn = CiteCriterion()
else:
raise Exception('Unknown dataset!')
return loss_fn
def load_data(args):
if args.data in ['ZINC']:
return MoleculeDataset(args.data)
elif args.data in ['QM9']:
return QM9Dataset(args.data, args.extra)
elif args.data in ['TSP']:
return TSPDataset(args.data)
elif args.data in ['MNIST', 'CIFAR10']:
return SuperPixDataset(args.data)
elif args.data in ['SBM_CLUSTER', 'SBM_PATTERN']:
return SBMsDataset(args.data)
elif args.data in ['Cora']:
return CoraDataset(args.data)
else:
raise Exception('Unknown dataset!')
def load_metric(args):
if args.data in ['ZINC', 'QM9']:
return MAE
elif args.data in ['TSP']:
return binary_f1_score
elif args.data in ['MNIST', 'CIFAR10']:
return accuracy_MNIST_CIFAR
elif args.data in ['SBM_CLUSTER', 'SBM_PATTERN']:
return accuracy_SBM
elif args.data in ['Cora']:
return CoraAccuracy
else:
raise Exception('Unknown dataset!')
|
# based on https://gist.github.com/wshanshan/c825efca4501a491447056849dd207d6
# Ported for ProjectAlf by Alfiananda P.A
import os
import random
import numpy as np
from colour import Color
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image, ImageDraw, ImageFont
from telethon.tl.types import DocumentAttributeFilename
from scape import CMD_HELP, bot
from scape.events import register
bground = "black"
@register(outgoing=True, pattern=r"^\.(ascii|asciis)$")
async def ascii(event):
if not event.reply_to_msg_id:
await event.edit("`Reply to Any media..`")
return
reply_message = await event.get_reply_message()
if not reply_message.media:
await event.edit("`reply to a image/sticker/video`")
return
await event.edit("`Downloading Media..`")
if reply_message.photo:
IMG = await bot.download_media(
reply_message,
"ascii.png",
)
elif (
DocumentAttributeFilename(file_name="AnimatedSticker.tgs")
in reply_message.media.document.attributes
):
await bot.download_media(
reply_message,
"ASCII.tgs",
)
os.system("lottie_convert.py ASCII.tgs ascii.png")
IMG = "ascii.png"
elif reply_message.video:
video = await bot.download_media(
reply_message,
"ascii.mp4",
)
extractMetadata(createParser(video))
os.system("ffmpeg -i ascii.mp4 -vframes 1 -an -s 480x360 -ss 1 ascii.png")
IMG = "ascii.png"
else:
IMG = await bot.download_media(
reply_message,
"ascii.png",
)
try:
await event.edit("`Processing..`")
list = await random_color()
color1 = list[0]
color2 = list[1]
bgcolor = bground
await asciiart(IMG, color1, color2, bgcolor)
cmd = event.pattern_match.group(1)
if cmd == "asciis":
os.system("cp ascii.png ascii.webp")
ascii_file = "ascii.webp"
else:
ascii_file = "ascii.png"
await event.client.send_file(
event.chat_id,
ascii_file,
force_document=False,
reply_to=event.reply_to_msg_id,
)
await event.delete()
os.system("rm *.png *.webp *.mp4 *.tgs")
except BaseException as e:
os.system("rm *.png *.webp *.mp4 *.png")
return await event.edit(str(e))
async def asciiart(IMG, color1, color2, bgcolor):
chars = np.asarray(list(" .,:irs?@9B&#"))
font = ImageFont.load_default()
letter_width = font.getsize("x")[0]
letter_height = font.getsize("x")[1]
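    # width-correction factor: rendered characters are taller than they are
    # wide, so scale the column count to roughly preserve the aspect ratio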
WCF = letter_height / letter_width
img = Image.open(IMG)
widthByLetter = round(img.size[0] * 0.15 * WCF)
heightByLetter = round(img.size[1] * 0.15)
S = (widthByLetter, heightByLetter)
img = img.resize(S)
img = np.sum(np.asarray(img), axis=2)
img -= img.min()
img = (1.0 - img / img.max()) ** 2.2 * (chars.size - 1)
lines = ("\n".join(("".join(r)
for r in chars[img.astype(int)]))).split("\n")
nbins = len(lines)
colorRange = list(Color(color1).range_to(Color(color2), nbins))
newImg_width = letter_width * widthByLetter
newImg_height = letter_height * heightByLetter
newImg = Image.new("RGBA", (newImg_width, newImg_height), bgcolor)
draw = ImageDraw.Draw(newImg)
leftpadding = 0
y = 0
lineIdx = 0
for line in lines:
color = colorRange[lineIdx]
lineIdx += 1
draw.text((leftpadding, y), line, color.hex, font=font)
y += letter_height
    newImg.save("ascii.png")
    return "ascii.png"
# this is from userge
async def random_color():
color = [
"#" + "".join([random.choice("0123456789ABCDEF") for k in range(6)])
for i in range(2)
]
return color
@register(outgoing=True, pattern=r"^\.asciibg(?: |$)(.*)")
async def _(event):
BG = event.pattern_match.group(1)
if BG.isnumeric():
return await event.edit("`Please input a color not a number!`")
elif BG:
global bground
bground = BG
else:
return await event.edit("`please insert bg of ascii`")
await event.edit(f"`Successfully set bg of ascii to` **{BG}**")
CMD_HELP.update(
{
"ascii": "`.ascii`\n"
"Usage: create ascii art from media\n\n"
"`.asciis`\n"
"Usage: same but upload the result as sticker\n\n"
"`.asciibg <color>`\n"
"Usage: to change background color of this ascii module"
}
)
|
import argparse
import gym
from .agents import HumanPlayer, RandomPlayer, SelfPlayRLAgent
parser = argparse.ArgumentParser("Play Connect-Four between two players")
parser.add_argument("--p1", choices=['self_play', 'random', 'human'], default='self_play')
parser.add_argument("--p2", choices=['self_play', 'random', 'human'], default='human')
args = parser.parse_args()
env = gym.make('ConnectFour7x7-v0')
cross_player = None
if args.p1 == 'self_play':
cross_player = SelfPlayRLAgent('connect_four_train_data/best_agent.pkl',
verbose=True)
elif args.p1 == 'human':
cross_player = HumanPlayer()
elif args.p1 == 'random':
cross_player = RandomPlayer()
naught_player = None
if args.p2 == 'self_play':
naught_player = SelfPlayRLAgent('connect_four_train_data/best_agent.pkl',
verbose=True)
elif args.p2 == 'human':
naught_player = HumanPlayer()
elif args.p2 == 'random':
naught_player = RandomPlayer()
cross_player.eval()
naught_player.eval()
obs = env.reset()
while True:
action = cross_player.get_action(obs[1], 1)
obs, reward, done, _ = env.step((1, action))
env.render()
print(reward)
if all(done):
break
action = naught_player.get_action(obs[0], 0)
obs, reward, done, _ = env.step((0, action))
env.render()
print(reward)
if all(done):
break
if reward[0] == 0.:
print("This was draw.")
elif reward[0] == -1.:
print("Player {} win!".format(cross_player.get_name()))
else:
print("Player {} win!".format(naught_player.get_name()))
print("Switching sides now!")
obs = env.reset()
while True:
action = naught_player.get_action(obs[1], 1)
obs, reward, done, _ = env.step((1, action))
env.render()
print(reward)
if all(done):
break
action = cross_player.get_action(obs[0], 0)
obs, reward, done, _ = env.step((0, action))
env.render()
print(reward)
if all(done):
break
if reward[0] == 0.:
print("This was draw.")
elif reward[0] == -1.:
print("Player {} win!".format(naught_player.get_name()))
else:
print("Player {} win!".format(cross_player.get_name()))
|
# Delete a video using its video ID
import apivideo
from apivideo.apis import VideosApi
from apivideo.exceptions import ApiAuthException
api_key = "your api key here"
client = apivideo.AuthenticatedApiClient(api_key)
# If you'd rather use the sandbox environment:
# client = apivideo.AuthenticatedApiClient(api_key, production=False)
client.connect()
videos_api = VideosApi(client)
title = "Sample AVI Video"
# List videos that have the exact, unique title you wanted to delete
videos = videos_api.list(title=title)
# Pull the list of matching videos out of the response object's 'data' field
videos = videos['data']
# In this case, let's assume we know there's only one video with the title we filtered for.
print(videos[0]['video_id'])
# Delete the video
response = videos_api.delete(videos[0]['video_id'])
print(response)
|
from api import db, bcrypt, UserModel
import datetime
class Administration():
@staticmethod
def create_admin_user(name, login, password):
Administration.delete_user(login)
user = UserModel(name, login, password, timestamp = datetime.datetime.now())
try:
result = db.session.add(user)
db.session.commit()
except BaseException as e:
print(e.orig)
@staticmethod
def delete_user(login):
user_exists = UserModel.query.filter_by(login=login).first()
if user_exists is not None:
db.session.delete(user_exists)
db.session.commit()
|
def break_words(stuff):
"""this function will break up words for us.
Args:
stuff ([string]): [the string to break]
"""
words = stuff.split(' ')
return words
def sort_words(words):
"""this function will sort words
Args:
words ([string]): [the string to sort]
"""
return sorted(words)
def print_first_word(words):
"""prints the first word after popping it off.
Args:
words ([list]): [the list you want to operate on]
"""
word = words.pop(0)
print(word)
def print_last_word(words):
"""prints the last word after popping it off.
Args:
words ([list]): [the list you want to operate on]
"""
word = words.pop(-1)
print(word)
def sort_sentence(sentence):
"""takes in a full sentence and returns the sorted words.
Args:
sentence ([string]): [the sentence to sort]
"""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""takes in a full sentence and returns the sorted words.
Args:
sentence ([string]): [the sentence you want to operate on]
"""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""sorts the words then print the first and last one.
Args:
sentence ([string]): [the string you want to operate on]
"""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words) |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Graz University of Technology.
#
# invenio-records-lom is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Marshmallow schema for validating and serializing LOM JSONs."""
from flask import current_app
from invenio_rdm_records.services.schemas import RDMRecordSchema
from marshmallow import fields
from werkzeug.local import LocalProxy
from .fields import ControlledVocabularyField
class LOMRecordSchema(RDMRecordSchema):
"""Marshmallow schema for validating LOM records."""
# NOTE: To ensure compatibility with invenio systemfields,
# use ``NestedAttribute`` instead of ``fields.Nested()``.
# overwrite metadata-field: allow any dict
metadata = fields.Dict(keys=fields.String(), values=fields.Field())
resource_type = ControlledVocabularyField(
vocabulary=LocalProxy(lambda: current_app.config["LOM_RESOURCE_TYPES"]),
)
__all__ = ("LOMRecordSchema",)
|
import re
from pathlib import Path
from random import random
import torch
from more_itertools import flatten
from torch import nn
from torch.utils.data import Dataset
from torchvision import transforms
from util import dose2locs, loc2dose
class ImageDataset(Dataset):
def __init__(self, folder, image_size, transparent=False, train=True, norm_f=None,
aug_prob=0., greyscale=False, doses=[0.0], label=False):
def paths(folder, doses):
not_52 = re.compile('/[^(52)]')
assays = flatten(dose2locs[dose] for dose in doses)
gen = flatten((Path(f'{folder}').glob(
f'**/*{assay}*.pt')) for assay in assays)
return [p for p in gen if not_52.search(str(p))]
self.dose2id = {k: i for i, k in enumerate(doses)}
self.f = d8 if train else (lambda x: x)
super().__init__()
self.folder = folder
self.image_size = image_size
self.label = label
self.norm_f = norm_f or (lambda x: x)
self.paths = paths(folder, doses)
assert len(self.paths) > 0, f'No images were found in {folder} for training'
#convert_image_fn = convert_transparent_to_rgb if not transparent else convert_rgb_to_transparent
self.num_channels = 3 if not transparent else 5
self.num_channels = 1 if greyscale else self.num_channels
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.RandomCrop(256),
transforms.RandomPerspective(p=aug_prob),
transforms.RandomErasing(p=aug_prob),
# transforms.ColorJitter(saturation=.1, contrast=.1)
# RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)),
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = self.norm_f(torch.load(path))
if self.num_channels == 3:
img = img[:3]
elif self.num_channels == 1:
img = img[1:2]
if self.label:
label = self.dose2id[loc2dose[str(path).split()[-2]]]
return self.transform(self.f(img/255)), label
return self.transform(self.f(img/255))
class MSNorm:
def __init__(self, norm_path):
self.mean, self.std = torch.load(norm_path, map_location='cpu')
def __call__(self, img):
return (img - self.mean) / self.std
def invert(self, img):
return img * self.std + self.mean
def denorm_f(ms, device):
mean, std = map(lambda x: torch.tensor(x, device=device)[None, :, None, None], ms)
return lambda x: (x*std + mean).cpu()
def d8(img):
r = random()
if r > .75:
img = torch.rot90(img, 3, dims=(1,2))
elif r > .5:
img = torch.rot90(img, 2, dims=(1,2))
elif r > .25:
img = torch.rot90(img, 1, dims=(1,2))
if random()>.5:
img = torch.flip(img, dims=(2,))
return img
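# Minimal usage sketch (paths and the 1.0 dose are hypothetical examples;
# every dose passed in must be a key of util.dose2locs):
# norm = MSNorm('stats/mean_std.pt')
# dataset = ImageDataset('data/plates', image_size=256, norm_f=norm,
#                        doses=[0.0, 1.0], label=True)
# loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)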
|
import requests
def call_api(city_name):
url = "http://api.openweathermap.org/data/2.5/weather"
api_key = "d14f9d7cc8a0c8af189d902e396458ea"
params = {"appid" : api_key, "q" : city_name, "units" : "metric"}
response = requests.get(url, params = params)
response_in_json = response.json()
weather_result = format_response(response_in_json)
misc_result = misc_info(response_in_json)
return weather_result, misc_result
def format_response(response_json):
try:
City_name_0 = response_json['name']
City_Latitude = str(response_json['coord']['lat'])
City_Longitude = str(response_json['coord']['lon'])
Country = response_json['sys']['country']
City_overview = City_name_0 + ' -(Latitude: ' + City_Latitude + ', Longitude: ' + City_Longitude + ') ' + Country
Clouds = str(response_json['weather'][0]['description']).capitalize()
Temperature = str(response_json['main']['temp']) + '°C'
final_str = '%s\nClouds: %s\nTemperature: %s' % (City_overview,Clouds,Temperature)
    except (KeyError, IndexError, TypeError):
final_str = "Name isn't Valid"
return final_str
def misc_info(response_json):
try:
temp_min = str(response_json['main']['temp_min']) + '°C'
temp_max = str(response_json['main']['temp_max']) + '°C'
pressure = str(response_json['main']['pressure']) + ' hPa'
humidity = str(response_json['main']['humidity']) + ' %'
wind_speed = str(response_json['wind']['speed']) + ' m/s'
time_zone = str(response_json['timezone']) + ' UTC'
miscellaneous_str = "Min Temp: %s\nMax Temp: %s\nPressure: %s\nHumidity: %s\nWind Speed: %s\nTimezone: %s" % (temp_min,temp_max,pressure,humidity,wind_speed,time_zone)
    except (KeyError, TypeError):
miscellaneous_str = "Name isn't Valid"
return miscellaneous_str
city = input("Enter City Name: ")
call_weather_result, call_misc_result = call_api(city)
print(call_weather_result)
if call_weather_result != "Name isn't Valid":
further_info = input("Display Misc Information?\n[Y/N]\n")
if further_info == "N":
print("You're all set!")
elif further_info == "Y":
print(call_misc_result) |
# -*- coding: utf-8 -*-
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
from ucloud.core.client import Client
from ucloud.services.uhub.schemas import apis
class UHubClient(Client):
def __init__(self, config, transport=None, middleware=None, logger=None):
super(UHubClient, self).__init__(config, transport, middleware, logger)
def create_repo(self, req=None, **kwargs):
""" CreateRepo - 创建镜像仓库
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **RepoName** (str) - (Required) 仓库名称,不可修改
- **Description** (str) - 仓库备注
- **IsShared** (bool) - 镜像仓库是否公开,公开为true、不公开为false;默认为false
**Response**
- **Message** (str) - 有错误时返回内容
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.CreateRepoRequestSchema().dumps(d)
kwargs["max_retries"] = 0
resp = self.invoke("CreateRepo", d, **kwargs)
return apis.CreateRepoResponseSchema().loads(resp)
def delete_repo(self, req=None, **kwargs):
""" DeleteRepo - 删除镜像仓库
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **RepoName** (str) - (Required) 镜像仓库名称
**Response**
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.DeleteRepoRequestSchema().dumps(d)
resp = self.invoke("DeleteRepo", d, **kwargs)
return apis.DeleteRepoResponseSchema().loads(resp)
def delete_repo_image(self, req=None, **kwargs):
""" DeleteRepoImage - 删除镜像
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **ImageName** (str) - (Required) 镜像名称
- **RepoName** (str) - (Required) 镜像仓库名称
- **TagName** (str) - 不指定tag则删除全部tag
**Response**
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.DeleteRepoImageRequestSchema().dumps(d)
resp = self.invoke("DeleteRepoImage", d, **kwargs)
return apis.DeleteRepoImageResponseSchema().loads(resp)
def get_image_tag(self, req=None, **kwargs):
""" GetImageTag - 获取镜像tag
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **ImageName** (str) - (Required) 镜像名称
- **RepoName** (str) - (Required) 镜像仓库名称
- **Limit** (int) - 每次获取数量,默认为20
- **Offset** (int) - 偏移量,默认0
- **TagName** (str) - 默认不写,如果填写,代表查询该tag,否则查全部tag
**Response**
- **TagSet** (list) - 见 **TagSet** 模型定义
- **TotalCount** (int) - tag总数
**Response Model**
**TagSet**
- **TagName** (str) - Tag名称
- **UpdateTime** (str) - 镜像更新时间
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.GetImageTagRequestSchema().dumps(d)
resp = self.invoke("GetImageTag", d, **kwargs)
return apis.GetImageTagResponseSchema().loads(resp)
def get_repo(self, req=None, **kwargs):
""" GetRepo - 获取镜像仓库
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **Limit** (int) - 数量,默认20
- **Offset** (int) - 偏移量,默认0
- **Type** (str) - private私有仓库,public公共仓库,默认public
**Response**
- **RepoSet** (list) - 见 **RepoSet** 模型定义
- **TotalCount** (int) - 总的仓库数量
**Response Model**
**RepoSet**
- **CreateTime** (str) - 仓库创建时间
- **Description** (str) - 镜像仓库描述
- **IsOutSide** (str) - 镜像仓库是否外网可以访问,可以为ture,不可以为false
- **IsShared** (str) - 镜像仓库类型,false为私有;true为公有
- **RepoName** (str) - 镜像仓库名称
- **UpdateTime** (str) - 仓库更新时间
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.GetRepoRequestSchema().dumps(d)
resp = self.invoke("GetRepo", d, **kwargs)
return apis.GetRepoResponseSchema().loads(resp)
def get_repo_image(self, req=None, **kwargs):
""" GetRepoImage - 获取镜像仓库下的镜像
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **RepoName** (str) - (Required) 镜像仓库名称
- **Limit** (int) - 显示数量,默认为20
- **Offset** (int) - 偏移量,默认0
**Response**
- **ImageSet** (list) - 见 **ImageSet** 模型定义
- **TotalCount** (int) -
**Response Model**
**ImageSet**
- **CreateTime** (str) - 创建时间
- **ImageName** (str) - 镜像名称
- **LatestTag** (str) - 最新push的Tag
- **PullCount** (int) - 镜像被下载次数
- **RepoName** (str) - 镜像仓库名称
- **UpdateTime** (str) - 修改时间
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.GetRepoImageRequestSchema().dumps(d)
resp = self.invoke("GetRepoImage", d, **kwargs)
return apis.GetRepoImageResponseSchema().loads(resp)
def update_repo(self, req=None, **kwargs):
""" UpdateRepo - 更新镜像仓库
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **RepoName** (str) - (Required) 镜像仓库名称,不可修改
- **Description** (str) - 备注
- **IsShared** (str) - false设置为私有;true设置为公有。默认false
**Response**
- **Message** (str) - 错误的时候返回
"""
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.UpdateRepoRequestSchema().dumps(d)
resp = self.invoke("UpdateRepo", d, **kwargs)
return apis.UpdateRepoResponseSchema().loads(resp)
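# Minimal usage sketch (assumes a ucloud Config object with credentials and a
# project id has been prepared elsewhere; the request values are placeholders):
# client = UHubClient(config)
# repos = client.get_repo({"Limit": 20, "Offset": 0})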
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Defined Configs."""
class PBAPolicyConfig(object):
"""PBA Policy Config."""
total_epochs = 81
config_count = 16
each_epochs = 3
total_rungs = 200
class PBAConfig(object):
"""PBA Config."""
policy = PBAPolicyConfig
objective_keys = 'accuracy'
transformers = dict(Cutout=True,
Rotate=True,
Translate_X=True,
Translate_Y=True,
Brightness=True,
Color=True,
Invert=True,
Sharpness=True,
Posterize=True,
Shear_X=True,
Solarize=True,
Shear_Y=True,
Equalize=True,
AutoContrast=True,
Contras=True)
|
from typing import Any, List
from ..problem import ProblemPredicate, ProblemType
from .base import Field, NonNullMixin
class BoolField(NonNullMixin, Field):
def validate(self, value: Any) -> List[ProblemPredicate]:
problems = super().validate(value)
if problems:
return problems
return self.check_type(value, bool, ProblemType.BOOLEAN)
|
import datetime, time
from io import StringIO
import typing
import aiohttp
from aiohttp.client import ClientSession
import discord
from discord import message
from discord import file
from discord.components import SelectOption
from discord.enums import DefaultAvatar
from discord.ext import commands
from discord.ext.commands.core import Command, Group, guild_only
from discord.ext.commands.errors import BadArgument, MemberNotFound
from discord.http import Route
from discord.ui import view
from discord.ui.select import Select
from convertors import HTTPConvertors
from utils import MessageUtils, MathUtils
from views.help import HelpView
from inspect import Parameter
import re
import os
DISCORD_API_ENDPOINT = "https://discord.com/api/v9"
URL_REGEX = re.compile(
    r"(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,})"
)
WHITELIST = [
DISCORD_API_ENDPOINT + "/users/@me",
"https://httpbin.org/get",
"https://httpbin.org/put",
"https://httpbin.org/delete",
"https://httpbin.org/post",
"https://httpbin.org/patch",
]
BLACKLIST = ["https://www.google.com/"]
def will_send(url: str) -> bool:
if url in WHITELIST:
return True
if url in BLACKLIST:
return False
return True
class Basic(commands.Cog):
def __init__(self, client) -> None:
super().__init__()
self.client: commands.Bot = client
self.client.help_command = Help()
self.session = aiohttp.ClientSession
@commands.command(name="about")
async def about(self, ctx: commands.Context):
uptime = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
- self.client.start_time
)
hours, remainder = divmod(int(uptime.total_seconds()), 3600)
days, hours = divmod(hours, 24)
minutes, seconds = divmod(remainder, 60)
total = str(sum(len(guild.members) for guild in self.client.guilds))
unique = str(len(self.client.users))
description = f"Stats for shard 0\nI've been up for {days} days, {hours} hours, {minutes} minutes, {seconds} seconds\nI've recieved {self.client.user_messages} user messages, {self.client.bot_messages} bot messages ({self.client.self_messages} were mine)\nI'm serving {total} users ({unique} unique)"
embed = discord.Embed(
description=description,
colour=0x00CEA2,
timestamp=datetime.datetime.utcfromtimestamp(time.time()).replace(
tzinfo=datetime.timezone.utc
),
)
await ctx.send(embed=embed)
@commands.command(
name="userinfo", aliases=["user", "whois", "user_info", "user_profile"]
)
@commands.guild_only()
async def userinfo(
self,
ctx: commands.Context,
target: typing.Union[discord.Member, discord.User] = None,
):
await ctx.send(
embed=MessageUtils.build(type="user_info", member=target, issuer=ctx.author)
)
@userinfo.error
async def userinfo_error(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send("I can't find that member")
return
@commands.group(name="math")
async def mathtools(self, ctx: commands.Context):
"""Root command for math tools"""
if ctx.invoked_subcommand is None:
await ctx.send_help("math")
@mathtools.command(name="fib")
async def fib(self, ctx: commands.Context, *, n: str = None):
"""Compute the nth Fibbonaci term"""
if n is None:
await ctx.send_help("math fib")
else:
try:
n = int(n.replace(" ", "").replace(",", ""))
if n == 0:
await ctx.send_help("math fib")
elif n < 0:
raise BadArgument()
else:
try:
start_time = time.time()
fib = MathUtils.fib(n)
end_time = time.time()
await ctx.send(
f"The {n}th number in the classic Fibonnaci sequence is\n```{fib}\n```"
)
except RecursionError:
await ctx.send(
f"The number supplied ({n}) is greater then my threshold"
)
except ValueError:
raise BadArgument()
@mathtools.command(name="tri")
async def tri(self, ctx: commands.Context, *, n: str = None):
"""Compute the nth triangular number"""
if n is None:
await ctx.send_help("math tri")
else:
try:
n = int(n.replace(" ", "").replace(",", ""))
if n == 0:
await ctx.send_help("math tri")
elif n < 0:
raise BadArgument()
else:
start_time = time.time()
tri = MathUtils.tri(n)
end_time = time.time()
await ctx.send(f"The {n}th triangular number is\n```{tri}\n```")
except ValueError:
raise BadArgument()
@commands.command(name="serverinfo", aliases=["server"])
@commands.guild_only()
async def server_info(self, ctx: commands.Context):
guild = ctx.guild
guild_features = ", ".join(guild.features)
if guild_features == "":
guild_features = None
guild_made = guild.created_at.strftime("%d-%m-%Y")
embed = discord.Embed(
color=guild.roles[-1].color,
timestamp=datetime.datetime.utcfromtimestamp(time.time()).replace(
tzinfo=datetime.timezone.utc
),
)
if guild.icon is not None:
embed.set_thumbnail(url=guild.icon.url)
embed.add_field(name="Name", value=guild.name, inline=True)
embed.add_field(name="ID", value=guild.id, inline=True)
embed.add_field(name="Owner", value=guild.owner, inline=True)
embed.add_field(name="Members", value=guild.member_count, inline=True)
embed.add_field(
name="Channels",
value=f"📚 Categories: {str(len(guild.categories))}\n📝 Text channels: {str(len(guild.text_channels))}\n:microphone2: Voice channels: {str(len(guild.voice_channels))}\nTotal channels: {str(len(guild.text_channels) + len(guild.voice_channels))}",
inline=True,
)
embed.add_field(
name="Created at",
value=f"{guild_made} ({(datetime.datetime.utcfromtimestamp(time.time()).replace(tzinfo=datetime.timezone.utc) - guild.created_at).days} days ago)",
inline=True,
)
embed.add_field(name="VIP features", value=guild_features, inline=True)
if guild.icon is not None:
embed.add_field(
name="Server icon",
value=f"[Server icon]({guild.icon.url})",
inline=True,
)
roles = ", ".join(role.name for role in guild.roles)
embed.add_field(
name="Roles",
value=roles if len(roles) < 1024 else f"{len(guild.roles)} roles",
inline=False,
)
if guild.emojis:
emoji = "".join(str(e) for e in guild.emojis)
embed.add_field(
name="Server emoji",
value=emoji if len(emoji) < 1024 else f"{len(guild.emojis)} emoji",
)
if guild.splash is not None:
embed.set_image(url=guild.splash.url)
if guild.banner is not None:
embed.set_image(url=guild.banner.url)
embed.set_footer(
text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url
)
await ctx.send(embed=embed)
class Help(commands.HelpCommand):
"""Custom help command"""
def __init__(self):
super().__init__()
def is_group(self, command: Command):
return "\n ↪" if isinstance(command, Group) else ""
async def send_bot_help(self, mapping):
message = await MessageUtils.gen_bot_help(self, mapping)
view = HelpView(self, self.context.bot)
await self.get_destination().send(content=message, view=view)
async def send_cog_help(self, cog: commands.Cog):
message = await MessageUtils.gen_cog_help(self, cog)
view = HelpView(self, self.context.bot)
await self.get_destination().send(
content=message, view=view.set_default(cog.qualified_name)
)
async def send_group_help(self, group: commands.Group):
message = await MessageUtils.gen_group_help(self, group)
view = HelpView(self, self.context.bot)
await self.get_destination().send(
content=message, view=view.set_default(group.cog_name)
)
async def send_command_help(self, command: commands.Command):
message = await MessageUtils.gen_command_help(self, command)
view = HelpView(self, self.context.bot)
await self.get_destination().send(
content=message, view=view.set_default(command.cog_name)
)
async def command_not_found(self, string):
return f"I can't seem to find any cog or command named {string}"
def setup(client):
client.add_cog(Basic(client=client))
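# Minimal usage sketch (assumes this cog lives at cogs/basic.py in the bot
# project; discord.py 1.x style extension loading):
# bot.load_extension("cogs.basic")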
|
"""Annotate with UCSC Genome Browser or other sql RefGene database."""
__author__ = "Martin Haagmans (https://github.com/zaag)"
__license__ = "MIT"
import os
import pymysql
import pybedtools
class Annotation:
"""Annotate gene or region with RefGene.
Method to query genomic intervals returns list of genenames.
Method to query gene names returns list of genomic intervals.
"""
def __init__(self, host=None, user=None, passwd=None, db=None,
genefile=None, genes=None):
"""Establish database connection."""
if host is None and user is None and passwd is None:
host = 'genome-mysql.cse.ucsc.edu'
user = 'genome'
if passwd is None:
passwd = ''
if db is None:
db = 'hg19'
self.host = host
self.user = user
self.db = db
self.passwd = passwd
self.conn = pymysql.connect(host=self.host, user=self.user,
passwd=self.passwd, db=self.db)
self.c = self.conn.cursor()
self.genes = None
def _parse_ucsc_output(self, out):
"""Read database output into list and return list."""
if len(out) == 1:
return [out[0][0]]
elif len(out) > 1:
return [tup[0] for tup in out]
        elif len(out) == 0:
            return ['NOGENE']
def get_genename(self, chromosome, start=None, end=None, tx=True, cds=False):
"""Read locus or interval and return gene(s).
Format locus/interval can be chr1:123456/chr1:123456-123789 or
chromosome, start and (optional) end.
Return a list with any gene(s) in the interval.
"""
if cds:
tx = False
if start is None and end is None:
if ':' not in chromosome:
                raise IOError('No chromosome and start position given')
chromosome, region = chromosome.split(':')
if '-' in region:
start, end = region.split('-')
else:
start = region
basetxsql = '''SELECT DISTINCT name2
FROM refGene
WHERE ((chrom='{c}') AND
'''.format(c=chromosome)
basecdssql = '''SELECT DISTINCT name2
FROM refGene
WHERE ((chrom='{c}') AND
'''.format(c=chromosome)
if end is not None:
txsql = '''(('{s}'<txEnd AND '{s}'>txStart) AND
('{e}'<txEnd AND '{e}'>txStart)))
'''.format(s=start, e=end)
cdssql = '''(('{s}'<cdsEnd AND '{s}'>cdsStart) AND
('{e}'<cdsEnd AND '{e}'>cdsStart)))
'''.format(s=start, e=end)
txsql = basetxsql + txsql
cdssql = basecdssql + cdssql
elif end is None:
txsql = '''('{s}'<txEnd AND '{s}'>txStart))
'''.format(s=start)
            cdssql = '''('{s}'<cdsEnd AND '{s}'>cdsStart))
'''.format(s=start)
txsql = basetxsql + txsql
cdssql = basecdssql + cdssql
if tx and not cds:
self.c.execute(txsql)
elif cds and not tx:
self.c.execute(cdssql)
if self.c.rowcount != 0:
return self._parse_ucsc_output(self.c.fetchall())
elif self.c.rowcount == 0:
if end is not None:
txsql = '''(('{s}'<txEnd AND '{s}'>txStart) OR
('{e}'<txEnd AND '{e}'>txStart)))
'''.format(s=start, e=end)
cdssql = '''(('{s}'<cdsEnd AND '{s}'>cdsStart) OR
('{e}'<cdsEnd AND '{e}'>cdsStart)))
'''.format(s=start, e=end)
txsql = basetxsql + txsql
cdssql = basecdssql + cdssql
if tx and not cds:
self.c.execute(txsql)
elif cds and not tx:
self.c.execute(cdssql)
if self.c.rowcount == 0:
return ['NOGENE']
else:
return self._parse_ucsc_output(self.c.fetchall())
elif end is None:
                return ['NOGENE']
def get_region(self, gene, tx=True, cds=False):
"""Read gene name2 and return list of tuples.
Parse gene name2 and return list of tuples with coding (cds) or
transcription (tx) region for all NM-numbers of that gene.
"""
if cds:
tx = False
txsql = """SELECT DISTINCT chrom, txStart, txEnd
FROM refGene
WHERE name2='{g}'
""".format(g=gene)
cdssql = """SELECT DISTINCT chrom, cdsStart, cdsEnd
FROM refGene
WHERE name2='{g}'
""".format(g=gene)
if tx and not cds:
self.c.execute(txsql)
elif cds and not tx:
self.c.execute(cdssql)
generegions = list()
for i in self.c.fetchall():
region = i[0], i[1], i[2]
generegions.append(region)
return generegions
class TargetAnnotation(Annotation):
"""Read BED file and optional (file with) list of genes.
Method to annotate BED with gene names via RefSeq database
Method to compare requested gene(s) with gene(s) in BED
"""
def __init__(self, bedfile, genes=None, skip=25, **kwds):
"""Establish connection with database and parse BED-file and genelist.
Raise IOerror if genes is not a file or list.
"""
super().__init__(**kwds)
self.skip = skip
self.bedlocation = bedfile
self.is_annotated = self._is_annotated()
self.bed = self._parse_bed()
if genes is None:
self.genes = list()
elif genes is not None:
if isinstance(genes, list):
genes = set(genes)
self.genes = [gene for gene in genes]
elif os.path.isfile(genes):
self.genes = self._parse_genefile(genes)
else:
raise IOError("Can't understand genes. Should be list or file")
def _is_annotated(self):
"""Check if there are >3 columns in BED. Return boolean."""
with open(self.bedlocation, 'r') as f:
cols = len(next(f).split())
if cols == 3:
return False
elif cols > 3:
return True
def _parse_bed(self):
"""Read BED. Return list of lists."""
bed = list()
with open(self.bedlocation, 'r') as f:
lines = [line.rstrip().split() for line in f]
lines = list(line for line in lines if line)
lines = sorted(lines, key=lambda x: (x[0], int(x[1]), int(x[2])))
if not self.is_annotated:
for line in lines:
chromosome, start, end = line
bed.append([chromosome, start, end])
elif self.is_annotated:
for line in lines:
chromosome, start, end, gene = line[:4]
bed.append([chromosome, start, end, gene])
return bed
def _parse_genefile(self, gf):
"""Read genefile into list and return list."""
with open(gf, 'r') as f:
lines = [line.rstrip() for line in f]
lines = list(line for line in lines if line)
return [l.split()[0] for l in lines]
def _parse_annot_out(self, gl):
if len(gl) == 1:
return gl
elif len(gl) > 1:
return [_ for _ in gl]
def annotate_bed(self):
"""Query RefSeq for every target in BED. Return list of lists."""
annotated_bed = list()
for target in self.bed:
chromosome, start, end = target[:3]
genename = self.get_genename(chromosome, int(start) + self.skip, int(end) - self.skip)
annotated_bed.append([chromosome, start, end, '/'.join(genename)])
self.is_annotated = True
return annotated_bed
def annotate_bed_and_filter_genes(self):
"""Annotate targets in BED and filter output with genelist.
Query RefSeq for every target in BED, filter results with list of genes
if the query returns >1 gene. Return list of lists.
"""
annotated_bed = list()
for target in self.bed:
chromosome, start, end = target[:3]
genename = self.get_genename(chromosome, int(start) + self.skip, int(end) - self.skip)
if len(genename) > 1:
_ = [gn for gn in genename if gn in self.genes]
if len(_) >= 1:
genename = _
annotated_bed.append([chromosome, start, end, '/'.join(genename)])
self.is_annotated = True
return annotated_bed
def get_genes_in_bed(self):
"""Query RefSeq for every target in non-annotated BED into list and return list."""
genesout = set()
if not self.is_annotated:
for target in self.bed:
chromosome, start, end = target
genename = self.get_genename(chromosome, int(start) + self.skip, int(end) - self.skip)
genesout.update(self._parse_annot_out(genename))
return list(genesout)
elif self.is_annotated:
return self.get_genes_in_annotated_bed(self.bed)
def get_genes_in_annotated_bed(self, annotated_bed):
"""Collect genes from annotated BED into list and return list."""
genesout = set()
for target in annotated_bed:
_chromosome, _start, _end, gene = target
genesout.update([i for i in gene.split('/')])
return list(genesout)
def get_genes_not_found(self, bedgenes=None):
"""Compare genes in BED with genes requested into list and return list."""
if bedgenes is None:
bedgenes = self.get_genes_in_bed()
notfound = [gene for gene in self.genes if gene not in bedgenes]
        if len(notfound) > 0:
            notfound.sort()
        return notfound
def get_genes_not_requested(self, genesout=None):
"""Compare genes requested with genes in BED into list and return list."""
if genesout is None:
genesout = self.get_genes_in_bed()
notrequested = [gene for gene in genesout if gene not in self.genes]
return notrequested
def report_genecomp(self):
"""Report differences between genes in BED and genes requested."""
if not self.is_annotated:
annotated_bed = self.annotate_bed()
elif self.is_annotated:
annotated_bed = self.bed
genes_in_bed = self.get_genes_in_annotated_bed(annotated_bed)
genes_not_found = self.get_genes_not_found(bedgenes=genes_in_bed)
genes_not_requested = self.get_genes_not_requested(genesout=genes_in_bed)
return genes_not_found, genes_not_requested
def annotate_bed(bedfile, output, genes=None, merge=True):
"""Merge and annotate bedfile.
A file with genenames is optional and used to filter if needed.
Write merged and annotated BED-file to output.
"""
if merge:
bed = pybedtools.BedTool(bedfile)
bed = bed.sort()
bed_merged = bed.merge()
bed_merged.saveas(bedfile)
if genes:
TA = TargetAnnotation(bedfile, genes=genes)
bed_annotated = TA.annotate_bed_and_filter_genes()
elif genes is None:
TA = TargetAnnotation(bedfile)
bed_annotated = TA.annotate_bed()
with open(output, 'w') as f:
for line in bed_annotated:
chromosome, start, end, gene = line
f.write('{}\t{}\t{}\t{}\n'.format(chromosome, start, end, gene))
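# Minimal usage sketch (file names are hypothetical; assumes the public UCSC
# MySQL server is reachable with the default credentials above):
# annotate_bed('targets.bed', 'targets_annotated.bed', genes='genes.txt')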
|
import math
def add_vectors(vector1, vector2):
    # Note: the vectors are tuples of (angle, magnitude)
x = math.sin(vector1[0]) * vector1[1] + math.sin(vector2[0]) * vector2[1]
y = math.cos(vector1[0]) * vector1[1] + math.cos(vector2[0]) * vector2[1]
mag = math.hypot(x, y)
angle = (math.pi/2) - math.atan2(y, x)
return (angle, mag) |
"""
Generalized extreme value distribution
--------------------------------------
Note that the parameter xi used here has the opposite sign
of the corresponding shape parameter in `scipy.stats.genextreme`.
"""
import mpmath
__all__ = ['pdf', 'logpdf', 'cdf', 'sf', 'mean', 'var']
def pdf(x, xi, mu=0, sigma=1):
"""
Generalized extreme value distribution probability density function.
"""
xi = mpmath.mpf(xi)
mu = mpmath.mpf(mu)
sigma = mpmath.mpf(sigma)
# Formula from wikipedia, which has a sign convention for xi that
# is the opposite of scipy's shape parameter.
z = (x - mu)/sigma
if xi != 0:
t = mpmath.power(1 + z*xi, -1/xi)
else:
t = mpmath.exp(-z)
p = mpmath.power(t, xi+1) * mpmath.exp(-t) / sigma
return p
def logpdf(x, xi, mu=0, sigma=1):
"""
Natural logarithm of the PDF of the generalized extreme value distribution.
"""
xi = mpmath.mpf(xi)
mu = mpmath.mpf(mu)
sigma = mpmath.mpf(sigma)
# Formula from wikipedia, which has a sign convention for xi that
# is the opposite of scipy's shape parameter.
z = (x - mu)/sigma
if xi != 0:
t = mpmath.power(1 + z*xi, -1/xi)
logt = -mpmath.log1p(z*xi)/xi
else:
t = mpmath.exp(-z)
logt = -z
p = (xi + 1)*logt - t - mpmath.log(sigma)
return p
def cdf(x, xi, mu=0, sigma=1):
"""
    Generalized extreme value distribution cumulative distribution function.
"""
xi = mpmath.mpf(xi)
mu = mpmath.mpf(mu)
sigma = mpmath.mpf(sigma)
# Formula from wikipedia, which has a sign convention for xi that
# is the opposite of scipy's shape parameter.
if xi != 0:
t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
else:
t = mpmath.exp(-(x - mu)/sigma)
return mpmath.exp(-t)
def sf(x, xi, mu=0, sigma=1):
"""
Generalized extreme value distribution survival function.
"""
xi = mpmath.mpf(xi)
mu = mpmath.mpf(mu)
sigma = mpmath.mpf(sigma)
# Formula from wikipedia, which has a sign convention for xi that
# is the opposite of scipy's shape parameter.
if xi != 0:
t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
else:
t = mpmath.exp(-(x - mu)/sigma)
return -mpmath.expm1(-t)
def mean(xi, mu=0, sigma=1):
"""
Mean of the generalized extreme value distribution.
"""
xi = mpmath.mpf(xi)
mu = mpmath.mpf(mu)
sigma = mpmath.mpf(sigma)
if xi == 0:
return mu + sigma * mpmath.euler
elif xi < 1:
g1 = mpmath.gamma(mpmath.mp.one - xi)
return mu + sigma * (g1 - mpmath.mp.one)/xi
else:
return mpmath.inf
def var(xi, mu=0, sigma=1):
"""
Variance of the generalized extreme value distribution.
"""
xi = mpmath.mpf(xi)
mu = mpmath.mpf(mu)
sigma = mpmath.mpf(sigma)
if xi == 0:
return sigma**2 * mpmath.pi**2 / 6
elif 2*xi < 1:
g1 = mpmath.gamma(mpmath.mp.one - xi)
g2 = mpmath.gamma(mpmath.mp.one - 2*xi)
return sigma**2 * (g2 - g1**2) / xi**2
else:
return mpmath.inf
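# Minimal sanity-check sketch: with xi = 0 the distribution reduces to the
# Gumbel case, so cdf(mu, 0, mu, sigma) equals exp(-1). For comparison,
# scipy.stats.genextreme parameterizes the same distribution with c = -xi.
if __name__ == "__main__":
    with mpmath.workdps(25):
        print(cdf(0, 0))            # exp(-1) ~ 0.3678794411714423
        print(mean(0.1), var(0.1))  # finite because xi < 1 and 2*xi < 1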
|
from __future__ import unicode_literals
from .models import ec2_backends
from ..core.models import MockAWS
ec2_backend = ec2_backends['us-east-1']
def mock_ec2(func=None):
if func:
return MockAWS(ec2_backends)(func)
else:
return MockAWS(ec2_backends)
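# Minimal usage sketch (assumes boto3 is installed; the AMI id is a placeholder):
# @mock_ec2
# def test_run_instance():
#     ec2 = boto3.client("ec2", region_name="us-east-1")
#     ec2.run_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)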
|
from typing import List
class Vector2D:
    def __init__(self, vec: List[List[int]]):
self.vec = []
self.i = 0
for A in vec:
self.vec += A
def next(self) -> int:
ans = self.vec[self.i]
self.i += 1
return ans
def hasNext(self) -> bool:
return self.i < len(self.vec)
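if __name__ == "__main__":
    # Minimal usage sketch: flattens [[1, 2], [3], [4]] and iterates in order.
    v = Vector2D([[1, 2], [3], [4]])
    while v.hasNext():
        print(v.next())  # prints 1, 2, 3, 4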
|
import os
serve_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "serve"))
serve = {"__mpld3": serve_path}
scripts = [
"/__mpld3/d3.placeholder.js", # will load faster for mpld3
"/__mpld3/d3.v5.min.js",
# "/__mpld3/mpld3.v0.5.7.js",
"/__mpld3/mpld3.v0.5.7.min.js",
"/__mpld3/vue-mpld3.umd.min.js",
]
vue_use = ["VueMatplotlib"]
|
# from python
from pathlib import Path
from typing import Union
import textwrap
# from pypi
from bokeh.embed import autoload_static
import bokeh.plotting
import bokeh.resources
import holoviews
PathType = Union[str, Path]
class EmbedBokeh:
"""Embed a bokeh figure
Args:
plot: a hvplot to embed
folder_path: path to the folder to save the file
file_name: name of the file to save the javascript in
create_folder: if the folder doesn't exist create it
make_parents: if creating a folder add the missing folders in the path
"""
def __init__(self, plot: holoviews.core.overlay.NdOverlay,
file_name: str,
folder_path: PathType,
create_folder: bool=True,
make_parents: bool=True) -> None:
self.plot = plot
self._figure = None
self.create_folder = create_folder
self.make_parents = make_parents
self._folder_path = None
self.folder_path = folder_path
self._file_name = None
self.file_name = file_name
self._source = None
self._javascript = None
self._bokeh_source = None
self._export_string = None
return
@property
def folder_path(self) -> Path:
"""The path to the folder to store javascript"""
return self._folder_path
@folder_path.setter
def folder_path(self, path: PathType) -> None:
"""Sets the path to the javascript folder"""
self._folder_path = Path(path)
if self.create_folder and not self._folder_path.is_dir():
self._folder_path.mkdir(parents=self.make_parents)
return
@property
def file_name(self) -> str:
"""The name of the javascript file"""
return self._file_name
@file_name.setter
def file_name(self, name: str) -> None:
"""Sets the filename
Args:
name: name to save the javascript (without the folder)
"""
name = Path(name)
self._file_name = "{}.js".format(name.stem)
return
@property
def figure(self) -> bokeh.plotting.Figure:
"""The Figure to plot"""
if self._figure is None:
self._figure = holoviews.render(self.plot)
return self._figure
@property
def bokeh_source(self) -> bokeh.resources.Resources:
"""The javascript source
"""
if self._bokeh_source is None:
self._bokeh_source = bokeh.resources.CDN
return self._bokeh_source
@property
def source(self) -> str:
"""The HTML fragment to export"""
if self._source is None:
self._javascript, self._source = autoload_static(self.figure,
self.bokeh_source,
self.file_name)
return self._source
@property
def javascript(self) -> str:
"""javascript to save"""
if self._javascript is None:
self._javascript, self._source = autoload_static(self.figure,
self.bokeh_source,
self.file_name)
return self._javascript
@property
def export_string(self) -> str:
"""The string to embed the figure into org-mode"""
if self._export_string is None:
            self._export_string = textwrap.dedent(
                """\
                #+begin_export html
                {}
                #+end_export""").format(self.source)
return self._export_string
def save_figure(self) -> None:
"""Saves the javascript file"""
with open(self.folder_path.joinpath(self.file_name), "w") as writer:
writer.write(self.javascript)
return
def __call__(self) -> None:
"""Creates the bokeh javascript and emits it"""
self.save_figure()
print(self.export_string)
return
def reset(self) -> None:
"""Sets the generated (bokeh) properties back to None"""
self._export_string = None
self._javascript = None
self._source = None
self._figure = None
return
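# Minimal usage sketch (assumes `curve` is a holoviews/hvplot object built
# elsewhere; the folder and file names are placeholders):
# embed = EmbedBokeh(curve, file_name="my_plot", folder_path="files/plots")
# embed()  # writes my_plot.js and prints the org-mode export block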
|
from logging import Filter, LogRecord
from typing import Union
from pydantic import BaseModel
class GetData(BaseModel):
"""BaseModel that handles input data for the API which is treated as members for the class ``GetData``.
>>> GetData
See Also:
- ``command``: Offline command sent via API which ``Jarvis`` has to perform.
"""
command: str
native_audio: bool = False
class GetText(BaseModel):
"""BaseModel that handles input data for the API which is treated as members for the class ``GetText``.
>>> GetText
See Also:
- ``text``: Text to be processed with speech synthesis.
- ``timeout``: Timeout for speech-synthesis API call.
- ``quality``: Quality of speech synthesis.
- ``voice``: Voice module to be used.
"""
text: str
timeout: Union[int, float] = None
quality: str = "high"
voice: str = "en-us_northern_english_male-glow_tts"
class InvestmentFilter(Filter):
"""Class to initiate ``/investment`` filter in logs while preserving other access logs.
>>> InvestmentFilter
See Also:
- Overrides logging by implementing a subclass of ``logging.Filter``
- The method ``filter(record)``, that examines the log record and returns True to log it or False to discard it.
"""
def filter(self, record: LogRecord) -> bool:
"""Filter out logging at ``/investment?token=`` from log streams.
Args:
record: ``LogRecord`` represents an event which is created every time something is logged.
Returns:
bool:
False flag for the endpoint that needs to be filtered.
"""
return record.getMessage().find("/investment?token=") == -1
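# Minimal usage sketch (the logger name is an assumption about how the access
# log is configured, e.g. uvicorn's "uvicorn.access" logger):
# import logging
# logging.getLogger("uvicorn.access").addFilter(InvestmentFilter())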
|
import requests
from requests.exceptions import HTTPError
import pprint
def get_json_dict(url: str) -> dict:
try:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:86.0) Gecko/20100101 Firefox/86.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
# "Referer": "https://www.oddschecker.com/us/basketball/nba",
"X-Requested-With": "XMLHttpRequest",
"Cache-Control": "no-store, no-cache, must-revalidate, max-age=0",
"Expires": "Thu, 01 Jan 1970 00:00:00 GMT",
"Pragma": "no-cache",
"DNT": "1",
"Connection": "keep-alive",
"Sec-GPC": "1",
"TE": "Trailers"
}
response = requests.get(url, headers=headers)
# If the response was successful, no Exception will be raised
# print(response.status_code)
response.raise_for_status()
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}') # Python 3.6
    except Exception as err:
        print(f'Other error occurred: {err}') # Python 3.6
    else:
        # Only reached when the request succeeded without raising an exception.
        print("Success")
        return response.json()
# pp = pprint.PrettyPrinter(indent=4)
# response = get_json_dict("https://www.oddschecker.com/us/basketball/nba?ajax=1")
# response = get_json_dict("https://www.oddschecker.com/us/basketball/nba?ajax=1")
# pp.pprint(response['data']['card']['matches'][0]) |