Dataset schema (each record below lists its fields in this order):
- repo_name: string, 7 to 94 characters
- repo_path: string, 4 to 237 characters
- repo_head_hexsha: string, 40 characters
- content: string, 10 to 680k characters
- apis: string, 2 to 840k characters
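As a rough illustration of how records in this shape might be consumed, the sketch below iterates over a dump and parses the apis field. It assumes the records are stored as JSON Lines, and the file name "repo_apis.jsonl" is hypothetical; the only property taken from the samples themselves is that apis holds a Python-literal list of call tuples, which ast.literal_eval can read.

# Minimal sketch for illustration only: the JSON Lines layout and the file
# name "repo_apis.jsonl" are assumptions, not part of the dataset dump.
import ast
import json

with open("repo_apis.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # The apis field is a Python-literal list of call tuples (see the
        # samples below), so it needs ast.literal_eval rather than json.loads.
        calls = ast.literal_eval(record["apis"]) if record["apis"] else []
        print(record["repo_name"], record["repo_path"],
              record["repo_head_hexsha"][:8], len(calls), "API calls")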
KimSoungRyoul/drf_unitteset_study_project
account/views.py
9a0d824bdc6343eeba6209299c077a6e9d280516
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins

from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer


@method_decorator(name='retrieve', decorator=swagger_auto_schema(
    operation_description="회원 개인정보 조회 API",
    filter_inspectors=[DjangoFilterDescriptionInspector],
))
@method_decorator(name='create', decorator=swagger_auto_schema(
    operation_description="회원 가입 API",
))
@method_decorator(name='update', decorator=swagger_auto_schema(
    operation_description="회원 정보 수정 API",
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
    operation_description="회원 탈퇴 API",
))
class CustomerAPIViewSet(mixins.CreateModelMixin, mixins.DestroyModelMixin,
                         mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
                         viewsets.GenericViewSet):
    queryset: QuerySet = Customer.objects
    permission_classes = (IsAuthenticated,)
    http_method_names = ['get', 'post', 'put', 'delete']

    def get_serializer_class(self):
        if self.request.method == 'POST':
            return SignUpFormSerializer
        elif self.request.method == 'GET':
            return CustomerInfoSerializer
        elif self.request.method == 'PUT':
            return SignUpFormSerializer
        elif self.request.method == 'DELETE':
            return SignUpFormSerializer

    def get_permissions(self):
        if self.request.method == 'POST':
            permission_classes = [AllowAny]
        return [permission() for permission in permission_classes]

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
[((57, 15, 57, 103), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((15, 45, 18, 1), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', (), '', False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((19, 43, 21, 1), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', (), '', False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((22, 43, 24, 1), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', (), '', False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((25, 44, 27, 1), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', (), '', False, 'from drf_yasg.utils import swagger_auto_schema\n')]
Mannan2812/azure-cli-extensions
src/front-door/azext_front_door/_validators.py
e2b34efe23795f6db9c59100534a40f0813c3d95
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse


def get_name_or_id_validator(dest, child_type=None, resource_type='Frontdoors',
                             resource_namespace='Microsoft.Network',
                             resource_name_dest='front_door_name'):
    def _validate_name_or_id(cmd, namespace):
        from azure.cli.core.commands.client_factory import get_subscription_id
        from msrestazure.tools import is_valid_resource_id, resource_id
        subscription_id = get_subscription_id(cmd.cli_ctx)
        resource_group = namespace.resource_group_name
        names_or_ids = getattr(namespace, dest)
        is_list = True  # treat single values as a list, but convert back in the end
        if not isinstance(names_or_ids, list):
            is_list = False
            names_or_ids = [names_or_ids]
        if names_or_ids == [None] or not names_or_ids:
            return
        ids = []
        for val in names_or_ids:
            id_params = {
                'subscription': subscription_id,
                'resource_group': resource_group,
                'namespace': resource_namespace,
                'type': resource_type,
                'name': getattr(namespace, resource_name_dest) if child_type else val,
                'child_type_1': child_type,
                'child_name_1': val if child_type else None
            }
            if not is_valid_resource_id(val):
                val = resource_id(**id_params)
            ids.append(val)
        setattr(namespace, dest, ids if is_list else ids[0])

    return _validate_name_or_id


def validate_waf_policy(cmd, namespace):
    get_name_or_id_validator(
        dest='waf_policy',
        resource_type='WebApplicationFirewallPolicy'
    )(cmd, namespace)


def validate_keyvault(cmd, namespace):
    get_name_or_id_validator(
        dest='vault',
        resource_type='vaults',
        resource_namespace='Microsoft.Keyvault'
    )(cmd, namespace)


def validate_load_balancing_settings(cmd, namespace):
    get_name_or_id_validator('load_balancing_settings', 'loadBalancingSettings')(cmd, namespace)


def validate_probe_settings(cmd, namespace):
    get_name_or_id_validator('probe_settings', 'healthProbeSettings')(cmd, namespace)


def validate_frontend_endpoints(cmd, namespace):
    get_name_or_id_validator('frontend_endpoints', 'frontendEndpoints')(cmd, namespace)


def validate_backend_pool(cmd, namespace):
    get_name_or_id_validator('backend_pool', 'backendPools')(cmd, namespace)


def validate_rules_engine(cmd, namespace):
    get_name_or_id_validator('rules_engine', 'rulesEngines')(cmd, namespace)


# pylint: disable=protected-access
class MatchConditionAction(argparse._AppendAction):

    # pylint: disable=no-self-use
    def parse_match_condition(self, values):
        from azext_front_door.vendored_sdks.models import MatchCondition
        if not isinstance(values, list):
            values = values.split(' ')
        try:
            return MatchCondition(
                match_variable=values[0],
                operator=values[1],
                match_value=values[2:]
            )
        except IndexError:
            from knack.util import CLIError
            raise CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')

    def __call__(self, parser, namespace, values, option_string=None):
        match_condition = self.parse_match_condition(values)
        super(MatchConditionAction, self).__call__(parser, namespace, match_condition, option_string)
[((17, 26, 17, 58), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', ({(17, 46, 17, 57): 'cmd.cli_ctx'}, {}), '(cmd.cli_ctx)', False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n'), ((96, 19, 100, 13), 'azext_front_door.vendored_sdks.models.MatchCondition', 'MatchCondition', (), '', False, 'from azext_front_door.vendored_sdks.models import MatchCondition\n'), ((41, 19, 41, 44), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', ({(41, 40, 41, 43): 'val'}, {}), '(val)', False, 'from msrestazure.tools import is_valid_resource_id, resource_id\n'), ((42, 22, 42, 46), 'msrestazure.tools.resource_id', 'resource_id', ({}, {}), '(**id_params)', False, 'from msrestazure.tools import is_valid_resource_id, resource_id\n'), ((103, 18, 103, 98), 'knack.util.CLIError', 'CLIError', ({(103, 27, 103, 97): '"""usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]"""'}, {}), "('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]'\n )", False, 'from knack.util import CLIError\n')]
DevAerial/mimesis
mimesis/data/int/development.py
33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b
"""Provides all the data related to the development.""" LICENSES = [ "Apache License, 2.0 (Apache-2.0)", "The BSD 3-Clause License", "The BSD 2-Clause License", "GNU General Public License (GPL)", "General Public License (LGPL)", "MIT License (MIT)", "Mozilla Public License 2.0 (MPL-2.0)", "Common Development and Distribution License (CDDL-1.0)", "Eclipse Public License (EPL-1.0)", ] PROGRAMMING_LANGS = [ "ASP", "Assembly", "AutoIt", "Awk", "Bash", "C", "C Shell", "C#", "C++", "Caml", "Ceylon", "Clojure", "CoffeeScript", "Common Lisp", "D", "Dart", "Delphi", "Dylan", "ECMAScript", "Elixir", "Emacs Lisp", "Erlang", "F#", "Falcon", "Fortran", "GNU Octave", "Go", "Groovy", "Haskell", "haXe", "Io", "J#", "Java", "JavaScript", "Julia", "Kotlin", "Lisp", "Lua", "Mathematica", "Objective-C", "OCaml", "Perl", "PHP", "PL-I", "PL-SQL", "PowerShell", "Prolog", "Python", "R", "Racket", "Ruby", "Rust", "Scala", "Scheme", "Smalltalk", "Tcl", "Tex", "Transact-SQL", "TypeScript", "Z shell", ] OS = [ "Arch", "CentOS", "Debian", "Fedora", "FreeBSD", "Gentoo", "Kali", "Lubuntu", "Manjaro", "Mint", "OS X", "macOS", "OpenBSD", "PCLinuxOS", "Slackware", "Ubuntu", "Windows 10", "Windows 7", "Windows 8", "Windows 8.1", "Zorin", "elementaryOS", "macOS", "openSUSE", ] FOLDERS = [ "Development", "Downloads", "Documents", "Music", "Video", "Work", "Pictures", "Desktop", "Study", ] PROJECT_NAMES = [ "aardonyx", "abelisaurus", "achelousaurus", "achillobator", "acrocanthosaurus", "aegyptosaurus", "afrovenator", "agilisaurus", "alamosaurus", "albertaceratops", "albertosaurus", "alectrosaurus", "alioramus", "allosaurus", "alvarezsaurus", "amargasaurus", "ammosaurus", "ampelosaurus", "amygdalodon", "anatotitan", "anchiceratops", "anchisaurus", "ankylosaurus", "anserimimus", "antarctopelta", "antarctosaurus", "apatosaurus", "aragosaurus", "aralosaurus", "archaeoceratops", "archaeopteryx", "archaeornithomimus", "argentinosaurus", "arrhinoceratops", "atlascopcosaurus", "aucasaurus", "austrosaurus", "avaceratops", "avalonia", "avimimus", "azendohsaurus", "bactrosaurus", "bagaceratops", "bambiraptor", "barapasaurus", "barosaurus", "baryonyx", "becklespinax", "beipiaosaurus", "bellusaurus", "borogovia", "brachiosaurus", "brachyceratops", "bugenasaura", "buitreraptor", "camarasaurus", "camptosaurus", "carnotaurus", "caudipteryx", "cedarpelta", "centrosaurus", "ceratosaurus", "cetiosauriscus", "cetiosaurus", "chaoyangsaurus", "chasmosaurus", "chialingosaurus", "chindesaurus", "chinshakiangosaurus", "chirostenotes", "chubutisaurus", "chungkingosaurus", "citipati", "coelophysis", "coelurus", "coloradisaurus", "compsognathus", "conchoraptor", "confuciusornis", "corythosaurus", "cryolophosaurus", "dacentrurus", "daspletosaurus", "datousaurus", "deinocheirus", "deinonychus", "deltadromeus", "diceratops", "dicraeosaurus", "dilophosaurus", "diplodocus", "dracorex", "dravidosaurus", "dromaeosaurus", "dromiceiomimus", "dryosaurus", "dryptosaurus", "dubreuillosaurus", "edmontonia", "edmontosaurus", "einiosaurus", "elaphrosaurus", "emausaurus", "eolambia", "eoraptor", "eotyrannus", "equijubus", "erketu", "erlikosaurus", "euhelopus", "euoplocephalus", "europasaurus", "euskelosaurus", "eustreptospondylus", "fukuiraptor", "fukuisaurus", "gallimimus", "gargoyleosaurus", "garudimimus", "gasosaurus", "gasparinisaura", "gastonia", "giganotosaurus", "gilmoreosaurus", "giraffatitan", "gobisaurus", "gorgosaurus", "goyocephale", "graciliceratops", "gryposaurus", "guaibasaurus", "guanlong", "hadrosaurus", "hagryphus", "haplocanthosaurus", "harpymimus", 
"herrerasaurus", "hesperosaurus", "heterodontosaurus", "homalocephale", "huayangosaurus", "hylaeosaurus", "hypacrosaurus", "hypselosaurus", "hypsilophodon", "iguanodon", "indosuchus", "ingenia", "irritator", "isisaurus", "janenschia", "jaxartosaurus", "jingshanosaurus", "jinzhousaurus", "jobaria", "juravenator", "kentrosaurus", "khaan", "kotasaurus", "kritosaurus", "lamaceratops", "lambeosaurus", "lapparentosaurus", "leaellynasaura", "leptoceratops", "lesothosaurus", "lexovisaurus", "liaoceratops", "liaoxiornis", "ligabuesaurus", "liliensternus", "lophorhothon", "lophostropheus", "lufengosaurus", "lurdusaurus", "lycorhinus", "magyarosaurus", "maiasaura", "majungatholus", "malawisaurus", "mamenchisaurus", "mapusaurus", "marshosaurus", "masiakasaurus", "massospondylus", "maxakalisaurus", "megalosaurus", "melanorosaurus", "metriacanthosaurus", "microceratops", "micropachycephalosaurus", "microraptor", "minmi", "monolophosaurus", "mononykus", "mussaurus", "muttaburrasaurus", "nanotyrannus", "nanshiungosaurus", "nemegtosaurus", "neovenator", "neuquenosaurus", "nigersaurus", "nipponosaurus", "noasaurus", "nodosaurus", "nomingia", "nothronychus", "nqwebasaurus", "omeisaurus", "ornitholestes", "ornithomimus", "orodromeus", "oryctodromeus", "othnielia", "ouranosaurus", "oviraptor", "rebbachisaurus", "rhabdodon", "rhoetosaurus", "rinchenia", "riojasaurus", "rugops", "saichania", "saltasaurus", "saltopus", "sarcosaurus", "saurolophus", "sauropelta", "saurophaganax", "saurornithoides", "scelidosaurus", "scutellosaurus", "secernosaurus", "segisaurus", "segnosaurus", "seismosaurus", "shamosaurus", "shanag", "shantungosaurus", "shunosaurus", "shuvuuia", "silvisaurus", "sinocalliopteryx", "sinornithosaurus", "sinosauropteryx", "sinraptor", "sinvenator", "zalmoxes", "zephyrosaurus", "zuniceratops", "byzantine", "svengali", "accolade", "acrimony", "angst", "anomaly", "antidote", "baroque", "bona_fide", "bourgeois", "bravado", "brogue", "brusque", "cacophony", "caustic", "charisma", "cloying", "deja-vu", "dichotomy", "elan", "ennui", "epitome", "esoteric", "euphemism", "faux pas", "fiasco", "finagle", "glib", "harbinger", "hedonist", "heresy", "idyllic", "insidious", "junket", "kitsch", "litany", "lurid", "malaise", "malinger", "mantra", "maudlin", "mercenary", "misnomer", "nirvana", "oblivion", "ogle", "ostracize", "panacea", "paradox", "peevish", "propriety", "revel", "rhetoric", "spartan", "stigma", "stoic", "suave", "sycophant", "tirade", "tryst", "untenable", "vicarious", "vile", "waft", "zealous", ]
[]
pcmoritz/flow
docs/mathparse.py
bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
""" A preliminary attempt at parsing an RST file's math syntax in order to make math render as inline rather than display mode. This doesn't work as of yet but might be useful. It could, however, be not useful if there's a pandoc option for converting .md to .rst that makes math inline and not display. Keeping it around, though. """ import re s = """Define .. math:: v_{des} as the desired velocity, .. math:: 1^k a vector of ones of length""" with open('/Users/nishant/Downloads/tutorialtest.rst', 'r') as myfile: s = myfile.read() print([elem[11:-2] for elem in re.findall('\n.. math:: *\S*\n\n', s)])
[((26, 31, 26, 68), 're.findall', 're.findall', ({(26, 42, 26, 64): '"""\n.. math:: *\\\\S*\n\n"""', (26, 66, 26, 67): 's'}, {}), '("""\n.. math:: *\\\\S*\n\n""", s)', False, 'import re\n')]
tailhook/pyzza
lib/layout/primitives.py
610be6ee4bea9b64f8226faf7338523fdafdf2cf
from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement


@package('layout')
class Poly(Shape):
    __slots__ = ('fillcolor', 'sequence')

    def __init__(self, name, fillcolor, seq, states):
        super().__init__(name, states)
        self.fillcolor = fillcolor
        self.sequence = seq

    def draw(self, w, h):
        g = self.graphics
        g.clear()
        for line in values(self.sequence):
            g.beginFill(self.fillcolor)
            g.moveTo(int(line[0][0]*w), int(line[0][1]*h))
            for idx in range(1, line.length):
                g.lineTo(int(line[idx][0]*w), int(line[idx][1]*h))
            g.endFill()


@package('layout')
class RoundRect(Shape):
    __slots__ = ('fillcolor', 'radius')

    def __init__(self, name, fillcolor, radius, states):
        super().__init__(name, states)
        self.fillcolor = fillcolor
        self.radius = radius

    def draw(self, width, height):
        g = self.graphics
        g.clear()
        g.beginFill(self.fillcolor)
        g.drawRoundRect(0, 0, width, height, self.radius, self.radius)
        g.endFill()


@package('layout')
class TextLine(Widget):
    __slots__ = ('format', 'text', 'textline')

    def __init__(self, format, text, name, states):
        self.format = format
        self.text = text
        super().__init__(name, states)

    def draw(self, width, height):
        if self.textline:
            self.removeChild(self.textline)
        tb = TextBlock()
        tb.content = TextElement(self.text, self.format)
        self.textline = tb.createTextLine(None, width)
        self.addChild(self.textline)


@package('layout')
class CenteredLine(TextLine):

    def __init__(self, format, text, name, states):
        super().__init__(format, text, name, states)

    def draw(self, width, height):
        super().draw(width, height)
        self.textline.x = int((width - self.textline.width)/2)
        self.textline.y = int((height - self.textline.height)/2)
[((48, 13, 48, 24), 'flash.text.engine.TextBlock', 'TextBlock', ({}, {}), '()', False, 'from flash.text.engine import TextBlock, TextElement\n'), ((49, 21, 49, 56), 'flash.text.engine.TextElement', 'TextElement', ({(49, 33, 49, 42): 'self.text', (49, 44, 49, 55): 'self.format'}, {}), '(self.text, self.format)', False, 'from flash.text.engine import TextBlock, TextElement\n')]
ImportTaste/WebRequest
tests/testing_server.py
0cc385622624de16ec980e0c12d9080d593cab74
import traceback import uuid import socket import logging import os import base64 import zlib import gzip import time import datetime from http import cookies from http.server import BaseHTTPRequestHandler from http.server import HTTPServer from threading import Thread import WebRequest def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False): # print("Capturing expected headers:") # print(expected_headers) assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" & type(expected_headers) for key, val in expected_headers.items(): assert isinstance(key, str) assert isinstance(val, str) cookie_key = uuid.uuid4().hex log = logging.getLogger("Main.TestServer") sucuri_reqs_1 = 0 sucuri_reqs_2 = 0 sucuri_reqs_3 = 0 class MockServerRequestHandler(BaseHTTPRequestHandler): def log_message(self, format, *args): return def validate_headers(self): for key, value in expected_headers.items(): if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding': # So PhantomJS monkeys with accept-encoding headers # Just ignore that particular header, I guess. pass # Selenium is fucking retarded, and I can't override the user-agent # and other assorted parameters via their API at all. elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language': pass elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept': pass elif not skip_header_checks: v1 = value.replace(" ", "") v2 = self.headers[key] if v2 is None: v2 = "" v2 = v2.replace(" ", "") test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format( key, value, self.headers[key], { 'is_annoying_pjs' : is_annoying_pjs, 'is_chromium' : is_chromium, 'is_selenium_garbage_chromium' : is_selenium_garbage_chromium, 'skip_header_checks' : skip_header_checks, }, ) ) def _get_handler(self): # Process an HTTP GET request and return a response with an HTTP 200 status. # print("Path: ", self.path) # print("Headers: ", self.headers) # print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[])) try: self.validate_headers() except Exception: self.send_response(500) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"Headers failed validation!") raise if self.path == "/": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"Root OK?") elif self.path == "/favicon.ico": self.send_response(404) self.end_headers() elif self.path == "/raw-txt": self.send_response(200) self.send_header('Content-type', "text/plain") self.end_headers() self.wfile.write(b"Root OK?") elif self.path == "/html-decode": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"Root OK?") elif self.path == "/html/real": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><body>Root OK?</body></html>") elif self.path == "/compressed/deflate": self.send_response(200) self.send_header('Content-Encoding', 'deflate') self.send_header('Content-type', "text/html") self.end_headers() inb = b"Root OK?" 
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS) t1 = cobj.compress(inb) + cobj.flush() self.wfile.write(t1) elif self.path == "/compressed/gzip": self.send_response(200) self.send_header('Content-Encoding', 'gzip') self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(gzip.compress(b"Root OK?")) elif self.path == "/json/invalid": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"LOLWAT") elif self.path == "/json/valid": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b'{"oh" : "hai"}') elif self.path == "/json/no-coding": self.send_response(200) self.end_headers() self.wfile.write(b'{"oh" : "hai"}') elif self.path == "/filename/path-only.txt": self.send_response(200) self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename/path-only-trailing-slash/": self.send_response(200) self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename/content-disposition": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.txt") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/path-only.txt": self.send_response(200) self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.txt") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-html-suffix": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.html") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-1": self.send_response(200) self.send_header('Content-Disposition', "filename='lolercoaster.html'") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-2": self.send_response(200) self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-spaces-1": self.send_response(200) self.send_header('Content-Disposition', "filename='loler coaster.html'") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-spaces-2": self.send_response(200) self.send_header('Content-Disposition', "filename=\"loler coaster.html\"") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/explicit-html-mime": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.html") self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/redirect/bad-1": self.send_response(302) self.end_headers() elif self.path == "/redirect/bad-2": self.send_response(302) self.send_header('location', "bad-2") self.end_headers() elif self.path == "/redirect/bad-3": self.send_response(302) self.send_header('location', "gopher://www.google.com") self.end_headers() elif self.path == "/redirect/from-1": self.send_response(302) self.send_header('location', "to-1") self.end_headers() elif self.path == "/redirect/to-1": self.send_response(200) self.end_headers() self.wfile.write(b"Redirect-To-1") elif self.path == "/redirect/from-2": self.send_response(302) self.send_header('uri', "to-2") self.end_headers() elif self.path == "/redirect/to-2": self.send_response(200) 
self.end_headers() self.wfile.write(b"Redirect-To-2") elif self.path == "/redirect/from-3": self.send_response(302) newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1]) self.send_header('uri', newurl) self.end_headers() elif self.path == "/password/expect": # print("Password") # print(self.headers) self.send_response(200) self.end_headers() if not 'Authorization' in self.headers: self.wfile.write(b"Password not sent!!") return val = self.headers['Authorization'] passval = val.split(" ")[-1] passstr = base64.b64decode(passval) if passstr == b'lol:wat': self.wfile.write(b"Password Ok?") else: self.wfile.write(b"Password Bad!") elif self.path == "/content/have-title": self.send_response(200) self.end_headers() self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>") elif self.path == "/content/no-title": self.send_response(200) self.end_headers() self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>") elif self.path == "/binary_ctnt": self.send_response(200) self.send_header('Content-type', "image/jpeg") self.end_headers() self.wfile.write(b"Binary!\x00\x01\x02\x03") elif self.path == "/binary_ctnt": self.send_response(200) self.send_header('Content-type', "image/jpeg") self.end_headers() self.wfile.write(b"Binary!\x00\x01\x02\x03") ################################################################################################################################## # Cookie stuff ################################################################################################################################## elif self.path == '/cookie_test': cook = cookies.SimpleCookie() cook['cookie_test_key'] = cookie_key cook['cookie_test_key']['path'] = "/" cook['cookie_test_key']['domain'] = "" expiration = datetime.datetime.now() + datetime.timedelta(days=30) cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST") self.send_response(200) self.send_header('Content-type', "text/html") self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString()) self.end_headers() self.wfile.write(b"<html><body>CF Cookie Test</body></html>") elif self.path == '/cookie_require': if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'cookie_test_key' and cook_value == cookie_key: self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>") return self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><body>Cookie is missing</body></html>") ################################################################################################################################## # Sucuri validation ################################################################################################################################## elif self.path == '/sucuri_shit_3': # I'd like to get this down to just 2 requests (cookie bounce, and fetch). # Doing that requires pulling html content out of chromium, though. # Annoying. nonlocal sucuri_reqs_3 sucuri_reqs_3 += 1 if sucuri_reqs_3 > 3: raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" 
% sucuri_reqs_3) if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478': # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(plain_contents) elif self.path == '/sucuri_shit_2': # This particular path is the one we should already have a cookie for. # As such, we expect one request only nonlocal sucuri_reqs_2 sucuri_reqs_2 += 1 if sucuri_reqs_2 > 1: raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2) if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478': # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(plain_contents) elif self.path == '/sucuri_shit': nonlocal sucuri_reqs_1 sucuri_reqs_1 += 1 if sucuri_reqs_1 > 4: raise RuntimeError("Too many requests to sucuri_shit (%s)!" 
% sucuri_reqs_1) # print("Fetch for ", self.path) # print("Cookies:", self.headers.get_all('Cookie', failobj=[])) if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478': # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(plain_contents) ################################################################################################################################## # Cloudflare validation ################################################################################################################################## elif self.path == '/cloudflare_under_attack_shit_2': if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key: # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.server_version = "cloudflare is garbage" self.send_response(503) self.send_header('Server', "cloudflare is garbage") self.send_header('Content-type','text/html') self.end_headers() self.wfile.write(plain_contents) elif self.path == '/cloudflare_under_attack_shit': if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key: # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.server_version = "cloudflare is garbage" self.send_response(503) self.send_header('Server', "cloudflare is garbage") self.send_header('Content-type','text/html') self.end_headers() self.wfile.write(plain_contents) elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594': cook = cookies.SimpleCookie() cook['cloudflare_validate_key'] = cookie_key cook['cloudflare_validate_key']['path'] = "/" cook['cloudflare_validate_key']['domain'] = "" expiration = datetime.datetime.now() + datetime.timedelta(days=30) cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST") self.send_response(200) self.send_header('Content-type', "text/html") self.send_header('Set-Cookie', 
cook['cloudflare_validate_key'].OutputString()) self.end_headers() body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>" self.wfile.write(body.encode("utf-8")) ################################################################################################################################## # Handle requests for an unknown path ################################################################################################################################## else: test_context.assertEqual(self.path, "This shouldn't happen!") def do_GET(self): # Process an HTTP GET request and return a response with an HTTP 200 status. log.info("Request for URL path: '%s'", self.path) # print("Headers: ", self.headers) # print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[])) try: return self._get_handler() except Exception as e: log.error("Exception in handler!") for line in traceback.format_exc().split("\n"): log.error(line) raise e return MockServerRequestHandler def get_free_port(): s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM) s.bind(('localhost', 0)) address, port = s.getsockname() s.close() return port def start_server(assertion_class, from_wg, port_override = None, is_chromium = None, is_selenium_garbage_chromium = False, is_annoying_pjs = False, skip_header_checks = False ): # Configure mock server. if port_override: mock_server_port = port_override else: mock_server_port = get_free_port() expected_headers = dict(from_wg.browserHeaders) print(from_wg) print(expected_headers) assert isinstance(expected_headers, dict) captured_server = capture_expected_headers( expected_headers = expected_headers, test_context = assertion_class, is_chromium = is_chromium, is_selenium_garbage_chromium = is_selenium_garbage_chromium, is_annoying_pjs = is_annoying_pjs, skip_header_checks = skip_header_checks ) retries = 4 for x in range(retries + 1): try: mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server) break except OSError: time.sleep(0.2) if x >= retries: raise # Start running mock server in a separate thread. # Daemon threads automatically shut down when the main process exits. mock_server_thread = Thread(target=mock_server.serve_forever) mock_server_thread.setDaemon(True) mock_server_thread.start() return mock_server_port, mock_server, mock_server_thread if __name__ == '__main__': wg = WebRequest.WebGetRobust() srv = start_server( assertion_class = None, from_wg = wg, skip_header_checks = True) print("running server on port: ", srv) while 1: time.sleep(1)
[((32, 7, 32, 43), 'logging.getLogger', 'logging.getLogger', ({(32, 25, 32, 42): '"""Main.TestServer"""'}, {}), "('Main.TestServer')", False, 'import logging\n'), ((556, 5, 556, 59), 'socket.socket', 'socket.socket', (), '', False, 'import socket\n'), ((604, 22, 604, 62), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((614, 6, 614, 31), 'WebRequest.WebGetRobust', 'WebRequest.WebGetRobust', ({}, {}), '()', False, 'import WebRequest\n'), ((31, 14, 31, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((622, 2, 622, 15), 'time.sleep', 'time.sleep', ({(622, 13, 622, 14): '(1)'}, {}), '(1)', False, 'import time\n'), ((595, 17, 595, 75), 'http.server.HTTPServer', 'HTTPServer', ({(595, 28, 595, 57): "('0.0.0.0', mock_server_port)", (595, 59, 595, 74): 'captured_server'}, {}), "(('0.0.0.0', mock_server_port), captured_server)", False, 'from http.server import HTTPServer\n'), ((598, 3, 598, 18), 'time.sleep', 'time.sleep', ({(598, 14, 598, 17): '(0.2)'}, {}), '(0.2)', False, 'import time\n'), ((549, 16, 549, 38), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((130, 11, 130, 50), 'zlib.compressobj', 'zlib.compressobj', (), '', False, 'import zlib\n'), ((139, 21, 139, 47), 'gzip.compress', 'gzip.compress', ({(139, 35, 139, 46): "b'Root OK?'"}, {}), "(b'Root OK?')", False, 'import gzip\n'), ((275, 14, 275, 39), 'base64.b64decode', 'base64.b64decode', ({(275, 31, 275, 38): 'passval'}, {}), '(passval)', False, 'import base64\n'), ((309, 11, 309, 33), 'http.cookies.SimpleCookie', 'cookies.SimpleCookie', ({}, {}), '()', False, 'from http import cookies\n'), ((313, 17, 313, 40), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((313, 43, 313, 70), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((373, 20, 373, 45), 'os.path.dirname', 'os.path.dirname', ({(373, 36, 373, 44): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((374, 12, 374, 77), 'os.path.join', 'os.path.join', ({(374, 25, 374, 38): 'container_dir', (374, 40, 374, 53): '"""waf_garbage"""', (374, 55, 374, 76): '"""sucuri_garbage.html"""'}, {}), "(container_dir, 'waf_garbage', 'sucuri_garbage.html')", False, 'import os\n'), ((407, 20, 407, 45), 'os.path.dirname', 'os.path.dirname', ({(407, 36, 407, 44): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((408, 12, 408, 77), 'os.path.join', 'os.path.join', ({(408, 25, 408, 38): 'container_dir', (408, 40, 408, 53): '"""waf_garbage"""', (408, 55, 408, 76): '"""sucuri_garbage.html"""'}, {}), "(container_dir, 'waf_garbage', 'sucuri_garbage.html')", False, 'import os\n'), ((442, 20, 442, 45), 'os.path.dirname', 'os.path.dirname', ({(442, 36, 442, 44): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((443, 12, 443, 77), 'os.path.join', 'os.path.join', ({(443, 25, 443, 38): 'container_dir', (443, 40, 443, 53): '"""waf_garbage"""', (443, 55, 443, 76): '"""sucuri_garbage.html"""'}, {}), "(container_dir, 'waf_garbage', 'sucuri_garbage.html')", False, 'import os\n'), ((471, 20, 471, 45), 'os.path.dirname', 'os.path.dirname', ({(471, 36, 471, 44): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((472, 12, 472, 89), 'os.path.join', 'os.path.join', ({(472, 25, 472, 38): 'container_dir', (472, 40, 472, 53): '"""waf_garbage"""', (472, 55, 472, 88): '"""cf_js_challenge_03_12_2018.html"""'}, {}), "(container_dir, 'waf_garbage', 'cf_js_challenge_03_12_2018.html')", False, 'import os\n'), ((498, 20, 498, 45), 
'os.path.dirname', 'os.path.dirname', ({(498, 36, 498, 44): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((499, 12, 499, 89), 'os.path.join', 'os.path.join', ({(499, 25, 499, 38): 'container_dir', (499, 40, 499, 53): '"""waf_garbage"""', (499, 55, 499, 88): '"""cf_js_challenge_03_12_2018.html"""'}, {}), "(container_dir, 'waf_garbage', 'cf_js_challenge_03_12_2018.html')", False, 'import os\n'), ((512, 11, 512, 33), 'http.cookies.SimpleCookie', 'cookies.SimpleCookie', ({}, {}), '()', False, 'from http import cookies\n'), ((516, 17, 516, 40), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((516, 43, 516, 70), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n')]
qrowsxi/calcgrades
calcgrades.py
93c71c1afef8dde5174726ae1702b71ccba633de
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse


def ineq_constraint_1(v):
    return np.array([vi for vi in v])


def ineq_constraint_2(v):
    return np.array([-vi + 30 for vi in v])


class WeightAverage:

    def __init__(self, mean, csv):
        self.df = pandas.read_csv(csv)
        self.course = self.df['name']
        self.expected_mean = mean
        self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
        self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])
        self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
        self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]
        self.tot_credits = sum(self.owned_credits) + sum(self.credits)

    def weight_average(self, v):
        term1 = 0
        term2 = 0
        for i in range(0, len(self.owned_grades)):
            term1 = term1 + self.owned_grades[i] * self.owned_credits[i]
        for i in range(0, len(v)):
            term2 = term2 + v[i] * self.credits[i]
        return (term1 + term2) / self.tot_credits

    def eq_constraint(self, v):
        return self.weight_average(v) - self.expected_mean

    def solve(self):
        cons = (
            {'type': 'eq', 'fun': self.eq_constraint},
            {'type': 'ineq', 'fun': ineq_constraint_1},
            {'type': 'ineq', 'fun': ineq_constraint_2})
        res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)
        if not res.success:
            return None
        return res.x


def error_no_solution():
    print("Mean not possible with current vote :(")
    exit(0)


def output_result(solver, sol):
    avg = solver.weight_average(sol)
    df = solver.df
    print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
    if sol is None:
        print("Not Possible with current grades :(")
        exit()
    for index, row in df.query('grade > 0').iterrows():
        print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
    i = 0
    for index, row in df.query('grade == 0').iterrows():
        print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
        i += 1
    return 0


def main():
    name = "calcGrades"
    description = """CalcGrades is an utility which purpose is to compute the minimum grades required
    to get a certain weight average of the grades over the credits,
    given the desired output and the grades already owned."""
    parser = argparse.ArgumentParser(name, description=description)
    parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')
    parser.add_argument('--file', dest='file', default='courses.csv', type=str, help='path to the csv file containing the courses (default: courses.csv)')
    parser.add_argument('--floor', default=False, action='store_true', help='apply floor operation instead of round to solution')
    parser.add_argument('--ceil', default=False, action='store_true', help='apply ceil operation instead of round to solution')
    args = parser.parse_args()
    mean = args.mean
    courses = args.file
    solver = WeightAverage(mean, courses)
    sol = solver.solve()
    if sol is None:
        error_no_solution()
    if args.ceil:
        sol = [math.ceil(x) for x in sol]
    elif args.floor:
        sol = [math.floor(x) for x in sol]
    else:
        sol = [round(x) for x in sol]
    output_result(solver, sol)
    return 0


if __name__ == '__main__':
    main()
[((11, 11, 11, 37), 'numpy.array', 'np.array', ({(11, 20, 11, 36): '[vi for vi in v]'}, {}), '([vi for vi in v])', True, 'import numpy as np\n'), ((15, 11, 15, 43), 'numpy.array', 'np.array', ({(15, 20, 15, 42): '[(-vi + 30) for vi in v]'}, {}), '([(-vi + 30) for vi in v])', True, 'import numpy as np\n'), ((79, 13, 79, 67), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((21, 18, 21, 38), 'pandas.read_csv', 'pandas.read_csv', ({(21, 34, 21, 37): 'csv'}, {}), '(csv)', False, 'import pandas\n'), ((95, 15, 95, 27), 'math.ceil', 'math.ceil', ({(95, 25, 95, 26): 'x'}, {}), '(x)', False, 'import math\n'), ((97, 15, 97, 28), 'math.floor', 'math.floor', ({(97, 26, 97, 27): 'x'}, {}), '(x)', False, 'import math\n')]
AaronFriel/pulumi-google-native
sdk/python/pulumi_google_native/testing/v1/test_matrix.py
75d1cda425e33d4610348972cd70bddf35f1770d
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['TestMatrixArgs', 'TestMatrix'] @pulumi.input_type class TestMatrixArgs: def __init__(__self__, *, environment_matrix: pulumi.Input['EnvironmentMatrixArgs'], result_storage: pulumi.Input['ResultStorageArgs'], test_specification: pulumi.Input['TestSpecificationArgs'], client_info: Optional[pulumi.Input['ClientInfoArgs']] = None, fail_fast: Optional[pulumi.Input[bool]] = None, flaky_test_attempts: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, request_id: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a TestMatrix resource. :param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on. :param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written. :param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test. :param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test. :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. :param pulumi.Input[str] project: The cloud project that owns the test matrix. """ pulumi.set(__self__, "environment_matrix", environment_matrix) pulumi.set(__self__, "result_storage", result_storage) pulumi.set(__self__, "test_specification", test_specification) if client_info is not None: pulumi.set(__self__, "client_info", client_info) if fail_fast is not None: pulumi.set(__self__, "fail_fast", fail_fast) if flaky_test_attempts is not None: pulumi.set(__self__, "flaky_test_attempts", flaky_test_attempts) if project is not None: pulumi.set(__self__, "project", project) if request_id is not None: pulumi.set(__self__, "request_id", request_id) @property @pulumi.getter(name="environmentMatrix") def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']: """ The devices the tests are being executed on. """ return pulumi.get(self, "environment_matrix") @environment_matrix.setter def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']): pulumi.set(self, "environment_matrix", value) @property @pulumi.getter(name="resultStorage") def result_storage(self) -> pulumi.Input['ResultStorageArgs']: """ Where the results for the matrix are written. 
""" return pulumi.get(self, "result_storage") @result_storage.setter def result_storage(self, value: pulumi.Input['ResultStorageArgs']): pulumi.set(self, "result_storage", value) @property @pulumi.getter(name="testSpecification") def test_specification(self) -> pulumi.Input['TestSpecificationArgs']: """ How to run the test. """ return pulumi.get(self, "test_specification") @test_specification.setter def test_specification(self, value: pulumi.Input['TestSpecificationArgs']): pulumi.set(self, "test_specification", value) @property @pulumi.getter(name="clientInfo") def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]: """ Information about the client which invoked the test. """ return pulumi.get(self, "client_info") @client_info.setter def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]): pulumi.set(self, "client_info", value) @property @pulumi.getter(name="failFast") def fail_fast(self) -> Optional[pulumi.Input[bool]]: """ If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. """ return pulumi.get(self, "fail_fast") @fail_fast.setter def fail_fast(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "fail_fast", value) @property @pulumi.getter(name="flakyTestAttempts") def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]: """ The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. """ return pulumi.get(self, "flaky_test_attempts") @flaky_test_attempts.setter def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "flaky_test_attempts", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The cloud project that owns the test matrix. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="requestId") def request_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "request_id") @request_id.setter def request_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_id", value) class TestMatrix(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None, environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None, fail_fast: Optional[pulumi.Input[bool]] = None, flaky_test_attempts: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, request_id: Optional[pulumi.Input[str]] = None, result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None, test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None, __props__=None): """ Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. 
May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices. Auto-naming is currently not supported for this resource. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test. :param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on. :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. :param pulumi.Input[str] project: The cloud project that owns the test matrix. :param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written. :param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test. """ ... @overload def __init__(__self__, resource_name: str, args: TestMatrixArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices. Auto-naming is currently not supported for this resource. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state. :param str resource_name: The name of the resource. :param TestMatrixArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None, environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None, fail_fast: Optional[pulumi.Input[bool]] = None, flaky_test_attempts: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, request_id: Optional[pulumi.Input[str]] = None, result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None, test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TestMatrixArgs.__new__(TestMatrixArgs) __props__.__dict__["client_info"] = client_info if environment_matrix is None and not opts.urn: raise TypeError("Missing required property 'environment_matrix'") __props__.__dict__["environment_matrix"] = environment_matrix __props__.__dict__["fail_fast"] = fail_fast __props__.__dict__["flaky_test_attempts"] = flaky_test_attempts __props__.__dict__["project"] = project __props__.__dict__["request_id"] = request_id if result_storage is None and not opts.urn: raise TypeError("Missing required property 'result_storage'") __props__.__dict__["result_storage"] = result_storage if test_specification is None and not opts.urn: raise TypeError("Missing required property 'test_specification'") __props__.__dict__["test_specification"] = test_specification __props__.__dict__["invalid_matrix_details"] = None __props__.__dict__["outcome_summary"] = None __props__.__dict__["state"] = None __props__.__dict__["test_executions"] = None __props__.__dict__["test_matrix_id"] = None __props__.__dict__["timestamp"] = None super(TestMatrix, __self__).__init__( 'google-native:testing/v1:TestMatrix', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix': """ Get an existing TestMatrix resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = TestMatrixArgs.__new__(TestMatrixArgs) __props__.__dict__["client_info"] = None __props__.__dict__["environment_matrix"] = None __props__.__dict__["fail_fast"] = None __props__.__dict__["flaky_test_attempts"] = None __props__.__dict__["invalid_matrix_details"] = None __props__.__dict__["outcome_summary"] = None __props__.__dict__["project"] = None __props__.__dict__["result_storage"] = None __props__.__dict__["state"] = None __props__.__dict__["test_executions"] = None __props__.__dict__["test_matrix_id"] = None __props__.__dict__["test_specification"] = None __props__.__dict__["timestamp"] = None return TestMatrix(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="clientInfo") def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']: """ Information about the client which invoked the test. """ return pulumi.get(self, "client_info") @property @pulumi.getter(name="environmentMatrix") def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']: """ The devices the tests are being executed on. """ return pulumi.get(self, "environment_matrix") @property @pulumi.getter(name="failFast") def fail_fast(self) -> pulumi.Output[bool]: """ If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. """ return pulumi.get(self, "fail_fast") @property @pulumi.getter(name="flakyTestAttempts") def flaky_test_attempts(self) -> pulumi.Output[int]: """ The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. """ return pulumi.get(self, "flaky_test_attempts") @property @pulumi.getter(name="invalidMatrixDetails") def invalid_matrix_details(self) -> pulumi.Output[str]: """ Describes why the matrix is considered invalid. Only useful for matrices in the INVALID state. """ return pulumi.get(self, "invalid_matrix_details") @property @pulumi.getter(name="outcomeSummary") def outcome_summary(self) -> pulumi.Output[str]: """ Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED. """ return pulumi.get(self, "outcome_summary") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The cloud project that owns the test matrix. """ return pulumi.get(self, "project") @property @pulumi.getter(name="resultStorage") def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']: """ Where the results for the matrix are written. """ return pulumi.get(self, "result_storage") @property @pulumi.getter def state(self) -> pulumi.Output[str]: """ Indicates the current progress of the test matrix. """ return pulumi.get(self, "state") @property @pulumi.getter(name="testExecutions") def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]: """ The list of test executions that the service creates for this matrix. """ return pulumi.get(self, "test_executions") @property @pulumi.getter(name="testMatrixId") def test_matrix_id(self) -> pulumi.Output[str]: """ Unique id set by the service. 
""" return pulumi.get(self, "test_matrix_id") @property @pulumi.getter(name="testSpecification") def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']: """ How to run the test. """ return pulumi.get(self, "test_specification") @property @pulumi.getter def timestamp(self) -> pulumi.Output[str]: """ The time this test matrix was initially created. """ return pulumi.get(self, "timestamp")
[((52, 5, 52, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((64, 5, 64, 40), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((76, 5, 76, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((88, 5, 88, 37), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((100, 5, 100, 35), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((112, 5, 112, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((136, 5, 136, 36), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((280, 5, 280, 37), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((288, 5, 288, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((296, 5, 296, 35), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((304, 5, 304, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((312, 5, 312, 47), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((320, 5, 320, 41), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((336, 5, 336, 40), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((352, 5, 352, 41), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((360, 5, 360, 39), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((368, 5, 368, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((37, 8, 37, 70), 'pulumi.set', 'pulumi.set', ({(37, 19, 37, 27): '__self__', (37, 29, 37, 49): '"""environment_matrix"""', (37, 51, 37, 69): 'environment_matrix'}, {}), "(__self__, 'environment_matrix', environment_matrix)", False, 'import pulumi\n'), ((38, 8, 38, 62), 'pulumi.set', 'pulumi.set', ({(38, 19, 38, 27): '__self__', (38, 29, 38, 45): '"""result_storage"""', (38, 47, 38, 61): 'result_storage'}, {}), "(__self__, 'result_storage', result_storage)", False, 'import pulumi\n'), ((39, 8, 39, 70), 'pulumi.set', 'pulumi.set', ({(39, 19, 39, 27): '__self__', (39, 29, 39, 49): '"""test_specification"""', (39, 51, 39, 69): 'test_specification'}, {}), "(__self__, 'test_specification', test_specification)", False, 'import pulumi\n'), ((57, 15, 57, 53), 'pulumi.get', 'pulumi.get', ({(57, 26, 57, 30): 'self', (57, 32, 57, 52): '"""environment_matrix"""'}, {}), "(self, 'environment_matrix')", False, 'import pulumi\n'), ((61, 8, 61, 53), 'pulumi.set', 'pulumi.set', ({(61, 19, 61, 23): 'self', (61, 25, 61, 45): '"""environment_matrix"""', (61, 47, 61, 52): 'value'}, {}), "(self, 'environment_matrix', value)", False, 'import pulumi\n'), ((69, 15, 69, 49), 'pulumi.get', 'pulumi.get', ({(69, 26, 69, 30): 'self', (69, 32, 69, 48): '"""result_storage"""'}, {}), "(self, 'result_storage')", False, 'import pulumi\n'), ((73, 8, 73, 49), 'pulumi.set', 'pulumi.set', ({(73, 19, 73, 23): 'self', (73, 25, 73, 41): '"""result_storage"""', (73, 43, 73, 48): 'value'}, {}), "(self, 'result_storage', value)", False, 'import pulumi\n'), ((81, 15, 81, 53), 'pulumi.get', 'pulumi.get', ({(81, 26, 81, 30): 'self', (81, 32, 81, 52): '"""test_specification"""'}, {}), "(self, 'test_specification')", False, 'import pulumi\n'), ((85, 8, 85, 53), 'pulumi.set', 'pulumi.set', ({(85, 19, 85, 23): 'self', (85, 25, 85, 45): '"""test_specification"""', (85, 47, 85, 52): 'value'}, {}), "(self, 'test_specification', value)", False, 'import pulumi\n'), ((93, 15, 93, 46), 'pulumi.get', 'pulumi.get', ({(93, 26, 93, 30): 'self', (93, 32, 93, 45): 
'"""client_info"""'}, {}), "(self, 'client_info')", False, 'import pulumi\n'), ((97, 8, 97, 46), 'pulumi.set', 'pulumi.set', ({(97, 19, 97, 23): 'self', (97, 25, 97, 38): '"""client_info"""', (97, 40, 97, 45): 'value'}, {}), "(self, 'client_info', value)", False, 'import pulumi\n'), ((105, 15, 105, 44), 'pulumi.get', 'pulumi.get', ({(105, 26, 105, 30): 'self', (105, 32, 105, 43): '"""fail_fast"""'}, {}), "(self, 'fail_fast')", False, 'import pulumi\n'), ((109, 8, 109, 44), 'pulumi.set', 'pulumi.set', ({(109, 19, 109, 23): 'self', (109, 25, 109, 36): '"""fail_fast"""', (109, 38, 109, 43): 'value'}, {}), "(self, 'fail_fast', value)", False, 'import pulumi\n'), ((117, 15, 117, 54), 'pulumi.get', 'pulumi.get', ({(117, 26, 117, 30): 'self', (117, 32, 117, 53): '"""flaky_test_attempts"""'}, {}), "(self, 'flaky_test_attempts')", False, 'import pulumi\n'), ((121, 8, 121, 54), 'pulumi.set', 'pulumi.set', ({(121, 19, 121, 23): 'self', (121, 25, 121, 46): '"""flaky_test_attempts"""', (121, 48, 121, 53): 'value'}, {}), "(self, 'flaky_test_attempts', value)", False, 'import pulumi\n'), ((129, 15, 129, 42), 'pulumi.get', 'pulumi.get', ({(129, 26, 129, 30): 'self', (129, 32, 129, 41): '"""project"""'}, {}), "(self, 'project')", False, 'import pulumi\n'), ((133, 8, 133, 42), 'pulumi.set', 'pulumi.set', ({(133, 19, 133, 23): 'self', (133, 25, 133, 34): '"""project"""', (133, 36, 133, 41): 'value'}, {}), "(self, 'project', value)", False, 'import pulumi\n'), ((138, 15, 138, 45), 'pulumi.get', 'pulumi.get', ({(138, 26, 138, 30): 'self', (138, 32, 138, 44): '"""request_id"""'}, {}), "(self, 'request_id')", False, 'import pulumi\n'), ((142, 8, 142, 45), 'pulumi.set', 'pulumi.set', ({(142, 19, 142, 23): 'self', (142, 25, 142, 37): '"""request_id"""', (142, 39, 142, 44): 'value'}, {}), "(self, 'request_id', value)", False, 'import pulumi\n'), ((285, 15, 285, 46), 'pulumi.get', 'pulumi.get', ({(285, 26, 285, 30): 'self', (285, 32, 285, 45): '"""client_info"""'}, {}), "(self, 'client_info')", False, 'import pulumi\n'), ((293, 15, 293, 53), 'pulumi.get', 'pulumi.get', ({(293, 26, 293, 30): 'self', (293, 32, 293, 52): '"""environment_matrix"""'}, {}), "(self, 'environment_matrix')", False, 'import pulumi\n'), ((301, 15, 301, 44), 'pulumi.get', 'pulumi.get', ({(301, 26, 301, 30): 'self', (301, 32, 301, 43): '"""fail_fast"""'}, {}), "(self, 'fail_fast')", False, 'import pulumi\n'), ((309, 15, 309, 54), 'pulumi.get', 'pulumi.get', ({(309, 26, 309, 30): 'self', (309, 32, 309, 53): '"""flaky_test_attempts"""'}, {}), "(self, 'flaky_test_attempts')", False, 'import pulumi\n'), ((317, 15, 317, 57), 'pulumi.get', 'pulumi.get', ({(317, 26, 317, 30): 'self', (317, 32, 317, 56): '"""invalid_matrix_details"""'}, {}), "(self, 'invalid_matrix_details')", False, 'import pulumi\n'), ((325, 15, 325, 50), 'pulumi.get', 'pulumi.get', ({(325, 26, 325, 30): 'self', (325, 32, 325, 49): '"""outcome_summary"""'}, {}), "(self, 'outcome_summary')", False, 'import pulumi\n'), ((333, 15, 333, 42), 'pulumi.get', 'pulumi.get', ({(333, 26, 333, 30): 'self', (333, 32, 333, 41): '"""project"""'}, {}), "(self, 'project')", False, 'import pulumi\n'), ((341, 15, 341, 49), 'pulumi.get', 'pulumi.get', ({(341, 26, 341, 30): 'self', (341, 32, 341, 48): '"""result_storage"""'}, {}), "(self, 'result_storage')", False, 'import pulumi\n'), ((349, 15, 349, 40), 'pulumi.get', 'pulumi.get', ({(349, 26, 349, 30): 'self', (349, 32, 349, 39): '"""state"""'}, {}), "(self, 'state')", False, 'import pulumi\n'), ((357, 15, 357, 50), 'pulumi.get', 'pulumi.get', ({(357, 
26, 357, 30): 'self', (357, 32, 357, 49): '"""test_executions"""'}, {}), "(self, 'test_executions')", False, 'import pulumi\n'), ((365, 15, 365, 49), 'pulumi.get', 'pulumi.get', ({(365, 26, 365, 30): 'self', (365, 32, 365, 48): '"""test_matrix_id"""'}, {}), "(self, 'test_matrix_id')", False, 'import pulumi\n'), ((373, 15, 373, 53), 'pulumi.get', 'pulumi.get', ({(373, 26, 373, 30): 'self', (373, 32, 373, 52): '"""test_specification"""'}, {}), "(self, 'test_specification')", False, 'import pulumi\n'), ((381, 15, 381, 44), 'pulumi.get', 'pulumi.get', ({(381, 26, 381, 30): 'self', (381, 32, 381, 43): '"""timestamp"""'}, {}), "(self, 'timestamp')", False, 'import pulumi\n'), ((41, 12, 41, 60), 'pulumi.set', 'pulumi.set', ({(41, 23, 41, 31): '__self__', (41, 33, 41, 46): '"""client_info"""', (41, 48, 41, 59): 'client_info'}, {}), "(__self__, 'client_info', client_info)", False, 'import pulumi\n'), ((43, 12, 43, 56), 'pulumi.set', 'pulumi.set', ({(43, 23, 43, 31): '__self__', (43, 33, 43, 44): '"""fail_fast"""', (43, 46, 43, 55): 'fail_fast'}, {}), "(__self__, 'fail_fast', fail_fast)", False, 'import pulumi\n'), ((45, 12, 45, 76), 'pulumi.set', 'pulumi.set', ({(45, 23, 45, 31): '__self__', (45, 33, 45, 54): '"""flaky_test_attempts"""', (45, 56, 45, 75): 'flaky_test_attempts'}, {}), "(__self__, 'flaky_test_attempts', flaky_test_attempts)", False, 'import pulumi\n'), ((47, 12, 47, 52), 'pulumi.set', 'pulumi.set', ({(47, 23, 47, 31): '__self__', (47, 33, 47, 42): '"""project"""', (47, 44, 47, 51): 'project'}, {}), "(__self__, 'project', project)", False, 'import pulumi\n'), ((49, 12, 49, 58), 'pulumi.set', 'pulumi.set', ({(49, 23, 49, 31): '__self__', (49, 33, 49, 45): '"""request_id"""', (49, 47, 49, 57): 'request_id'}, {}), "(__self__, 'request_id', request_id)", False, 'import pulumi\n'), ((212, 19, 212, 43), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ({}, {}), '()', False, 'import pulumi\n'), ((260, 50, 260, 79), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', (), '', False, 'import pulumi\n')]
MoriokaReimen/ConfigHeaderGenerator
View/View.py
73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
import tkinter as tk import tkinter.messagebox from Control import Control class View: def __init__(self, control : Control.Control): self.control = control # Init Window self.root = tk.Tk() self.root.title(u"Header File Generator") self.root.geometry("700x800") self.config_frame = tk.Frame(self.root) # Config Table lb_symbol = tk.Label(self.config_frame, width = 20) lb_symbol["text"] = "Symbol" lb_symbol.grid(row = 0, column = 0) lb_description = tk.Label(self.config_frame, width = 40) lb_description["text"] = "Detail" lb_description.grid(row = 0, column = 1) lb_enable = tk.Label(self.config_frame, width = 10) lb_enable["text"] = "Enable" lb_enable.grid(row = 0, column = 2) for i, config in enumerate(self.control.getConfigs()): symbol_entry = tk.Entry(self.config_frame, width=20) symbol_entry.insert(tk.END, config.symbol) symbol_entry.config(state = tk.DISABLED) symbol_entry.config(disabledforeground = "black", disabledbackground = "white") symbol_entry.grid(row= i + 1, column = 0) detail_entry = tk.Entry(self.config_frame, width=40) detail_entry.insert(tk.END, config.detail) detail_entry.config(state = tk.DISABLED) detail_entry.config(disabledforeground = "black", disabledbackground = "white") detail_entry.grid(row= i + 1, column = 1) bt_enable = tk.Button(self.config_frame, text="ON", width= 5) bt_enable["text"] = "ON" if config.enable else "OFF" color = "green" if config.enable else "red" bt_enable.config(bg=color, activebackground = color) bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button) bt_enable.grid(row = i + 1, column = 2) self.config_frame.pack(side=tk.TOP, anchor=tk.NW) self.value_config_frame = tk.Frame(self.root) # Config Table lb_symbol = tk.Label(self.value_config_frame, width = 20) lb_symbol["text"] = "Symbol" lb_symbol.grid(row = 0, column = 0) lb_description = tk.Label(self.value_config_frame, width = 40) lb_description["text"] = "Detail" lb_description.grid(row = 0, column = 1) lb_value = tk.Label(self.value_config_frame, width = 10) lb_value["text"] = "Value" lb_value.grid(row = 0, column = 2) lb_enable = tk.Label(self.value_config_frame, width = 10) lb_enable["text"] = "Enable" lb_enable.grid(row = 0, column = 3) for i, val_config in enumerate(self.control.getValConfigs()): symbol_entry = tk.Entry(self.value_config_frame, width=20) symbol_entry.insert(tk.END, val_config.symbol) symbol_entry.config(state = tk.DISABLED) symbol_entry.config(disabledforeground = "black", disabledbackground = "white") symbol_entry.grid(row= i + 1, column = 0) detail_entry = tk.Entry(self.value_config_frame, width=40) detail_entry.insert(tk.END, val_config.detail) detail_entry.config(state = tk.DISABLED) detail_entry.config(disabledforeground = "black", disabledbackground = "white") detail_entry.grid(row= i + 1, column = 1) value_entry = tk.Entry(self.value_config_frame, width=10) value_entry.insert(tk.END, val_config.value) value_entry.config(state = tk.DISABLED) value_entry.config(disabledforeground = "black", disabledbackground = "white") value_entry.grid(row= i + 1, column = 2) bt_enable = tk.Button(self.value_config_frame, text="ON", width= 5) bt_enable["text"] = "ON" if val_config.enable else "OFF" color = "green" if val_config.enable else "red" bt_enable.config(bg=color, activebackground = color) bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button) bt_enable.grid(row = i + 1, column = 3) self.value_config_frame.pack(side=tk.TOP, anchor=tk.W) # Generator Button self.bt_generate = 
tk.Button(self.root) self.bt_generate["text"] = "Generate Header" self.bt_generate["command"] = self.generateHeader self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE) def start(self): self.root.mainloop() def generateHeader(self): self.control.generateHeader() tk.messagebox.showinfo("Header Generator Info", "Generated:{0}".format(self.control.header_config.path)) def update(self): pass def toggle_config_enable(self, id, button : tk.Button): config = self.control.getConfigs()[id] config.enable = not config.enable button["text"] = "ON" if config.enable else "OFF" color = "green" if config.enable else "red" button.config(bg=color, activebackground = color) def toggle_val_config_enable(self, id, button : tk.Button): val_config = self.control.getValConfigs()[id] val_config.enable = not val_config.enable button["text"] = "ON" if val_config.enable else "OFF" color = "green" if val_config.enable else "red" button.config(bg=color, activebackground = color)
[((10, 20, 10, 27), 'tkinter.Tk', 'tk.Tk', ({}, {}), '()', True, 'import tkinter as tk\n'), ((14, 28, 14, 47), 'tkinter.Frame', 'tk.Frame', ({(14, 37, 14, 46): 'self.root'}, {}), '(self.root)', True, 'import tkinter as tk\n'), ((17, 20, 17, 59), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((21, 25, 21, 64), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((25, 20, 25, 59), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((51, 34, 51, 53), 'tkinter.Frame', 'tk.Frame', ({(51, 43, 51, 52): 'self.root'}, {}), '(self.root)', True, 'import tkinter as tk\n'), ((54, 20, 54, 65), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((58, 25, 58, 70), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((62, 19, 62, 64), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((66, 20, 66, 65), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((98, 27, 98, 47), 'tkinter.Button', 'tk.Button', ({(98, 37, 98, 46): 'self.root'}, {}), '(self.root)', True, 'import tkinter as tk\n'), ((30, 27, 30, 64), 'tkinter.Entry', 'tk.Entry', (), '', True, 'import tkinter as tk\n'), ((36, 27, 36, 64), 'tkinter.Entry', 'tk.Entry', (), '', True, 'import tkinter as tk\n'), ((42, 24, 42, 73), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n'), ((71, 27, 71, 70), 'tkinter.Entry', 'tk.Entry', (), '', True, 'import tkinter as tk\n'), ((77, 27, 77, 70), 'tkinter.Entry', 'tk.Entry', (), '', True, 'import tkinter as tk\n'), ((83, 26, 83, 69), 'tkinter.Entry', 'tk.Entry', (), '', True, 'import tkinter as tk\n'), ((89, 24, 89, 79), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n')]
FirebirdSQL/firebird-qa
tests/bugs/core_3355_test.py
96af2def7f905a06f178e2a80a2c8be4a4b44782
#coding:utf-8
#
# id:           bugs.core_3355
# title:        Wrong comparison of DATE and TIMESTAMP if index is used
# description:
# tracker_id:   CORE-3355
# min_versions: ['2.1.5']
# versions:     3.0
# qmid:         None

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 3.0
# resources: None

substitutions_1 = []

init_script_1 = """create table tdate (id integer not null primary key, val date);
create index tdateix1 on tdate (val);
commit;
insert into tdate values (0, '1997-12-31');
insert into tdate values (1, '1998-01-01');
insert into tdate values (2, '1998-01-02');
insert into tdate values (3, '1998-01-03');
insert into tdate values (4, '1998-01-04');
insert into tdate values (5, '1998-01-05');
commit;
"""

db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)

test_script_1 = """select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';
select count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
COUNT
=====================
1


COUNT
=====================
5

"""

@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
[((31, 7, 31, 68), 'firebird.qa.db_factory', 'db_factory', (), '', False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((37, 8, 37, 70), 'firebird.qa.isql_act', 'isql_act', (), '', False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((51, 1, 51, 29), 'pytest.mark.version', 'pytest.mark.version', ({(51, 21, 51, 28): '""">=3.0"""'}, {}), "('>=3.0')", False, 'import pytest\n')]
hms-dbmi/bch-pic-sure-airflow-dags
dags/download_decrypt_transfer_files.py
0c1e6f07da4e270581942e551ac30284474921d4
""" @author: anilkdegala """ import os from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator, BranchPythonOperator from datetime import date, timedelta, datetime from collections import OrderedDict from scripts.dag_pebbles import DagPebbles from airflow.configuration import conf from scripts.configurations import * from airflow.operators.dummy_operator import DummyOperator default_args = { "owner": "anilkdegala", "depends_on_past": True, "max_active_runs": 1, "start_date": datetime(2015, 6, 1), "is_active": True, "is_paused_upon_creation": False, } def begin_pipeline(**kwargs): print("begin_pipeline:") files = kwargs['dag_run'].conf.get('files') download_decrypt_arguments = '' transfer_arguments_list = [] for f in files: print("download_decrypt_transfer_files: file: ", f['name'], ', location: ', f['path']) output = f['name']+','+f['path']+','+f['final_name'] download_decrypt_arguments = download_decrypt_arguments + " " + output transfer_arguments_list.append(DATA_LOCATION + "/"+f['final_name']) transfer_arguments = ",".join(transfer_arguments_list) print("final download_decrypt_arguments: ",download_decrypt_arguments) print("final transfer_arguments: ",transfer_arguments) kwargs["ti"].xcom_push(key="download_decrypt_arguments", value=download_decrypt_arguments) kwargs["ti"].xcom_push(key="transfer_arguments", value=transfer_arguments) def pipeline_enable_check(**kwargs): dp = DagPebbles() if dp.pipeline_enable_check('DATA_LOAD'): return "pipeline_check_passed" else: return "pipeline_check_skipped" def pipeline_check_passed(**kwargs): print("pipeline_check_passed:") def end_pipeline(**kwargs): print("end_pipeline:") def pipeline_check_skipped(**kwargs): print("pipeline_check_skipped:") def cleanup(**kwargs): dp = DagPebbles() print("cleanup") def notify(**kwargs): dp = DagPebbles() print("notify") def end(**kwargs): dp = DagPebbles() print("end") with DAG( "DOWNLOAD_DECRYPT_TRANSFER", description="Download, Decrypt, Transfer files (Source: S3, Staging: EC2: Target: RDS Oracle)", default_args=default_args, schedule_interval=None, catchup=False, orientation="TB", tags=['Utils'], dagrun_timeout=timedelta(hours=240) ) as dag: t_pipeline_begin = PythonOperator( task_id="begin_pipeline", python_callable=begin_pipeline, provide_context=True, dag=dag, ) t_check_pipeline = BranchPythonOperator( task_id="check_pipeline", python_callable=pipeline_enable_check, provide_context=True, dag=dag, ) t_pipeline_check_passed = PythonOperator( task_id="pipeline_check_passed", python_callable=pipeline_check_passed, provide_context=True, dag=dag, ) t_pipeline_check_skipped = PythonOperator( task_id="pipeline_check_skipped", python_callable=pipeline_check_skipped, provide_context=True, dag=dag, ) download_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/download_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}}" t_download_files = BashOperator( task_id='download_files', bash_command=download_files_cmd, dag=dag) decrypt_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/decrypt_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}} " t_decrypt_files = BashOperator( task_id='decrypt_files', bash_command=decrypt_files_cmd, dag=dag) transfer_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/transfer_files_rds.pl "+"{{ ti.xcom_pull(key='transfer_arguments')}} " t_transfer_files = BashOperator( task_id='transfer_files', bash_command=transfer_files_cmd, dag=dag) t_end_pipeline = PythonOperator( 
task_id="end_pipeline", python_callable=end_pipeline, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_notify = PythonOperator( task_id="send_notifications", python_callable=notify, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_cleanup = PythonOperator( task_id="cleanup", python_callable=cleanup, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_end = PythonOperator( task_id="end", python_callable=end, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_pipeline_begin >> t_check_pipeline t_check_pipeline >> t_pipeline_check_skipped >> t_end_pipeline t_check_pipeline >> t_pipeline_check_passed >> t_download_files >> t_decrypt_files >> t_transfer_files >> t_end_pipeline t_end_pipeline >> t_cleanup >> t_notify >> t_end
[((19, 18, 19, 38), 'datetime.datetime', 'datetime', ({(19, 27, 19, 31): '(2015)', (19, 33, 19, 34): '(6)', (19, 36, 19, 37): '(1)'}, {}), '(2015, 6, 1)', False, 'from datetime import date, timedelta, datetime\n'), ((45, 9, 45, 21), 'scripts.dag_pebbles.DagPebbles', 'DagPebbles', ({}, {}), '()', False, 'from scripts.dag_pebbles import DagPebbles\n'), ((62, 9, 62, 21), 'scripts.dag_pebbles.DagPebbles', 'DagPebbles', ({}, {}), '()', False, 'from scripts.dag_pebbles import DagPebbles\n'), ((67, 9, 67, 21), 'scripts.dag_pebbles.DagPebbles', 'DagPebbles', ({}, {}), '()', False, 'from scripts.dag_pebbles import DagPebbles\n'), ((72, 9, 72, 21), 'scripts.dag_pebbles.DagPebbles', 'DagPebbles', ({}, {}), '()', False, 'from scripts.dag_pebbles import DagPebbles\n'), ((86, 23, 91, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((93, 23, 98, 5), 'airflow.operators.python_operator.BranchPythonOperator', 'BranchPythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((100, 30, 105, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((107, 31, 112, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((115, 23, 118, 16), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', (), '', False, 'from airflow.operators.bash_operator import BashOperator\n'), ((121, 22, 124, 16), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', (), '', False, 'from airflow.operators.bash_operator import BashOperator\n'), ((127, 23, 130, 16), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', (), '', False, 'from airflow.operators.bash_operator import BashOperator\n'), ((132, 21, 138, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((140, 15, 146, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((148, 16, 154, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((156, 12, 162, 5), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((82, 25, 82, 45), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import date, timedelta, datetime\n')]
hashnfv/hashnfv-moon
keystone-moon/keystone/endpoint_policy/controllers.py
daaba34fa2ed4426bc0fde359e54a5e1b872208c
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import controller from keystone.common import dependency from keystone import notifications @dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api') class EndpointPolicyV3Controller(controller.V3Controller): collection_name = 'endpoints' member_name = 'endpoint' def __init__(self): super(EndpointPolicyV3Controller, self).__init__() notifications.register_event_callback( 'deleted', 'endpoint', self._on_endpoint_delete) notifications.register_event_callback( 'deleted', 'service', self._on_service_delete) notifications.register_event_callback( 'deleted', 'region', self._on_region_delete) notifications.register_event_callback( 'deleted', 'policy', self._on_policy_delete) def _on_endpoint_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_endpoint( payload['resource_info']) def _on_service_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_service( payload['resource_info']) def _on_region_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_region( payload['resource_info']) def _on_policy_delete(self, service, resource_type, operation, payload): self.endpoint_policy_api.delete_association_by_policy( payload['resource_info']) @controller.protected() def create_policy_association_for_endpoint(self, context, policy_id, endpoint_id): """Create an association between a policy and an endpoint.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_endpoint(endpoint_id) self.endpoint_policy_api.create_policy_association( policy_id, endpoint_id=endpoint_id) @controller.protected() def check_policy_association_for_endpoint(self, context, policy_id, endpoint_id): """Check an association between a policy and an endpoint.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_endpoint(endpoint_id) self.endpoint_policy_api.check_policy_association( policy_id, endpoint_id=endpoint_id) @controller.protected() def delete_policy_association_for_endpoint(self, context, policy_id, endpoint_id): """Delete an association between a policy and an endpoint.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_endpoint(endpoint_id) self.endpoint_policy_api.delete_policy_association( policy_id, endpoint_id=endpoint_id) @controller.protected() def create_policy_association_for_service(self, context, policy_id, service_id): """Create an association between a policy and a service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.endpoint_policy_api.create_policy_association( policy_id, service_id=service_id) @controller.protected() def check_policy_association_for_service(self, context, policy_id, service_id): """Check an association between a policy and a service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.endpoint_policy_api.check_policy_association( policy_id, 
service_id=service_id) @controller.protected() def delete_policy_association_for_service(self, context, policy_id, service_id): """Delete an association between a policy and a service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.endpoint_policy_api.delete_policy_association( policy_id, service_id=service_id) @controller.protected() def create_policy_association_for_region_and_service( self, context, policy_id, service_id, region_id): """Create an association between a policy and region+service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.catalog_api.get_region(region_id) self.endpoint_policy_api.create_policy_association( policy_id, service_id=service_id, region_id=region_id) @controller.protected() def check_policy_association_for_region_and_service( self, context, policy_id, service_id, region_id): """Check an association between a policy and region+service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.catalog_api.get_region(region_id) self.endpoint_policy_api.check_policy_association( policy_id, service_id=service_id, region_id=region_id) @controller.protected() def delete_policy_association_for_region_and_service( self, context, policy_id, service_id, region_id): """Delete an association between a policy and region+service.""" self.policy_api.get_policy(policy_id) self.catalog_api.get_service(service_id) self.catalog_api.get_region(region_id) self.endpoint_policy_api.delete_policy_association( policy_id, service_id=service_id, region_id=region_id) @controller.protected() def get_policy_for_endpoint(self, context, endpoint_id): """Get the effective policy for an endpoint.""" self.catalog_api.get_endpoint(endpoint_id) ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id) # NOTE(henry-nash): since the collection and member for this class is # set to endpoints, we have to handle wrapping this policy entity # ourselves. self._add_self_referential_link(context, ref) return {'policy': ref} # NOTE(henry-nash): As in the catalog controller, we must ensure that the # legacy_endpoint_id does not escape. @classmethod def filter_endpoint(cls, ref): if 'legacy_endpoint_id' in ref: ref.pop('legacy_endpoint_id') return ref @classmethod def wrap_member(cls, context, ref): ref = cls.filter_endpoint(ref) return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref) @controller.protected() def list_endpoints_for_policy(self, context, policy_id): """List endpoints with the effective association to a policy.""" self.policy_api.get_policy(policy_id) refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id) return EndpointPolicyV3Controller.wrap_collection(context, refs)
[((20, 1, 20, 72), 'keystone.common.dependency.requires', 'dependency.requires', ({(20, 21, 20, 33): '"""policy_api"""', (20, 35, 20, 48): '"""catalog_api"""', (20, 50, 20, 71): '"""endpoint_policy_api"""'}, {}), "('policy_api', 'catalog_api', 'endpoint_policy_api')", False, 'from keystone.common import dependency\n'), ((52, 5, 52, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((61, 5, 61, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((70, 5, 70, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((79, 5, 79, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((88, 5, 88, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((97, 5, 97, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((106, 5, 106, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((116, 5, 116, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((126, 5, 126, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((136, 5, 136, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((161, 5, 161, 27), 'keystone.common.controller.protected', 'controller.protected', ({}, {}), '()', False, 'from keystone.common import controller\n'), ((27, 8, 28, 60), 'keystone.notifications.register_event_callback', 'notifications.register_event_callback', ({(28, 12, 28, 21): '"""deleted"""', (28, 23, 28, 33): '"""endpoint"""', (28, 35, 28, 59): 'self._on_endpoint_delete'}, {}), "('deleted', 'endpoint', self.\n _on_endpoint_delete)", False, 'from keystone import notifications\n'), ((29, 8, 30, 58), 'keystone.notifications.register_event_callback', 'notifications.register_event_callback', ({(30, 12, 30, 21): '"""deleted"""', (30, 23, 30, 32): '"""service"""', (30, 34, 30, 57): 'self._on_service_delete'}, {}), "('deleted', 'service', self.\n _on_service_delete)", False, 'from keystone import notifications\n'), ((31, 8, 32, 56), 'keystone.notifications.register_event_callback', 'notifications.register_event_callback', ({(32, 12, 32, 21): '"""deleted"""', (32, 23, 32, 31): '"""region"""', (32, 33, 32, 55): 'self._on_region_delete'}, {}), "('deleted', 'region', self.\n _on_region_delete)", False, 'from keystone import notifications\n'), ((33, 8, 34, 56), 'keystone.notifications.register_event_callback', 'notifications.register_event_callback', ({(34, 12, 34, 21): '"""deleted"""', (34, 23, 34, 31): '"""policy"""', (34, 33, 34, 55): 'self._on_policy_delete'}, {}), "('deleted', 'policy', self.\n _on_policy_delete)", False, 'from keystone import notifications\n')]
ipacheco-uy/NiBetaSeries
src/nibetaseries/cli/run.py
3d8716552f22f925524d80af9aace09469c22d4d
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Module that contains the command line app. Why does this file exist, and why not put this in __main__? You might be tempted to import things from __main__ later, but that will cause problems: the code will get executed twice: - When you run `python -m nibetaseries` python will execute ``__main__.py`` as a script. That means there won't be any ``nibetaseries.__main__`` in ``sys.modules``. - When you import __main__ it will get executed again (as a module) because there's no ``nibetaseries.__main__`` in ``sys.modules``. Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration """ from __future__ import absolute_import import os import argparse from argparse import RawTextHelpFormatter from glob import glob from multiprocessing import cpu_count from nipype import config as ncfg def get_parser(): """Build parser object""" from ..__init__ import __version__ import sys verstr = 'nibs v{}'.format(__version__) parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments', formatter_class=RawTextHelpFormatter) parser.add_argument('bids_dir', help='The directory with the input dataset ' 'formatted according to the BIDS standard.') parser.add_argument('derivatives_pipeline', help='The pipeline that contains ' 'minimally preprocessed img, brainmask, and confounds.tsv') parser.add_argument('output_dir', help='The directory where the output directory ' 'and files should be stored. If you are running group level analysis ' 'this folder should be prepopulated with the results of the' 'participant level analysis.') parser.add_argument('analysis_level', choices=['participant', 'group'], help='Level of the analysis that will be performed ' 'Multiple participant level analyses can be run independently ' '(in parallel) using the same output_dir') parser.add_argument('-v', '--version', action='version', version=verstr) # Atlas Arguments (Required Options) atlas_args = parser.add_argument_group('Required Atlas Arguments') atlas_args.add_argument('-a', '--atlas-img', action='store', required=('-l' in sys.argv or '--atlas-lut' in sys.argv), help='input atlas nifti where each voxel within a "region" ' 'is labeled with the same integer and there is a unique ' 'integer associated with each region of interest.') atlas_args.add_argument('-l', '--atlas-lut', action='store', required=('-a' in sys.argv or '--atlas-img' in sys.argv), help='atlas look up table (tsv) formatted with the columns: ' 'index, regions which correspond to the regions in the ' 'nifti file specified by --atlas-img.') # preprocessing options proc_opts = parser.add_argument_group('Options for processing') proc_opts.add_argument('--estimator', default='lss', choices=['lss', 'lsa'], help='beta series modeling method') proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0, help='select a smoothing kernel (mm)') proc_opts.add_argument('-hp', '--high-pass', action='store', type=float, default=0.0078125, help='high pass filter (Hz)') proc_opts.add_argument('-c', '--confounds', help='The confound column names ' 'that are to be included in nuisance regression. 
' 'write the confounds you wish to include separated by a space', nargs="+") proc_opts.add_argument('--hrf-model', default='glover', choices=['glover', 'spm', 'fir', 'glover + derivative', 'glover + derivative + dispersion', 'spm + derivative', 'spm + derivative + dispersion'], help='convolve your regressors ' 'with one of the following hemodynamic response functions') proc_opts.add_argument('--fir-delays', default=None, nargs='+', type=int, help='FIR delays in volumes', metavar='VOL') proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files ' 'are stored (i.e. non-essential files). ' 'This directory can be deleted once you are reasonably ' 'certain nibs finished as expected.') # Image Selection options image_opts = parser.add_argument_group('Options for selecting images') parser.add_argument('--participant-label', nargs="+", help='The label(s) of the participant(s) ' 'that should be analyzed. The label ' 'corresponds to sub-<participant_label> from the BIDS spec ' '(so it does not include "sub-"). If this parameter is not ' 'provided all subjects should be analyzed. Multiple ' 'participants can be specified with a space separated list.') image_opts.add_argument('--session-label', action='store', default=None, help='select a session to analyze') image_opts.add_argument('-t', '--task-label', action='store', default=None, help='select a specific task to be processed') image_opts.add_argument('--run-label', action='store', default=None, help='select a run to analyze') image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym', choices=['MNI152NLin2009cAsym'], help='select a bold derivative in a specific space to be used') image_opts.add_argument('--description-label', action='store', default=None, help='select a bold file with particular ' '`desc` label to process') image_opts.add_argument('--exclude-description-label', action='store_true', default=False, help='exclude this `desc` label from nibetaseries') # performance options g_perfm = parser.add_argument_group('Options to handle performance') g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int, help='maximum number of threads across all processes') g_perfm.add_argument('--use-plugin', action='store', default=None, help='nipype plugin configuration file') # misc options misc = parser.add_argument_group('misc options') misc.add_argument('--graph', action='store_true', default=False, help='generates a graph png of the workflow') return parser def main(): from ..workflows.base import init_nibetaseries_participant_wf # get commandline options opts = get_parser().parse_args() # check inputs if (opts.hrf_model == 'fir') and (opts.fir_delays is None): raise ValueError('If the FIR HRF model is selected, ' 'FIR delays must be provided.') # Set up directories # TODO: set up some sort of versioning system bids_dir = os.path.abspath(opts.bids_dir) derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline) output_dir = os.path.abspath(opts.output_dir) os.makedirs(output_dir, exist_ok=True) log_dir = os.path.join(output_dir, 'logs') os.makedirs(log_dir, exist_ok=True) if opts.work_dir: work_dir = os.path.abspath(opts.work_dir) else: work_dir = os.path.join(os.getcwd(), 'nibetaseries_work') os.makedirs(work_dir, exist_ok=True) # only for a subset of subjects if opts.participant_label: subject_list = opts.participant_label # for all subjects else: subject_dirs = glob(os.path.join(bids_dir, "sub-*")) subject_list = [subject_dir.split("-")[-1] for 
subject_dir in subject_dirs] # Nipype plugin configuration # Load base plugin_settings from file if --use-plugin if opts.use_plugin is not None: from yaml import load as loadyml with open(opts.use_plugin) as f: plugin_settings = loadyml(f) plugin_settings.setdefault('plugin_args', {}) else: # Defaults plugin_settings = { 'plugin': 'MultiProc', 'plugin_args': { 'raise_insufficient': False, 'maxtasksperchild': 1, } } # Resource management options # Note that we're making strong assumptions about valid plugin args # This may need to be revisited if people try to use batch plugins nthreads = plugin_settings['plugin_args'].get('n_procs') # Permit overriding plugin config with specific CLI options if nthreads is None or opts.nthreads is not None: nthreads = opts.nthreads if nthreads is None or nthreads < 1: nthreads = cpu_count() plugin_settings['plugin_args']['n_procs'] = nthreads # Nipype config (logs and execution) ncfg.update_config({ 'logging': {'log_directory': log_dir, 'log_to_file': True}, 'execution': {'crashdump_dir': log_dir, 'crashfile_format': 'txt', 'parameterize_dirs': False}, }) # running participant level if opts.analysis_level == "participant": nibetaseries_participant_wf = init_nibetaseries_participant_wf( estimator=opts.estimator, atlas_img=os.path.abspath(opts.atlas_img), atlas_lut=os.path.abspath(opts.atlas_lut), bids_dir=bids_dir, derivatives_pipeline_dir=derivatives_pipeline_dir, exclude_description_label=opts.exclude_description_label, fir_delays=opts.fir_delays, hrf_model=opts.hrf_model, high_pass=opts.high_pass, output_dir=output_dir, run_label=opts.run_label, selected_confounds=opts.confounds, session_label=opts.session_label, smoothing_kernel=opts.smoothing_kernel, space_label=opts.space_label, subject_list=subject_list, task_label=opts.task_label, description_label=opts.description_label, work_dir=work_dir, ) if opts.graph: nibetaseries_participant_wf.write_graph(graph2use='colored', format='svg', simple_form=True) try: nibetaseries_participant_wf.run(**plugin_settings) except RuntimeError as e: if "Workflow did not execute cleanly" in str(e): print("Workflow did not execute cleanly") else: raise e elif opts.analysis_level == "group": raise NotImplementedError('group analysis not currently implemented') def init(): if __name__ == "__main__": raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n" "Please `pip install` NiBetaSeries and use the `nibs` command") init()
[((35, 13, 36, 74), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((146, 15, 146, 45), 'os.path.abspath', 'os.path.abspath', ({(146, 31, 146, 44): 'opts.bids_dir'}, {}), '(opts.bids_dir)', False, 'import os\n'), ((148, 31, 148, 95), 'os.path.join', 'os.path.join', ({(148, 44, 148, 52): 'bids_dir', (148, 54, 148, 67): '"""derivatives"""', (148, 69, 148, 94): 'opts.derivatives_pipeline'}, {}), "(bids_dir, 'derivatives', opts.derivatives_pipeline)", False, 'import os\n'), ((150, 17, 150, 49), 'os.path.abspath', 'os.path.abspath', ({(150, 33, 150, 48): 'opts.output_dir'}, {}), '(opts.output_dir)', False, 'import os\n'), ((151, 4, 151, 42), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((153, 14, 153, 46), 'os.path.join', 'os.path.join', ({(153, 27, 153, 37): 'output_dir', (153, 39, 153, 45): '"""logs"""'}, {}), "(output_dir, 'logs')", False, 'import os\n'), ((154, 4, 154, 39), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((161, 4, 161, 40), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((200, 4, 206, 6), 'nipype.config.update_config', 'ncfg.update_config', ({(200, 23, 206, 5): "{'logging': {'log_directory': log_dir, 'log_to_file': True}, 'execution': {\n 'crashdump_dir': log_dir, 'crashfile_format': 'txt',\n 'parameterize_dirs': False}}"}, {}), "({'logging': {'log_directory': log_dir, 'log_to_file': \n True}, 'execution': {'crashdump_dir': log_dir, 'crashfile_format':\n 'txt', 'parameterize_dirs': False}})", True, 'from nipype import config as ncfg\n'), ((157, 19, 157, 49), 'os.path.abspath', 'os.path.abspath', ({(157, 35, 157, 48): 'opts.work_dir'}, {}), '(opts.work_dir)', False, 'import os\n'), ((159, 32, 159, 43), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((168, 28, 168, 59), 'os.path.join', 'os.path.join', ({(168, 41, 168, 49): 'bids_dir', (168, 51, 168, 58): '"""sub-*"""'}, {}), "(bids_dir, 'sub-*')", False, 'import os\n'), ((176, 30, 176, 40), 'yaml.load', 'loadyml', ({(176, 38, 176, 39): 'f'}, {}), '(f)', True, 'from yaml import load as loadyml\n'), ((196, 23, 196, 34), 'multiprocessing.cpu_count', 'cpu_count', ({}, {}), '()', False, 'from multiprocessing import cpu_count\n'), ((212, 22, 212, 53), 'os.path.abspath', 'os.path.abspath', ({(212, 38, 212, 52): 'opts.atlas_img'}, {}), '(opts.atlas_img)', False, 'import os\n'), ((213, 22, 213, 53), 'os.path.abspath', 'os.path.abspath', ({(213, 38, 213, 52): 'opts.atlas_lut'}, {}), '(opts.atlas_lut)', False, 'import os\n')]
astrandb/senz_hass
custom_components/senz/config_flow.py
6725d37fd9c6d250ac10a16e68c56908bf1c8404
"""Config flow for SENZ WiFi.""" from __future__ import annotations import logging from typing import Any import voluptuous as vol from homeassistant.components import persistent_notification from homeassistant.data_entry_flow import FlowResult from homeassistant.helpers import config_entry_oauth2_flow from .const import DOMAIN from .pysenz import PreAPI class OAuth2FlowHandler( config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN ): """Config flow to handle SENZ WiFi OAuth2 authentication.""" DOMAIN = DOMAIN @property def logger(self) -> logging.Logger: """Return logger.""" return logging.getLogger(__name__) @property def extra_authorize_data(self) -> dict: """Extra data that needs to be appended to the authorize url.""" return { "scope": "restapi offline_access", } async def async_step_reauth( self, entry: dict[str, Any] | None = None ) -> FlowResult: """Perform reauth upon an API authentication error.""" self.entry = entry persistent_notification.async_create( self.hass, f"Senz integration for account {entry['auth_implementation']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it.", "Senz re-authentication", "senz_reauth", ) return await self.async_step_reauth_confirm() async def async_step_reauth_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Dialog that informs the user that reauth is required.""" if user_input is None: return self.async_show_form( step_id="reauth_confirm", description_placeholders={"account": self.entry["auth_implementation"]}, data_schema=vol.Schema({}), errors={}, ) persistent_notification.async_dismiss(self.hass, "senz_reauth") return await self.async_step_user() async def async_oauth_create_entry(self, data: dict) -> dict: """Create an oauth config entry or update existing entry for reauth.""" pre_api = PreAPI(self.hass) resp = await pre_api.getAccount(data["token"]["access_token"]) account = resp["userName"] existing_entry = await self.async_set_unique_id(account) if existing_entry: self.hass.config_entries.async_update_entry(existing_entry, data=data) await self.hass.config_entries.async_reload(existing_entry.entry_id) return self.async_abort(reason="reauth_successful") return self.async_create_entry(title=account, data=data)
[((27, 15, 27, 42), 'logging.getLogger', 'logging.getLogger', ({(27, 33, 27, 41): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((43, 8, 48, 9), 'homeassistant.components.persistent_notification.async_create', 'persistent_notification.async_create', ({(44, 12, 44, 21): 'self.hass', (45, 12, 45, 183): 'f"""Senz integration for account {entry[\'auth_implementation\']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it."""', (46, 12, 46, 36): '"""Senz re-authentication"""', (47, 12, 47, 25): '"""senz_reauth"""'}, {}), '(self.hass,\n f"Senz integration for account {entry[\'auth_implementation\']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it."\n , \'Senz re-authentication\', \'senz_reauth\')', False, 'from homeassistant.components import persistent_notification\n'), ((63, 8, 63, 71), 'homeassistant.components.persistent_notification.async_dismiss', 'persistent_notification.async_dismiss', ({(63, 46, 63, 55): 'self.hass', (63, 57, 63, 70): '"""senz_reauth"""'}, {}), "(self.hass, 'senz_reauth')", False, 'from homeassistant.components import persistent_notification\n'), ((59, 28, 59, 42), 'voluptuous.Schema', 'vol.Schema', ({(59, 39, 59, 41): '{}'}, {}), '({})', True, 'import voluptuous as vol\n')]
bsipocz/astropy-helpers
astropy_helpers/git_helpers.py
4999df1cfb6a5022347b0cef9caf8a556517c625
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utilities for retrieving revision information from a project's git repository. """ # Do not remove the following comment; it is used by # astropy_helpers.version_helpers to determine the beginning of the code in # this module # BEGIN import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: # otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 128: # git returns 128 if the command is not run from within a git # repository tree. 
In this case, a warning is produced above but we # return the default dev version of '0'. return '0' elif not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() # This function is tested but it is only ever executed within a subprocess when # creating a fake package, so it doesn't get picked up by coverage metrics. def _get_repo_path(pathname, levels=None): # pragma: no cover """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None
[((174, 7, 174, 31), 'os.path.isfile', 'os.path.isfile', ({(174, 22, 174, 30): 'pathname'}, {}), '(pathname)', False, 'import os\n'), ((92, 15, 92, 26), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((94, 11, 94, 30), 'os.path.isdir', 'os.path.isdir', ({(94, 25, 94, 29): 'path'}, {}), '(path)', False, 'import os\n'), ((176, 9, 176, 32), 'os.path.isdir', 'os.path.isdir', ({(176, 23, 176, 31): 'pathname'}, {}), '(pathname)', False, 'import os\n'), ((191, 22, 191, 50), 'os.path.dirname', 'os.path.dirname', ({(191, 38, 191, 49): 'current_dir'}, {}), '(current_dir)', False, 'import os\n'), ((95, 31, 95, 52), 'os.path.dirname', 'os.path.dirname', ({(95, 47, 95, 51): 'path'}, {}), '(path)', False, 'import os\n'), ((105, 16, 108, 55), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((175, 38, 175, 63), 'os.path.dirname', 'os.path.dirname', ({(175, 54, 175, 62): 'pathname'}, {}), '(pathname)', False, 'import os\n'), ((177, 22, 177, 47), 'os.path.abspath', 'os.path.abspath', ({(177, 38, 177, 46): 'pathname'}, {}), '(pathname)', False, 'import os\n'), ((184, 26, 184, 59), 'os.path.join', 'os.path.join', ({(184, 39, 184, 50): 'current_dir', (184, 52, 184, 58): '""".git"""'}, {}), "(current_dir, '.git')", False, 'import os\n'), ((188, 26, 188, 54), 'os.path.dirname', 'os.path.dirname', ({(188, 42, 188, 53): 'current_dir'}, {}), '(current_dir)', False, 'import os\n'), ((21, 25, 21, 50), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ({}, {}), '()', False, 'import locale\n')]
imaroger/sot-talos-balance
src/sot_talos_balance/test/test_feet_admittance.py
5e56700b4e105273ecf6feb3474789beac469a77
'''Test feet admittance control'''

from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient

try:
    # Python 2
    input = raw_input  # noqa
except NameError:
    pass

run_test('appli_feet_admittance.py')

run_ft_calibration('robot.ftc')
input("Wait before running the test")

print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')

input("Wait before dumping the data")

runCommandClient('dump_tracer(robot.tracer)')
[((10, 0, 10, 36), 'sot_talos_balance.utils.run_test_utils.run_test', 'run_test', ({(10, 9, 10, 35): '"""appli_feet_admittance.py"""'}, {}), "('appli_feet_admittance.py')", False, 'from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient\n'), ((12, 0, 12, 31), 'sot_talos_balance.utils.run_test_utils.run_ft_calibration', 'run_ft_calibration', ({(12, 19, 12, 30): '"""robot.ftc"""'}, {}), "('robot.ftc')", False, 'from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient\n'), ((16, 0, 16, 88), 'sot_talos_balance.utils.run_test_utils.runCommandClient', 'runCommandClient', ({(16, 17, 16, 87): '"""robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]"""'}, {}), "(\n 'robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')", False, 'from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient\n'), ((20, 0, 20, 45), 'sot_talos_balance.utils.run_test_utils.runCommandClient', 'runCommandClient', ({(20, 17, 20, 44): '"""dump_tracer(robot.tracer)"""'}, {}), "('dump_tracer(robot.tracer)')", False, 'from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient\n')]
davebryson/py-tendermint
tests/test_db.py
ec6a38a54950d9841759b0f2ed93659b58948a03
import os

from tendermint.db import VanillaDB
from tendermint.utils import home_dir


def test_database():
    dbfile = home_dir('temp', 'test.db')
    db = VanillaDB(dbfile)

    db.set(b'dave', b'one')
    result = db.get(b'dave')
    assert(b'one' == result)

    db.set(b'dave', b'two')
    result = db.get(b'dave')
    assert(b'two' == result)

    assert(None == db.get(b'doesntexist'))

    assert(db.exists(b'dave'))
    db.delete(b'dave')
    assert(db.exists(b'dave') == False)

    if os.path.exists(dbfile):
        os.remove(dbfile)
[((7, 13, 7, 40), 'tendermint.utils.home_dir', 'home_dir', ({(7, 22, 7, 28): '"""temp"""', (7, 30, 7, 39): '"""test.db"""'}, {}), "('temp', 'test.db')", False, 'from tendermint.utils import home_dir\n'), ((8, 9, 8, 26), 'tendermint.db.VanillaDB', 'VanillaDB', ({(8, 19, 8, 25): 'dbfile'}, {}), '(dbfile)', False, 'from tendermint.db import VanillaDB\n'), ((24, 7, 24, 29), 'os.path.exists', 'os.path.exists', ({(24, 22, 24, 28): 'dbfile'}, {}), '(dbfile)', False, 'import os\n'), ((25, 8, 25, 25), 'os.remove', 'os.remove', ({(25, 18, 25, 24): 'dbfile'}, {}), '(dbfile)', False, 'import os\n')]
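The test above exercises the byte-oriented VanillaDB key/value surface (set, get, exists, delete). Below is a small sketch of a convenience helper built only on those calls; the helper name, default behaviour, and 'example.db' path are assumptions for illustration, not part of py-tendermint.

from tendermint.db import VanillaDB
from tendermint.utils import home_dir


def get_or_default(db, key, default=b''):
    """Return the stored value for ``key``, or ``default`` when the key is absent.

    Relies only on VanillaDB.get() returning None for missing keys,
    as asserted in the test above.
    """
    value = db.get(key)
    return default if value is None else value


if __name__ == '__main__':
    db = VanillaDB(home_dir('temp', 'example.db'))  # hypothetical path
    db.set(b'height', b'42')
    print(get_or_default(db, b'height'))         # b'42'
    print(get_or_default(db, b'missing', b'0'))  # b'0'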
asb29/Redundant
auth/tests/test_views.py
ee816fd41f9217610bd11f757cf9175288723c70
from django.test import TestCase
from django.test import Client


class RegisterTestCase(TestCase):
    def test_register(self):
        c = Client()
        # on success redirects to /
        response = c.post('/accounts/register/', {
            'username': 'asdas',
            'password1': 'asdasdasd12',
            'password2': 'asdasdasd12'
        })
        self.assertRedirects(response, '/')

        # passwords don't match
        response = c.post('/accounts/register/', {
            'username': 'asdasdasd1',
            'password1': 'asdasdasd1',
            'password2': 'asdasdasd2'
        })
        self.assertEquals(response.status_code, 200)

        # username is empty
        response = c.post('/accounts/register/', {
            'username': '',
            'password1': 'asdasdasd12',
            'password2': 'asdasdasd12'
        })
        self.assertEquals(response.status_code, 200)

        # no password
        response = c.post('/accounts/register/', {
            'username': 'asdasdasd',
            'password1': '',
            'password2': ''
        })
        self.assertEquals(response.status_code, 200)

        # username and password are similar
        response = c.post('/accounts/register/', {
            'username': 'asdasdasd0',
            'password1': 'asdasdasd1',
            'password2': 'asdasdasd1'
        })
        self.assertEquals(response.status_code, 200)
[((7, 12, 7, 20), 'django.test.Client', 'Client', ({}, {}), '()', False, 'from django.test import Client\n')]
iFighting/OneNet
projects/OneNet/onenet/head.py
6e33b46d2aa13131262833c75f0fd1c3d224ef03
#
# Modified by Peize Sun
# Contact: [email protected]
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.

Copy-paste from torch.nn.Transformer with modifications:
    * positional encodings are passed in MHattention
    * extra LN at the end of encoder is removed
    * decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List

import torch
from torch import nn, Tensor
import torch.nn.functional as F

from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes

from .deconv import CenternetDeconv


class Head(nn.Module):

    def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):
        super().__init__()

        # Build heads.
        num_classes = cfg.MODEL.OneNet.NUM_CLASSES
        d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]
        activation = cfg.MODEL.OneNet.ACTIVATION

        self.deconv = CenternetDeconv(cfg, backbone_shape)

        self.num_classes = num_classes
        self.d_model = d_model
        self.num_classes = num_classes
        self.activation = _get_activation_fn(activation)

        self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)
        self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)
        self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1)

        # Init parameters.
        prior_prob = cfg.MODEL.OneNet.PRIOR_PROB
        self.bias_value = -math.log((1 - prior_prob) / prior_prob)
        self._reset_parameters()

    def _reset_parameters(self):
        # init all parameters.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

            # initialize the bias for focal loss.
            if p.shape[-1] == self.num_classes:
                nn.init.constant_(p, self.bias_value)

    def forward(self, features_list):

        features = self.deconv(features_list)
        locations = self.locations(features)[None]

        feat = self.activation(self.feat1(features))

        class_logits = self.cls_score(feat)
        pred_ltrb = F.relu(self.ltrb_pred(feat))
        pred_bboxes = self.apply_ltrb(locations, pred_ltrb)

        return class_logits, pred_bboxes

    def apply_ltrb(self, locations, pred_ltrb):
        """
        :param locations: (1, 2, H, W)
        :param pred_ltrb: (N, 4, H, W)
        """

        pred_boxes = torch.zeros_like(pred_ltrb)
        pred_boxes[:, 0, :, :] = locations[:, 0, :, :] - pred_ltrb[:, 0, :, :]  # x1
        pred_boxes[:, 1, :, :] = locations[:, 1, :, :] - pred_ltrb[:, 1, :, :]  # y1
        pred_boxes[:, 2, :, :] = locations[:, 0, :, :] + pred_ltrb[:, 2, :, :]  # x2
        pred_boxes[:, 3, :, :] = locations[:, 1, :, :] + pred_ltrb[:, 3, :, :]  # y2

        return pred_boxes

    @torch.no_grad()
    def locations(self, features, stride=4):
        """
        Arguments:
            features: (N, C, H, W)
        Return:
            locations: (2, H, W)
        """

        h, w = features.size()[-2:]
        device = features.device

        shifts_x = torch.arange(
            0, w * stride,
            step=stride,
            dtype=torch.float32, device=device
        )
        shifts_y = torch.arange(
            0, h * stride,
            step=stride,
            dtype=torch.float32, device=device
        )
        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
        shift_x = shift_x.reshape(-1)
        shift_y = shift_y.reshape(-1)
        locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2

        locations = locations.reshape(h, w, 2).permute(2, 0, 1)

        return locations


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
[((89, 5, 89, 20), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((43, 21, 43, 94), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', False, 'from torch import nn, Tensor\n'), ((44, 25, 44, 92), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', False, 'from torch import nn, Tensor\n'), ((45, 25, 45, 82), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', False, 'from torch import nn, Tensor\n'), ((81, 21, 81, 48), 'torch.zeros_like', 'torch.zeros_like', ({(81, 38, 81, 47): 'pred_ltrb'}, {}), '(pred_ltrb)', False, 'import torch\n'), ((101, 19, 104, 9), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((105, 19, 108, 9), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((109, 27, 109, 61), 'torch.meshgrid', 'torch.meshgrid', ({(109, 42, 109, 50): 'shifts_y', (109, 52, 109, 60): 'shifts_x'}, {}), '(shifts_y, shifts_x)', False, 'import torch\n'), ((49, 27, 49, 66), 'math.log', 'math.log', ({(49, 36, 49, 65): '((1 - prior_prob) / prior_prob)'}, {}), '((1 - prior_prob) / prior_prob)', False, 'import math\n'), ((112, 20, 112, 58), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((56, 16, 56, 42), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', ({(56, 40, 56, 41): 'p'}, {}), '(p)', False, 'from torch import nn, Tensor\n'), ((60, 16, 60, 53), 'torch.nn.init.constant_', 'nn.init.constant_', ({(60, 34, 60, 35): 'p', (60, 37, 60, 52): 'self.bias_value'}, {}), '(p, self.bias_value)', False, 'from torch import nn, Tensor\n')]
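The box decoding in Head.apply_ltrb pairs each grid location (a pixel centre produced by Head.locations) with predicted left/top/right/bottom distances. Below is a standalone numeric sketch of that decoding; the tiny grid size, stride, and the constant offset value are made-up values for illustration only.

# Sketch of the location grid + ltrb decoding used by the record above.
import torch

stride = 4
h, w = 2, 3  # tiny feature map for illustration

# Same construction as Head.locations(): centre coordinates of each cell.
ys, xs = torch.meshgrid(torch.arange(0, h * stride, stride, dtype=torch.float32),
                        torch.arange(0, w * stride, stride, dtype=torch.float32))
locations = torch.stack((xs.reshape(-1), ys.reshape(-1)), dim=1) + stride // 2  # (H*W, 2)
locations = locations.reshape(h, w, 2).permute(2, 0, 1)[None]                    # (1, 2, H, W)

# Made-up non-negative l, t, r, b distances, shaped like the ltrb_pred output.
pred_ltrb = torch.full((1, 4, h, w), 3.0)

# Same arithmetic as Head.apply_ltrb(): x1 = cx - l, y1 = cy - t, x2 = cx + r, y2 = cy + b.
boxes = torch.zeros(1, 4, h, w)
boxes[:, 0] = locations[:, 0] - pred_ltrb[:, 0]
boxes[:, 1] = locations[:, 1] - pred_ltrb[:, 1]
boxes[:, 2] = locations[:, 0] + pred_ltrb[:, 2]
boxes[:, 3] = locations[:, 1] + pred_ltrb[:, 3]

print(boxes[0, :, 0, 0])  # location (2, 2) with l=t=r=b=3 -> box (-1, -1, 5, 5)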
HastingsGreer/mermaid
mermaid/utils.py
bd13c5fc427eb8cd9054973a8eaaeb302078182d
"""Various utility functions. .. todo:: Reorganize this package in a more meaningful way. """ from __future__ import print_function from __future__ import absolute_import # from builtins import str # from builtins import range import torch from torch.nn.parameter import Parameter from torch.autograd import Variable from .libraries.modules.stn_nd import STN_ND_BCXYZ from .data_wrapper import AdaptVal from .data_wrapper import MyTensor from . import smoother_factory as sf from .data_wrapper import USE_CUDA import numpy as np from . import finite_differences as fd import torch.nn as nn import torch.nn.init as init from . import module_parameters as pars from .spline_interpolation import SplineInterpolation_ND_BCXYZ import os try: from .libraries.functions.nn_interpolation import get_nn_interpolation except ImportError: print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). ' 'Some functionality may not be available.') def my_hasnan(x): """Check if any input elements are NaNs. :param x: numpy array :return: True if NaNs are present, False else """ return (x != x).any() def create_symlink_with_correct_ext(sf, tf): abs_s = os.path.abspath(sf) ext_s = os.path.splitext(abs_s)[1] abs_t = os.path.abspath(tf) root_t,ext_t = os.path.splitext(abs_t) abs_t_with_right_ext = root_t + ext_s if os.path.isfile(abs_t_with_right_ext): if os.path.samefile(abs_s,abs_t_with_right_ext): # nothing to do here, these are already the same file return else: os.remove(abs_t_with_right_ext) # now we can do the symlink os.symlink(abs_s,abs_t_with_right_ext) def combine_dict(d1,d2): """Creates a dictionary which has entries from both of them. :param d1: dictionary 1 :param d2: dictionary 2 :return: resulting dictionary """ d = d1.copy() d.update(d2) return d def get_parameter_list_from_parameter_dict(pd): """Takes a dictionary which contains key value pairs for model parameters and converts it into a list of parameters that can be used as an input to an optimizer. :param pd: parameter dictionary :return: list of parameters """ pl = [] for key in pd: pl.append(pd[key]) return pl def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd): """Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys based on memory id. 
:param pd: parameter dictionary :return: tuple of (parameter_list, name_dictionary) """ par_to_name_dict = dict() pl = [] for key in pd: pl.append(pd[key]) par_to_name_dict[pd[key]] = key return pl, par_to_name_dict def remove_infs_from_variable(v): # 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor # 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor # 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor # todo: maybe find a cleaner way of handling this # this is to make sure that subsequent sums work (hence will be smaller than it could be, # but values of this size should not occur in practice anyway sz = v.size() reduction_factor = np.prod(np.array(sz)) condition = True if type(v.data) == torch.cuda.FloatTensor or v.data.dtype==torch.float32: return torch.clamp(v, min=(np.asscalar(np.finfo('float32').min))/reduction_factor, max=(np.asscalar(np.finfo('float32').max))/reduction_factor) elif v.data.dtype == torch.DoubleTensor or type(v.data) == torch.cuda.DoubleTensor: return torch.clamp(v, min=(np.asscalar(np.finfo('float64').min))/reduction_factor, max=(np.asscalar(np.finfo('float64').max))/reduction_factor) elif v.data.dtype == torch.HalfTensor or type(v.data) == torch.cuda.HalfTensor: return torch.clamp(v, min=(np.asscalar(np.finfo('float16').min))/reduction_factor, max=(np.asscalar(np.finfo('float16').max))/reduction_factor) else: raise ValueError('Unknown data type: ' + str( type(v.data))) def lift_to_dimension(A, dim): """Creates a view of A of dimension dim (by adding dummy dimensions if necessary). :param A: numpy array :param dim: desired dimension of view :return: returns view of A of appropriate dimension """ current_dim = len(A.shape) if current_dim > dim: raise ValueError('Can only add dimensions, but not remove them') if current_dim == dim: return A else: return A.reshape([1]*(dim-current_dim)+list(A.shape)) def get_dim_of_affine_transform(Ab): """Returns the number of dimensions corresponding to an affine transformation of the form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply [a1;a2;a3;b], i.e., all columns stacked on top of each other. :param Ab: parameter vector :return: dimensionality of transform (1,2,or 3) """ nr = len(Ab) if nr==2: return 1 elif nr==6: return 2 elif nr==12: return 3 else: raise ValueError('Only supports dimensions 1, 2, and 3.') def set_affine_transform_to_identity(Ab): """Sets the affine transformation as given by the column vector Ab to the identity transform. :param Ab: Affine parameter vector (will be overwritten with the identity transform) :return: """ dim = get_dim_of_affine_transform(Ab) if dim==1: Ab.zero_() Ab[0]=1. elif dim==2: Ab.zero_() Ab[0]=1. Ab[3]=1. elif dim==3: Ab.zero_() Ab[0]=1. Ab[4]=1. Ab[8]=1. else: raise ValueError('Only supports dimensions 1, 2, and 3.') def set_affine_transform_to_identity_multiN(Ab): """Set the affine transforms to the identity (in the case of arbitrary batch size). :param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans. :return: """ sz = Ab.size() nr_of_images = sz[0] for nrI in range(nr_of_images): set_affine_transform_to_identity(Ab[nrI, :]) def get_inverse_affine_param(Ab): """Computes inverse of affine transformation. Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb :param Ab: B x pars (batch size x param. 
vector) :return: Inverse of affine parameters """ dim =0 if Ab.shape[1] == 2: dim = 1 elif Ab.shape[1] == 6: dim = 2 elif Ab.shape[1] == 12: dim = 3 if dim not in [1, 2, 3]: raise ValueError('Only supports dimensions 1, 2, and 3.') Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2) Ab_inv = torch.zeros_like(Ab) for n in range(Ab.shape[0]): tm_inv = torch.inverse(Ab[n, :, :dim]) Ab_inv[n, :, :dim] = tm_inv Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim]) inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1) return inv_affine_param def update_affine_param(Ab, Cd): """Update affine parameters. Formally: C(Ax+b)+d = CAx+Cb+d :param Ab: B x pars (batch size x param. vector) :return: Updated affine parameters """ dim = 0 if Ab.shape[1]==2: dim = 1 elif Ab.shape[1]==6: dim = 2 elif Ab.shape[1]==12: dim = 3 if dim not in [1, 2, 3]: raise ValueError('Only supports dimensions 1, 2, and 3.') Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2) Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2) updated_param = torch.zeros_like(Ab) for n in range(Ab.shape[0]): tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim]) updated_param[n,:,:dim] = tm_param updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim] updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1) return updated_param def apply_affine_transform_to_map(Ab,phi): """Applies an affine transform to a map. :param Ab: affine transform parameter column vector :param phi: map; format nrCxXxYxZ (nrC corresponds to dimension) :return: returns transformed map """ sz = phi.size() dim = len(sz) - 1 if dim not in [1,2,3]: raise ValueError('Only supports dimensions 1, 2, and 3.') phiR = MyTensor(sz).zero_().type_as(phi) if dim == 1: phiR = phi * Ab[0] + Ab[1] elif dim == 2: phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1 phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2 elif dim == 3: phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9] phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10] phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11] else: raise ValueError('Only supports dimensions 1, 2, and 3.') return phiR def apply_affine_transform_to_map_multiNC(Ab,phi): """Applies an affine transform to maps (for arbitrary batch size). :param Ab: affine transform parameter column vectors (batch size x param. vector) :param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension) :return: returns transformed maps """ sz = phi.size() dim = get_dim_of_affine_transform(Ab[0,:]) nr_of_images = Ab.size()[0] if nr_of_images != sz[0]: raise ValueError('Incompatible number of affine transforms') if dim != len(sz)-2: raise ValueError('Incompatible number of affine transforms') phiR = MyTensor(sz).zero_().type_as(phi) for nrI in range(nr_of_images): phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...]) return phiR def compute_normalized_gaussian(X, mu, sig): """Computes a normalized Gaussian. 
:param X: map with coordinates at which to evaluate :param mu: array indicating the mean :param sig: array indicating the standard deviations for the different dimensions :return: Normalized Gaussian evaluated at coordinates in X Example:: >>> mu, sig = [1,1], [1,1] >>> X = [0,0] >>> print(compute_normalized_gaussian(X, mu, sig) """ dim = len(mu) if dim == 1: g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.))) g = g/g.sum() return g elif dim == 2: g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.)) - np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))) g = g/g.sum() return g elif dim == 3: g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.)) -np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)) -np.power(X[2,:, :, :] - mu[2], 2.) / (2 * np.power(sig[2], 2.))) g = g / g.sum() return g else: raise ValueError('Can only compute Gaussians in dimensions 1-3') def _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True): if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise ValueError('Currently only orders 0 to 9 are supported') if spline_order == 0: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=False, use_01_input=use_01_input) elif spline_order == 1: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=True, use_01_input=use_01_input) else: stn = SplineInterpolation_ND_BCXYZ(spacing, spline_order) I1_warped = stn(I0, phi) return I1_warped def _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True): if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise ValueError('Currently only orders 0 to 9 are supported') if spline_order == 0: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=False, use_01_input=use_01_input) elif spline_order == 1: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=True, use_01_input=use_01_input) else: stn = SplineInterpolation_ND_BCXYZ(spacing, spline_order) I1_warped = stn(I0, phi) return I1_warped def _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True): if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise ValueError('Currently only orders 0 to 9 are supported') if spline_order == 0: # return get_warped_label_map(I0,phi,spacing) stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=False, use_01_input=use_01_input) elif spline_order == 1: stn = STN_ND_BCXYZ(spacing,zero_boundary, use_bilinear=True, use_01_input=use_01_input) else: stn = SplineInterpolation_ND_BCXYZ(spacing, spline_order) I1_warped = stn(I0, phi) return I1_warped def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True): """Warps image. :param I0: image to warp, image size XxYxZ :param phi: map for the warping, size dimxXxYxZ :param spacing: image spacing [dx,dy,dz] :return: returns the warped image of size XxYxZ """ # implements this by creating a different view (effectively adding dimensions) Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))), phi.view(torch.Size([1] + list(phi.size()))), spacing, spline_order, zero_boundary, use_01_input) return Iw.view(I0.size()) def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True): """Warps image. 
:param I0: image to warp, image size BxCxXxYxZ :param phi: map for the warping, size BxdimxXxYxZ :param spacing: image spacing [dx,dy,dz] :return: returns the warped image of size BxCxXxYxZ """ dim = I0.dim()-2 if dim == 1: return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input) elif dim == 2: return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input) elif dim == 3: return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input) else: raise ValueError('Images can only be warped in dimensions 1 to 3') def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize): """Computes spacing for the low-res parametrization from image spacing. :param spacing: image spacing :param sz: size of image :param lowResSize: size of low re parameterization :return: returns spacing of low res parameterization """ #todo: check that this is the correct way of doing it return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1) def _get_low_res_size_from_size(sz, factor): """Returns the corresponding low-res size from a (high-res) sz. :param sz: size (high-res) :param factor: low-res factor (needs to be <1) :return: low res size """ if (factor is None) or (factor >= 1): print('WARNING: Could not compute low_res_size as factor was ' + str(factor)) return np.array(sz) else: low_res_sz = np.array(sz) low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16') return low_res_sz def _compute_low_res_image(I, spacing, low_res_size, spline_order): import mermaid.image_sampling as IS sampler = IS.ResampleImage() low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order) return low_res_image def individual_parameters_to_model_parameters(ind_pars): model_pars = dict() if type(ind_pars) == type(dict()): # should already be in the right format model_pars = ind_pars else: # if ind_pars is not a dictionary assume that they come from the optimizer # (i.e., list and each list element has a dictionary with keys 'name' and 'model_params' for par in ind_pars: model_pars[par['name']] = par['model_params'] return model_pars def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing): """Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`. :param lam: scalar momentum, BxCxXxYxZ :param I: image, BxCxXxYxZ :param sz: size of image :param spacing: spacing of image :return: returns the vector momentum """ nrOfI = sz[0] # number of images m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI) # attention that the second dimension here is image dim, not nrOfC nrOfC = sz[1] for c in range(nrOfC): # loop over all the channels and add the results m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...], I[:, c, ...], nrOfI, sz[2::], spacing) return m def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing): """Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`. 
:param lam: scalar momentum, batchxXxYxZ :param I: image, batchXxYxZ :param sz: size of image :param spacing: spacing of image :return: returns the vector momentum """ fdt = fd.FD_torch(spacing) dim = len(sz) m = create_ND_vector_field_variable_multiN(sz, nrOfI) if dim == 1: m[:, 0, :] = fdt.dXc(I)*lam elif dim == 2: m[:, 0, :, :] = fdt.dXc(I)*lam m[:, 1, :, :] = fdt.dYc(I)*lam elif dim == 3: m[:, 0, :, :, :] = fdt.dXc(I)*lam m[:, 1, :, :, :] = fdt.dYc(I)*lam m[:, 2, :, :, :] = fdt.dZc(I)*lam else: raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3') return m def create_ND_vector_field_variable_multiN(sz, nr_of_images=1): """ Create vector field torch Variable of given size :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :return: returns vector field of size nrOfIxdimxXxYxZ """ dim = len(sz) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nr_of_images, dim]+list(csz)) return MyTensor(*(csz.tolist())).normal_(0., 1e-7) def create_ND_vector_field_variable(sz): """Create vector field torch Variable of given size. :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :return: returns vector field of size dimxXxYxZ """ dim = len(sz) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([dim]+list(csz)) return MyTensor(*(csz.tolist())).normal_(0.,1e-7) def create_vector_parameter(nr_of_elements): """Creates a vector parameters with a specified number of elements. :param nr_of_elements: number of vector elements :return: returns the parameter vector """ return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7)) def create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False): """Create vector field torch Parameter of given size. :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :return: returns vector field of size nrOfIxdimxXxYxZ """ dim = len(sz) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nrOfI, dim]+list(csz)) if get_field_from_external_network: tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7) tmp.requires_grad = True else: tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7)) return tmp def create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False): """ Create vector field torch Parameter of given size :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :return: returns vector field of size nrOfIxdimxXxYxZ """ nr_of_mg_weights = len(gaussian_std_weights) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nrOfI,nr_of_mg_weights]+list(csz)) weights = torch.empty(*csz) # set the default if sched =='w_K_w': gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights] for g in range(nr_of_mg_weights): weights[:, g, ...] 
= gaussian_std_weights[g] tmp = AdaptVal(weights) if get_preweight_from_network: tmp.requires_grad = True else: tmp = Parameter(tmp) return tmp def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1): """ Create vector field torch Parameter of given size :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :param nrOfC: number of channels :return: returns vector field of size nrOfIxnrOfCxXxYxZ """ csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nrOfI,nrOfC]+list(csz)) return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7)) def centered_identity_map_multiN(sz, spacing, dtype='float32'): """ Create a centered identity map (shifted so it is centered around 0) :param sz: size of an image in BxCxXxYxZ format :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) :return: returns the identity map """ dim = len(sz) - 2 nrOfI = sz[0] if dim == 1: id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype) elif dim == 2: id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype) elif dim == 3: id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype) else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') for n in range(nrOfI): id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype) return id def identity_map_multiN(sz,spacing,dtype='float32'): """ Create an identity map :param sz: size of an image in BxCxXxYxZ format :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) :return: returns the identity map """ dim = len(sz)-2 nrOfI = int(sz[0]) if dim == 1: id = np.zeros([nrOfI,1,sz[2]],dtype=dtype) elif dim == 2: id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype) elif dim == 3: id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype) else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') for n in range(nrOfI): id[n,...] = identity_map(sz[2::],spacing,dtype=dtype) return id def centered_identity_map(sz, spacing, dtype='float32'): """ Returns a centered identity map (with 0 in the middle) if the sz is odd Otherwise shifts everything by 0.5*spacing :param sz: just the spatial dimensions, i.e., XxYxZ :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) 
:return: returns the identity map of dimension dimxXxYxZ """ dim = len(sz) if dim == 1: id = np.mgrid[0:sz[0]] elif dim == 2: id = np.mgrid[0:sz[0], 0:sz[1]] elif dim == 3: id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]] else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') # now get it into range [0,(sz-1)*spacing]^d id = np.array(id.astype(dtype)) if dim == 1: id = id.reshape(1, sz[0]) # add a dummy first index for d in range(dim): id[d] *= spacing[d] if sz[d]%2==0: #even id[d] -= spacing[d]*(sz[d]//2) else: #odd id[d] -= spacing[d]*((sz[d]+1)//2) # and now store it in a dim+1 array if dim == 1: idnp = np.zeros([1, sz[0]], dtype=dtype) idnp[0, :] = id[0] elif dim == 2: idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype) idnp[0, :, :] = id[0] idnp[1, :, :] = id[1] elif dim == 3: idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype) idnp[0, :, :, :] = id[0] idnp[1, :, :, :] = id[1] idnp[2, :, :, :] = id[2] else: raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map') return idnp # # def centered_min_normalized_identity_map(sz, spacing, dtype='float32'): # """ # Returns a centered identity map (with 0 in the middle) if the sz is odd # Otherwise shifts everything by 0.5*spacing # # :param sz: just the spatial dimensions, i.e., XxYxZ # :param spacing: list with spacing information [sx,sy,sz] # :param dtype: numpy data-type ('float32', 'float64', ...) # :return: returns the identity map of dimension dimxXxYxZ # """ # dim = len(sz) # if dim == 1: # id = np.mgrid[0:sz[0]] # elif dim == 2: # id = np.mgrid[0:sz[0], 0:sz[1]] # elif dim == 3: # id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]] # else: # raise ValueError('Only dimensions 1-3 are currently supported for the identity map') # # min_spacing = np.min(spacing) # spacing_ratio = spacing/min_spacing # # # # now get it into range [0,(sz-1)*spacing]^d # id = np.array(id.astype(dtype)) # if dim == 1: # id = id.reshape(1, sz[0]) # add a dummy first index # # for d in range(dim): # id[d] *= spacing[d] # if sz[d]%2==0: # #even # id[d] -= spacing[d]*(sz[d]//2) # else: # #odd # id[d] -= spacing[d]*((sz[d]+1)//2) # # # and now store it in a dim+1 array and rescale by the ratio # if dim == 1: # idnp = np.zeros([1, sz[0]], dtype=dtype) # idnp[0, :] = id[0] * spacing_ratio[0] # elif dim == 2: # idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype) # idnp[0, :, :] = id[0] * spacing_ratio[0] # idnp[1, :, :] = id[1] * spacing_ratio[1] # elif dim == 3: # idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype) # idnp[0, :, :, :] = id[0] * spacing_ratio[0] # idnp[1, :, :, :] = id[1] * spacing_ratio[1] # idnp[2, :, :, :] = id[2] * spacing_ratio[2] # else: # raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map') # # return idnp # # def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True): # if do_transform: # min_spacing = np.min(spacing) # spacing_ratio =min_spacing/spacing # dim = spacing.size # spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio)) # sp_sz = [1]+[dim] +[1]*dim # spacing_ratio_t = spacing_ratio_t.view(*sp_sz) # new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list] # else: # new_var_list = var_list # return new_var_list # def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True): # if do_transform: # min_spacing = np.min(spacing) # spacing_ratio =spacing/min_spacing # dim = spacing.size # spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio)) # sp_sz = [1]+[dim] +[1]*dim # 
spacing_ratio_t = spacing_ratio_t.view(*sp_sz) # new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list] # else: # new_var_list = var_list # return new_var_list # def identity_map(sz,spacing,dtype='float32'): """ Returns an identity map. :param sz: just the spatial dimensions, i.e., XxYxZ :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) :return: returns the identity map of dimension dimxXxYxZ """ dim = len(sz) if dim==1: id = np.mgrid[0:sz[0]] elif dim==2: id = np.mgrid[0:sz[0],0:sz[1]] elif dim==3: id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]] else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') # now get it into range [0,(sz-1)*spacing]^d id = np.array( id.astype(dtype) ) if dim==1: id = id.reshape(1,sz[0]) # add a dummy first index for d in range(dim): id[d]*=spacing[d] #id[d]*=2./(sz[d]-1) #id[d]-=1. # and now store it in a dim+1 array if dim==1: idnp = np.zeros([1, sz[0]], dtype=dtype) idnp[0,:] = id[0] elif dim==2: idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype) idnp[0,:, :] = id[0] idnp[1,:, :] = id[1] elif dim==3: idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype) idnp[0,:, :, :] = id[0] idnp[1,:, :, :] = id[1] idnp[2,:, :, :] = id[2] else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') return idnp def omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05): """generate a smooth weight mask for the omt """ dim = len(img_sz) mask_sz = [1,1]+ list(img_sz) mask = AdaptVal(torch.ones(*mask_sz))*mask_value if dim ==2: mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1 elif dim==3: mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1 sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing) mask = sm.smooth(mask) return mask.detach() def momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2): """generate a smooth weight mask for the omt """ dim = len(img_sz) mask_sz = [1,1]+ list(img_sz) mask = AdaptVal(torch.zeros(*mask_sz)) if dim ==2: mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1 elif dim==3: mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1 sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing) mask = sm.smooth(mask) if pow ==2: mask = mask**2 if pow ==3: mask = mask*mask*mask return mask # def compute_omt_const(stds,param,dim): # omt_power = param['forward_model']['smoother']['omt_power'] # omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty'] # min_std = torch.min(stds) # max_std = torch.max(stds) # omt_const = torch.abs(torch.log(max_std/stds))**omt_power # omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power) # omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2) # sz = [1]+ [len(stds)] +[1]*(dim+1) # return omt_const.view(*sz) def get_single_gaussian_smoother(gaussian_std,sz,spacing): s_m_params = pars.ParameterDict() s_m_params['smoother']['type'] = 'gaussian' s_m_params['smoother']['gaussian_std'] = gaussian_std s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params) return s_m def get_warped_label_map(label_map, phi, spacing, sched='nn'): if sched == 'nn': warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing,spline_order=0,zero_boundary=True) # check if here should be add assert assert abs(torch.sum(warped_label_map.data -warped_label_map.data.round()))< 0.1, "nn 
interpolation is not precise" else: raise ValueError(" the label warping method is not implemented") return warped_label_map def t2np(v): """ Takes a torch array and returns it as a numpy array on the cpu :param v: torch array :return: numpy array """ return (v.detach()).cpu().numpy() def cxyz_to_xyzc( v ): """ Takes a torch array and returns it as a numpy array on the cpu :param v: torch array :return: numpy array """ dim = len(v.shape)-2 if dim ==2: v = v.permute(0,2,3,1) if dim ==3: v = v.permute(0,2,3,4,1) return v def get_scalar(v): if isinstance(v, float): return v elif isinstance(v, np.ndarray) and v.size == 1: return float(v) def checkNan(x): """" input should be list of Variable """ return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x] def noramlized_spacing_to_smallest(spacing): min_sp = np.min(spacing) spacing[spacing>min_sp]=min_sp return spacing def time_warped_function(f): def __time_warped_function(input=None): start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) start.record() output = f(input) end.record() # Waits for everything to finish running torch.cuda.synchronize() print(start.elapsed_time(end)) return output return __time_warped_function def interoplate_boundary_right(tensor): dim = len(tensor.shape)-2 if dim==1: tensor[:,:,-1]= tensor[:,:-2]+ tensor[:,:-2]-tensor[:,:-3] if dim==2: tensor[:, :, -1,:] = tensor[:, :,-2,:] + tensor[:, :,-2,:] - tensor[:, :,-3,:] tensor[:, :, :,-1] = tensor[:, :, :,-2] + tensor[:, :, :,-2] - tensor[:, :, :,-3] if dim==3: tensor[:, :,:, -1,:, :] = tensor[:, :, -2, :] + tensor[:, :, -2, :] - tensor[:, :, -3, :] tensor[:, :,:, :, -1, :] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3] tensor[:, :,:, :, :, -1] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3] def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None): """ :param I: B C X Y Z :param spacing: spx spy spz :param desiredSize: B C X Y Z :param spline_order: :param zero_boundary: :param identity_map: :return: """ if spacing is None: img_sz = I.shape[2:] spacing = 1. / (np.array(img_sz) - 1) if identity_map is not None: # todo will remove, currently fix for symmetric training if I.shape[0] != identity_map.shape[0]: n_batch = I.shape[0] desiredSize = desiredSize.copy() desiredSize[0] = n_batch identity_map = identity_map[:n_batch] resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order, zero_boundary=zero_boundary, identity_map=identity_map) return resampled def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None): """ Resample an image to a given desired size :param I: Input image (expected to be of BxCxXxYxZ format) :param spacing: array describing the spatial spacing :param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D) :return: returns a tuple: the downsampled image, the new spacing after downsampling """ desiredSize = desiredSize[2:] is_numpy = False if not isinstance(I, torch.Tensor): I = torch.Tensor(I) is_numpy = True sz = np.array(list(I.size())) # check that the batch size and the number of channels is the same nrOfI = sz[0] nrOfC = sz[1] desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize)) newspacing = spacing * ((sz[2::].astype('float') - 1.) 
/ ( desiredSizeNC[2::].astype('float') - 1.)) ########################################### if identity_map is not None: idDes = identity_map else: idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing))) # now use this map for resampling ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary) return ID if not is_numpy else ID.numpy(), newspacing def get_res_size_from_size(sz, factor): """ Returns the corresponding low-res size from a (high-res) sz :param sz: size (high-res) :param factor: low-res factor (needs to be <1) :return: low res size """ if (factor is None): print('WARNING: Could not compute low_res_size as factor was ' + str(factor)) return sz else: lowResSize = np.array(sz) if not isinstance(factor, list): lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16') else: lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16') if lowResSize[-1] % 2 != 0: lowResSize[-1] -= 1 print( '\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n') return lowResSize def get_res_spacing_from_spacing(spacing, sz, lowResSize): """ Computes spacing for the low-res parameterization from image spacing :param spacing: image spacing :param sz: size of image :param lowResSize: size of low re parameterization :return: returns spacing of low res parameterization """ # todo: check that this is the correct way of doing it return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1) ########################################## Adaptive Net ###################################################3 def space_normal(tensors, std=0.1): """ space normalize for the net kernel :param tensor: :param mean: :param std: :return: """ if isinstance(tensors, Variable): space_normal(tensors.data, std=std) return tensors for n in range(tensors.size()[0]): for c in range(tensors.size()[1]): dim = tensors[n][c].dim() sz = tensors[n][c].size() mus = np.zeros(dim) stds = std * np.ones(dim) print('WARNING: What should the spacing be here? 
Needed for new identity map code') raise ValueError('Double check the spacing here before running this code') spacing = np.ones(dim) centered_id = centered_identity_map(sz,spacing) g = compute_normalized_gaussian(centered_id, mus, stds) tensors[n,c] = torch.from_numpy(g) def weights_init_uniform(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.uniform(m.weight.data, 0.038, 0.042) elif classname.find('Linear') != -1: init.uniform(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_normal(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: space_normal(m.weight.data) elif classname.find('Linear') != -1: space_normal(m.weight.data) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_rd_normal(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.normal(m.weight.data) elif classname.find('Linear') != -1: init.normal(m.weight.data) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_xavier(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.xavier_normal(m.weight.data, gain=1) elif classname.find('Linear') != -1: init.xavier_normal(m.weight.data, gain=1) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_kaiming(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('Linear') != -1: init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_orthogonal(m): classname = m.__class__.__name__ print(classname) if classname.find('Conv') != -1: init.orthogonal(m.weight.data, gain=1) elif classname.find('Linear') != -1: init.orthogonal(m.weight.data, gain=1) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def init_weights(net, init_type='normal'): print('initialization method [%s]' % init_type) if init_type == 'rd_normal': net.apply(weights_init_rd_normal) elif init_type == 'normal': net.apply(weights_init_normal) elif init_type == 'uniform': net.apply(weights_init_uniform) elif init_type == 'xavier': net.apply(weights_init_xavier) elif init_type == 'kaiming': net.apply(weights_init_kaiming) elif init_type == 'orthogonal': net.apply(weights_init_orthogonal) else: raise NotImplementedError('initialization method [%s] is not implemented' % init_type) def organize_data(moving, target, sched='depth_concat'): if sched == 'depth_concat': input = torch.cat([moving, target], dim=1) elif sched == 'width_concat': input = torch.cat((moving, target), dim=3) elif sched == 'list_concat': input = torch.cat((moving.unsqueeze(0),target.unsqueeze(0)),dim=0) elif sched == 'difference': input = moving-target return input def bh(m,gi,go): print("Grad Input") print((torch.sum(gi[0].data), torch.sum(gi[1].data))) print("Grad Output") print(torch.sum(go[0].data)) return gi[0], gi[1], gi[2] class ConvBnRel(nn.Module): # conv + bn (optional) + relu def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', 
same_padding=False, bn=False, reverse=False, bias=False): super(ConvBnRel, self).__init__() padding = int((kernel_size - 1) // 2) if same_padding else 0 if not reverse: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) else: self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding,bias=bias) #y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta #When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants. self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None if active_unit == 'relu': self.active_unit = nn.ReLU(inplace=True) elif active_unit == 'elu': self.active_unit = nn.ELU(inplace=True) else: self.active_unit = None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.active_unit is not None: x = self.active_unit(x) return x class FcRel(nn.Module): # fc+ relu(option) def __init__(self, in_features, out_features, active_unit='relu'): super(FcRel, self).__init__() self.fc = nn.Linear(in_features, out_features) if active_unit == 'relu': self.active_unit = nn.ReLU(inplace=True) elif active_unit == 'elu': self.active_unit = nn.ELU(inplace=True) else: self.active_unit = None def forward(self, x): x = self.fc(x) if self.active_unit is not None: x = self.active_unit(x) return x class AdpSmoother(nn.Module): """ a simple conv. implementation, generate displacement field """ def __init__(self, inputs, dim, net_sched=None): # settings should include [using_bias, using bn, using elu] # inputs should be a dictionary could contain ['s'],['t'] super(AdpSmoother, self).__init__() self.dim = dim self.net_sched = 'm_only' self.s = inputs['s'].detach() self.t = inputs['t'].detach() self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())]*dim, 1), requires_grad = True) self.get_net_sched() #self.net.register_backward_hook(bh) def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False , kernel_size=5): # return the self.net and self.net_input padding_size = (kernel_size-1)//2 if self.net_sched == 'm_only': if debugging: self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False,groups=2) else: net = \ [ConvBnRel(self.dim, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20,self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched =='m_f_s': if debugging: self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim +1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched == 'm_d_s': if debugging: self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim + 1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched == 'm_f_s_t': if debugging: self.net = nn.Conv2d(self.dim+2, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, 
same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched == 'm_d_s_f_t': if debugging: self.net = nn.Conv2d(self.dim + 2, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) def prepare_data(self, m, new_s): input=None if self.net_sched == 'm_only': input = m elif self.net_sched == 'm_f_s': input = organize_data(m,self.s,sched='depth_concat') elif self.net_sched == 'm_d_s': input = organize_data(m, new_s, sched='depth_concat') elif self.net_sched == 'm_f_s_t': input = organize_data(m, self.s, sched='depth_concat') input = organize_data(input, self.t, sched='depth_concat') elif self.net_sched == 'm_f_s_t': input = organize_data(m, self.s, sched='depth_concat') input = organize_data(input, self.t, sched='depth_concat') elif self.net_sched == 'm_d_s_f_t': input = organize_data(m, new_s, sched='depth_concat') input = organize_data(input, self.t, sched='depth_concat') return input def forward(self, m,new_s=None): m = m * self.mask input = self.prepare_data(m,new_s) x= input x = self.net(x) return x
[((47, 12, 47, 31), 'os.path.abspath', 'os.path.abspath', ({(47, 28, 47, 30): 'sf'}, {}), '(sf)', False, 'import os\n'), ((50, 12, 50, 31), 'os.path.abspath', 'os.path.abspath', ({(50, 28, 50, 30): 'tf'}, {}), '(tf)', False, 'import os\n'), ((51, 19, 51, 42), 'os.path.splitext', 'os.path.splitext', ({(51, 36, 51, 41): 'abs_t'}, {}), '(abs_t)', False, 'import os\n'), ((55, 7, 55, 43), 'os.path.isfile', 'os.path.isfile', ({(55, 22, 55, 42): 'abs_t_with_right_ext'}, {}), '(abs_t_with_right_ext)', False, 'import os\n'), ((63, 4, 63, 42), 'os.symlink', 'os.symlink', ({(63, 15, 63, 20): 'abs_s', (63, 21, 63, 41): 'abs_t_with_right_ext'}, {}), '(abs_s, abs_t_with_right_ext)', False, 'import os\n'), ((229, 13, 229, 33), 'torch.zeros_like', 'torch.zeros_like', ({(229, 30, 229, 32): 'Ab'}, {}), '(Ab)', False, 'import torch\n'), ((263, 20, 263, 40), 'torch.zeros_like', 'torch.zeros_like', ({(263, 37, 263, 39): 'Ab'}, {}), '(Ab)', False, 'import torch\n'), ((502, 14, 502, 32), 'mermaid.image_sampling.ResampleImage', 'IS.ResampleImage', ({}, {}), '()', True, 'import mermaid.image_sampling as IS\n'), ((578, 10, 578, 22), 'numpy.array', 'np.array', ({(578, 19, 578, 21): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((590, 10, 590, 22), 'numpy.array', 'np.array', ({(590, 19, 590, 21): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((612, 10, 612, 22), 'numpy.array', 'np.array', ({(612, 19, 612, 21): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((631, 10, 631, 22), 'numpy.array', 'np.array', ({(631, 19, 631, 21): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((633, 14, 633, 31), 'torch.empty', 'torch.empty', ({(633, 26, 633, 30): '*csz'}, {}), '(*csz)', False, 'import torch\n'), ((657, 10, 657, 22), 'numpy.array', 'np.array', ({(657, 19, 657, 21): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((1013, 13, 1013, 28), 'numpy.min', 'np.min', ({(1013, 20, 1013, 27): 'spacing'}, {}), '(spacing)', True, 'import numpy as np\n'), ((48, 12, 48, 35), 'os.path.splitext', 'os.path.splitext', ({(48, 29, 48, 34): 'abs_s'}, {}), '(abs_s)', False, 'import os\n'), ((56, 11, 56, 55), 'os.path.samefile', 'os.path.samefile', ({(56, 28, 56, 33): 'abs_s', (56, 34, 56, 54): 'abs_t_with_right_ext'}, {}), '(abs_s, abs_t_with_right_ext)', False, 'import os\n'), ((116, 31, 116, 43), 'numpy.array', 'np.array', ({(116, 40, 116, 42): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((232, 17, 232, 46), 'torch.inverse', 'torch.inverse', ({(232, 31, 232, 45): 'Ab[(n), :, :dim]'}, {}), '(Ab[(n), :, :dim])', False, 'import torch\n'), ((265, 19, 265, 58), 'torch.matmul', 'torch.matmul', ({(265, 32, 265, 44): 'Cd[(n), :, :dim]', (265, 45, 265, 57): 'Ab[(n), :, :dim]'}, {}), '(Cd[(n), :, :dim], Ab[(n), :, :dim])', False, 'import torch\n'), ((492, 15, 492, 27), 'numpy.array', 'np.array', ({(492, 24, 492, 26): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((494, 21, 494, 33), 'numpy.array', 'np.array', ({(494, 30, 494, 32): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((644, 14, 644, 28), 'torch.nn.parameter.Parameter', 'Parameter', ({(644, 24, 644, 27): 'tmp'}, {}), '(tmp)', False, 'from torch.nn.parameter import Parameter\n'), ((674, 13, 674, 53), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((701, 13, 701, 50), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((752, 15, 752, 48), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((889, 15, 889, 48), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((923, 20, 923, 41), 'torch.zeros', 
'torch.zeros', ({(923, 32, 923, 40): '*mask_sz'}, {}), '(*mask_sz)', False, 'import torch\n'), ((1020, 16, 1020, 52), 'torch.cuda.Event', 'torch.cuda.Event', (), '', False, 'import torch\n'), ((1021, 14, 1021, 50), 'torch.cuda.Event', 'torch.cuda.Event', (), '', False, 'import torch\n'), ((1028, 8, 1028, 32), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ({}, {}), '()', False, 'import torch\n'), ((1084, 12, 1084, 27), 'torch.Tensor', 'torch.Tensor', ({(1084, 25, 1084, 26): 'I'}, {}), '(I)', False, 'import torch\n'), ((1115, 21, 1115, 33), 'numpy.array', 'np.array', ({(1115, 30, 1115, 32): 'sz'}, {}), '(sz)', True, 'import numpy as np\n'), ((1178, 8, 1178, 49), 'torch.nn.init.uniform', 'init.uniform', ({(1178, 21, 1178, 34): 'm.weight.data', (1178, 36, 1178, 41): '(0.038)', (1178, 43, 1178, 48): '(0.042)'}, {}), '(m.weight.data, 0.038, 0.042)', True, 'import torch.nn.init as init\n'), ((1200, 8, 1200, 34), 'torch.nn.init.normal', 'init.normal', ({(1200, 20, 1200, 33): 'm.weight.data'}, {}), '(m.weight.data)', True, 'import torch.nn.init as init\n'), ((1211, 8, 1211, 49), 'torch.nn.init.xavier_normal', 'init.xavier_normal', (), '', True, 'import torch.nn.init as init\n'), ((1223, 8, 1223, 62), 'torch.nn.init.kaiming_normal', 'init.kaiming_normal', (), '', True, 'import torch.nn.init as init\n'), ((1235, 8, 1235, 46), 'torch.nn.init.orthogonal', 'init.orthogonal', (), '', True, 'import torch.nn.init as init\n'), ((1263, 16, 1263, 50), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((1277, 10, 1277, 31), 'torch.sum', 'torch.sum', ({(1277, 20, 1277, 30): 'go[0].data'}, {}), '(go[0].data)', False, 'import torch\n'), ((1313, 18, 1313, 54), 'torch.nn.Linear', 'nn.Linear', ({(1313, 28, 1313, 39): 'in_features', (1313, 41, 1313, 53): 'out_features'}, {}), '(in_features, out_features)', True, 'import torch.nn as nn\n'), ((60, 12, 60, 43), 'os.remove', 'os.remove', ({(60, 22, 60, 42): 'abs_t_with_right_ext'}, {}), '(abs_t_with_right_ext)', False, 'import os\n'), ((234, 30, 234, 63), 'torch.matmul', 'torch.matmul', ({(234, 43, 234, 49): 'tm_inv', (234, 51, 234, 62): 'Ab[(n), :, (dim)]'}, {}), '(tm_inv, Ab[(n), :, (dim)])', False, 'import torch\n'), ((267, 33, 267, 72), 'torch.matmul', 'torch.matmul', ({(267, 46, 267, 58): 'Cd[(n), :, :dim]', (267, 60, 267, 71): 'Ab[(n), :, (dim)]'}, {}), '(Cd[(n), :, :dim], Ab[(n), :, (dim)])', False, 'import torch\n'), ((480, 46, 480, 71), 'numpy.array', 'np.array', ({(480, 55, 480, 70): 'lowResSize[2:]'}, {}), '(lowResSize[2:])', True, 'import numpy as np\n'), ((636, 32, 636, 49), 'torch.sqrt', 'torch.sqrt', ({(636, 43, 636, 48): 'std_w'}, {}), '(std_w)', False, 'import torch\n'), ((676, 13, 676, 60), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((703, 13, 703, 56), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((755, 15, 755, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((892, 15, 892, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((909, 20, 909, 40), 'torch.ones', 'torch.ones', ({(909, 31, 909, 39): '*mask_sz'}, {}), '(*mask_sz)', False, 'import torch\n'), ((1137, 48, 1137, 73), 'numpy.array', 'np.array', ({(1137, 57, 1137, 72): 'lowResSize[2:]'}, {}), '(lowResSize[2:])', True, 'import numpy as np\n'), ((1164, 18, 1164, 31), 'numpy.zeros', 'np.zeros', ({(1164, 27, 1164, 30): 'dim'}, {}), '(dim)', True, 'import numpy as np\n'), ((1168, 22, 1168, 34), 'numpy.ones', 'np.ones', ({(1168, 30, 1168, 33): 'dim'}, {}), '(dim)', True, 'import numpy as np\n'), 
((1171, 27, 1171, 46), 'torch.from_numpy', 'torch.from_numpy', ({(1171, 44, 1171, 45): 'g'}, {}), '(g)', False, 'import torch\n'), ((1180, 8, 1180, 46), 'torch.nn.init.uniform', 'init.uniform', ({(1180, 21, 1180, 34): 'm.weight.data', (1180, 36, 1180, 39): '(0.0)', (1180, 41, 1180, 45): '(0.02)'}, {}), '(m.weight.data, 0.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1202, 8, 1202, 34), 'torch.nn.init.normal', 'init.normal', ({(1202, 20, 1202, 33): 'm.weight.data'}, {}), '(m.weight.data)', True, 'import torch.nn.init as init\n'), ((1213, 8, 1213, 49), 'torch.nn.init.xavier_normal', 'init.xavier_normal', (), '', True, 'import torch.nn.init as init\n'), ((1225, 8, 1225, 62), 'torch.nn.init.kaiming_normal', 'init.kaiming_normal', (), '', True, 'import torch.nn.init as init\n'), ((1237, 8, 1237, 46), 'torch.nn.init.orthogonal', 'init.orthogonal', (), '', True, 'import torch.nn.init as init\n'), ((1265, 16, 1265, 50), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((1275, 11, 1275, 32), 'torch.sum', 'torch.sum', ({(1275, 21, 1275, 31): 'gi[0].data'}, {}), '(gi[0].data)', False, 'import torch\n'), ((1275, 34, 1275, 55), 'torch.sum', 'torch.sum', ({(1275, 44, 1275, 54): 'gi[1].data'}, {}), '(gi[1].data)', False, 'import torch\n'), ((1288, 24, 1288, 109), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((1290, 24, 1290, 117), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (), '', True, 'import torch.nn as nn\n'), ((1293, 18, 1293, 83), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (), '', True, 'import torch.nn as nn\n'), ((1295, 31, 1295, 52), 'torch.nn.ReLU', 'nn.ReLU', (), '', True, 'import torch.nn as nn\n'), ((1315, 31, 1315, 52), 'torch.nn.ReLU', 'nn.ReLU', (), '', True, 'import torch.nn as nn\n'), ((480, 22, 480, 39), 'numpy.array', 'np.array', ({(480, 31, 480, 38): 'sz[2:]'}, {}), '(sz[2:])', True, 'import numpy as np\n'), ((678, 13, 678, 67), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((705, 13, 705, 62), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((759, 15, 759, 62), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((896, 15, 896, 61), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((1060, 24, 1060, 40), 'numpy.array', 'np.array', ({(1060, 33, 1060, 39): 'img_sz'}, {}), '(img_sz)', True, 'import numpy as np\n'), ((1137, 22, 1137, 39), 'numpy.array', 'np.array', ({(1137, 31, 1137, 38): 'sz[2:]'}, {}), '(sz[2:])', True, 'import numpy as np\n'), ((1165, 25, 1165, 37), 'numpy.ones', 'np.ones', ({(1165, 33, 1165, 36): 'dim'}, {}), '(dim)', True, 'import numpy as np\n'), ((1182, 8, 1182, 46), 'torch.nn.init.uniform', 'init.uniform', ({(1182, 21, 1182, 34): 'm.weight.data', (1182, 36, 1182, 39): '(1.0)', (1182, 41, 1182, 45): '(0.02)'}, {}), '(m.weight.data, 1.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1183, 8, 1183, 39), 'torch.nn.init.constant', 'init.constant', ({(1183, 22, 1183, 33): 'm.bias.data', (1183, 35, 1183, 38): '(0.0)'}, {}), '(m.bias.data, 0.0)', True, 'import torch.nn.init as init\n'), ((1193, 8, 1193, 46), 'torch.nn.init.uniform', 'init.uniform', ({(1193, 21, 1193, 34): 'm.weight.data', (1193, 36, 1193, 39): '(1.0)', (1193, 41, 1193, 45): '(0.02)'}, {}), '(m.weight.data, 1.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1194, 8, 1194, 39), 'torch.nn.init.constant', 'init.constant', ({(1194, 22, 1194, 33): 'm.bias.data', (1194, 35, 1194, 38): '(0.0)'}, {}), '(m.bias.data, 0.0)', True, 'import torch.nn.init as init\n'), ((1204, 8, 1204, 
46), 'torch.nn.init.uniform', 'init.uniform', ({(1204, 21, 1204, 34): 'm.weight.data', (1204, 36, 1204, 39): '(1.0)', (1204, 41, 1204, 45): '(0.02)'}, {}), '(m.weight.data, 1.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1205, 8, 1205, 39), 'torch.nn.init.constant', 'init.constant', ({(1205, 22, 1205, 33): 'm.bias.data', (1205, 35, 1205, 38): '(0.0)'}, {}), '(m.bias.data, 0.0)', True, 'import torch.nn.init as init\n'), ((1215, 8, 1215, 46), 'torch.nn.init.uniform', 'init.uniform', ({(1215, 21, 1215, 34): 'm.weight.data', (1215, 36, 1215, 39): '(1.0)', (1215, 41, 1215, 45): '(0.02)'}, {}), '(m.weight.data, 1.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1216, 8, 1216, 39), 'torch.nn.init.constant', 'init.constant', ({(1216, 22, 1216, 33): 'm.bias.data', (1216, 35, 1216, 38): '(0.0)'}, {}), '(m.bias.data, 0.0)', True, 'import torch.nn.init as init\n'), ((1227, 8, 1227, 46), 'torch.nn.init.uniform', 'init.uniform', ({(1227, 21, 1227, 34): 'm.weight.data', (1227, 36, 1227, 39): '(1.0)', (1227, 41, 1227, 45): '(0.02)'}, {}), '(m.weight.data, 1.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1228, 8, 1228, 39), 'torch.nn.init.constant', 'init.constant', ({(1228, 22, 1228, 33): 'm.bias.data', (1228, 35, 1228, 38): '(0.0)'}, {}), '(m.bias.data, 0.0)', True, 'import torch.nn.init as init\n'), ((1239, 8, 1239, 46), 'torch.nn.init.uniform', 'init.uniform', ({(1239, 21, 1239, 34): 'm.weight.data', (1239, 36, 1239, 39): '(1.0)', (1239, 41, 1239, 45): '(0.02)'}, {}), '(m.weight.data, 1.0, 0.02)', True, 'import torch.nn.init as init\n'), ((1240, 8, 1240, 39), 'torch.nn.init.constant', 'init.constant', ({(1240, 22, 1240, 33): 'm.bias.data', (1240, 35, 1240, 38): '(0.0)'}, {}), '(m.bias.data, 0.0)', True, 'import torch.nn.init as init\n'), ((1297, 31, 1297, 51), 'torch.nn.ELU', 'nn.ELU', (), '', True, 'import torch.nn as nn\n'), ((1317, 31, 1317, 51), 'torch.nn.ELU', 'nn.ELU', (), '', True, 'import torch.nn as nn\n'), ((1349, 27, 1349, 101), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((1356, 27, 1356, 46), 'torch.nn.Sequential', 'nn.Sequential', ({(1356, 41, 1356, 45): '*net'}, {}), '(*net)', True, 'import torch.nn as nn\n'), ((342, 20, 342, 49), 'numpy.power', 'np.power', ({(342, 29, 342, 44): 'X[(0), :] - mu[0]', (342, 46, 342, 48): '2.0'}, {}), '(X[(0), :] - mu[0], 2.0)', True, 'import numpy as np\n'), ((342, 53, 342, 73), 'numpy.power', 'np.power', ({(342, 62, 342, 68): 'sig[0]', (342, 70, 342, 72): '2.0'}, {}), '(sig[0], 2.0)', True, 'import numpy as np\n'), ((1360, 27, 1360, 108), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((1367, 27, 1367, 46), 'torch.nn.Sequential', 'nn.Sequential', ({(1367, 41, 1367, 45): '*net'}, {}), '(*net)', True, 'import torch.nn as nn\n'), ((347, 21, 347, 52), 'numpy.power', 'np.power', ({(347, 30, 347, 47): 'X[(1), :, :] - mu[1]', (347, 49, 347, 51): '2.0'}, {}), '(X[(1), :, :] - mu[1], 2.0)', True, 'import numpy as np\n'), ((495, 36, 495, 53), 'numpy.array', 'np.array', ({(495, 45, 495, 52): 'sz[2:]'}, {}), '(sz[2:])', True, 'import numpy as np\n'), ((1355, 28, 1355, 40), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((1371, 27, 1371, 108), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((1378, 27, 1378, 46), 'torch.nn.Sequential', 'nn.Sequential', ({(1378, 41, 1378, 45): '*net'}, {}), '(*net)', True, 'import torch.nn as nn\n'), ((121, 44, 121, 63), 'numpy.finfo', 'np.finfo', ({(121, 53, 121, 62): '"""float32"""'}, {}), 
"('float32')", True, 'import numpy as np\n'), ((122, 44, 122, 63), 'numpy.finfo', 'np.finfo', ({(122, 53, 122, 62): '"""float32"""'}, {}), "('float32')", True, 'import numpy as np\n'), ((346, 20, 346, 47), 'numpy.power', 'np.power', ({(346, 29, 346, 43): 'X[(0), :, :] - mu[0]', (346, 44, 346, 46): '2.0'}, {}), '(X[(0), :, :] - mu[0], 2.0)', True, 'import numpy as np\n'), ((346, 51, 346, 70), 'numpy.power', 'np.power', ({(346, 60, 346, 66): 'sig[0]', (346, 67, 346, 69): '2.0'}, {}), '(sig[0], 2.0)', True, 'import numpy as np\n'), ((347, 60, 347, 80), 'numpy.power', 'np.power', ({(347, 69, 347, 75): 'sig[1]', (347, 77, 347, 79): '2.0'}, {}), '(sig[1], 2.0)', True, 'import numpy as np\n'), ((353, 20, 353, 54), 'numpy.power', 'np.power', ({(353, 29, 353, 49): 'X[(2), :, :, :] - mu[2]', (353, 51, 353, 53): '2.0'}, {}), '(X[(2), :, :, :] - mu[2], 2.0)', True, 'import numpy as np\n'), ((1117, 40, 1117, 56), 'numpy.array', 'np.array', ({(1117, 49, 1117, 55): 'sz[2:]'}, {}), '(sz[2:])', True, 'import numpy as np\n'), ((1119, 40, 1119, 56), 'numpy.array', 'np.array', ({(1119, 49, 1119, 55): 'sz[2:]'}, {}), '(sz[2:])', True, 'import numpy as np\n'), ((1119, 59, 1119, 75), 'numpy.array', 'np.array', ({(1119, 68, 1119, 74): 'factor'}, {}), '(factor)', True, 'import numpy as np\n'), ((1366, 28, 1366, 40), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((1382, 27, 1382, 108), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((1389, 27, 1389, 46), 'torch.nn.Sequential', 'nn.Sequential', ({(1389, 41, 1389, 45): '*net'}, {}), '(*net)', True, 'import torch.nn as nn\n'), ((125, 44, 125, 63), 'numpy.finfo', 'np.finfo', ({(125, 53, 125, 62): '"""float64"""'}, {}), "('float64')", True, 'import numpy as np\n'), ((126, 44, 126, 63), 'numpy.finfo', 'np.finfo', ({(126, 53, 126, 62): '"""float64"""'}, {}), "('float64')", True, 'import numpy as np\n'), ((352, 20, 352, 54), 'numpy.power', 'np.power', ({(352, 29, 352, 49): 'X[(1), :, :, :] - mu[1]', (352, 51, 352, 53): '2.0'}, {}), '(X[(1), :, :, :] - mu[1], 2.0)', True, 'import numpy as np\n'), ((353, 62, 353, 82), 'numpy.power', 'np.power', ({(353, 71, 353, 77): 'sig[2]', (353, 79, 353, 81): '2.0'}, {}), '(sig[2], 2.0)', True, 'import numpy as np\n'), ((1377, 28, 1377, 40), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((1392, 27, 1392, 110), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((1399, 27, 1399, 46), 'torch.nn.Sequential', 'nn.Sequential', ({(1399, 41, 1399, 45): '*net'}, {}), '(*net)', True, 'import torch.nn as nn\n'), ((129, 44, 129, 63), 'numpy.finfo', 'np.finfo', ({(129, 53, 129, 62): '"""float16"""'}, {}), "('float16')", True, 'import numpy as np\n'), ((130, 44, 130, 63), 'numpy.finfo', 'np.finfo', ({(130, 53, 130, 62): '"""float16"""'}, {}), "('float16')", True, 'import numpy as np\n'), ((351, 20, 351, 54), 'numpy.power', 'np.power', ({(351, 29, 351, 49): 'X[(0), :, :, :] - mu[0]', (351, 51, 351, 53): '2.0'}, {}), '(X[(0), :, :, :] - mu[0], 2.0)', True, 'import numpy as np\n'), ((351, 62, 351, 82), 'numpy.power', 'np.power', ({(351, 71, 351, 77): 'sig[0]', (351, 79, 351, 81): '2.0'}, {}), '(sig[0], 2.0)', True, 'import numpy as np\n'), ((352, 62, 352, 82), 'numpy.power', 'np.power', ({(352, 71, 352, 77): 'sig[1]', (352, 79, 352, 81): '2.0'}, {}), '(sig[1], 2.0)', True, 'import numpy as np\n'), ((1388, 28, 1388, 40), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((1398, 28, 1398, 40), 
'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n')]
fmamashli/mne-python
examples/io/plot_read_evoked.py
52f064415e7c9fa8fe243d22108dcdf3d86505b9
""" ================================== Reading and writing an evoked file ================================== This script shows how to read and write evoked datasets. """ # Author: Alexandre Gramfort <[email protected]> # # License: BSD (3-clause) from mne import read_evokeds from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname = data_path + '/MEG/sample/sample_audvis-ave.fif' # Reading condition = 'Left Auditory' evoked = read_evokeds(fname, condition=condition, baseline=(None, 0), proj=True) ############################################################################### # Show result as a butterfly plot: # By using exclude=[] bad channels are not excluded and are shown in red evoked.plot(exclude=[], time_unit='s') # Show result as a 2D image (x: time, y: channels, color: amplitude) evoked.plot_image(exclude=[], time_unit='s') ############################################################################### # Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked # responses to a file.
[((17, 12, 17, 30), 'mne.datasets.sample.data_path', 'sample.data_path', ({}, {}), '()', False, 'from mne.datasets import sample\n'), ((23, 9, 24, 32), 'mne.read_evokeds', 'read_evokeds', (), '', False, 'from mne import read_evokeds\n')]
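A minimal usage sketch for the example above, kept separate from the file itself: it completes the read/write round trip that the closing comment points to. It assumes the MNE sample dataset is available (as the example does); the output file name is a hypothetical choice.

# Hedged sketch: read/write round trip for an evoked file (sample dataset assumed present).
from mne import read_evokeds
from mne.datasets import sample

fname = str(sample.data_path()) + '/MEG/sample/sample_audvis-ave.fif'
evoked = read_evokeds(fname, condition='Left Auditory', baseline=(None, 0), proj=True)

out_fname = 'sample_audvis_copy-ave.fif'   # hypothetical output name
evoked.save(out_fname)                     # mne.write_evokeds(out_fname, evoked) is equivalent
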
lukaszgo1/nvda
source/monkeyPatches/__init__.py
38a2efd1e1bff7db4471cb7afa03ab1590b7adef
# A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2021 NV Access Limited # This file is covered by the GNU General Public License. # See the file COPYING for more details. from . import wxMonkeyPatches applyWxMonkeyPatches = wxMonkeyPatches.apply def applyMonkeyPatches(): # Apply several monkey patches to comtypes # F401 - imported but unused: Patches are applied during import from . import comtypesMonkeyPatches # noqa: F401 # Apply patches to Enum, prevent cyclic references on ValueError during construction from . import enumPatches enumPatches.replace__new__()
[]
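The module above is glue: one patch set is applied as a side effect of importing comtypesMonkeyPatches, the other through an explicit enumPatches.replace__new__() call. A small illustrative stand-in of that second style follows; Widget, replace_label and apply_monkey_patches are invented names, not NVDA code.

# Illustrative sketch only (not NVDA's actual patches): swap a method at runtime
# while keeping a reference to the original, the same shape as replace__new__().
class Widget:                      # hypothetical patch target
    def label(self):
        return "label"

_original_label = Widget.label

def replace_label():
    def patched_label(self):
        return _original_label(self).upper()   # delegate to the original, then adjust
    Widget.label = patched_label

def apply_monkey_patches():
    replace_label()

apply_monkey_patches()
assert Widget().label() == "LABEL"
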
nxtreaming/yt-dlp
yt_dlp/extractor/ninenow.py
385ffb467b2285e85a2a5495b90314ba1f8e0700
from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, float_or_none, smuggle_url, str_or_none, try_get, unified_strdate, unified_timestamp, ) class NineNowIE(InfoExtractor): IE_NAME = '9now.com.au' _VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)' _GEO_COUNTRIES = ['AU'] _TESTS = [{ # clip 'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc', 'md5': '17cf47d63ec9323e562c9957a968b565', 'info_dict': { 'id': '16801', 'ext': 'mp4', 'title': 'St. Kilda\'s Joey Montagna on the potential for a player\'s strike', 'description': 'Is a boycott of the NAB Cup "on the table"?', 'uploader_id': '4460760524001', 'upload_date': '20160713', 'timestamp': 1468421266, }, 'skip': 'Only available in Australia', }, { # episode 'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19', 'only_matching': True, }, { # DRM protected 'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1', 'only_matching': True, }, { # episode of series 'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3', 'info_dict': { 'id': '6249614030001', 'title': 'Episode 3', 'ext': 'mp4', 'season_number': 3, 'episode_number': 3, 'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.', 'uploader_id': '4460760524001', 'timestamp': 1619002200, 'upload_date': '20210421', }, 'expected_warnings': ['Ignoring subtitle tracks'], 'params':{ 'skip_download': True, } }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) page_data = self._parse_json(self._search_regex( r'window\.__data\s*=\s*({.*?});', webpage, 'page data', default='{}'), display_id, fatal=False) if not page_data: page_data = self._parse_json(self._parse_json(self._search_regex( r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;', webpage, 'page data'), display_id), display_id) for kind in ('episode', 'clip'): current_key = page_data.get(kind, {}).get( 'current%sKey' % kind.capitalize()) if not current_key: continue cache = page_data.get(kind, {}).get('%sCache' % kind, {}) if not cache: continue common_data = { 'episode': (cache.get(current_key) or list(cache.values())[0])[kind], 'season': (cache.get(current_key) or list(cache.values())[0]).get('season', None) } break else: raise ExtractorError('Unable to find video data') if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool): self.report_drm(display_id) brightcove_id = try_get( common_data, lambda x: x['episode']['video']['brightcoveId'], compat_str) or 'ref:%s' % common_data['episode']['video']['referenceId'] video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id title = try_get(common_data, lambda x: x['episode']['name'], compat_str) season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int) episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int) timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], compat_str)) release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], compat_str)) thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {} thumbnails = [{ 'id': 
thumbnail_id, 'url': thumbnail_url, 'width': int_or_none(thumbnail_id[1:]), } for thumbnail_id, thumbnail_url in thumbnails_data.items()] return { '_type': 'url_transparent', 'url': smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': self._GEO_COUNTRIES}), 'id': video_id, 'title': title, 'description': try_get(common_data, lambda x: x['episode']['description'], compat_str), 'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000), 'thumbnails': thumbnails, 'ie_key': 'BrightcoveNew', 'season_number': season_number, 'episode_number': episode_number, 'timestamp': timestamp, 'release_date': release_date, }
[]
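A hedged driver for the extractor above using yt-dlp's public API; the URL is taken from the extractor's own _TESTS and, per _GEO_COUNTRIES, the content is only reachable from Australia, so treat this as a sketch rather than something expected to succeed everywhere.

# Hedged sketch: run NineNowIE through the normal yt-dlp entry point.
from yt_dlp import YoutubeDL

url = 'https://www.9now.com.au/lego-masters/season-3/episode-3'   # from _TESTS above
with YoutubeDL({'skip_download': True, 'quiet': True}) as ydl:
    info = ydl.extract_info(url, download=False)
    # The extractor returns a url_transparent result pointing at BrightcoveNew;
    # after resolution the usual fields are available.
    print(info.get('title'), info.get('episode_number'))
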
mcarilli/apex
apex/fp16_utils/fused_weight_norm.py
766e36c9e10fe4efd847c3f77c3b38974c89eab1
import torch from torch.autograd import Variable from torch.autograd.function import Function, once_differentiable import apex_C def check_contig_cuda(tensors, names): for tensor, name in zip(tensors, names): if not tensor.is_contiguous(): raise RuntimeError(name+" with size {} is not contiguous" .format(tensor.size())) if not tensor.is_cuda: raise RuntimeError(name+".is_cuda = False." "Currently, only cuda tensors are supported.") class Fused_Weight_Norm(Function): """ Custom autograd function that implements weight norm, as presented in `<https://arxiv.org/abs/1602.07868>`_, along a tensor's slowest or fastest dimension using fused kernel launches for the forward and backward passes. Accepts fp32 or fp16 input; the output type will match the input type. Within the kernels, all calculations are performed in fp32 for numerical stability, regardless of input/output precision. """ @staticmethod def forward(ctx, input, g, dim=0): """ Args: input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous. g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``. dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported. Returns: Output tensor corresponding to **w** in the paper. Output type and precision will match type and precision of ``input``. """ # torch.cuda.nvtx.range_push("FusedNorm.forward, input.size() = {}" # .format(input.size())) check_contig_cuda((input,g),("input","g")) """ This is ok, new() treats a torch.Size object properly. No need to unpack with an asterisk via new(*input.size()). """ output = input.new(input.size()).contiguous() """ For output with size (slow, faster, faster, ...fastest), we want norms with size (slow, 1, 1, ...1), so that if you want retrieve norms and apply the same normalizing factors to another Tensor "t" with the same size as output, "t/norms" will broadcast each element of norms across the corresponding slowest dim of t. """ if dim == 0: norm_size = (output.size(0),) + (1,)*(output.dim() - 1) elif dim == output.dim() - 1: norm_size = (1,)*(output.dim() - 1) + (output.size(-1),) else: raise RuntimeError("Currently, Fused_Weight_Norm only supports first or last dimension.") norms = torch.cuda.FloatTensor(*norm_size).contiguous() """ Beware: If you call the following: norms = torch.cuda.FloatTensor(norm_size).contiguous() the constructor sees a tuple: FloatTensor( (output_size(0),1,1,...) ) and creates a 1D tensor with values from the tuple: [output_size(0),1,1,...]. """ apex_C.weight_norm_fwd(output, norms, input, g, dim) ctx.save_for_backward(input, g) # save_for_backward can only save input or output tensors, # use ctx state to save the norms and dimension: ctx.norms = norms ctx.dim = dim return output @staticmethod @once_differentiable def backward(ctx, grad_output): """ Args: grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance. Returns: Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_input``. """ check_contig_cuda((grad_output), ("grad_output")) savedInput, savedg = ctx.saved_tensors savedNorms = ctx.norms # We expect that these .contiguous() calls will be no-ops. They're present for safety. 
grad_output_contig = grad_output.contiguous() grad_input = grad_output_contig.new(grad_output.size()).contiguous() grad_g = savedg.new(savedg.size()).contiguous() apex_C.weight_norm_bwd(grad_input, grad_g, grad_output_contig, savedInput, savedg, savedNorms, ctx.dim) return grad_input, grad_g, None
[((74, 8, 74, 60), 'apex_C.weight_norm_fwd', 'apex_C.weight_norm_fwd', ({(74, 31, 74, 37): 'output', (74, 39, 74, 44): 'norms', (74, 46, 74, 51): 'input', (74, 53, 74, 54): 'g', (74, 56, 74, 59): 'dim'}, {}), '(output, norms, input, g, dim)', False, 'import apex_C\n'), ((105, 8, 111, 40), 'apex_C.weight_norm_bwd', 'apex_C.weight_norm_bwd', ({(105, 31, 105, 41): 'grad_input', (106, 32, 106, 38): 'grad_g', (107, 32, 107, 50): 'grad_output_contig', (108, 32, 108, 42): 'savedInput', (109, 32, 109, 38): 'savedg', (110, 32, 110, 42): 'savedNorms', (111, 32, 111, 39): 'ctx.dim'}, {}), '(grad_input, grad_g, grad_output_contig, savedInput,\n savedg, savedNorms, ctx.dim)', False, 'import apex_C\n'), ((64, 16, 64, 50), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', ({(64, 39, 64, 49): '*norm_size'}, {}), '(*norm_size)', False, 'import torch\n')]
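The docstrings above describe weight normalisation from Salimans & Kingma (arXiv:1602.07868). As a reference for what the fused kernels compute along dim 0, here is the unfused math in plain PyTorch; the tensor shapes (and a per-slice g) are illustrative assumptions, and the fused path itself additionally requires CUDA tensors plus the compiled apex_C extension.

# Hedged, unfused reference: w = g * v / ||v||, one norm per slice along dim 0.
# Shapes below are assumptions for illustration, not taken from the module above.
import torch

v = torch.randn(8, 16)                       # plays the role of `input` (v in the paper)
g = torch.randn(8, 1)                        # one scale per slowest-dim slice (assumed shape)
norms = v.norm(p=2, dim=1, keepdim=True)     # shape (8, 1), analogous to the kernel's `norms`
w = g * v / norms

# Fused path (roughly, and only if apex_C is built and the tensors live on the GPU):
# w = Fused_Weight_Norm.apply(v.cuda(), g.cuda(), 0)
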
gerardorf/taurus
bzt/modules/grinder.py
610872b4cf70af31d79a346db1aebd3466310d77
""" Module holds all stuff regarding Grinder tool usage Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import re import time from bzt import TaurusConfigError, ToolError from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader from bzt.modules.console import WidgetProvider, ExecutorWidget from bzt.modules.java import TaurusJavaHelper from bzt.requests_model import HTTPRequest from bzt.six import iteritems from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable): """ Grinder executor module """ def __init__(self): super(GrinderExecutor, self).__init__() self.script = None self.exec_id = "grinder-bzt-%s" % id(self) self.properties_file = None self.kpi_file = None self.cmd_line = None self.process = None self.end_time = None self.retcode = None self.java_helper = None def __write_base_props(self, fds): """ write base properties and base properties file contents to fds :param fds: fds :return: """ base_props_file = self.settings.get("properties-file") if base_props_file: fds.write("# Base Properies File Start: %s\n" % base_props_file) with open(base_props_file) as bpf: fds.write(bpf.read()) fds.write("# Base Properies File End: %s\n\n" % base_props_file) # base props base_props = self.settings.get("properties") if base_props: fds.write("# Base Properies Start\n") for key, val in iteritems(base_props): fds.write("%s=%s\n" % (key, val)) fds.write("# Base Properies End\n\n") def __write_scenario_props(self, fds, scenario): """ Write scenario props and scenario file props to fds :param fds: :param scenario: dict :return: """ script_props_file = scenario.get("properties-file") if script_props_file: fds.write("# Script Properies File Start: %s\n" % script_props_file) with open(script_props_file) as spf: fds.write(spf.read()) fds.write("# Script Properies File End: %s\n\n" % script_props_file) # scenario props local_props = scenario.get("properties") if local_props: fds.write("# Scenario Properies Start\n") for key, val in iteritems(local_props): fds.write("%s=%s\n" % (key, val)) fds.write("# Scenario Properies End\n\n") def __write_bzt_props(self, fds): """ Write bzt properties to fds :param fds: :return: """ fds.write("# BZT Properies Start\n") fds.write("grinder.hostID=%s\n" % self.exec_id) fds.write("grinder.script=%s\n" % self.script.replace(os.path.sep, "/")) fds.write("grinder.logDirectory=%s\n" % self.engine.artifacts_dir.replace(os.path.sep, "/")) load = self.get_load() if load.iterations or load.concurrency: fds.write("grinder.runs=%s\n" % load.iterations or 0) if load.concurrency: fds.write("grinder.threads=%s\n" % load.concurrency) if load.duration: fds.write("grinder.duration=%s\n" % int(load.duration * 1000)) 
fds.write("# taurus load values in case you need them\n") fds.write("taurus.concurrency=%s\n" % load.concurrency) fds.write("taurus.throughput=%s\n" % load.throughput) fds.write("taurus.ramp_up=%s\n" % load.ramp_up) fds.write("taurus.steps=%s\n" % load.steps) fds.write("taurus.hold_for=%s\n" % load.hold) fds.write("taurus.iterations=%s\n" % load.iterations) fds.write("# BZT Properies End\n") def prepare(self): self.stdout = open(self.engine.create_artifact("grinder", ".out"), "w") self.stderr = open(self.engine.create_artifact("grinder", ".err"), "w") self.install_required_tools() scenario = self.get_scenario() self.exec_id = self.label self.script = self.get_script_path() if not self.script: if "requests" in scenario: self.script = self.__scenario_from_requests() else: msg = "There must be a script file or requests for its generation " msg += "to run Grinder tool (%s)" % self.execution.get('scenario') raise TaurusConfigError(msg) self.properties_file = self.engine.create_artifact("grinder", ".properties") with open(self.properties_file, 'w') as fds: self.__write_base_props(fds) self.__write_scenario_props(fds, scenario) self.__write_bzt_props(fds) self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + "-kpi.log") self.reader = DataLogReader(self.kpi_file, self.log) self.reader.report_by_url = self.settings.get("report-by-url", False) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(self.reader) # add logback configurations used by worker processes (logback-worker.xml) self.env.add_path({"CLASSPATH": RESOURCES_DIR}, finish=True) self.env.add_path({"CLASSPATH": self.java_helper.tool_path}, finish=True) self.env.add_path({"CLASSPATH": self.settings.get("path", None)}, finish=True) self.cmd_line = ["java", "net.grinder.Grinder", self.properties_file] def startup(self): """ Should start the tool as fast as possible. """ self.env.set({"T_GRINDER_PREFIX": self.exec_id}) self.process = self.execute(self.cmd_line) def check(self): """ Checks if tool is still running. Also checks if resulting logs contains any data and throws exception otherwise. :return: bool :raise TaurusToolError: """ self.retcode = self.process.poll() if self.retcode is not None: if self.retcode != 0: raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode, self.get_error_diagnostics()) return True return False def shutdown(self): """ If tool is still running - let's stop it. 
""" shutdown_process(self.process, self.log) if self.start_time: self.end_time = time.time() self.log.debug("Grinder worked for %s seconds", self.end_time - self.start_time) def post_process(self): """ Collect data file artifact """ if self.kpi_file: self.engine.existing_artifact(self.kpi_file) super(GrinderExecutor, self).post_process() def __scenario_from_requests(self): """ Generate grinder scenario from requests :return: script """ script = self.engine.create_artifact("grinder_requests", ".py") builder = GrinderScriptBuilder(self.get_scenario(), self.log) builder.label = self.label builder.build_source_code() builder.save(script) return script def install_required_tools(self): grinder = self._get_tool(Grinder, config=self.settings) self.settings["path"] = grinder.tool_path self.java_helper = self._get_tool(TaurusJavaHelper) required_tools = [self._get_tool(TclLibrary), self._get_tool(JavaVM), self.java_helper, grinder] for tool in required_tools: if not tool.check_if_installed(): tool.install() def get_widget(self): if not self.widget: if self.script is not None: label = "Grinder: %s" % os.path.basename(self.script) else: label = None self.widget = ExecutorWidget(self, label) if self.get_load().ramp_up: self.widget.duration += self.get_load().ramp_up # because we have ramp-down equal to rampup return self.widget def resource_files(self): resource_files = [] script_file_path = self.get_script_path() if script_file_path: resource_files.append(script_file_path) prop_file = self.get_scenario().get("properties-file") if prop_file: resource_files.append(prop_file) return resource_files def get_error_diagnostics(self): diagnostics = [] if self.stdout is not None: with open(self.stdout.name) as fds: contents = fds.read().strip() if contents.strip(): diagnostics.append("Grinder STDOUT:\n" + contents) if self.stderr is not None: with open(self.stderr.name) as fds: contents = fds.read().strip() if contents.strip(): diagnostics.append("Grinder STDOUT:\n" + contents) return diagnostics class DataLogReader(ResultsReader): """ Class to read KPI from data log """ DELIMITER = "," DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes") def __init__(self, filename, parent_logger): super(DataLogReader, self).__init__() self.report_by_url = False self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.idx = {} self.partial_buffer = "" self.start_time = 0 self.end_time = 0 self.concurrency = 0 self.test_names = {} self.known_threads = set() def _read(self, last_pass=False): """ Generator method that returns next portion of data :param last_pass: """ self.log.debug("Reading grinder results...") self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass)) lnum = None start = time.time() for lnum, line in enumerate(self.lines): if not self.idx: if not line.startswith('data.'): self.__split(line) # to capture early test name records continue line = line[line.find(' '):] header_list = line.strip().split(self.DELIMITER) for _ix, field in enumerate(header_list): self.idx[field.strip()] = _ix data_fields, worker_id = self.__split(line) if not data_fields: self.log.debug("Skipping line: %s", line.strip()) continue yield self.parse_line(data_fields, worker_id, lnum) if lnum is not None: duration = time.time() - start if duration < 0.001: duration = 0.001 self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration) def parse_line(self, data_fields, worker_id, lnum): worker_id = 
worker_id.split('.')[1] t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0) r_time = int(data_fields[self.idx["Test time"]]) / 1000.0 latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0 r_code = data_fields[self.idx["HTTP response code"]].strip() con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0 con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0 bytes_count = int(data_fields[self.idx["HTTP response length"]].strip()) test_id = data_fields[self.idx["Test"]].strip() thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip() if thread_id not in self.known_threads: self.known_threads.add(thread_id) self.concurrency += 1 url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count) if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]): if not error_msg: if r_code != '0': error_msg = "HTTP %s" % r_code else: error_msg = "Java exception calling TestRunner" else: error_msg = None # suppress errors if self.report_by_url: label = url elif test_id in self.test_names: label = self.test_names[test_id] else: label = "Test #%s" % test_id source_id = '' # maybe use worker_id somehow? return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count def __split(self, line): if not line.endswith("\n"): self.partial_buffer += line return None, None line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() if not line.startswith('data.'): line_parts = line.split(' ') if len(line_parts) > 1: if line_parts[1] == 'starting,': # self.concurrency += 1 pass elif line_parts[1] == 'finished': if self.concurrency > 0: self.concurrency -= 1 elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}: test_id = line_parts[5][:-1] test_name = ' '.join(line_parts[6:]) self.test_names[test_id] = test_name self.log.debug("Recognized test id %s => %s", test_id, test_name) return None, None worker_id = line[:line.find(' ')] line = line[line.find(' '):] data_fields = line.split(self.DELIMITER) if not data_fields[1].strip().isdigit(): return None, None if len(data_fields) < max(self.idx.values()): return None, None return data_fields, worker_id def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count): url = '' error_msg = None for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. TODO: parameterize? line = self.lines[lineNo].strip() matched = self.DETAILS_REGEX.match(line) if not matched: continue if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5): return matched.group(2), matched.group(4) return url, error_msg class Grinder(RequiredTool): # todo: take it from maven and convert to JarTool(?) 
VERSION = "3.11" LOCAL_PATH = "~/.bzt/grinder-taurus/lib/grinder.jar" def __init__(self, config=None, **kwargs): settings = config or {} grinder_path = settings.get("path", self.LOCAL_PATH) grinder_path = get_full_path(grinder_path) download_link = settings.get("download-link", "") super(Grinder, self).__init__(tool_path=grinder_path, download_link=download_link, **kwargs) self.version = self.VERSION self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version) def check_if_installed(self): self.log.debug("Trying %s: %s", self.tool_name, self.tool_path) try: out, err = self.call(["java", "-classpath", self.tool_path, "net.grinder.Grinder"]) if err: out += err self.log.debug("%s stdout: %s", self.tool_name, out) return True except CALL_PROBLEMS as exc: self.log.warning("%s check failed: %s", self.tool_name, exc) return False def install(self): dest = get_full_path(self.tool_path, step_up=2) self.log.info("Will install %s into %s", self.tool_name, dest) grinder_dist = self._download(use_link=bool(self.download_link)) self.log.info("Unzipping %s", grinder_dist) unzip(grinder_dist, dest, 'grinder-' + self.version) os.remove(grinder_dist) self.log.info("Installed grinder successfully") if not self.check_if_installed(): raise ToolError("Unable to run %s after installation!" % self.tool_name) class GrinderMirrorsManager(MirrorsManager): MIRRORS_SOURCE = "https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder" \ "%203/{version}/grinder-{version}-binary.zip&dialog=true" DOWNLOAD_LINK = "https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \ "/grinder-{version}-binary.zip?r=&ts=" + str(int(time.time())) + "&use_mirror=autoselect" def __init__(self, http_client, parent_logger, grinder_version): self.grinder_version = grinder_version base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version) super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger) def _parse_mirrors(self): links = [] if self.page_source is not None: self.log.debug('Parsing mirrors...') base_link = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}" \ "-binary.zip/download?use_mirror={mirror}" li_search_pattern = re.compile(r'<li id=".*?">') li_elements = li_search_pattern.findall(self.page_source) if li_elements: links = [base_link.format(version=self.grinder_version, mirror=link.strip('<li id="').strip('">')) for link in li_elements] default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version) if default_link not in links: links.append(default_link) self.log.debug('Total mirrors: %d', len(links)) return links class GrinderScriptBuilder(PythonGenerator): IMPORTS = """ from net.grinder.script import Test from net.grinder.script.Grinder import grinder from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities from HTTPClient import NVPair """ def __init__(self, scenario, parent_logger): super(GrinderScriptBuilder, self).__init__(scenario, parent_logger) self.label = "BZT Requests" def build_source_code(self): self.log.debug("Generating Python script for Grinder") self.root.append(self.gen_comment("This script was generated by Taurus", indent=0)) self.root.append(self.add_imports()) self.root.append(self.gen_new_line()) default_address = self.scenario.get("default-address") url_arg = "url=%r" % default_address if default_address else "" self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0)) 
self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0)) self.root.append(self.gen_statement('test.record(request)', indent=0)) self.root.append(self.gen_new_line()) self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0)) self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0)) headers = self.scenario.get_headers() if not self.scenario.get("keepalive", True): headers['Connection'] = 'close' if headers: self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0)) for header, value in iteritems(headers): self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4)) self.root.append(self.gen_statement("])", indent=0)) global_timeout = dehumanize_time(self.scenario.get("timeout", None)) if global_timeout: self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0)) cookie_flag = int(self.scenario.get("store-cookie", True)) self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0)) self.root.append(self.gen_new_line()) self.root.append(self.gen_runner_class()) @staticmethod def __list_to_nvpair_list(items): return "[" + ",".join("NVPair(%r, %r)" % (header, value) for header, value in items) + "]" def gen_runner_class(self): runner_classdef = self.gen_class_definition("TestRunner", ["object"]) sleep_method = self.gen_method_definition("rampUpSleeper", ["self"]) sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return")) sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')")) sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)")) sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)")) sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)")) sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)")) sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')")) sleep_method.append(self.gen_new_line()) runner_classdef.append(sleep_method) main_method = self.gen_method_definition("__call__", ["self"]) main_method.append(self.gen_statement("self.rampUpSleeper()")) for req in self.scenario.get_requests(): if not isinstance(req, HTTPRequest): msg = "Grinder script generator doesn't support '%s' blocks, skipping" self.log.warning(msg, req.NAME) continue method = req.method.upper() url = req.url local_headers = req.headers params = "[]" headers = self.__list_to_nvpair_list(iteritems(local_headers)) main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers))) think_time = dehumanize_time(req.priority_option('think-time')) if think_time: main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000))) runner_classdef.append(main_method) return runner_classdef
[((269, 20, 269, 80), 're.compile', 're.compile', ({(269, 31, 269, 79): '"""worker\\\\.(\\\\S+) (.+) -> (\\\\S+) (.+), (\\\\d+) bytes"""'}, {}), "('worker\\\\.(\\\\S+) (.+) -> (\\\\S+) (.+), (\\\\d+) bytes')", False, 'import re\n'), ((146, 24, 146, 90), 'os.path.join', 'os.path.join', ({(146, 37, 146, 62): 'self.engine.artifacts_dir', (146, 64, 146, 89): "self.exec_id + '-kpi.log'"}, {}), "(self.engine.artifacts_dir, self.exec_id + '-kpi.log')", False, 'import os\n'), ((188, 8, 188, 48), 'bzt.utils.shutdown_process', 'shutdown_process', ({(188, 25, 188, 37): 'self.process', (188, 39, 188, 47): 'self.log'}, {}), '(self.process, self.log)', False, 'from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n'), ((275, 20, 275, 73), 'bzt.utils.FileReader', 'FileReader', (), '', False, 'from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n'), ((295, 16, 295, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((416, 23, 416, 50), 'bzt.utils.get_full_path', 'get_full_path', ({(416, 37, 416, 49): 'grinder_path'}, {}), '(grinder_path)', False, 'from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS\n'), ((437, 15, 437, 55), 'bzt.utils.get_full_path', 'get_full_path', (), '', False, 'from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS\n'), ((441, 8, 441, 60), 'bzt.utils.unzip', 'unzip', ({(441, 14, 441, 26): 'grinder_dist', (441, 28, 441, 32): 'dest', (441, 34, 441, 59): "('grinder-' + self.version)"}, {}), "(grinder_dist, dest, 'grinder-' + self.version)", False, 'from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n'), ((442, 8, 442, 31), 'os.remove', 'os.remove', ({(442, 18, 442, 30): 'grinder_dist'}, {}), '(grinder_dist)', False, 'import os\n'), ((66, 28, 66, 49), 'bzt.six.iteritems', 'iteritems', ({(66, 38, 66, 48): 'base_props'}, {}), '(base_props)', False, 'from bzt.six import iteritems\n'), ((88, 28, 88, 50), 'bzt.six.iteritems', 'iteritems', ({(88, 38, 88, 49): 'local_props'}, {}), '(local_props)', False, 'from bzt.six import iteritems\n'), ((190, 28, 190, 39), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((234, 26, 234, 53), 'bzt.modules.console.ExecutorWidget', 'ExecutorWidget', ({(234, 41, 234, 45): 'self', (234, 47, 234, 52): 'label'}, {}), '(self, label)', False, 'from bzt.modules.console import WidgetProvider, ExecutorWidget\n'), ((445, 18, 445, 84), 'bzt.ToolError', 'ToolError', ({(445, 28, 445, 83): "('Unable to run %s after installation!' % self.tool_name)"}, {}), "('Unable to run %s after installation!' 
% self.tool_name)", False, 'from bzt import TaurusConfigError, ToolError\n'), ((465, 32, 465, 60), 're.compile', 're.compile', ({(465, 43, 465, 59): '"""<li id=".*?">"""'}, {}), '(\'<li id=".*?">\')', False, 'import re\n'), ((513, 33, 513, 51), 'bzt.six.iteritems', 'iteritems', ({(513, 43, 513, 50): 'headers'}, {}), '(headers)', False, 'from bzt.six import iteritems\n'), ((137, 22, 137, 44), 'bzt.TaurusConfigError', 'TaurusConfigError', ({(137, 40, 137, 43): 'msg'}, {}), '(msg)', False, 'from bzt import TaurusConfigError, ToolError\n'), ((317, 23, 317, 34), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((560, 49, 560, 73), 'bzt.six.iteritems', 'iteritems', ({(560, 59, 560, 72): 'local_headers'}, {}), '(local_headers)', False, 'from bzt.six import iteritems\n'), ((231, 40, 231, 69), 'os.path.basename', 'os.path.basename', ({(231, 57, 231, 68): 'self.script'}, {}), '(self.script)', False, 'import os\n'), ((452, 69, 452, 80), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
moroten/scons
test/Fortran/fixture/myfortran_flags.py
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
import getopt import sys comment = ('#' + sys.argv[1]).encode() opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy') optstring = '' length = len(comment) for opt, arg in opts: if opt == '-o': out = arg elif opt not in ('-f', '-K'): optstring = optstring + ' ' + opt infile = open(args[0], 'rb') outfile = open(out, 'wb') outfile.write((optstring + "\n").encode()) for l in infile.readlines(): if l[:length] != comment: outfile.write(l) sys.exit(0)
[((4, 13, 4, 51), 'getopt.getopt', 'getopt.getopt', ({(4, 27, 4, 39): 'sys.argv[2:]', (4, 41, 4, 50): '"""cf:o:xy"""'}, {}), "(sys.argv[2:], 'cf:o:xy')", False, 'import getopt\n'), ((16, 0, 16, 11), 'sys.exit', 'sys.exit', ({(16, 9, 16, 10): '(0)'}, {}), '(0)', False, 'import sys\n')]
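The fixture above stands in for a Fortran compiler in SCons tests: argv[1] names the comment prefix to strip, getopt consumes -o plus a few pass-through flags, and every input line not starting with that prefix is copied to the output behind an option-echo line. A hedged driver follows; it assumes the script is saved as myfortran_flags.py in the working directory.

# Hedged sketch: call the fixture the way a test harness would.
import subprocess
import sys

with open("in.f", "wb") as f:
    f.write(b"#g this comment line is stripped\n")
    f.write(b"      PRINT *, 'kept'\n")

subprocess.run([sys.executable, "myfortran_flags.py", "g", "-x", "-o", "out.f", "in.f"],
               check=True)
print(open("out.f", "rb").read())   # b" -x\n      PRINT *, 'kept'\n"
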
Zen-Reportz/zen_knit
zen_knit/organizer/__init__.py
104c2693d2cc61520657131da769f5d59d2df8e9
import io import os import base64 from pathlib import Path from nbconvert import filters from pygments.formatters.latex import LatexFormatter from zen_knit import formattor from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData from zen_knit.formattor.html_formatter import HTMLFormatter mime_extensions = {"image/png" : "png", "image/jpg" : "jpg"} class BaseOrganizer: def __init__(self, executed_data: ExecutedData): self.format_started = False self.collected_string = "" self.fig_folder = None self.executed_data = executed_data self.formatted_doc = [] self.organized_data = OrganizedData( global_options = self.executed_data.global_options, chunks = [] ) self._create_output_folder_name() self._create_fig_folder() self._organize_doc() self._create_output_file_name() def _create_output_file_name(self): global_options = self.organized_data.global_options global_options.output.file_name = global_options.input.file_name.split(".")[0] + "."+ global_options.output.format def _create_output_folder_name(self): global_options = self.organized_data.global_options if global_options.output.dir is None: global_options.output.dir = global_options.input.dir def _create_fig_folder(self): output_folder = self.organized_data.global_options.output.dir Path(output_folder).mkdir(parents=True, exist_ok=True) fig_folder = os.path.join(output_folder, self.organized_data.global_options.output.fig_dir) self.fig_folder = fig_folder Path(fig_folder).mkdir(parents=True, exist_ok=True) def _parse_raw(self, data, output_type): if data.get("code_text_raw") is not None: if self._clean_up(data['code_text_raw']) is not None: if output_type in ("code"): t = {"type": "code", "str_data": data['code_text_raw'] } elif output_type in ("sql"): t = {"type": "sql", "str_data": data['code_text_raw'] } else: t = {"type": "markdown", "str_data": data['code_text_raw'] } self.organized_data.chunks.append(OrganizedChunk(**t)) return True else: return False def _coder_string(self, data): list_ = ["stream", "error"] if data["output_type"] is None: return False if data["output_type"] in list_: if data["output_type"] == "stream": if self._clean_up(data['text']) is not None: t = {"type": "se_data", "str_data": data['text'] } self.organized_data.chunks.append(OrganizedChunk(**t)) if data["output_type"] == "error": t = {"type": "se_data", "str_data": data["evalue"] + filters.strip_ansi("".join(data["traceback"])) } self.organized_data.chunks.append(OrganizedChunk(**t)) return True return False def _raw_string(self, data): if data["output_type"] is None: return False if data["output_type"] == "execute_result": if data.get("data") is not None: if 'matplotlib' in data["data"]["text/plain"]: # Doing nothing here return True else: if ((data["data"]["text/plain"][0] == "'") or (data["data"]["text/plain"][0] == '"')): temp = data["data"]["text/plain"][1:-1] else: temp = data["data"]["text/plain"] if "<table" in temp: t = {"type": "html_data", "str_data":temp.encode().decode() } self.organized_data.chunks.append(OrganizedChunk(**t)) return True # if "BokehJS" in temp: # t = {"type": "html_data", "str_data": "<script type='text/javascript'>" + temp.encode().decode() + "</script>" } # self.organized_data.chunks.append(OrganizedChunk(**t)) # return True if self._clean_up(temp) is not None: t = {"type": "e_data", "str_data":temp } self.organized_data.chunks.append(OrganizedChunk(**t)) return True return True return False def _raw_plots(self, data, chunk_option:ChunkOption): if data["output_type"] is None: return False if 
data["output_type"] == "display_data": plot_infos = self._save_plots(data, chunk_option) t = {"type": "plot", "complex_data":{"plots": plot_infos, "options": chunk_option }} self.organized_data.chunks.append(OrganizedChunk(**t)) return True return False def _save_plots(self, data, chunk_option:ChunkOption): figs = [] i = 1 for m in mime_extensions: if m in data["data"]: fig_full_path, fig_relative_path = self._build_file(mime_extensions[m], i, chunk_option.fig_caption, chunk_option.name) figs.append(fig_relative_path) bfig = base64.b64decode(data["data"][m]) with open(fig_full_path, "wb") as f: f.write(bfig) i += 1 return figs def _build_file(self, extension, index, fig_caption= None, name =None): fig_name = "" if fig_caption is not None: fig_name = fig_name + "_" + fig_caption if name is not None: fig_name = fig_name + "_" + name fig_name = fig_name + "_" + str(index) fig_name = fig_name + "." + extension return os.path.join(self.fig_folder, fig_name), os.path.join(self.fig_folder, fig_name) def _interactive_plots(self, data): if data["output_type"] is None: return False if data["output_type"] == "display_data": if "text/html" in data["data"]: print(self.executed_data.global_options.output.format) if self.executed_data.global_options.output.format != "html": raise Exception("output format is not HTML") else: t = {"type": "html_data", "str_data":data["data"]["text/html"].encode().decode() } self.organized_data.chunks.append(OrganizedChunk(**t)) return True return False def _organize_doc(self): for index, chunk in enumerate(self.executed_data.chunks): chunk_option = chunk.chunk.options if chunk_option.name: print(f"organizing {chunk_option.name}") else: print(f"organizing index {index}") results = chunk.results for result in results: data = result.data present = self._parse_raw(data, result.output_type) if present: continue present = self._coder_string(data) if present: continue present = self._raw_string(data) if present: continue present = self._interactive_plots(data) if present: continue present = self._raw_plots(data, chunk_option) if present: continue print("not supported format", data) t = [] c: OrganizedChunk for c in self.organized_data.chunks: last_chank: OrganizedChunk if len(t)> 0: last_chank = t[-1] else: last_chank = None if last_chank is None: t.append(c) else: if (c.type == last_chank.type) & (c.type != "plot"): last_chank.str_data = last_chank.str_data + "\n" + c.str_data else: t.append(c) self.organized_data.chunks = t @staticmethod def _clean_up(doc): d = doc.replace(" ", "").replace("\n", "") if len(d) != 0: return doc else: return None # markdown_file = self.executed_data.global_options.input_file_name.split(".")[0] + ".md" # markdown_file = os.path.join(self.executed_data.global_options.output_file_dir , markdown_file) # with open(markdown_file, "w") as f: # text = "\n".join(self.formatted_doc) # f.write(text)
[((25, 30, 28, 9), 'zen_knit.data_types.OrganizedData', 'OrganizedData', (), '', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((50, 21, 50, 99), 'os.path.join', 'os.path.join', ({(50, 34, 50, 47): 'output_folder', (50, 49, 50, 98): 'self.organized_data.global_options.output.fig_dir'}, {}), '(output_folder, self.organized_data.global_options.output.fig_dir)', False, 'import os\n'), ((159, 16, 159, 55), 'os.path.join', 'os.path.join', ({(159, 29, 159, 44): 'self.fig_folder', (159, 46, 159, 54): 'fig_name'}, {}), '(self.fig_folder, fig_name)', False, 'import os\n'), ((159, 57, 159, 96), 'os.path.join', 'os.path.join', ({(159, 70, 159, 85): 'self.fig_folder', (159, 87, 159, 95): 'fig_name'}, {}), '(self.fig_folder, fig_name)', False, 'import os\n'), ((48, 8, 48, 27), 'pathlib.Path', 'Path', ({(48, 13, 48, 26): 'output_folder'}, {}), '(output_folder)', False, 'from pathlib import Path\n'), ((52, 8, 52, 24), 'pathlib.Path', 'Path', ({(52, 13, 52, 23): 'fig_folder'}, {}), '(fig_folder)', False, 'from pathlib import Path\n'), ((129, 46, 129, 65), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((140, 23, 140, 56), 'base64.b64decode', 'base64.b64decode', ({(140, 40, 140, 55): "data['data'][m]"}, {}), "(data['data'][m])", False, 'import base64\n'), ((65, 50, 65, 69), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((83, 50, 83, 69), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((79, 54, 79, 73), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((173, 54, 173, 73), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((106, 58, 106, 77), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n'), ((115, 58, 115, 77), 'zen_knit.data_types.OrganizedChunk', 'OrganizedChunk', ({}, {}), '(**t)', False, 'from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\n')]
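The tail of _organize_doc above folds consecutive chunks of the same type (plots excepted) into one chunk by joining their str_data. The same rule replayed on toy data, with a plain dataclass standing in for zen_knit's OrganizedChunk:

# Hedged sketch: the chunk-folding rule on toy data (Chunk is a stand-in, not OrganizedChunk).
from dataclasses import dataclass

@dataclass
class Chunk:
    type: str
    str_data: str = ""

chunks = [Chunk("markdown", "a"), Chunk("markdown", "b"), Chunk("code", "x = 1")]
merged = []
for c in chunks:
    last = merged[-1] if merged else None
    if last is not None and c.type == last.type and c.type != "plot":
        last.str_data = last.str_data + "\n" + c.str_data
    else:
        merged.append(c)

print([(c.type, c.str_data) for c in merged])   # [('markdown', 'a\nb'), ('code', 'x = 1')]
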
mcaniot/qibullet
qibullet/robot_virtual.py
9c5e1b319a18dd289263eb82f9d7303429bcbe21
#!/usr/bin/env python # coding: utf-8 import sys import pybullet from qibullet.camera import * from qibullet.link import Link from qibullet.joint import Joint IS_VERSION_PYTHON_3 = sys.version_info[0] >= 3 class RobotVirtual: """ Mother class representing a virtual robot """ def __init__(self, description_file): """ Constructor Parameters: description_file - The file giving the description of the virtual robot. For now, only URDF is handled """ self.description_file = description_file self.physics_client = 0 self.active_camera = None self.camera_dict = dict() self.joint_dict = dict() self.link_dict = dict() def loadRobot(self, translation, quaternion, physicsClientId=0): """ Loads the robot into a simulation, loads the joints and the links descriptions. The joints are set to 0 rad. Parameters: translation - List containing 3 elements, the translation [x, y, z] of the robot in the WORLD frame quaternion - List containing 4 elements, the quaternion [x, y, z, q] of the robot in the WORLD frame physicsClientId - The id of the simulated instance in which the robot is supposed to be loaded Returns: boolean - True if the method ran correctly, False otherwise """ try: self.physics_client = physicsClientId self.robot_model = pybullet.loadURDF( self.description_file, translation, quaternion, useFixedBase=False, globalScaling=1.0, physicsClientId=self.physics_client, flags=pybullet.URDF_USE_SELF_COLLISION | pybullet.URDF_USE_MATERIAL_COLORS_FROM_MTL) except pybullet.error as e: raise pybullet.error("Cannot load robot model: " + str(e)) for i in range(pybullet.getNumJoints( self.robot_model, physicsClientId=self.physics_client)): if IS_VERSION_PYTHON_3: # PYTHON 3 version needs a conversion bytes to str joint_info = pybullet.getJointInfo( self.robot_model, i, physicsClientId=self.physics_client) self.link_dict[joint_info[12].decode('utf-8')] =\ Link(joint_info) if joint_info[2] == pybullet.JOINT_PRISMATIC or\ joint_info[2] == pybullet.JOINT_REVOLUTE: self.joint_dict[joint_info[1].decode('utf-8')] =\ Joint(joint_info) else: # PYTHON 2 Version joint_info = pybullet.getJointInfo( self.robot_model, i, physicsClientId=self.physics_client) self.link_dict[joint_info[12]] = Link(joint_info) if joint_info[2] == pybullet.JOINT_PRISMATIC or\ joint_info[2] == pybullet.JOINT_REVOLUTE: self.joint_dict[joint_info[1]] = Joint(joint_info) def getRobotModel(self): """ Returns the pybullet model to which the module is associated. Returns: robot_model - The pybullet model of the robot """ return self.robot_model def getPhysicsClientId(self): """ Returns the id of the simulated instance in which the module is loaded. Returns: physics_client - The id of the simulation in which the robot (possessing the module) is spawned """ return self.physics_client def setAngles(self, joint_names, joint_values, percentage_speeds): """ Set angles on the robot's joints. Tests have to be performed by the child class to guarantee the validity of the input parameters. 
Parameters: joint_names - List of string containing the name of the joints to be controlled joint_values - List of values corresponding to the angles in radians to be applied percentage_speeds - Percentages of the max speed to be used for each joint, has to be strictly superior to 0 and inferior or equal to 1 """ try: assert len(joint_names) ==\ len(joint_values) ==\ len(percentage_speeds) assert all( speed >= 0.0 and speed <= 1.0 for speed in percentage_speeds) except AssertionError: raise pybullet.error("Error in the setAngles parameters") for joint_name, joint_value, percentage_speed in zip( joint_names, joint_values, percentage_speeds): joint_speed =\ self.joint_dict[joint_name].getMaxVelocity() *\ percentage_speed pybullet.setJointMotorControl2( self.robot_model, self.joint_dict[joint_name].getIndex(), pybullet.POSITION_CONTROL, targetPosition=joint_value, maxVelocity=joint_speed, force=self.joint_dict[joint_name].getMaxEffort(), physicsClientId=self.physics_client) def getAnglesPosition(self, joint_names): """ Gets the position of the robot's joints in radians. If one of the joint doesn't exist, the method will raise a KeyError. Parameters: joint_names - List of string containing the names of the joints Returns: joint_positions - List of floats containing the joint's positions """ joint_positions = list() for joint_name in joint_names: joint_positions.append(pybullet.getJointState( self.robot_model, self.joint_dict[joint_name].getIndex(), physicsClientId=self.physics_client)[0]) return joint_positions def getAnglesVelocity(self, joint_names): """ Gets the velocity of the robot's joints in rad/s. If one of the joint doesn't exist, the method will raise a KeyError. Parameters: joint_names - List of string containing the names of the joints Returns: joint_velocities - List of floats containing the joint's velocities """ joint_velocities = list() for joint_name in joint_names: joint_velocities.append(pybullet.getJointState( self.robot_model, self.joint_dict[joint_name].getIndex(), physicsClientId=self.physics_client)[1]) return joint_velocities def subscribeCamera(self, camera_id, resolution=Camera.K_QVGA): """ Subscribe to the camera holding the camera id. WARNING: at the moment, only one camera can be subscribed. Parameters: camera_id - The id of the camera to be subscribed resolution - CameraResolution object, the resolution of the camera """ try: self.active_camera = self.camera_dict[camera_id] self.active_camera.subscribe(resolution=resolution) except KeyError: print("This camera does not exist, use a valid camera id") def unsubscribeCamera(self, camera_id): """ Unsubscribe from a camera, the one holding the camera id. Parameters: camera_id - The id of the camera to be unsubscribed """ try: # If no active camera is found, nothing is unsubscribed assert self.active_camera is not None if self.active_camera.getCameraId() == camera_id: self.active_camera.unsubscribe() self.active_camera = None except KeyError: print("This camera does not exist, use a valid camera id") except AssertionError: pass def getCameraFrame(self): """ Returns a camera frame. Be advised that the subscribeCamera method needs to be called beforehand, otherwise a pybullet error will be raised. 
Returns: frame - The current camera frame as a formatted numpy array, directly exploitable from OpenCV """ try: assert self.active_camera is not None return self.active_camera.getFrame() except AssertionError: raise pybullet.error("No active camera, cannot retrieve any frame") def getCameraResolution(self): """ Returns the resolution of the active camera. Be advised that the subscribeCamera method needs to be called beforehand, otherwise a pybullet error will be raised. Returns: resolution - a CameraResolution object describing the resolution of the active camera """ try: assert self.active_camera is not None return self.active_camera.getResolution() except KeyError: raise pybullet.error("No active camera, resolution unavailable") def getCameraLink(self): """ Returns the link of the active camera. Be advised that the subscribeCamera method needs to be called beforehand, otherwise a pybullet error will be raised. Returns: resolution - a Link object describing the link to which the active camera is attached """ try: assert self.active_camera is not None return self.active_camera.getCameraLink() except KeyError: raise pybullet.error("No active camera, cannot retrieve any link") def getActiveCamera(self): """ Returns the active camera of the robot. Returns: active_camera - Camera (CameraRgb or CameraDepth) object, the active camera of the robot. If there is no active camera, a None is returned """ return self.active_camera def getPosition(self): """ Gets the position of the robot's base in the world frame. Returns: x - The position of the robot's base on the x axis, in meters y - The positions of the robot's base on the y axis in meters theta - The rotation of the robot's base on the z axis in meters """ position, quaternions = pybullet.getBasePositionAndOrientation( self.robot_model, physicsClientId=self.physics_client) theta = pybullet.getEulerFromQuaternion(quaternions)[2] return position[0], position[1], theta def isSelfColliding(self, link_names): """ Specifies if a link is colliding with the rest of the virtual robot. Parameters: link_names - String or list of string containing the names of the links to be checked for self collision. WARNING: only the links with corresponding meshes should be used, otherwise the link cannot self collide Returns: self_colliding - Boolean, if True at least one of the links is self colliding """ try: if type(link_names) is str: assert link_names in self.link_dict.keys() names = [link_names] else: assert set(link_names).issubset(self.link_dict.keys()) names = list(link_names) for name in names: contact_tuple = pybullet.getContactPoints( bodyA=self.robot_model, bodyB=self.robot_model, linkIndexA=self.link_dict[name].getIndex(), physicsClientId=self.physics_client) contact_tuple += pybullet.getContactPoints( bodyA=self.robot_model, bodyB=self.robot_model, linkIndexB=self.link_dict[name].getIndex(), physicsClientId=self.physics_client) if len(contact_tuple) != 0: return True return False except AssertionError: raise pybullet.error( "Unauthorized link checking for self collisions")
[((304, 32, 306, 48), 'pybullet.getBasePositionAndOrientation', 'pybullet.getBasePositionAndOrientation', (), '', False, 'import pybullet\n'), ((51, 31, 59, 59), 'pybullet.loadURDF', 'pybullet.loadURDF', (), '', False, 'import pybullet\n'), ((64, 23, 66, 52), 'pybullet.getNumJoints', 'pybullet.getNumJoints', (), '', False, 'import pybullet\n'), ((308, 16, 308, 60), 'pybullet.getEulerFromQuaternion', 'pybullet.getEulerFromQuaternion', ({(308, 48, 308, 59): 'quaternions'}, {}), '(quaternions)', False, 'import pybullet\n'), ((69, 29, 72, 56), 'pybullet.getJointInfo', 'pybullet.getJointInfo', (), '', False, 'import pybullet\n'), ((74, 20, 74, 36), 'qibullet.link.Link', 'Link', ({(74, 25, 74, 35): 'joint_info'}, {}), '(joint_info)', False, 'from qibullet.link import Link\n'), ((82, 29, 85, 56), 'pybullet.getJointInfo', 'pybullet.getJointInfo', (), '', False, 'import pybullet\n'), ((87, 49, 87, 65), 'qibullet.link.Link', 'Link', ({(87, 54, 87, 64): 'joint_info'}, {}), '(joint_info)', False, 'from qibullet.link import Link\n'), ((135, 18, 135, 69), 'pybullet.error', 'pybullet.error', ({(135, 33, 135, 68): '"""Error in the setAngles parameters"""'}, {}), "('Error in the setAngles parameters')", False, 'import pybullet\n'), ((248, 18, 248, 79), 'pybullet.error', 'pybullet.error', ({(248, 33, 248, 78): '"""No active camera, cannot retrieve any frame"""'}, {}), "('No active camera, cannot retrieve any frame')", False, 'import pybullet\n'), ((265, 18, 265, 76), 'pybullet.error', 'pybullet.error', ({(265, 33, 265, 75): '"""No active camera, resolution unavailable"""'}, {}), "('No active camera, resolution unavailable')", False, 'import pybullet\n'), ((282, 18, 282, 78), 'pybullet.error', 'pybullet.error', ({(282, 33, 282, 77): '"""No active camera, cannot retrieve any link"""'}, {}), "('No active camera, cannot retrieve any link')", False, 'import pybullet\n'), ((351, 18, 352, 65), 'pybullet.error', 'pybullet.error', ({(352, 16, 352, 64): '"""Unauthorized link checking for self collisions"""'}, {}), "('Unauthorized link checking for self collisions')", False, 'import pybullet\n'), ((79, 24, 79, 41), 'qibullet.joint.Joint', 'Joint', ({(79, 30, 79, 40): 'joint_info'}, {}), '(joint_info)', False, 'from qibullet.joint import Joint\n'), ((91, 53, 91, 70), 'qibullet.joint.Joint', 'Joint', ({(91, 59, 91, 69): 'joint_info'}, {}), '(joint_info)', False, 'from qibullet.joint import Joint\n')]
hbraux/kafkacli
tests/test_formatter.py
5f7ed23150932b66b484fb43dd6210b6c0968776
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import pytest
import json

from kafkacli.formatter import Formatter

sampleJson = json.loads('{"a":"s", "b":1}')


def test_print_default(capsys):
    Formatter().print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == '{"a": "s", "b": 1}\n'


def test_print_idents(capsys):
    Formatter(indents=True).print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == '{\n "a": "s",\n "b": 1\n}\n'


def test_print_colors(capsys):
    Formatter(colors=True).print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == \
        '{"a": \x1b[34m"s"\x1b[39m, "b": \x1b[31m1\x1b[39m}\n'
[((9, 13, 9, 43), 'json.loads', 'json.loads', ({(9, 24, 9, 42): '"""{"a":"s", "b":1}"""'}, {}), '(\'{"a":"s", "b":1}\')', False, 'import json\n'), ((13, 4, 13, 15), 'kafkacli.formatter.Formatter', 'Formatter', ({}, {}), '()', False, 'from kafkacli.formatter import Formatter\n'), ((19, 4, 19, 27), 'kafkacli.formatter.Formatter', 'Formatter', (), '', False, 'from kafkacli.formatter import Formatter\n'), ((25, 4, 25, 26), 'kafkacli.formatter.Formatter', 'Formatter', (), '', False, 'from kafkacli.formatter import Formatter\n')]
arc198/DJANGO-JOB-SITE
src/jobs/forms.py
d9547c4ee85751677ba6458380b609973c3b4a8d
from django import forms

from .models import Application


class ApplicationForm(forms.ModelForm):
    class Meta:
        model = Application
        fields = ('resume', 'cover_letter',)
[]
ylee88/visit
src/test/tests/unit/protocol.py
8e0920996d84fef70a7014b0d770360918d849d5
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: protocolo.py
#
# Tests: vistprotocol unit test
#
# Mark C. Miller, Tue Jan 11 10:19:23 PST 2011
# ----------------------------------------------------------------------------
tapp = visit_bin_path("visitprotocol")
res = sexe(tapp,ret_output=True)
if res["return_code"] == 0:
    excode = 111
else:
    excode = 113
Exit(excode)
[]
Dozed12/pyMazeBacktrack
pyMazeBacktrack.py
aaa2a902fdca17dca6e2ee00e672b6bb38da5639
import libtcodpy as libtcod from random import randint nSquares = 30 nTiles = nSquares * 2 + 1 SCREEN_WIDTH = nTiles SCREEN_HEIGHT = nTiles libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW) libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL) def CheckDir(x,y,size,direction,table): if direction == 1: if y - 2 <= 0: return 0 if table[x][y-2] == white: return 0 elif direction == 2: if x + 2 >= size: return 0 if table[x+2][y] == white: return 0 elif direction == 3: if y + 2 >= size: return 0 if table[x][y+2] == white: return 0 elif direction == 4: if x - 2 <= 0: return 0 if table[x-2][y] == white: return 0 return 1 def Possible(x,y,table,size): if x+2 < size: if table[x+2][y] == black: return 1 if x-2 > 0: if table[x-2][y] == black: return 1 if y+2 < size: if table[x][y+2] == black: return 1 if y-2 > 0: if table[x][y-2] == black: return 1 return 0 black = libtcod.black white = libtcod.white Table = [[0 for i in range(nTiles)]for i in range(nTiles)] for x in range(nTiles): for y in range(nTiles): Table[x][y] = black libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white) libtcod.console_flush() Memory = [] CurrX = 1 CurrY = 1 Table[CurrX][CurrY] = white end = 0 while end == 0: while Possible(CurrX,CurrY,Table,nTiles): Dir = randint(1,4) while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0: Dir = randint(1,4) if Dir == 1: Table[CurrX][CurrY - 1] = white CurrY -= 2 Table[CurrX][CurrY] = white elif Dir == 2: Table[CurrX + 1][CurrY] = white CurrX += 2 Table[CurrX][CurrY] = white elif Dir == 3: Table[CurrX][CurrY + 1] = white CurrY += 2 Table[CurrX][CurrY] = white elif Dir == 4: Table[CurrX - 1][CurrY] = white CurrX -= 2 Table[CurrX][CurrY] = white Memory.append(Dir) #print for x in range(nTiles): for y in range(nTiles): libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white) libtcod.console_flush() while Possible(CurrX,CurrY,Table,nTiles) == 0: MemorySize = len(Memory) Dir = Memory[MemorySize-1] if Dir == 1: CurrY += 2 elif Dir == 2: CurrX -= 2 elif Dir == 3: CurrY -= 2 elif Dir == 4: CurrX += 2 del Memory[MemorySize-1] if CurrX == 1 and CurrY == 1: end = 1 break #print for x in range(nTiles): for y in range(nTiles): libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white) libtcod.console_flush() libtcod.console_wait_for_keypress(True)
[((10, 0, 10, 83), 'libtcodpy.console_set_custom_font', 'libtcod.console_set_custom_font', ({(10, 32, 10, 49): '"""cp437_12x12.png"""', (10, 51, 10, 82): 'libtcod.FONT_LAYOUT_ASCII_INROW'}, {}), "('cp437_12x12.png', libtcod.\n FONT_LAYOUT_ASCII_INROW)", True, 'import libtcodpy as libtcod\n'), ((11, 0, 11, 105), 'libtcodpy.console_init_root', 'libtcod.console_init_root', ({(11, 26, 11, 38): 'SCREEN_WIDTH', (11, 40, 11, 53): 'SCREEN_HEIGHT', (11, 55, 11, 72): '"""pyMazeBacktrack"""', (11, 74, 11, 79): '(False)', (11, 81, 11, 104): 'libtcod.RENDERER_OPENGL'}, {}), "(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', \n False, libtcod.RENDERER_OPENGL)", True, 'import libtcodpy as libtcod\n'), ((65, 0, 65, 23), 'libtcodpy.console_flush', 'libtcod.console_flush', ({}, {}), '()', True, 'import libtcodpy as libtcod\n'), ((133, 0, 133, 23), 'libtcodpy.console_flush', 'libtcod.console_flush', ({}, {}), '()', True, 'import libtcodpy as libtcod\n'), ((135, 0, 135, 39), 'libtcodpy.console_wait_for_keypress', 'libtcod.console_wait_for_keypress', ({(135, 34, 135, 38): '(True)'}, {}), '(True)', True, 'import libtcodpy as libtcod\n'), ((63, 8, 63, 75), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', ({(63, 36, 63, 40): 'None', (63, 41, 63, 42): 'x', (63, 43, 63, 44): 'y', (63, 45, 63, 48): '(219)', (63, 49, 63, 60): 'Table[x][y]', (63, 61, 63, 74): 'libtcod.white'}, {}), '(None, x, y, 219, Table[x][y], libtcod.white)', True, 'import libtcodpy as libtcod\n'), ((79, 14, 79, 26), 'random.randint', 'randint', ({(79, 22, 79, 23): '1', (79, 24, 79, 25): '4'}, {}), '(1, 4)', False, 'from random import randint\n'), ((106, 8, 106, 31), 'libtcodpy.console_flush', 'libtcod.console_flush', ({}, {}), '()', True, 'import libtcodpy as libtcod\n'), ((132, 8, 132, 75), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', ({(132, 36, 132, 40): 'None', (132, 41, 132, 42): 'x', (132, 43, 132, 44): 'y', (132, 45, 132, 48): '(219)', (132, 49, 132, 60): 'Table[x][y]', (132, 61, 132, 74): 'libtcod.white'}, {}), '(None, x, y, 219, Table[x][y], libtcod.white)', True, 'import libtcodpy as libtcod\n'), ((81, 18, 81, 30), 'random.randint', 'randint', ({(81, 26, 81, 27): '1', (81, 28, 81, 29): '4'}, {}), '(1, 4)', False, 'from random import randint\n'), ((105, 16, 105, 83), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', ({(105, 44, 105, 48): 'None', (105, 49, 105, 50): 'x', (105, 51, 105, 52): 'y', (105, 53, 105, 56): '(219)', (105, 57, 105, 68): 'Table[x][y]', (105, 69, 105, 82): 'libtcod.white'}, {}), '(None, x, y, 219, Table[x][y], libtcod.white)', True, 'import libtcodpy as libtcod\n')]
aws-solutions/maintaining-personalized-experiences-with-machine-learning
source/tests/test_resources.py
3f6f1b0069df4828eae9b0835b717500189e4f71
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.                                                  #
#                                                                                                                      #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance      #
# with the License. You may obtain a copy of the License at                                                           #
#                                                                                                                      #
#  http://www.apache.org/licenses/LICENSE-2.0                                                                         #
#                                                                                                                      #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed    #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for   #
# the specific language governing permissions and limitations under the License.                                      #
# ######################################################################################################################

import pytest

from shared.resource import (
    DatasetGroup,
    Schema,
    Dataset,
    DatasetImportJob,
    Solution,
    SolutionVersion,
    Campaign,
    EventTracker,
    BatchSegmentJob,
    BatchInferenceJob,
)


@pytest.mark.parametrize(
    "klass,camel,dash,snake",
    [
        (DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
        (Schema, "schema", "schema", "schema"),
        (Dataset, "dataset", "dataset", "dataset"),
        (
            DatasetImportJob,
            "datasetImportJob",
            "dataset-import-job",
            "dataset_import_job",
        ),
        (Solution, "solution", "solution", "solution"),
        (SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
        (Campaign, "campaign", "campaign", "campaign"),
        (EventTracker, "eventTracker", "event-tracker", "event_tracker"),
        (
            BatchInferenceJob,
            "batchInferenceJob",
            "batch-inference-job",
            "batch_inference_job",
        ),
        (BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
    ],
    ids=[
        "DatasetGroup",
        "Schema",
        "Dataset",
        "DatasetImportJob",
        "Solution",
        "SolutionVersion",
        "Campaign",
        "EventTracker",
        "BatchInferenceJob",
        "BatchSegmentJob,",
    ],
)
def test_resource_naming(klass, camel, dash, snake):
    assert klass().name.camel == camel
    assert klass().name.dash == dash
    assert klass().name.snake == snake
[((30, 1, 66, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (), '', False, 'import pytest\n')]
orlandofv/sianna
app_venv/Lib/site-packages/phonenumbers/data/region_AG.py
f07dd6dbc62a9604f31ab800e482e62f14fba766
"""Auto-generated file, do not edit by hand. AG metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_AG = PhoneMetadata(id='AG', country_code=1, international_prefix='011', general_desc=PhoneNumberDesc(national_number_pattern='(?:268|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)), fixed_line=PhoneNumberDesc(national_number_pattern='268(?:4(?:6[0-38]|84)|56[0-2])\\d{4}', example_number='2684601234', possible_length=(10,), possible_length_local_only=(7,)), mobile=PhoneNumberDesc(national_number_pattern='268(?:464|7(?:1[3-9]|[28]\\d|3[0246]|64|7[0-689]))\\d{4}', example_number='2684641234', possible_length=(10,), possible_length_local_only=(7,)), toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)), premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)), personal_number=PhoneNumberDesc(national_number_pattern='52(?:355[0-46-9]|4(?:5(?:2[024-9]|5[0-46-9])|60[1-9]|9(?:2[0-5]|49)))\\d{4}|52(?:3(?:[2-46-9][02-9]|5[02-46-9])|4(?:[2-478][02-9]|5[034]|6[2-9]|9[05-9])|7[2-4]\\d)\\d{5}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[1256]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)), voip=PhoneNumberDesc(national_number_pattern='26848[01]\\d{4}', example_number='2684801234', possible_length=(10,), possible_length_local_only=(7,)), pager=PhoneNumberDesc(national_number_pattern='26840[69]\\d{4}', example_number='2684061234', possible_length=(10,), possible_length_local_only=(7,)), national_prefix='1', national_prefix_for_parsing='1|([457]\\d{6})$', national_prefix_transform_rule='268\\1', leading_digits='268', mobile_number_portable_region=True)
[]
lapets/bu-gsubmit-grading
gradefiles-send.py
69c40a763908be1c954dce3e5e5aab854ac379ff
#####################################################################
##
## gradefiles-send.py
##
## Script to send grade files by email to enrolled students; the
## input grade file names should correspond to the user names of
## the students.
##
##

from email.mime.text import MIMEText # For creating a message string.
from subprocess import Popen, PIPE # For sending email on linux.
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).

#####################################################################
## Sending a simple email message.
##

def send(txt, courseNumber, task, sender, targets):
    msg = MIMEText(txt)
    msg["From"] = sender + "@bu.edu"
    msg["To"] = ",".join([target + "@bu.edu" for target in targets])
    msg["Cc"] = sender + "@bu.edu"
    msg["Subject"] = "CS " + courseNumber + " " + task + " grade"
    p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
    p.communicate(bytes(msg.as_string(), 'UTF-8'))

#####################################################################
## Process the command line parameters.
##

if len(sys.argv) == 6\
   and (int(sys.argv[1][0:3]) in range(100,1000))\
   and sys.argv[2] in ['Fall', 'Spring']\
   and int(sys.argv[3]) in range(2000,2100):
    courseNumber = sys.argv[1] # Accepts course names like "591 X1."
    season = sys.argv[2]
    year = sys.argv[3]
    task = sys.argv[4]
    sender = sys.argv[5]
else:
    print('\n Usage:\n\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
    exit()

#####################################################################
## Check for list of files.
##

if not os.path.exists('./data'):
    print('No folder "data" containing grade files found. Exiting.')
    exit()

#####################################################################
## Send the grade files.
##

for curdir, dirs, files in os.walk('./data/'):
    for file in files:
        txt = open('./data/'+file, 'r').read()
        targets = file.split('.')[0].split("_")
        send(txt, courseNumber, task, sender, targets)
        print('Sent grade file to ' + str(targets) + '.')

#eof
[((58, 27, 58, 45), 'os.walk', 'os.walk', ({(58, 35, 58, 44): '"""./data/"""'}, {}), "('./data/')", False, 'import os\n'), ((21, 10, 21, 23), 'email.mime.text.MIMEText', 'MIMEText', ({(21, 19, 21, 22): 'txt'}, {}), '(txt)', False, 'from email.mime.text import MIMEText\n'), ((26, 8, 26, 55), 'subprocess.Popen', 'Popen', (), '', False, 'from subprocess import Popen, PIPE\n'), ((50, 7, 50, 31), 'os.path.exists', 'os.path.exists', ({(50, 22, 50, 30): '"""./data"""'}, {}), "('./data')", False, 'import os\n')]
GalAster/16
Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
import os
import pickle
import tensorflow as tf
import wolframclient.serializers as wxf

name = 'karras2018iclr-celebahq-1024x1024'
file = open(name + '.pkl', 'rb')
sess = tf.InteractiveSession()
G, D, Gs = pickle.load(file)
saver = tf.train.Saver()
save_path = "./target/" + name + "/"
model_name = 'model'
if not os.path.exists(save_path):
    os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full)

ckpt = tf.train.get_checkpoint_state(save_path)
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
all_variables = list(reader.get_variable_to_shape_map().keys())
npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))
wxf.export(npy, name + '.wxf', target_format='wxf')

# Save as protobuf
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess=sess,
        input_graph_def=sess.graph_def,
        # output_node_names=['G_paper_1/images_out']
        output_node_names=['G_paper_1/ToRGB_lod0/add']
    )

    with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file: # 保存模型
        file.write(output_graph_def.SerializeToString()) # 序列化输出
[((8, 7, 8, 30), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((9, 11, 9, 28), 'pickle.load', 'pickle.load', ({(9, 23, 9, 27): 'file'}, {}), '(file)', False, 'import pickle\n'), ((10, 8, 10, 24), 'tensorflow.train.Saver', 'tf.train.Saver', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((15, 17, 15, 52), 'os.path.join', 'os.path.join', ({(15, 30, 15, 39): 'save_path', (15, 41, 15, 51): 'model_name'}, {}), '(save_path, model_name)', False, 'import os\n'), ((18, 7, 18, 47), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', ({(18, 37, 18, 46): 'save_path'}, {}), '(save_path)', True, 'import tensorflow as tf\n'), ((19, 9, 19, 65), 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', ({(19, 38, 19, 64): 'ckpt.model_checkpoint_path'}, {}), '(ckpt.model_checkpoint_path)', True, 'import tensorflow as tf\n'), ((22, 0, 22, 51), 'wolframclient.serializers.export', 'wxf.export', (), '', True, 'import wolframclient.serializers as wxf\n'), ((13, 7, 13, 32), 'os.path.exists', 'os.path.exists', ({(13, 22, 13, 31): 'save_path'}, {}), '(save_path)', False, 'import os\n'), ((14, 4, 14, 26), 'os.makedirs', 'os.makedirs', ({(14, 16, 14, 25): 'save_path'}, {}), '(save_path)', False, 'import os\n'), ((25, 5, 25, 17), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((27, 23, 32, 5), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (), '', True, 'import tensorflow as tf\n'), ((34, 9, 34, 57), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', ({(34, 24, 34, 50): "('./target/' + name + '.pb')", (34, 52, 34, 56): '"""wb"""'}, {}), "('./target/' + name + '.pb', 'wb')", True, 'import tensorflow as tf\n'), ((26, 4, 26, 33), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ({}, {}), '()', True, 'import tensorflow as tf\n')]
Quanta-Robotics/Robot-Blueberry
src/moveGoogle.py
7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
#!/usr/bin/env python import os import os.path import yaml import time import random import multiprocessing import RPi.GPIO as GPIO from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1 = {'EN': 27, 'input1': 19, 'input2': 16} Motor2 = {'EN': 22, 'input1': 26, 'input2': 20} for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml() if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if servo == None: print('close') exit() Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize = len(pin) for i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i in range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]: update = -update for deg in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c, d): for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) 
changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r = select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i in range(0,15): r = random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln = len(select) for i in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print (' i got value of t is : ',t) if(t==0): random0() elif(t==1): random1() elif(t==2): random2() elif(t==3): random3() else: randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech): t = int(len(speech)/15) print ('Offline t value is : ',t) p1 = multiprocessing.Process(target=expression,args=[t]) p1.start() say(speech)
[((11, 0, 11, 23), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', ({(11, 17, 11, 22): '(False)'}, {}), '(False)', True, 'import RPi.GPIO as GPIO\n'), ((12, 0, 12, 22), 'RPi.GPIO.setmode', 'GPIO.setmode', ({(12, 13, 12, 21): 'GPIO.BCM'}, {}), '(GPIO.BCM)', True, 'import RPi.GPIO as GPIO\n'), ((22, 6, 22, 33), 'RPi.GPIO.PWM', 'GPIO.PWM', ({(22, 15, 22, 27): "Motor1['EN']", (22, 29, 22, 32): '100'}, {}), "(Motor1['EN'], 100)", True, 'import RPi.GPIO as GPIO\n'), ((23, 6, 23, 33), 'RPi.GPIO.PWM', 'GPIO.PWM', ({(23, 15, 23, 27): "Motor2['EN']", (23, 29, 23, 32): '100'}, {}), "(Motor2['EN'], 100)", True, 'import RPi.GPIO as GPIO\n'), ((29, 7, 29, 28), 'adafruit_servokit.ServoKit', 'ServoKit', (), '', False, 'from adafruit_servokit import ServoKit\n'), ((19, 4, 19, 35), 'RPi.GPIO.setup', 'GPIO.setup', ({(19, 15, 19, 24): 'Motor1[x]', (19, 26, 19, 34): 'GPIO.OUT'}, {}), '(Motor1[x], GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((20, 4, 20, 35), 'RPi.GPIO.setup', 'GPIO.setup', ({(20, 15, 20, 24): 'Motor2[x]', (20, 26, 20, 34): 'GPIO.OUT'}, {}), '(Motor2[x], GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((31, 29, 31, 63), 'os.path.join', 'os.path.join', ({(31, 42, 31, 50): '__file__', (31, 52, 31, 56): '""".."""', (31, 58, 31, 62): '""".."""'}, {}), "(__file__, '..', '..')", False, 'import os\n'), ((68, 4, 68, 36), 'RPi.GPIO.setup', 'GPIO.setup', ({(68, 15, 68, 25): 'GpioPin[i]', (68, 27, 68, 35): 'GPIO.OUT'}, {}), '(GpioPin[i], GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((117, 4, 117, 43), 'RPi.GPIO.output', 'GPIO.output', ({(117, 16, 117, 32): "Motor1['input1']", (117, 34, 117, 42): 'GPIO.LOW'}, {}), "(Motor1['input1'], GPIO.LOW)", True, 'import RPi.GPIO as GPIO\n'), ((118, 4, 118, 43), 'RPi.GPIO.output', 'GPIO.output', ({(118, 16, 118, 32): "Motor1['input2']", (118, 34, 118, 42): 'GPIO.LOW'}, {}), "(Motor1['input2'], GPIO.LOW)", True, 'import RPi.GPIO as GPIO\n'), ((119, 4, 119, 43), 'RPi.GPIO.output', 'GPIO.output', ({(119, 16, 119, 32): "Motor2['input1']", (119, 34, 119, 42): 'GPIO.LOW'}, {}), "(Motor2['input1'], GPIO.LOW)", True, 'import RPi.GPIO as GPIO\n'), ((120, 4, 120, 43), 'RPi.GPIO.output', 'GPIO.output', ({(120, 16, 120, 32): "Motor2['input2']", (120, 34, 120, 42): 'GPIO.LOW'}, {}), "(Motor2['input2'], GPIO.LOW)", True, 'import RPi.GPIO as GPIO\n'), ((188, 8, 188, 29), 'random.randrange', 'random.randrange', ({(188, 25, 188, 26): '1', (188, 27, 188, 28): '3'}, {}), '(1, 3)', False, 'import random\n'), ((267, 9, 267, 60), 'multiprocessing.Process', 'multiprocessing.Process', (), '', False, 'import multiprocessing\n'), ((269, 4, 269, 15), 'talk.say', 'say', ({(269, 8, 269, 14): 'speech'}, {}), '(speech)', False, 'from talk import say\n'), ((35, 16, 35, 55), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((51, 22, 51, 61), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((71, 17, 71, 40), 'RPi.GPIO.PWM', 'GPIO.PWM', ({(71, 26, 71, 36): 'GpioPin[i]', (71, 37, 71, 39): '(50)'}, {}), '(GpioPin[i], 50)', True, 'import RPi.GPIO as GPIO\n'), ((91, 8, 91, 25), 'time.sleep', 'time.sleep', ({(91, 19, 91, 24): 'time1'}, {}), '(time1)', False, 'import time\n'), ((123, 8, 123, 48), 'RPi.GPIO.output', 'GPIO.output', ({(123, 20, 123, 36): "Motor1['input1']", (123, 38, 123, 47): 'GPIO.HIGH'}, {}), "(Motor1['input1'], GPIO.HIGH)", True, 'import RPi.GPIO as GPIO\n'), ((125, 8, 125, 48), 'RPi.GPIO.output', 'GPIO.output', ({(125, 20, 125, 36): "Motor1['input2']", (125, 38, 125, 47): 'GPIO.HIGH'}, {}), "(Motor1['input2'], GPIO.HIGH)", True, 'import RPi.GPIO as GPIO\n'), ((127, 
8, 127, 48), 'RPi.GPIO.output', 'GPIO.output', ({(127, 20, 127, 36): "Motor2['input1']", (127, 38, 127, 47): 'GPIO.HIGH'}, {}), "(Motor2['input1'], GPIO.HIGH)", True, 'import RPi.GPIO as GPIO\n'), ((129, 8, 129, 48), 'RPi.GPIO.output', 'GPIO.output', ({(129, 20, 129, 36): "Motor2['input2']", (129, 38, 129, 47): 'GPIO.HIGH'}, {}), "(Motor2['input2'], GPIO.HIGH)", True, 'import RPi.GPIO as GPIO\n'), ((142, 8, 142, 23), 'time.sleep', 'time.sleep', ({(142, 19, 142, 22): '(0.5)'}, {}), '(0.5)', False, 'import time\n'), ((148, 8, 148, 23), 'time.sleep', 'time.sleep', ({(148, 19, 148, 22): '(0.5)'}, {}), '(0.5)', False, 'import time\n'), ((154, 8, 154, 24), 'time.sleep', 'time.sleep', ({(154, 19, 154, 23): '(0.08)'}, {}), '(0.08)', False, 'import time\n'), ((156, 8, 156, 24), 'time.sleep', 'time.sleep', ({(156, 19, 156, 23): '(0.08)'}, {}), '(0.08)', False, 'import time\n'), ((161, 8, 161, 23), 'time.sleep', 'time.sleep', ({(161, 19, 161, 22): '(0.2)'}, {}), '(0.2)', False, 'import time\n'), ((163, 8, 163, 23), 'time.sleep', 'time.sleep', ({(163, 19, 163, 22): '(0.2)'}, {}), '(0.2)', False, 'import time\n'), ((176, 8, 176, 36), 'random.randrange', 'random.randrange', ({(176, 25, 176, 26): '(1)', (176, 27, 176, 35): '(10000000)'}, {}), '(1, 10000000)', False, 'import random\n'), ((42, 12, 42, 33), 'yaml.dump', 'yaml.dump', ({(42, 22, 42, 27): 'servo', (42, 28, 42, 32): 'conf'}, {}), '(servo, conf)', False, 'import yaml\n'), ((44, 12, 44, 29), 'yaml.dump', 'yaml.dump', ({(44, 22, 44, 23): 's', (44, 24, 44, 28): 'conf'}, {}), '(s, conf)', False, 'import yaml\n'), ((111, 12, 111, 32), 'time.sleep', 'time.sleep', ({(111, 23, 111, 31): 'duration'}, {}), '(duration)', False, 'import time\n'), ((228, 12, 228, 39), 'random.randrange', 'random.randrange', ({(228, 29, 228, 30): '(1)', (228, 31, 228, 38): '(1000000)'}, {}), '(1, 1000000)', False, 'import random\n')]
politbuero-kampagnen/onegov-cloud
src/onegov/translator_directory/models/language.py
20148bf321b71f617b64376fe7249b2b9b9c4aa9
from uuid import uuid4

from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session

from onegov.core.orm import Base
from onegov.core.orm.types import UUID


spoken_association_table = Table(
    'spoken_lang_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)

written_association_table = Table(
    'written_lang_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)

mother_tongue_association_table = Table(
    'mother_tongue_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)


class Language(Base):

    __tablename__ = 'languages'

    __table_args__ = (
        Index('unique_name', 'name', unique=True),
    )

    id = Column(UUID, primary_key=True, default=uuid4)
    name = Column(Text, nullable=False)

    @property
    def speakers_count(self):
        session = object_session(self)
        return session.query(
            spoken_association_table).filter_by(lang_id=self.id).count()

    @property
    def writers_count(self):
        session = object_session(self)
        return session.query(
            written_association_table).filter_by(lang_id=self.id).count()

    @property
    def native_speakers_count(self):
        """Having it as mother tongue..."""
        session = object_session(self)
        return session.query(
            mother_tongue_association_table).filter_by(lang_id=self.id).count()

    @property
    def deletable(self):
        return (
            self.speakers_count
            + self.writers_count
            + self.native_speakers_count
        ) == 0
[((52, 9, 52, 54), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((53, 11, 53, 39), 'sqlalchemy.Column', 'Column', (), '', False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((16, 8, 16, 36), 'sqlalchemy.ForeignKey', 'ForeignKey', ({(16, 19, 16, 35): '"""translators.id"""'}, {}), "('translators.id')", False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((18, 28, 18, 54), 'sqlalchemy.ForeignKey', 'ForeignKey', ({(18, 39, 18, 53): '"""languages.id"""'}, {}), "('languages.id')", False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((27, 8, 27, 36), 'sqlalchemy.ForeignKey', 'ForeignKey', ({(27, 19, 27, 35): '"""translators.id"""'}, {}), "('translators.id')", False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((29, 28, 29, 54), 'sqlalchemy.ForeignKey', 'ForeignKey', ({(29, 39, 29, 53): '"""languages.id"""'}, {}), "('languages.id')", False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((38, 8, 38, 36), 'sqlalchemy.ForeignKey', 'ForeignKey', ({(38, 19, 38, 35): '"""translators.id"""'}, {}), "('translators.id')", False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((40, 28, 40, 54), 'sqlalchemy.ForeignKey', 'ForeignKey', ({(40, 39, 40, 53): '"""languages.id"""'}, {}), "('languages.id')", False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((49, 8, 49, 49), 'sqlalchemy.Index', 'Index', (), '', False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((57, 18, 57, 38), 'sqlalchemy.orm.object_session', 'object_session', ({(57, 33, 57, 37): 'self'}, {}), '(self)', False, 'from sqlalchemy.orm import object_session\n'), ((63, 18, 63, 38), 'sqlalchemy.orm.object_session', 'object_session', ({(63, 33, 63, 37): 'self'}, {}), '(self)', False, 'from sqlalchemy.orm import object_session\n'), ((70, 18, 70, 38), 'sqlalchemy.orm.object_session', 'object_session', ({(70, 33, 70, 37): 'self'}, {}), '(self)', False, 'from sqlalchemy.orm import object_session\n')]
djemeljanovs/tfjs
tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import re from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import tensor_util # Custom op name for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The grappler op name for fused MatMul which starts with '_' FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map, name): """Pulls a node def from a dictionary for a given name. Args: node_map: Dictionary containing an entry indexed by name for every node. name: Identifies the node we want to find. Returns: NodeDef of the node with the given name. Raises: ValueError: If the node isn't present in the dictionary. """ stripped_name = node_name_from_input(name) if stripped_name not in node_map: raise ValueError("No node named '%s' found in map." % name) return node_map[stripped_name] def values_from_const(node_def): """Extracts the values from a const NodeDef as a numpy ndarray. Args: node_def: Const NodeDef that has the values we want to access. Returns: Numpy ndarray containing the values. Raises: ValueError: If the node isn't a Const. """ if node_def.op != "Const": raise ValueError( "Node named '%s' should be a Const op for values_from_const." % node_def.name) input_tensor = node_def.attr["value"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to scale by gamma after normalization. def scale_after_normalization(node): if node.op == "BatchNormWithGlobalNormalization": return node.attr["scale_after_normalization"].b return True def node_name_from_input(node_name): """Strips off ports and other decorations to get the underlying node name.""" if node_name.startswith("^"): node_name = node_name[1:] m = re.search(r"(.*):\d+$", node_name) if m: node_name = m.group(1) return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): """Clean up the graph def by removing the skipped nodes and clean up the nodes with inputs that have been removed. Args: input_graph_def: GraphDef object to be cleaned. node_to_skip: Dict with node names to be skipped. inputs_to_remove: List of nodes to be removed from inputs of all nodes. Returns: GraphDef that has been cleaned. """ result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if node.name in nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in inputs_to_remove: for i, input_node in enumerate(new_node.input): if input_node == value.name: new_node.input[i] = value.input[0] result_graph_def.node.extend([new_node]) result_graph_def.library.CopyFrom(input_graph_def.library) result_graph_def.versions.CopyFrom(input_graph_def.versions) return result_graph_def
[((62, 17, 62, 54), 'tensorflow.python.framework.tensor_util.MakeNdarray', 'tensor_util.MakeNdarray', ({(62, 41, 62, 53): 'input_tensor'}, {}), '(input_tensor)', False, 'from tensorflow.python.framework import tensor_util\n'), ((75, 6, 75, 40), 're.search', 're.search', ({(75, 16, 75, 28): '"""(.*):\\\\d+$"""', (75, 30, 75, 39): 'node_name'}, {}), "('(.*):\\\\d+$', node_name)", False, 'import re\n'), ((92, 21, 92, 41), 'tensorflow.core.framework.graph_pb2.GraphDef', 'graph_pb2.GraphDef', ({}, {}), '()', False, 'from tensorflow.core.framework import graph_pb2\n'), ((96, 15, 96, 37), 'tensorflow.core.framework.node_def_pb2.NodeDef', 'node_def_pb2.NodeDef', ({}, {}), '()', False, 'from tensorflow.core.framework import node_def_pb2\n')]
apple/ml-cvnets
loss_fn/classification_loss_fns/binary_cross_entropy.py
84d992f413e52c0468f86d23196efd9dad885e6f
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#

from torch.nn import functional as F
from torch import Tensor
import argparse

from . import register_classification_loss_fn
from .. import BaseCriteria


@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
    """Binary CE for classification tasks"""

    def __init__(self, opts, *args, **kwargs) -> None:
        super().__init__()

    def forward(
        self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
    ) -> Tensor:
        if target.dim() != prediction.dim():
            target = F.one_hot(target, num_classes=prediction.shape[-1])

        return F.binary_cross_entropy_with_logits(
            input=prediction,
            target=target.to(prediction.dtype),
            weight=None,
            reduction="sum",
        )

    def __repr__(self) -> str:
        return "{}()".format(self.__class__.__name__)
[((25, 21, 25, 72), 'torch.nn.functional.one_hot', 'F.one_hot', (), '', True, 'from torch.nn import functional as F\n')]
lakshyarawal/pythonPractice
Sorting/insertion_sort.py
4b400342198a8270c5ac0c6306afb555f927c6c1
""" Insertion Sort Algorithm:""" """Implementation""" def insertion_sort(arr) -> list: n = len(arr) for i in range(1, n): swap_index = i for j in range(i-1, -1, -1): if arr[swap_index] < arr[j]: arr[swap_index], arr[j] = arr[j], arr[swap_index] swap_index -= 1 else: break return arr def main(): arr_input = [10, 5, 30, 1, 2, 5, 10, 10] a2 = insertion_sort(arr_input) print(a2) # Using the special variable # __name__ if __name__ == "__main__": main()
[]
felixsc1/nipype
nipype/interfaces/spm/__init__.py
e722d6170593583f16ddfcb95473e5d30b5f1d7c
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""

from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
                   scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
                         Coregister, Normalize, Normalize12, Segment, Smooth,
                         NewSegment, DARTEL, DARTELNorm2MNI, CreateWarped,
                         VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
                    OneSampleTTestDesign, TwoSampleTTestDesign,
                    PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
                    ApplyInverseDeformation, ResliceToReference, DicomImport)
[]
tobloef/neural-network
network.py
bd05a8b9eccc0f5a973782247d39f9b5aa33156c
import numpy as np from mathUtils import * class Network(object): """ Model for a feedforward Neural Network that use backpropagation with stochastic gradient decent. """ def __init__(self, layerSizes, biasVectors, weightMatrices): """ Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer. """ self.layerSizes = layerSizes self.biasVectors = biasVectors self.weightMatrices = weightMatrices @staticmethod def generateRandomNetwork(layerSizes): """ Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are generated using a Gaussian distribution, so the results are more probable to be around 0. """ biasVectors = [] """Generate biases for each neuron in each layer, except the input layer.""" for size in layerSizes[1:]: """ np.random.randn generates arrays of arrays of random numbers, based on the paramters. np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers. """ biasVectors.append(np.random.randn(size, 1)) """Generate weights for connections between layers.""" weightMatrices = [] for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size)) return Network(layerSizes, biasVectors, weightMatrices) def getOutputs(self, inputs): """Return a vector of the network's outputs based on the given inputs, using feedforward.""" activations = inputs for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): """ For every layer, get the bias vector and the weight matrix. Then get dot product between the weight matrix and the output vector and add the bias vector. This is the activation vector for the current layer. """ zVector = np.dot(weightMatrix, activations) + biasVector activations = sigmoid(zVector) return activations def train(self, data, epochs, batchSize, rate, testData=None): """ Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training are used to reduce the training time. The training date is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch. """ print("Training network with shape {}, batch size {} and learning rate {} for {} epochs...".format(self.layerSizes, batchSize, rate, epochs)) for e in range(epochs): np.random.shuffle(data) batches = [] for i in range(0, len(data), batchSize): batches.append(data[i:i+batchSize]) for batch in batches: self._tuneNetwork(batch, rate) if (testData): result = self._evaluate(testData) print("Epoch #{} completed with {:.2f}% correctness.".format(e+1, 100/len(testData)*result)) else: print("Epoch #{} completed.".format(e)) def _tuneNetwork(self, batch, rate): """ Tune the weights and biases of the network by using backpropagation with gradient descend. """ """ Setup matrix and vector based on the weight matrix and bias vector filled with zeroes. This is used for storing each change to make for each vector, for each set of training date. 
""" sumBiasVectors = [] for biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices = [] for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in batch: """ Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to a set of matrix/vector totalling the changes needed from all the training data. """ deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors = [] for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices = [] for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices """ Take each change for each set of training data, get the average of these and subtract them from the current weights and biases. Then use these as the new weights and biases. """ newBiasVectors = [] for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector) newWeightMatrices = [] for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices def _backpropagate(self, inputs, expected): """ Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data. """ deltaBiasVectors = [] for biasVector in self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = [] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) """Store all activations for the entire network, starting with the input layer.""" activationVector = inputs activationVectors = [inputs] """Find the z-vector for layer in the network""" zVectors = [] for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector = sigmoid(zVector) activationVectors.append(activationVector) """ * Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias. * Then move onto each hidden layer and the input layer. 
""" deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l in range(-2, -len(self.layerSizes), -1): # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead weightMatrix = self.weightMatrices[l+1].transpose() sigmoidDeriv = sigmoidDerivative(zVectors[l]) deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self, testData): """Test the network with the specified test data and return the number of correct guesses.""" correctGuesses = 0 for inputs, expected in testData: """Increment correct guesses if the most active output is the expected one.""" outputs = self.getOutputs(inputs) guess = np.argmax(outputs) if (guess == expected): correctGuesses += 1 return correctGuesses
[((57, 12, 57, 35), 'numpy.random.shuffle', 'np.random.shuffle', ({(57, 30, 57, 34): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((152, 20, 152, 38), 'numpy.argmax', 'np.argmax', ({(152, 30, 152, 37): 'outputs'}, {}), '(outputs)', True, 'import numpy as np\n'), ((31, 31, 31, 55), 'numpy.random.randn', 'np.random.randn', ({(31, 47, 31, 51): 'size', (31, 53, 31, 54): '(1)'}, {}), '(size, 1)', True, 'import numpy as np\n'), ((35, 34, 35, 65), 'numpy.random.randn', 'np.random.randn', ({(35, 50, 35, 58): 'prevSize', (35, 60, 35, 64): 'size'}, {}), '(prevSize, size)', True, 'import numpy as np\n'), ((46, 22, 46, 55), 'numpy.dot', 'np.dot', ({(46, 29, 46, 41): 'weightMatrix', (46, 43, 46, 54): 'activations'}, {}), '(weightMatrix, activations)', True, 'import numpy as np\n'), ((79, 34, 79, 60), 'numpy.zeros', 'np.zeros', ({(79, 43, 79, 59): 'biasVector.shape'}, {}), '(biasVector.shape)', True, 'import numpy as np\n'), ((82, 37, 82, 65), 'numpy.zeros', 'np.zeros', ({(82, 46, 82, 64): 'weightMatrix.shape'}, {}), '(weightMatrix.shape)', True, 'import numpy as np\n'), ((115, 36, 115, 62), 'numpy.zeros', 'np.zeros', ({(115, 45, 115, 61): 'biasVector.shape'}, {}), '(biasVector.shape)', True, 'import numpy as np\n'), ((118, 39, 118, 67), 'numpy.zeros', 'np.zeros', ({(118, 48, 118, 66): 'weightMatrix.shape'}, {}), '(weightMatrix.shape)', True, 'import numpy as np\n'), ((125, 22, 125, 60), 'numpy.dot', 'np.dot', ({(125, 29, 125, 41): 'weightMatrix', (125, 43, 125, 59): 'activationVector'}, {}), '(weightMatrix, activationVector)', True, 'import numpy as np\n'), ((141, 30, 141, 67), 'numpy.dot', 'np.dot', ({(141, 37, 141, 49): 'weightMatrix', (141, 51, 141, 66): 'deltaBiasVector'}, {}), '(weightMatrix, deltaBiasVector)', True, 'import numpy as np\n')]
phixMe/marquez
examples/airflow/dags/etl_orders_7_days.py
06d71635369893b371a8a9c9e7023f11d7cbb1f8
from datetime import datetime
from marquez_airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.utils.dates import days_ago

default_args = {
    'owner': 'datascience',
    'depends_on_past': False,
    'start_date': days_ago(1),
    'email_on_failure': False,
    'email_on_retry': False,
    'email': ['[email protected]']
}

dag = DAG(
    'etl_orders_7_days',
    schedule_interval='@hourly',
    catchup=False,
    default_args=default_args,
    description='Loads newly placed orders weekly.'
)

t1 = PostgresOperator(
    task_id='if_not_exists',
    postgres_conn_id='food_delivery_db',
    sql='''
    CREATE TABLE IF NOT EXISTS orders_7_days (
      order_id INTEGER REFERENCES orders(id),
      placed_on TIMESTAMP NOT NULL,
      discount_id INTEGER REFERENCES discounts(id),
      menu_id INTEGER REFERENCES menus(id),
      restaurant_id INTEGER REFERENCES restaurants(id),
      menu_item_id INTEGER REFERENCES menu_items(id),
      category_id INTEGER REFERENCES categories(id)
    );''',
    dag=dag
)

t2 = PostgresOperator(
    task_id='tuncate',
    postgres_conn_id='food_delivery_db',
    sql='TRUNCATE TABLE orders_7_days;',
    dag=dag
)

t3 = PostgresOperator(
    task_id='insert',
    postgres_conn_id='food_delivery_db',
    sql='''
    INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)
    SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id
    FROM orders AS o
    INNER JOIN menu_items AS mi
      ON mi.id = o.menu_item_id
    INNER JOIN categories AS c
      ON c.id = mi.category_id
    INNER JOIN menus AS m
      ON m.id = c.menu_id
    WHERE o.placed_on >= NOW() - interval '7 days'
    ''',
    dag=dag
)

t1 >> t2 >> t3
[((15, 6, 21, 1), 'marquez_airflow.DAG', 'DAG', (), '', False, 'from marquez_airflow import DAG\n'), ((23, 5, 37, 1), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', (), '', False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((39, 5, 44, 1), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', (), '', False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((46, 5, 62, 1), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', (), '', False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((9, 18, 9, 29), 'airflow.utils.dates.days_ago', 'days_ago', ({(9, 27, 9, 28): '(1)'}, {}), '(1)', False, 'from airflow.utils.dates import days_ago\n')]
marianarmorgado/python-starter
sample/pizza.py
8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
# store information about a pizza being ordered
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra vegan cheese']
}

# summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza" +
      "with the following toppings:")

for topping in pizza['toppings']:
    print("\t" + topping)
[]
Fh-Shadow/Progamando
YouTube/CursoEmVideo/python/ex012.py
f496d83c36e9a079ed06b4e7c34396c57f539de9
# Reads a product's price and prints the price after a 23% discount (prompt strings are in Portuguese).
a = float(input('Qual é o preço do produto? R$'))
d = a - (a * 23 / 100)
print('O produto que custava R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}'
      .format(a, d))
[]
gperdrizet/gansformer
dnnlib/submission/submit.py
c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
# Submit a function to be run either locally or in a computing cluster. # Compared to original StyleGAN implementation, we extend the support for automatic training resumption, # and network recompilation. import copy import inspect import os import pathlib import pickle import platform import pprint import re import shutil import sys import time import traceback from enum import Enum from .. import util from ..util import EasyDict from . import internal class SubmitTarget(Enum): # The target where the function should be run # LOCAL: Run it locally LOCAL = 1 class PathType(Enum): # Determines in which format should a path be formatted # WINDOWS: Format with Windows style # LINUX: Format with Linux/Posix style # AUTO: Use current OS type to select either WINDOWS or LINUX WINDOWS = 1 LINUX = 2 AUTO = 3 class PlatformExtras: # A mixed bag of values used by dnnlib heuristics # Attributes: # data_reader_buffer_size: Used by DataReader to size internal shared memory buffers # data_reader_process_count: Number of worker processes to spawn (zero for single # thread operation) def __init__(self): self.data_reader_buffer_size = 1<<30 # 1 GB self.data_reader_process_count = 0 # single threaded default _user_name_override = None class SubmitConfig(util.EasyDict): # Strongly typed config dict needed to submit runs # Attributes: # run_dir_root: Path to the run dir root. Can be optionally templated with tags # Needs to always be run through get_path_from_template # run_desc: Description of the run. Will be used in the run dir and task name # run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir # run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will # be the src directory inside the run dir # submit_target: Submit target enum value. Used to select where the run is actually launched # num_gpus: Number of GPUs used/requested for the run # print_info: Whether to print debug information when submitting # local.do_not_copy_source_files: Do not copy source files from the working directory to the # run dir. # run_id: Automatically populated value during submit # run_name: Automatically populated value during submit # run_dir: Automatically populated value during submit # run_func_name: Automatically populated value during submit # run_func_kwargs: Automatically populated value during submit # user_name: Automatically populated value during submit. Can be set by the user which will then # override the automatic value # task_name: Automatically populated value during submit # host_name: Automatically populated value during submit # platform_extras: Automatically populated values during submit. 
Used by various dnnlib libraries # such as the DataReader class def __init__(self): super().__init__() # run (set these) self.run_dir_root = "" # should always be passed through get_path_from_template self.run_desc = "" self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode", "_cudacache"] self.run_dir_extra_files = [] # submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False self.nvprof = False self.local = internal.local.TargetOptions() self.datasets = [] # (automatically populated) self.run_id = None self.run_name = None self.run_dir = None self.run_func_name = None self.run_func_kwargs = None self.user_name = None self.task_name = None self.host_name = "localhost" self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: # Replace tags in the given path template and return either Windows or Linux formatted path # automatically select path type depending on running OS if path_type == PathType.AUTO: if platform.system() == "Windows": path_type = PathType.WINDOWS elif platform.system() == "Linux": path_type = PathType.LINUX else: raise RuntimeError("Unknown platform") path_template = path_template.replace("<USERNAME>", get_user_name()) # return correctly formatted path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError("Unknown platform") def get_template_from_path(path: str) -> str: # Convert a normal path back to its template representation path = path.replace("\\", "/") return path def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: # Convert a normal path to template and the convert it back to a normal path with given path type path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str) -> None: # Set the global username override value global _user_name_override _user_name_override = name def get_user_name(): # Get the current user name if _user_name_override is not None: return _user_name_override elif platform.system() == "Windows": return os.getlogin() elif platform.system() == "Linux": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return "unknown" else: raise RuntimeError("Unknown platform") def make_run_dir_path(*paths): # Make a path/filename that resides under the current submit run_dir # Args: # *paths: Path components to be passed to os.path.join # Returns: # A file/dirname rooted at submit_config.run_dir. If there's no # submit_config or run_dir, the base directory is the current # working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))` import dnnlib if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str: # Create a new run dir with increasing ID number at the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if not resume: if os.path.exists(run_dir) and create_new: raise RuntimeError("The run dir already exists! 
({0})".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) -> int: # Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id # Assumes IDs are numbers at the start of the directory names dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile("^\\d+") # match one or more digits at the start of the string run_id = 0 for dir_name in dir_names: m = r.match(dir_name) if m is not None: i = int(m.group()) run_id = max(run_id, i + 1) return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None: # Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb")) with open(os.path.join(run_dir, "submit_config.txt"), "w") as f: pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False) if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert "." in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(".") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True) files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files] files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None: # Wrap the actual run function call for handling logging, exceptions, typing, etc is_local = submit_config.submit_target == SubmitTarget.LOCAL # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing if is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir, "log.txt"), file_mode="a", should_flush = True) else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh) logger = util.Logger(file_name = None, should_flush = True) import dnnlib dnnlib.submit_config = submit_config exit_with_errcode = False try: print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if "submit_config" in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, "log.txt") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer 
sys.exit(1) to happen after we close the logs and create a _finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() # If we hit an error, get out of the script now and signal the error # to whatever process that started this script. if exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = ".stylegan2-cache") return open(file_or_url, "rb") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding = "latin1") def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False, resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None: # Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place. # create_newdir: enforces the creation of a new run directory # resume: resumes a prior experiment using its existing run directory # load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm = None if submit_target == SubmitTarget.LOCAL: farm = internal.local.Target() assert farm is not None # unknown target # Disallow submitting jobs with zero num_gpus if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0): raise RuntimeError("submit_config.num_gpus must be set to a non-zero value") if submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by populating the run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name = "{}-{:05d}-{}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$" if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name) # Farm specific preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir) # In case of resumption, load_config = True to load the prior submit_config file from the directory # (so to maintain the original configuration of the experiment rather than the newly provided # command-line arguments. if load_config: config_file = os.path.join(host_run_dir, "submit_config.pkl") if os.path.exists(config_file): old_submit_config = submit_config submit_config = load_pkl(config_file) submit_config["run_id"] = old_submit_config["run_id"] submit_config["run_name"] = old_submit_config["run_name"] if "resume_pkl" in old_submit_config["run_func_kwargs"]: submit_config["run_func_kwargs"]["resume_pkl"] = old_submit_config["run_func_kwargs"]["resume_pkl"] submit_config["run_func_kwargs"]["resume_kimg"] = old_submit_config["run_func_kwargs"]["resume_kimg"] _populate_run_dir(submit_config, host_run_dir) return farm.submit(submit_config, host_run_dir)
[((168, 11, 168, 61), 'os.path.join', 'os.path.join', ({(168, 24, 168, 52): 'dnnlib.submit_config.run_dir', (168, 54, 168, 60): '*paths'}, {}), '(dnnlib.submit_config.run_dir, *paths)', False, 'import os\n'), ((177, 14, 177, 64), 'os.path.join', 'os.path.join', ({(177, 27, 177, 39): 'run_dir_root', (177, 41, 177, 63): 'submit_config.run_name'}, {}), '(run_dir_root, submit_config.run_name)', False, 'import os\n'), ((191, 8, 191, 27), 're.compile', 're.compile', ({(191, 19, 191, 26): '"""^\\\\d+"""'}, {}), "('^\\\\d+')", False, 'import re\n'), ((298, 20, 298, 48), 'copy.deepcopy', 'copy.deepcopy', ({(298, 34, 298, 47): 'submit_config'}, {}), '(submit_config)', False, 'import copy\n'), ((174, 11, 174, 39), 'os.path.exists', 'os.path.exists', ({(174, 26, 174, 38): 'run_dir_root'}, {}), '(run_dir_root)', False, 'import os\n'), ((175, 8, 175, 33), 'os.makedirs', 'os.makedirs', ({(175, 20, 175, 32): 'run_dir_root'}, {}), '(run_dir_root)', False, 'import os\n'), ((207, 8, 207, 90), 'pprint.pprint', 'pprint.pprint', (), '', False, 'import pprint\n'), ((217, 35, 217, 76), 'os.path.dirname', 'os.path.dirname', ({(217, 51, 217, 75): 'run_func_module_dir_path'}, {}), '(run_func_module_dir_path)', False, 'import os\n'), ((246, 21, 246, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((250, 14, 250, 45), 'inspect.signature', 'inspect.signature', ({(250, 32, 250, 44): 'run_func_obj'}, {}), '(run_func_obj)', False, 'import inspect\n'), ((279, 8, 279, 19), 'sys.exit', 'sys.exit', ({(279, 17, 279, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((290, 15, 290, 53), 'pickle.load', 'pickle.load', (), '', False, 'import pickle\n'), ((323, 11, 323, 69), 're.match', 're.match', ({(323, 20, 323, 43): 'docker_valid_name_regex', (323, 45, 323, 68): 'submit_config.task_name'}, {}), '(docker_valid_name_regex, submit_config.task_name)', False, 'import re\n'), ((333, 22, 333, 69), 'os.path.join', 'os.path.join', ({(333, 35, 333, 47): 'host_run_dir', (333, 49, 333, 68): '"""submit_config.pkl"""'}, {}), "(host_run_dir, 'submit_config.pkl')", False, 'import os\n'), ((334, 11, 334, 38), 'os.path.exists', 'os.path.exists', ({(334, 26, 334, 37): 'config_file'}, {}), '(config_file)', False, 'import os\n'), ((108, 11, 108, 28), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((119, 19, 119, 57), 'pathlib.PureWindowsPath', 'pathlib.PureWindowsPath', ({(119, 43, 119, 56): 'path_template'}, {}), '(path_template)', False, 'import pathlib\n'), ((145, 9, 145, 26), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((146, 15, 146, 28), 'os.getlogin', 'os.getlogin', ({}, {}), '()', False, 'import os\n'), ((167, 28, 167, 39), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((180, 11, 180, 34), 'os.path.exists', 'os.path.exists', ({(180, 26, 180, 33): 'run_dir'}, {}), '(run_dir)', False, 'import os\n'), ((182, 15, 182, 38), 'os.path.exists', 'os.path.exists', ({(182, 30, 182, 37): 'run_dir'}, {}), '(run_dir)', False, 'import os\n'), ((183, 12, 183, 32), 'os.makedirs', 'os.makedirs', ({(183, 24, 183, 31): 'run_dir'}, {}), '(run_dir)', False, 'import os\n'), ((190, 28, 190, 52), 'os.listdir', 'os.listdir', ({(190, 39, 190, 51): 'run_dir_root'}, {}), '(run_dir_root)', False, 'import os\n'), ((205, 36, 205, 78), 'os.path.join', 'os.path.join', ({(205, 49, 205, 56): 'run_dir', (205, 58, 205, 77): '"""submit_config.pkl"""'}, {}), "(run_dir, 'submit_config.pkl')", False, 'import os\n'), ((206, 14, 206, 56), 'os.path.join', 
'os.path.join', ({(206, 27, 206, 34): 'run_dir', (206, 36, 206, 55): '"""submit_config.txt"""'}, {}), "(run_dir, 'submit_config.txt')", False, 'import os\n'), ((225, 20, 225, 54), 'os.path.join', 'os.path.join', ({(225, 33, 225, 40): 'run_dir', (225, 42, 225, 47): '"""src"""', (225, 49, 225, 53): 'f[1]'}, {}), "(run_dir, 'src', f[1])", False, 'import os\n'), ((226, 15, 226, 87), 'os.path.join', 'os.path.join', ({(226, 28, 226, 50): 'dnnlib_module_dir_path', (226, 52, 226, 64): '"""submission"""', (226, 66, 226, 76): '"""internal"""', (226, 78, 226, 86): '"""run.py"""'}, {}), "(dnnlib_module_dir_path, 'submission', 'internal', 'run.py')", False, 'import os\n'), ((226, 89, 226, 120), 'os.path.join', 'os.path.join', ({(226, 102, 226, 109): 'run_dir', (226, 111, 226, 119): '"""run.py"""'}, {}), "(run_dir, 'run.py')", False, 'import os\n'), ((272, 4, 272, 27), 'dnnlib.RunContext.get', 'dnnlib.RunContext.get', ({}, {}), '()', False, 'import dnnlib\n'), ((110, 13, 110, 30), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((121, 19, 121, 55), 'pathlib.PurePosixPath', 'pathlib.PurePosixPath', ({(121, 41, 121, 54): 'path_template'}, {}), '(path_template)', False, 'import pathlib\n'), ((147, 9, 147, 26), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((190, 70, 190, 99), 'os.path.join', 'os.path.join', ({(190, 83, 190, 95): 'run_dir_root', (190, 97, 190, 98): 'd'}, {}), '(run_dir_root, d)', False, 'import os\n'), ((236, 41, 236, 87), 'os.path.join', 'os.path.join', ({(236, 54, 236, 75): 'submit_config.run_dir', (236, 77, 236, 86): '"""log.txt"""'}, {}), "(submit_config.run_dir, 'log.txt')", False, 'import os\n'), ((261, 12, 261, 33), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((263, 22, 263, 68), 'os.path.join', 'os.path.join', ({(263, 35, 263, 56): 'submit_config.run_dir', (263, 58, 263, 67): '"""log.txt"""'}, {}), "(submit_config.run_dir, 'log.txt')", False, 'import os\n'), ((265, 12, 265, 45), 'shutil.copyfile', 'shutil.copyfile', ({(265, 28, 265, 35): 'log_src', (265, 37, 265, 44): 'log_dst'}, {}), '(log_src, log_dst)', False, 'import shutil\n'), ((270, 13, 270, 65), 'os.path.join', 'os.path.join', ({(270, 26, 270, 47): 'submit_config.run_dir', (270, 49, 270, 64): '"""_finished.txt"""'}, {}), "(submit_config.run_dir, '_finished.txt')", False, 'import os\n'), ((256, 100, 256, 111), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((150, 32, 150, 44), 'os.geteuid', 'os.geteuid', ({}, {}), '()', False, 'import os\n')]
zilong305/pycharts
pyecharts/custom/grid.py
6cf1bb7f17001a36da6a766615a78b1dbef5918f
#!/usr/bin/env python # coding=utf-8 from pyecharts.option import grid class Grid(object): def __init__(self): self._chart = None self._js_dependencies = set() def add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): """ :param chart: chart instance :param grid_width: Width of grid component. Adaptive by default. :param grid_height: Height of grid component. Adaptive by default. :param grid_top: Distance between grid component and the top side of the container. :param grid_bottom: Distance between grid component and the bottom side of the container. :param grid_left: Distance between grid component and the left side of the container. :param grid_right: Distance between grid component and the right side of the container. :return: """ if self._chart is None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) if _grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is only identify for every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s in self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): """ :param series: series data :return: """ _series, _xaxis, _yaxis, _legend, _title = series for s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title def render(self, path="render.html"): """ :param path: :return: """ self._chart.render(path) def render_embed(self): """ :return: """ return self._chart.render_embed() def show_config(self): """ :return: """ import pprint return pprint.pprint(self._chart._option) @property def chart(self): """ :return: """ return self._chart def _repr_html_(self): """ :return: """ return self._chart._repr_html_()
[((115, 15, 115, 49), 'pprint.pprint', 'pprint.pprint', ({(115, 29, 115, 48): 'self._chart._option'}, {}), '(self._chart._option)', False, 'import pprint\n'), ((43, 20, 43, 95), 'pyecharts.option.grid', 'grid', ({(43, 25, 43, 35): 'grid_width', (43, 37, 43, 48): 'grid_height', (43, 50, 43, 58): 'grid_top', (43, 60, 43, 71): 'grid_bottom', (43, 73, 43, 82): 'grid_left', (43, 84, 43, 94): 'grid_right'}, {}), '(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)', False, 'from pyecharts.option import grid\n'), ((77, 20, 77, 95), 'pyecharts.option.grid', 'grid', ({(77, 25, 77, 35): 'grid_width', (77, 37, 77, 48): 'grid_height', (77, 50, 77, 58): 'grid_top', (77, 60, 77, 71): 'grid_bottom', (77, 73, 77, 82): 'grid_left', (77, 84, 77, 94): 'grid_right'}, {}), '(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)', False, 'from pyecharts.option import grid\n')]
devinmcgloin/smooch
smooch/conversations.py
c9561c3e7f1546efc58daa472b70f738d0d35e13
import logging

from .endpoint import ask


def send_message(user_id, message, sent_by_maker=True):
    if not valid_args(user_id, message):
        logging.warning("send message called with invalid args user_id={} message={}".format(user_id, message))
        return

    logging.debug("Sending message: user_id={0} message={1} sent_by_maker={2}".format(user_id, message, sent_by_maker))

    role = "appMaker"
    if not sent_by_maker:
        role = "appUser"

    data = {"text": message, "role": role}

    return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')


def get_conversation(user_id):
    if not user_id:
        logging.warning("get conversation called with invalid arg user_id={}".format(user_id))
        return

    logging.debug("Get conversation: user_id={}".format(user_id))

    return ask('appusers/{0}/conversation'.format(user_id), {}, 'get')


def request_payment(user_id, message, options):
    """Note that amount is a integer which specifies the amount of cents in the transaction
    Smooch will default to the currency specified in your account settings."""
    if not valid_args(user_id, message, options):
        logging.warning("request payment called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return

    role = "appMaker"
    buttons = []

    for short_text, result in options:
        buttons.append({
            "type": "buy",
            "text": short_text,
            "amount": result})

    data = {"text": message, "role": role, "actions": buttons}

    return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')


def send_links(user_id, message, options):
    """Sends a series of links. The options field is a dictionary in which the keys are descriptions
    and values uris"""
    if not valid_args(user_id, message, options):
        logging.warning("send links called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return

    role = "appMaker"
    buttons = []

    for short_text, result in options:
        buttons.append({
            "type": "link",
            "text": short_text,
            "uri": result})

    data = {"text": message, "role": role, "actions": buttons}

    return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')


def send_postbacks(user_id, message, options):
    """Sends a series of options that you can listen for on your webhook.
    The options field is a dictionary in which the keys are descriptions and values the postback payload.
    You need to set up a webhook to listen for the postback."""
    if not valid_args(user_id, message, options):
        logging.warning("send postback called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return

    role = "appMaker"
    buttons = []

    for short_text, result in options:
        buttons.append({
            "type": "postback",
            "text": short_text,
            "payload": result
        })

    data = {"text": message, "role": role, "actions": buttons}

    return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')


def send_buttons(user_id, message, options):
    """Options is a list of tuples in which the first element is the type of the button,
    second the short text, and third the result for the specified type."""
    if not valid_args(user_id, message, options):
        logging.warning("send buttons called with invalid args user_id={} message={} options={}"
                        .format(user_id, message, options))
        return

    role = "appMaker"
    buttons = []

    for text, kind, result in options:
        buttons.append({
            "type": kind,
            "text": text,
            "payload": result
        })

    data = {"text": message, "role": role, "actions": buttons}

    return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')


def valid_args(user_id, message, options=None):
    if options is not None:
        if user_id and message and options and type(options) is list:
            return True
        return False
    else:
        if user_id and message:
            return True
        return False
[]
Sharkbyteprojects/IRIS-ML_and_Deep-Learning
cifar/evalit.py
f0e053cf7a0e69019bbba36e6da3e60d76105fe9
import keras
from keras.models import load_model
from PIL import Image
import matplotlib.pylab as plt
import numpy as np
import zipfile
print("Extract")
zip_ref = zipfile.ZipFile("./asset.zip", 'r')
zip_ref.extractall(".")
zip_ref.close()
print("Load Model")
model=load_model("cifar-model.h5")
CIFAR_10_CLASSES=["Plane","Car","bird","cat","deer","dog","frog","horse","ship","truck"]
def calc(imname):
    test_image =Image.open("asset/"+imname)
    test_image=test_image.resize((32,32),Image.ANTIALIAS)
    test_image=np.array(test_image,dtype="float32")
    test_image/=255
    test_image=test_image.reshape(-1,32,32,3)
    predictions=model.predict(test_image)
    index_max_pred=np.argmax(predictions)
    plt.title("Complete: {}".format(CIFAR_10_CLASSES[index_max_pred]))
    plt.imshow(test_image[0].reshape(32,32,3))
    print(predictions)
    plt.show()
print("START TEST")
calc("lkw-image.jpg")
calc("cat.jpg")
calc("frog.jpg")
calc("fog.jpg")
calc("lfog.jpg")
calc("d.jpg")
calc("b.jpg")
calc("bs.jpg")
calc("plapper.jpg")
calc("ds.jpg")
print("Complete")
print("End")
quit(0)
[((8, 10, 8, 45), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(8, 26, 8, 39): '"""./asset.zip"""', (8, 41, 8, 44): '"""r"""'}, {}), "('./asset.zip', 'r')", False, 'import zipfile\n'), ((12, 6, 12, 34), 'keras.models.load_model', 'load_model', ({(12, 17, 12, 33): '"""cifar-model.h5"""'}, {}), "('cifar-model.h5')", False, 'from keras.models import load_model\n'), ((15, 16, 15, 43), 'PIL.Image.open', 'Image.open', ({(15, 27, 15, 42): "'asset/' + imname"}, {}), "('asset/' + imname)", False, 'from PIL import Image\n'), ((17, 15, 17, 51), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((21, 19, 21, 41), 'numpy.argmax', 'np.argmax', ({(21, 29, 21, 40): 'predictions'}, {}), '(predictions)', True, 'import numpy as np\n'), ((25, 4, 25, 14), 'matplotlib.pylab.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pylab as plt\n')]
samiksha-patil/Knowledge-Sharing-Platform
tt/urls.py
22e61a659d5ad63fe656fa639dc897cbdebad4fe
""" tt URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ # Uncomment next two lines to enable admin: from django.contrib import admin from django.urls import path, include from users import views as user_views from django.contrib.auth import views as auth_views from upload import views as upload_views from django.conf import settings from django.conf.urls.static import static urlpatterns = [ # Uncomment the next line to enable the admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[((33, 4, 33, 35), 'django.urls.path', 'path', ({(33, 9, 33, 17): '"""admin/"""', (33, 19, 33, 34): 'admin.site.urls'}, {}), "('admin/', admin.site.urls)", False, 'from django.urls import path, include\n'), ((35, 4, 35, 59), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((38, 4, 38, 56), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((39, 4, 39, 57), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((40, 4, 40, 67), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((44, 20, 44, 81), 'django.conf.urls.static.static', 'static', (), '', False, 'from django.conf.urls.static import static\n'), ((34, 13, 34, 33), 'django.urls.include', 'include', ({(34, 21, 34, 32): '"""blog.urls"""'}, {}), "('blog.urls')", False, 'from django.urls import path, include\n'), ((36, 18, 36, 80), 'django.contrib.auth.views.LoginView.as_view', 'auth_views.LoginView.as_view', (), '', True, 'from django.contrib.auth import views as auth_views\n'), ((37, 19, 37, 83), 'django.contrib.auth.views.LogoutView.as_view', 'auth_views.LogoutView.as_view', (), '', True, 'from django.contrib.auth import views as auth_views\n')]
danihodovic/dht
src/git/cmd.py
636f54d70f8c6ca60ab48f2815b3e9e1a336d78f
import os

import click

os.environ["GIT_PYTHON_REFRESH"] = "quiet"


@click.group()
def git():
    pass
[((8, 1, 8, 14), 'click.group', 'click.group', ({}, {}), '()', False, 'import click\n')]
Tymec/Playground
TwitterImage2JPG.py
5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
import glob
import os


def main():
    os.chdir("F:/Downloads")
    extensions = ["*.jpg_large", "*.png_large", "*.jpg_orig"]

    file_list = list()
    for extension in extensions:
        file_list = file_list + glob.glob(extension)

    for file in file_list:
        for extension in extensions:
            new_extension = extension.replace('*', '')
            if file.endswith(new_extension):
                new_name = file.replace(new_extension, '') + ".jpg"
                os.rename(file, new_name)
    print("Done!")


if __name__ == "__main__":
    main()
[((6, 4, 6, 28), 'os.chdir', 'os.chdir', ({(6, 13, 6, 27): '"""F:/Downloads"""'}, {}), "('F:/Downloads')", False, 'import os\n'), ((11, 32, 11, 52), 'glob.glob', 'glob.glob', ({(11, 42, 11, 51): 'extension'}, {}), '(extension)', False, 'import glob\n'), ((18, 16, 18, 41), 'os.rename', 'os.rename', ({(18, 26, 18, 30): 'file', (18, 32, 18, 40): 'new_name'}, {}), '(file, new_name)', False, 'import os\n')]
Riccardo95Facchini/DIL-2019
Data Analysis/classification.py
febeda55fd647943a1b8c49b3c5192fcd69fdaf5
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report

#EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB
input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv'
dataset = pd.read_csv(input_file, sep=';', header = 0)
dataset.head()

#DELETE NEXT CALLS DATA
dataset = dataset.drop("contact", axis=1)
dataset = dataset.drop("day", axis=1)
dataset = dataset.drop("month", axis=1)
dataset = dataset.drop("duration", axis=1)
dataset = dataset.drop("campaign", axis=1)
dataset = dataset.drop("pdays", axis=1)
dataset = dataset.drop("previous", axis=1)
dataset = dataset.drop("poutcome", axis=1)
dataset.head()

#FEATURE ENGINEERING
cleanup_nums = {"marital": {"married": 1, "single": 0, "divorced":-1},
                "education": {"primary": 1, "secondary": 2, "tertiary": 3},
                "default": {"yes": 1, "no": 0},
                "housing": {"yes": 1, "no": 0},
                "loan": {"yes": 1, "no": 0},
                "y": {"yes": 1, "no": 0}}

dataset.replace(cleanup_nums, inplace=True)
dataset.head()
dataset.dtypes

dataset = dataset[dataset.job != 'unknown']
dataset = dataset[dataset.education != 'unknown']
dataset['education'] = dataset['education'].astype(int)

#COLLERATION MATRIX
plt.figure(figsize=(12,10))
cor = dataset.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()

#CLASSIFIFICATION
X = dataset.iloc[:, 0:7]
y = dataset.iloc[:, 7]

X = pd.get_dummies(X, columns=["job"], prefix=["job"])

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)

#DECISION TREE
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier

clf_dt = DecisionTreeClassifier()
clt_dt = clf_dt.fit(X_train,y_train)

esito = clf_dt.predict(X_test)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)

plt.hist(esito)

#RANDOM FOREST
from sklearn.ensemble import RandomForestClassifier

clf_dt = RandomForestClassifier()
clt_dt = clf_dt.fit(X_train,y_train)

esito = clf_dt.predict(X_test)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, esito)
print(cm)

plt.hist(esito)

# K-NEAREST NEIGHBOURS
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# TRAINING - TEST
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

# SCALING
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# FITTING
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train)

# PREDICTION
y_pred = classifier.predict(X_test)

# CONFUSION MATRIX
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, y_pred,target_names=target_names))
print(cm)

plt.hist(y_pred)

#UNDERSAMPLING
from sklearn.utils import resample

dataset_sample = pd.get_dummies(dataset, columns=["job"], prefix=["job"])

#SPLIT FEATURE AND TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)

#TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

X = pd.concat([X_train, y_train], axis=1)

#SELECTING TARGET CLASSES
not_sub = X[X.y==0]
sub = X[X.y==1]

not_sub_downsampled = resample(not_sub,
                               replace = False,
                               n_samples = len(sub),
                               random_state = 27)

# COMBINE MINORITY AND DOWNSAMPLED MAJORITY
downsampled = pd.concat([not_sub_downsampled, sub])

#DECISION TREE
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)

clf_dt = DecisionTreeClassifier()
clt_dt = clf_dt.fit(X_train,y_train)

esito = clf_dt.predict(X_test)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))

#RANDOM FOREST
y_train = downsampled.y
X_train = downsampled.drop('y', axis=1)

clf_dt = RandomForestClassifier()
clt_dt = clf_dt.fit(X_train,y_train)

esito = clf_dt.predict(X_test)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, esito,target_names=target_names))

#SMOTE - DECISION TREE
from imblearn.over_sampling import SMOTE

#SPLIT FEATURE TARGET
y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)

#TRAIN TEST
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

#SMOTE
sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)

clf_dt = DecisionTreeClassifier()

#FIT
smote = clf_dt.fit(X_train,y_train)

#PREDICITON
smote_pred = smote.predict(X_test)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))

#SMOTE - RANDOM FOREST
from imblearn.over_sampling import SMOTE

y = dataset_sample.y
X = dataset_sample.drop('y', axis=1)

# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

sm = SMOTE(random_state=27, ratio=1.0)
X_train, y_train = sm.fit_sample(X_train, y_train)

clf_dt = RandomForestClassifier()

smote = clf_dt.fit(X_train,y_train)

smote_pred = smote.predict(X_test)

target_names = ['NOT-sub', 'Subscribed']
print(classification_report(y_test, smote_pred,target_names=target_names))

#RECAP on RECALL
x = np.arange(3)
plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='upper right')

#RECAP on F1
x = np.arange(3)
plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT')
plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF')
plt.xticks(x-0.1, ['Normal','Under','Smote'])
plt.legend(loc='lower right')
[((10, 10, 10, 54), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((47, 0, 47, 27), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((49, 0, 49, 46), 'seaborn.heatmap', 'sns.heatmap', (), '', True, 'import seaborn as sns\n'), ((50, 0, 50, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((57, 4, 57, 54), 'pandas.get_dummies', 'pd.get_dummies', (), '', True, 'import pandas as pd\n'), ((60, 35, 60, 73), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((67, 9, 67, 33), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ({}, {}), '()', False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((76, 5, 76, 36), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ({(76, 22, 76, 28): 'y_test', (76, 30, 76, 35): 'esito'}, {}), '(y_test, esito)', False, 'from sklearn.metrics import confusion_matrix\n'), ((79, 0, 79, 15), 'matplotlib.pyplot.hist', 'plt.hist', ({(79, 9, 79, 14): 'esito'}, {}), '(esito)', True, 'import matplotlib.pyplot as plt\n'), ((85, 9, 85, 33), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((94, 5, 94, 36), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ({(94, 22, 94, 28): 'y_test', (94, 30, 94, 35): 'esito'}, {}), '(y_test, esito)', False, 'from sklearn.metrics import confusion_matrix\n'), ((97, 0, 97, 15), 'matplotlib.pyplot.hist', 'plt.hist', ({(97, 9, 97, 14): 'esito'}, {}), '(esito)', True, 'import matplotlib.pyplot as plt\n'), ((107, 35, 107, 93), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((111, 5, 111, 21), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import StandardScaler\n'), ((117, 13, 117, 79), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (), '', False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((125, 5, 125, 37), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ({(125, 22, 125, 28): 'y_test', (125, 30, 125, 36): 'y_pred'}, {}), '(y_test, y_pred)', False, 'from sklearn.metrics import confusion_matrix\n'), ((132, 0, 132, 16), 'matplotlib.pyplot.hist', 'plt.hist', ({(132, 9, 132, 15): 'y_pred'}, {}), '(y_pred)', True, 'import matplotlib.pyplot as plt\n'), ((138, 17, 138, 73), 'pandas.get_dummies', 'pd.get_dummies', (), '', True, 'import pandas as pd\n'), ((145, 35, 145, 93), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((147, 4, 147, 41), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((160, 14, 160, 51), 'pandas.concat', 'pd.concat', ({(160, 24, 160, 50): '[not_sub_downsampled, sub]'}, {}), '([not_sub_downsampled, sub])', True, 'import pandas as pd\n'), ((167, 9, 167, 33), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ({}, {}), '()', False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((179, 9, 179, 33), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((196, 35, 196, 93), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection 
import train_test_split\n'), ((199, 5, 199, 38), 'imblearn.over_sampling.SMOTE', 'SMOTE', (), '', False, 'from imblearn.over_sampling import SMOTE\n'), ((202, 9, 202, 33), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ({}, {}), '()', False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((221, 35, 221, 93), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((223, 5, 223, 38), 'imblearn.over_sampling.SMOTE', 'SMOTE', (), '', False, 'from imblearn.over_sampling import SMOTE\n'), ((226, 9, 226, 33), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((237, 4, 237, 16), 'numpy.arange', 'np.arange', ({(237, 14, 237, 15): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((238, 0, 238, 76), 'matplotlib.pyplot.bar', 'plt.bar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((239, 0, 239, 72), 'matplotlib.pyplot.bar', 'plt.bar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((240, 0, 240, 45), 'matplotlib.pyplot.xticks', 'plt.xticks', ({(240, 11, 240, 16): '(x - 0.1)', (240, 18, 240, 44): "['Normal', 'Under', 'Smote']"}, {}), "(x - 0.1, ['Normal', 'Under', 'Smote'])", True, 'import matplotlib.pyplot as plt\n'), ((241, 0, 241, 29), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((245, 4, 245, 16), 'numpy.arange', 'np.arange', ({(245, 14, 245, 15): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((246, 0, 246, 76), 'matplotlib.pyplot.bar', 'plt.bar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((247, 0, 247, 72), 'matplotlib.pyplot.bar', 'plt.bar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((248, 0, 248, 45), 'matplotlib.pyplot.xticks', 'plt.xticks', ({(248, 11, 248, 16): '(x - 0.1)', (248, 18, 248, 44): "['Normal', 'Under', 'Smote']"}, {}), "(x - 0.1, ['Normal', 'Under', 'Smote'])", True, 'import matplotlib.pyplot as plt\n'), ((249, 0, 249, 29), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((73, 6, 73, 68), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n'), ((91, 6, 91, 68), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n'), ((128, 6, 128, 69), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n'), ((173, 6, 173, 68), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n'), ((185, 6, 185, 68), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n'), ((211, 6, 211, 73), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n'), ((233, 6, 233, 73), 'sklearn.metrics.classification_report', 'classification_report', (), '', False, 'from sklearn.metrics import classification_report\n')]
anastasiia-zolochevska/cloud-custodian
tools/c7n_azure/tests/test_route_table.py
f25315a01bec808c16ab0e2d433d6151cf5769e4
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure_common import BaseTest, arm_template


class RouteTableTest(BaseTest):

    route_table_name = 'cctestroutetable'
    vnet_name = 'ccroutetablevnet'
    allowed_subnet_name = 'cctestsubnet1'
    disallowed_subnet_name = 'cctestsubnet2'

    @staticmethod
    def _subnet_id_suffix(subnet):
        return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)

    def test_route_table_schema_validate(self):
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-route-table',
                'resource': 'azure.routetable'
            }, validate=True)
            self.assertTrue(p)

    @arm_template('route-table-and-vnet.json')
    def test_find_route_table_by_name(self):
        p = self.load_policy({
            'name': 'test-find-route-table-by-name',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                }
            ]
        })

        resources = p.run()

        self._assert_only_route_table_in_resources(resources)

    @arm_template('route-table-and-vnet.json')
    def test_detect_route_table_is_routing_to_correct_subnet(self):
        p = self.load_policy({
            'name': 'test-detect-route-table-is-routing-to-correct-subnet',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                },
                {
                    'type': 'value',
                    'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
                        RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
                    ),
                    'value': 'not-null'
                }
            ]
        })

        resources = p.run()

        self._assert_only_route_table_in_resources(resources)

    @arm_template('route-table-and-vnet.json')
    def test_detect_route_table_not_routing_to_incorrect_subnet(self):
        p = self.load_policy({
            'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                },
                {
                    'type': 'value',
                    'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
                        RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)
                    ),
                    'value': 'not-null'
                }
            ]
        })

        resources = p.run()

        self.assertEqual(len(resources), 0, "A route table is routing to a disallowed subnet")

    @arm_template('route-table-and-vnet.json')
    def test_detect_route_only_routes_to_specific_subnets(self):
        p = self.load_policy({
            'name': 'test-detect-route-only-routes-to-specific-subnets',
            'resource': 'azure.routetable',
            'filters': [
                {
                    'type': 'value',
                    'key': 'name',
                    'op': 'eq',
                    'value': RouteTableTest.route_table_name
                },
                {
                    'type': 'value',
                    'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
                        RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
                    ),
                    'value': 'not-null'
                },
                {
                    'type': 'value',
                    'key': 'length(properties.subnets)',
                    'op': 'eq',
                    'value': 1
                }
            ]
        })

        resources = p.run()

        self._assert_only_route_table_in_resources(resources)

    def _assert_only_route_table_in_resources(self, resources):
        self.assertEqual(len(resources), 1, "Only one route table should be found")

        route_table = resources[0]
        self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),
                         "The wrong route table was found")

        properties = route_table.get('properties')
        self.assertIsNotNone(properties, "Missing properties")

        subnets = properties.get('subnets')
        self.assertIsNotNone(subnets, "Missing subnets")
        self.assertEqual(1, len(subnets), "There should only be one subnet")

        subnet = subnets[0]
        self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), "Incorrect subnet")
[((37, 5, 37, 46), 'azure_common.arm_template', 'arm_template', ({(37, 18, 37, 45): '"""route-table-and-vnet.json"""'}, {}), "('route-table-and-vnet.json')", False, 'from azure_common import BaseTest, arm_template\n'), ((56, 5, 56, 46), 'azure_common.arm_template', 'arm_template', ({(56, 18, 56, 45): '"""route-table-and-vnet.json"""'}, {}), "('route-table-and-vnet.json')", False, 'from azure_common import BaseTest, arm_template\n'), ((82, 5, 82, 46), 'azure_common.arm_template', 'arm_template', ({(82, 18, 82, 45): '"""route-table-and-vnet.json"""'}, {}), "('route-table-and-vnet.json')", False, 'from azure_common import BaseTest, arm_template\n'), ((108, 5, 108, 46), 'azure_common.arm_template', 'arm_template', ({(108, 18, 108, 45): '"""route-table-and-vnet.json"""'}, {}), "('route-table-and-vnet.json')", False, 'from azure_common import BaseTest, arm_template\n')]
pkthein/sparts_all_fam
proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
ff162e4ea8c3919a197dc0cc13fde6b32da113c7
# Copyright 2016 Intel Corporation # Copyright 2017 Wind River # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES # ################################################################################ import hashlib import logging import json from collections import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler: """ Class for handling the Transaction Family : Artifact Attributes: namespace_prefix (str): The namespace prefix of the transaction family """ def __init__(self, namespace_prefix): """ Constructs the ArtifactTransactionHandler object. Args: namespace_prefix (str): The namepsace prefix of the transaction family """ self._namespace_prefix = namespace_prefix @property def family_name(self): """ type: str Returns the family name of the handler object. """ return "artifact" @property def family_versions(self): """ type: list of str Returns the family version of the handler object. """ return ["1.0"] @property def encodings(self): """ type: list of str Returns the encoding scheme used for the data for the handler object. """ return ["csv-utf8"] @property def namespaces(self): """ type: list of str Returns the namespaces associating with the handler object. """ return [self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################ def apply(self, transaction, context): """ Applys the payload from transaction onto the state storage. Args: transaction (Transaction): The transaction pertaining the payload context (State): The current state of the ledger Returns: type: State The new state of the ledger, which includes the data from the transaction, is returned to be stored on the state storage. Raises: InvalidTransaction: * If deserialization for payload from transaction failed * If "create" was called on non-unique uuid * If "amend" was called on non-existing uuid * If "Add..." 
were called on non-existing uuid * If invalid operation was called InternalError: * If deserialization of State.data failed """ # Parsing required fields from transaction payload try: payload = json.loads(transaction.payload.decode()) artifact_id = payload["uuid"] artifact_alias = payload["alias"] artifact_name = payload["name"] artifact_type = payload["content_type"] artifact_checksum = payload["checksum"] artifact_label = payload["label"] artifact_openchain = payload["openchain"] action = payload["action"] prev = payload["prev_block"] cur = payload["cur_block"] timestamp = payload["timestamp"] artifact_list = payload["artifact_list"] uri_list = payload["uri_list"] except ValueError: raise InvalidTransaction("Invalid payload serialization") # Soft sanity check and loading required data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) # Hard sanity check before creating final payload for the state storage if len(state_entries) != 0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact["uuid"] except ValueError: raise InternalError("Failed to deserialize data.") else: stored_artifact_id = stored_artifact = None if action == "create" and stored_artifact_id is not None: raise InvalidTransaction("Invalid Action-artifact already exists.") elif action == "create": artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp) elif action == "amend" and stored_artifact_id is not None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) elif action == "AddArtifact" or action == "AddURI": if stored_artifact_id is None: raise InvalidTransaction( "Invalid Action-requires an existing artifact." ) artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) # Adding the final payload to the state storage data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses ################################################################################ # HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]): """ Constructs the payload to be stored in the state storage. 
Args: artifact_uuid (str): The uuid of the artifact artifact_alias (str): The alias of the artifact artifact_name (str): The name of the artifact artifact_type (str): The type of the artifact artifact_checksum (str): The checksum of the artifact artifact_label (str): The label of the artifact artifact_openchain (str): The openchain of the artifact prev (str): The previous block id of the transaction (default "0") cur (str): the current block id of the transaction timestamp (str): The UTC time for when the transaction was submitted artifact_list (list of dict): The list of the artifact uuid associated with the artifact (default []) uri_list (list of dict): The list of the uri associated with the artifact (default []) Returns: type: dict The dictionary pertaining all the param is created and returned to be stored on the state storage. """ return { "uuid" : artifact_id, "alias" : artifact_alias, "name" : artifact_name, "content_type" : artifact_type, "checksum" : artifact_checksum, "label" : artifact_label, "openchain" : artifact_openchain, "prev_block" : prev, "cur_block" : cur, "timestamp" : timestamp, "artifact_list" : artifact_list, "uri_list" : uri_list } def validate_transaction(artifact_id, action): """ Performs soft sanity check in order to improve runtime by eliminating the obvious exception errors. Args: artifact_id (str): The uuid of the artifact action (str): The command to be performed Raises: InvalidTransaction: If the uuid or the action are not passed in or the action is not a valid action. """ if not artifact_id: raise InvalidTransaction("Artifact ID is required") if not action: raise InvalidTransaction("Action is required") if action not in ("AddArtifact", "create", "AddURI", "amend"): raise InvalidTransaction("Invalid action: {}".format(action)) def make_artifact_address(namespace_prefix, artifact_id): """ Creates an artifact address which will be used to recover the associated UUID if the artifact already exists in the state storage; or, used as a key to store the new data into the state storage. Args: namespace_prefix (str): The prefix associating with the transaction family artifact_id (str): The uuid of the artifact Returns: type: str The address-to-be, which associates the uuid and the namespace prefix. """ return namespace_prefix + \ hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()[:64] def _display(msg): """ Logs the message to the debug logger. Args: msg (str): The message that is to be logged into the debug logger """ n = msg.count("\n") if n > 0: msg = msg.split("\n") length = max(len(line) for line in msg) else: length = len(msg) msg = [msg] LOGGER.debug("+" + (length + 2) * "-" + "+") for line in msg: LOGGER.debug("+ " + line.center(length) + " +") LOGGER.debug("+" + (length + 2) * "-" + "+") ################################################################################ # # ################################################################################
[((26, 9, 26, 36), 'logging.getLogger', 'logging.getLogger', ({(26, 27, 26, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((245, 14, 245, 59), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', ({(245, 33, 245, 58): '"""Artifact ID is required"""'}, {}), "('Artifact ID is required')", False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((247, 14, 247, 54), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', ({(247, 33, 247, 53): '"""Action is required"""'}, {}), "('Action is required')", False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((154, 18, 154, 79), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', ({(154, 37, 154, 78): '"""Invalid Action-artifact already exists."""'}, {}), "('Invalid Action-artifact already exists.')", False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((132, 18, 132, 69), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', ({(132, 37, 132, 68): '"""Invalid payload serialization"""'}, {}), "('Invalid payload serialization')", False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((178, 15, 178, 35), 'json.dumps', 'json.dumps', ({(178, 26, 178, 34): 'artifact'}, {}), '(artifact)', False, 'import json\n'), ((148, 22, 148, 66), 'sawtooth_sdk.processor.exceptions.InternalError', 'InternalError', ({(148, 36, 148, 65): '"""Failed to deserialize data."""'}, {}), "('Failed to deserialize data.')", False, 'from sawtooth_sdk.processor.exceptions import InternalError\n'), ((168, 22, 170, 17), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', ({(169, 20, 169, 67): '"""Invalid Action-requires an existing artifact."""'}, {}), "('Invalid Action-requires an existing artifact.')", False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n')]
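For context on the Sawtooth handler above, here is a minimal, hand-written sketch of what a client-side payload and its state address could look like. The namespace-prefix derivation (first 6 hex characters of the family name's SHA-512) is a common Sawtooth convention but an assumption here, since the real prefix is passed into ArtifactTransactionHandler by its caller; the UUID and field values are made up, and the address helper simply mirrors make_artifact_address above.

import hashlib
import json

# Assumed prefix: first 6 hex chars of the family name's SHA-512 (a common
# Sawtooth convention); the real prefix is whatever the caller passes in.
FAMILY_NAME = "artifact"
NAMESPACE_PREFIX = hashlib.sha512(FAMILY_NAME.encode("utf-8")).hexdigest()[:6]

# Same derivation as make_artifact_address above: prefix + 64 hex chars of the UUID hash.
def example_artifact_address(namespace_prefix, artifact_id):
    return namespace_prefix + \
        hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()[:64]

# Example "create" payload with the exact keys apply() reads; values are made up.
payload = {
    "uuid": "1234-5678", "alias": "demo-artifact", "name": "demo.tar.gz",
    "content_type": "binary", "checksum": "abc123", "label": "demo",
    "openchain": "false", "action": "create", "prev_block": "0",
    "cur_block": "1", "timestamp": "2021-01-01T00:00:00Z",
    "artifact_list": [], "uri_list": [],
}
serialized = json.dumps(payload).encode()   # what transaction.payload would carry
address = example_artifact_address(NAMESPACE_PREFIX, payload["uuid"])
print(len(address))                          # 6-char prefix + 64-char hash = 70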
fsandx/moodybooks
ReviewsCollector.py
5c13fe43849e4fa861a163c74411e9f796518bc9
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
STEP 2
Takes the list of urls in the json file and downloads the html files to the local drive
Start with: scrapy runspider ReviewsCollector.py
"""
import scrapy
import json


class ReviewsCollector(scrapy.Spider):
    # Scrapy requires every spider to declare a name, otherwise runspider will not pick it up.
    name = "reviews_collector"

    def start_requests(self):
        # Load the book list produced in step 1; each item is expected to carry a 'url' key.
        with open("data/books.json") as f:
            self.data = json.load(f)

        for item in self.data:
            if item['url'] is not None:
                yield scrapy.Request(url=item['url'], headers={'Referer': 'http://www.google.com/'}, callback=self.parse)

    def parse(self, response):
        # Save the raw HTML of each fetched page under data/reviews/.
        filename = response.url.split("/")[-1] + '.html'
        with open('data/reviews/' + filename, 'wb+') as f:
            f.write(response.body)
[((17, 24, 17, 36), 'json.load', 'json.load', ({(17, 34, 17, 35): 'f'}, {}), '(f)', False, 'import json\n'), ((20, 26, 20, 124), 'scrapy.Request', 'scrapy.Request', (), '', False, 'import scrapy\n')]
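The spider in the record above relies on an input file and an output directory that are not shown here, so this is a small sketch of the expected layout under stated assumptions: the only field the spider actually reads from data/books.json is 'url', the other keys are hypothetical, and data/reviews/ has to exist before parse() can write into it.

import json
import os

# Hypothetical book list; the spider only requires a 'url' key per item.
books = [
    {"title": "Example Book", "url": "https://example.com/book/123"},
    {"title": "No Link Yet", "url": None},   # skipped by the spider's None check
]

os.makedirs("data/reviews", exist_ok=True)   # parse() writes the HTML files here
with open("data/books.json", "w") as f:
    json.dump(books, f)

# Then run it as the docstring says: scrapy runspider ReviewsCollector.py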
roshie548/firelight
firelight/interfaces/light.py
3a5af5e2a1e5784127baebcf1517ffddcaff4062
from abc import ABC, abstractmethod from .color import Color class LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): """Discover the lights and groups in this LightSystem.""" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): """Set how long it takes in milliseconds for colors to transition.""" raise NotImplementedError @abstractmethod def set_color(self, color: Color): """Set the color of all the lights in the LightSystem.""" raise NotImplementedError class LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): """Turn on the lights in this group.""" raise NotImplementedError @abstractmethod def turn_off(self): """Turn off the lights in this group.""" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): """Set how long it takes in milliseconds for colors to transition.""" raise NotImplementedError @abstractmethod def set_color(self, color: Color): """Set the color of this light.""" raise NotImplementedError class LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): """Turn on this light.""" raise NotImplementedError @abstractmethod def turn_off(self): """Turn off the light.""" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): """Set how long it takes in milliseconds for colors to transition.""" raise NotImplementedError @abstractmethod def set_color(self, color: Color): """Set the color of this light.""" raise NotImplementedError
[]
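Since the light.py record above only defines abstract interfaces, a minimal concrete LightDevice is sketched below to show how the contract is meant to be satisfied. The import path is an assumption inferred from the file's location in the repo (firelight/interfaces/light.py), and the print-based behaviour is purely illustrative.

# Assumed import path, inferred from this file's location in the repo.
from firelight.interfaces.light import LightDevice


class ConsoleLight(LightDevice):
    """Toy device that just prints what a real light would do."""

    def __init__(self):
        self._transition_ms = 0

    def turn_on(self):
        print("light on")

    def turn_off(self):
        print("light off")

    def set_transition_time(self, transition_time: int):
        self._transition_ms = transition_time

    def set_color(self, color):
        print("color ->", color, "over", self._transition_ms, "ms")


light = ConsoleLight()
light.turn_on()
light.set_transition_time(200)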
cadeng23/oop-cjgustafson
PolymorphismPYTHON/Polypy.py
cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
import random


class Family:
    def __init__(self, first, last, hair):
        self.first = first
        self.last = last
        self.hair = hair
        # Eyes start out Grey because they are unknown until eyefind() runs
        self.eyes = 'Grey'

    def fullname(self):
        return '{} {}'.format(self.first, self.last)

    def eyefind(self):
        temp = random.choice([1, 2])
        # using the Punnett square in genetics we know that a donor
        # with blue eyes and one with brown makes it 50/50 odds
        # that the child's eyes will be brown or blue
        if temp == 1:
            self.EYES = "Brown"
        else:
            self.EYES = "Blue"
        return self.EYES

    def Apply_eyes(self):
        # pick a random eye color first, then apply it to this person
        self.eyefind()
        self.eyes = self.EYES


Daughter = Family('Ashley', 'Smith', 'Brown')
Son = Family('Kevin', 'Smith', 'Brown')
print(Daughter.eyes)
print(Son.eyes)

# with the kids being born it will define what color hair and eyes
# they may randomly get through inheritance
class Kids(Family):
    pass

# Eyes are marked as Grey because they are unknown for now
# hair colors are brown because brown is the dominant hair color
Daughter = Kids('Danielle', 'Smith', 'Brown')
Son = Kids('Kevin', 'Smith', 'Brown')

print(Daughter.eyes)
print(Son.eyes)

Daughter.Apply_eyes()
Son.Apply_eyes()

print(Daughter.eyes)
print(Son.eyes)
[((16, 15, 16, 35), 'random.choice', 'random.choice', ({(16, 29, 16, 34): '[1, 2]'}, {}), '([1, 2])', False, 'import random\n')]
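The comments in the record above justify the 50/50 eye-colour split with a Punnett square; as a quick, purely illustrative sanity check of the random.choice([1, 2]) mechanism, a frequency count comes out close to even.

import random
from collections import Counter

# Count how often choice 1 ("Brown") versus choice 2 ("Blue") comes up.
counts = Counter(
    "Brown" if random.choice([1, 2]) == 1 else "Blue" for _ in range(10_000)
)
print(counts)   # both colours should land near 5000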
evancohen/home-assistant
homeassistant/components/device_tracker/owntracks.py
dafc0ced6b07025c03417d8e7a2c0133b4c622fc
""" homeassistant.components.device_tracker.owntracks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OwnTracks platform for the device tracker. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ """ import json import logging import homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config, see): """ Set up a OwnTracksks tracker. """ def owntracks_location_update(topic, payload, qos): """ MQTT message received. """ # Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except ValueError: # If invalid JSON logging.getLogger(__name__).error( 'Unable to parse payload as JSON: %s', payload) return if not isinstance(data, dict) or data.get('_type') != 'location': return parts = topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']), } if 'acc' in data: kwargs['gps_accuracy'] = data['acc'] if 'batt' in data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1) return True
[((51, 4, 51, 70), 'homeassistant.components.mqtt.subscribe', 'mqtt.subscribe', ({(51, 19, 51, 23): 'hass', (51, 25, 51, 39): 'LOCATION_TOPIC', (51, 41, 51, 66): 'owntracks_location_update', (51, 68, 51, 69): '(1)'}, {}), '(hass, LOCATION_TOPIC, owntracks_location_update, 1)', True, 'import homeassistant.components.mqtt as mqtt\n'), ((28, 19, 28, 38), 'json.loads', 'json.loads', ({(28, 30, 28, 37): 'payload'}, {}), '(payload)', False, 'import json\n'), ((31, 12, 31, 39), 'logging.getLogger', 'logging.getLogger', ({(31, 30, 31, 38): '__name__'}, {}), '(__name__)', False, 'import logging\n')]
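To make the parsing logic in the OwnTracks callback above easier to follow, here is a stand-alone walk-through with a made-up topic and payload; the field names (_type, lat, lon, acc, batt) are the ones the callback reads, but the concrete values are invented.

import json

topic = "owntracks/alice/phone"
payload = json.dumps({"_type": "location", "lat": 52.1, "lon": 5.2, "acc": 10, "batt": 87})

# Same steps the callback performs before calling see(**kwargs).
data = json.loads(payload)
parts = topic.split('/')
kwargs = {
    'dev_id': '{}_{}'.format(parts[1], parts[2]),   # "alice_phone"
    'host_name': parts[1],
    'gps': (data['lat'], data['lon']),
    'gps_accuracy': data['acc'],
    'battery': data['batt'],
}
print(kwargs)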
luyaojie/E3C
src/models/end_to_end_event_coreference.py
4b2f33da4629211fd6a3738077794f821c7f7c8b
#!/usr/bin/env python # -*- coding:utf-8 -*- # Created by Roger on 2019-09-10 # Mostly by AllenNLP import logging import math from typing import Any, Dict, List, Optional, Tuple import torch import torch.nn.functional as F from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from allennlp.nn import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average from overrides import overrides from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register("end-to-end-event-coreference") class End2EndEventCoreferenceResolver(Model): """ This ``Model`` implements the coreference resolution model described "End-to-end Neural Coreference Resolution" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017. The basic outline of this model is to get an embedded representation of each span in the document. These span representations are scored and used to prune away spans that are unlikely to occur in a coreference cluster. For the remaining spans, the model decides which antecedent span (if any) they are coreferent with. The resulting coreference links, after applying transitivity, imply a clustering of the spans in the document. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we get as input to the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information for each word in the document. mention_feedforward : ``FeedForward`` This feedforward network is applied to the span representations which is then scored by a linear layer. antecedent_feedforward: ``FeedForward`` This feedforward network is applied to pairs of span representation, along with any pairwise features, which is then scored by a linear layer. feature_size: ``int`` The embedding size for all the embedded features, such as distances or span widths. max_span_width: ``int`` The maximum width of candidate spans. spans_per_word: float, required. A multiplier between zero and one which controls what percentage of candidate mention spans we retain with respect to the number of words in the document. max_antecedents: int, required. For each mention which survives the pruning stage, we consider this many antecedents. lexical_dropout: ``int`` The probability of dropping out dimensions of the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. 
""" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder = None, max_span_width: int = 1, spans_per_word: float = 0.1, max_antecedents: int = 50, lexical_dropout: float = 0.2, pretrain_ed: bool = False, pretrain_coref: bool = False, coref_loss_weight: float = 1.0, bce_loss_weight: float = 1.0, bce_pos_weight: float = None, local_window_size: int = 10, attention_type: str = 'dot', decoding: str = 'type-guided', type_threshold: float = -1., type_refine: bool = True, type_match_in_eval: bool = True, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type = attention_type self._decoding = decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, "labels")) if context_layer is not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination="x,y", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer = None else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination="x,y", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer = None else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is not 
None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate = None # NIL for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1 # 10 possible distance buckets. self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric = Average() self._coref_loss_metric = Average() self._coref_label_metric = Average() self._type_label_metric = Average() self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x initializer(self) def _get_event_embedding(self, span_mask): """ :param span_mask: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) """ event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): """ :param top_event_type_labels: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) """ event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob = 
torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device < 0: device = 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask = attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def forward(self, # type: ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ """ Parameters ---------- text : ``Dict[str, torch.LongTensor]``, required. The output of a ``TextField`` representing the text of the document. spans : ``torch.IntTensor``, required. A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of indices into the text of the document. coref_labels : ``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size, num_spans), representing the cluster ids of each span, or -1 for those which do not appear in any clusters. event_type_labels : ``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size, num_spans), representing the event label of the specific span. realis_labels : ``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size, num_spans), representing the realis label of the specific span. metadata : ``List[Dict[str, Any]]``, optional (default = None). A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys from this dictionary, which respectively have the original text and the annotated gold coreference clusters for that instance. Returns ------- An output dictionary consisting of: top_spans : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing the start and end word indices of the top spans that survived the pruning stage. antecedent_indices : ``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span the index (with respect to top_spans) of the possible antecedents the model considered. predicted_antecedents : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the index (with respect to antecedent_indices) of the most likely antecedent. -1 means there was no predicted link. loss : ``torch.FloatTensor``, optional A scalar loss to be optimised. 
""" # Shape: (batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans = spans.size(1) # Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float() # SpanFields return -1 when they are used as padding. As we do # some comparisons based on span widths when we attend over the # span representations that we generate from these indices, we # need them to be <= 0. This is only relevant in edge cases where # the number of spans we consider after the pruning stage is >= the # total number of spans, because in this case, it is possible we might # consider a masked span. # Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is not None: # Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep) # torch.index_select only accepts 1D indices, but here # we need to select spans for each element in the batch. # This reformats the indices to take into account their # index into the batch. We precompute this here to make # the multiple calls to util.batched_index_select below more efficient. 
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions for which spans to consider as mentions. # Shape: (batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices for antecedent spans to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() # Now that we have our variables in terms of num_spans_to_keep, we need to # compare span pairs to decide each span's antecedent. Each span can only # have prior spans as antecedents, and we only consider up to max_antecedents # prior spans. So the first thing we do is construct a matrix mapping a span's # index to the indices of its allowed antecedents. Note that this is independent # of the batch dimension - it's just a function of the span's position in # top_spans. The spans are in document order, so we can just use the relative # index of the spans to know which other spans are allowed antecedents. # Once we have this matrix, we reformat our variables again to get embeddings # for all valid antecedents for each span. This gives us variables with shapes # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which # we can use to make coreference decisions between valid span pairs. # Shapes: # (num_spans_to_keep, max_antecedents), # (1, max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating to the antecedent spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores. 
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have, for each span which survived the pruning stage, # a predicted antecedent. This implies a clustering if we group # mentions which refer to each other in a chain. # Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) # Subtract one here because index 0 is the "no antecedent" class, # so this makes the indices line up with actual spans if the prediction # is greater than -1. predicted_antecedents -= 1 output_dict = {"top_spans": top_spans, "antecedent_indices": valid_antecedent_indices, "predicted_antecedents": predicted_antecedents, "coreference_scores": coreference_scores, } if coref_labels is not None and event_type_labels is not None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels for the spans which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight # Now, compute the loss using the negative marginal log-likelihood. # This is equal to the log of the sum of the probabilities of all antecedent predictions # that would be consistent with the data, in the sense that we are minimising, for a # given span, the negative marginal log likelihood of all antecedents which are in the # same gold cluster as the span we are currently considering. Each span i predicts a # single antecedent j, but there might be several prior mentions k in the same # coreference cluster that would be valid antecedents. Our loss is the sum of the # probability assigned to all valid antecedents. This is a valid objective for # clustering as we don't mind which antecedent is predicted, so long as they are in # the same coreference cluster. 
if self._pretrain_ed: # All antecedent mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict["loss"] = coref_loss + bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is not None: output_dict["document"] = [x["original_text"] for x in metadata] output_dict["offset"] = [x["token_offset"] for x in metadata] output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata] return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): """ Converts the list of spans and predicted antecedent indices into clusters of spans for each element in the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward` on an instance or batch of instances. Returns ------- The same output dictionary, but with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each instance in the batch, the list of clusters, which are in turn comprised of a list of (start, end) inclusive spans into the original document. """ return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool = False) -> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {"c_p": coref_precision, "c_r": coref_recall, "c_f1": coref_f1, "m_p": mention_result['precision'], "m_r": mention_result['recall'], "m_f1": mention_result['f1-score'], "nil": self._nil_label_metric.get_metric(reset), "type": self._type_label_metric.get_metric(reset), "coref": self._coref_label_metric.get_metric(reset), "t_l": self._type_loss_metric.get_metric(reset), "c_l": self._coref_loss_metric.get_metric(reset), "a_f1": (mention_result['f1-score'] + coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): """ event_embeddings: ``torch.FloatTensor``, required. Embedding representations of the event types. Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the antecedent spans we are considering for each top span. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). 
return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) """ event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): """ Computes an embedding representation of pairs of spans for the pairwise scoring function to consider. This includes both the original span representations, the element-wise similarity of the span representations, and an embedding representation of the distance between the two spans. Parameters ---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations of the top spans. Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the antecedent spans we are considering for each top span. Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The offsets between each top span and its antecedent spans in terms of spans we are considering. Has shape (1, max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of the pair of spans to consider. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) """ # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) # Shape: (1, 1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): """ Generates a binary indicator for every pair of spans. This label is one if and only if the pair of spans belong to the same cluster. The labels are augmented with a dummy antecedent at the zeroth position, which represents the prediction that a span does not have any antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``, required. The cluster id label for every span. 
The id is arbitrary, as we just care about the clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The cluster id label for every antecedent span. The id is arbitrary, as we just care about the clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor representing whether a given pair of spans belong to the same cluster in the gold clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents + 1). """ # Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels * 0 else: # for pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item()) # print(pairwise_labels) # # # Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: """ Computes scores for every pair of spans. Additionally, a dummy label is included, representing the decision that the span is not coreferent with anything. For the dummy label, the score is always zero. For the true antecedent spans, the score consists of the pairwise antecedent score and the unary mention scores for the span and its antecedent. The factoring allows the model to blame many of the absent links on bad spans, enabling the pruning strategy used in the forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of pairs of spans. Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for every span. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for every antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log of the mask for valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1), representing the unormalised score for each (span, antecedent) pair we considered. 
""" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: """ This method generates possible antecedents per span which survived the pruning stage. This procedure is `generic across the batch`. The reason this is the case is that each span in a batch can be coreferent with any previous span, but here we are computing the possible `indices` of these spans. So, regardless of the batch, the 1st span _cannot_ have any antecedents, because there are none to select from. Similarly, each element can only predict previous spans, so this returns a matrix of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to (i - 1) - j if j <= i, or zero otherwise. Parameters ---------- num_spans_to_keep : ``int``, required. The number of spans that were kept while pruning. max_antecedents : ``int``, required. The maximum number of antecedent spans to consider for every span. device: ``int``, required. The CUDA device to use. Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The indices of every antecedent to consider with respect to the top k spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance between the span and each of its antecedents in terms of the number of considered spans (i.e not the word distance between the spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask representing whether each antecedent span is valid. Required since different spans have different numbers of valid antecedents. For example, the first span in the document should have no valid antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``. """ # Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This is a broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets # In our matrix of indices, the upper triangular part will be negative # because the offsets will be > the target indices. We want to mask these, # because these are exactly the indices which we don't want to predict, per span. # We're generating a logspace mask here because we will eventually create a # distribution over these indices, so we need the 0 elements of the mask to be -inf # in order to not mess up the normalisation of the distribution. 
# Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long() return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
[((29, 9, 29, 36), 'logging.getLogger', 'logging.getLogger', ({(29, 27, 29, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((32, 1, 32, 47), 'allennlp.models.model.Model.register', 'Model.register', ({(32, 16, 32, 46): '"""end-to-end-event-coreference"""'}, {}), "('end-to-end-event-coreference')", False, 'from allennlp.models.model import Model\n'), ((97, 54, 97, 77), 'allennlp.nn.InitializerApplicator', 'InitializerApplicator', ({}, {}), '()', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((103, 39, 103, 78), 'allennlp.modules.TimeDistributed', 'TimeDistributed', ({(103, 55, 103, 77): 'antecedent_feedforward'}, {}), '(antecedent_feedforward)', False, 'from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\n'), ((111, 31, 111, 57), 'allennlp.modules.Pruner', 'Pruner', ({(111, 38, 111, 56): 'self._event_scorer'}, {}), '(self._event_scorer)', False, 'from allennlp.modules import FeedForward, Pruner\n'), ((194, 35, 194, 86), 'allennlp.modules.token_embedders.Embedding', 'Embedding', ({(194, 45, 194, 71): 'self._num_distance_buckets', (194, 73, 194, 85): 'feature_size'}, {}), '(self._num_distance_buckets, feature_size)', False, 'from allennlp.modules.token_embedders import Embedding\n'), ((203, 33, 203, 55), 'src.metrics.mention_f1.TopSpanMentionTypeF1', 'TopSpanMentionTypeF1', ({}, {}), '()', False, 'from src.metrics.mention_f1 import TopSpanMentionTypeF1\n'), ((204, 35, 204, 84), 'src.metrics.event_coref_scores.EventCorefScores', 'EventCorefScores', (), '', False, 'from src.metrics.event_coref_scores import EventCorefScores\n'), ((205, 33, 205, 42), 'allennlp.training.metrics.Average', 'Average', ({}, {}), '()', False, 'from allennlp.training.metrics import Average\n'), ((206, 35, 206, 44), 'allennlp.training.metrics.Average', 'Average', ({}, {}), '()', False, 'from allennlp.training.metrics import Average\n'), ((207, 34, 207, 43), 'allennlp.training.metrics.Average', 'Average', ({}, {}), '()', False, 'from allennlp.training.metrics import Average\n'), ((208, 35, 208, 44), 'allennlp.training.metrics.Average', 'Average', ({}, {}), '()', False, 'from allennlp.training.metrics import Average\n'), ((209, 34, 209, 43), 'allennlp.training.metrics.Average', 'Average', ({}, {}), '()', False, 'from allennlp.training.metrics import Average\n'), ((210, 33, 210, 42), 'allennlp.training.metrics.Average', 'Average', ({}, {}), '()', False, 'from allennlp.training.metrics import Average\n'), ((269, 21, 269, 62), 'torch.cat', 'torch.cat', ({(269, 31, 269, 57): '[dummy_scores, event_prob]', (269, 59, 269, 61): '-1'}, {}), '([dummy_scores, event_prob], -1)', False, 'import torch\n'), ((270, 21, 270, 50), 'torch.softmax', 'torch.softmax', ({(270, 35, 270, 45): 'event_prob', (270, 47, 270, 49): '-1'}, {}), '(event_prob, -1)', False, 'import torch\n'), ((280, 17, 280, 66), 'allennlp.nn.util.get_device_of', 'util.get_device_of', ({(280, 36, 280, 65): 'raw_contextualized_embeddings'}, {}), '(raw_contextualized_embeddings)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((421, 32, 421, 92), 'allennlp.nn.util.flatten_and_batch_shift_indices', 'util.flatten_and_batch_shift_indices', ({(421, 69, 421, 80): 'top_indices', (421, 82, 421, 91): 'num_spans'}, {}), '(top_indices, num_spans)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((424, 20, 426, 68), 'allennlp.nn.util.batched_index_select', 'util.batched_index_select', ({(424, 46, 424, 51): 'spans', 
(425, 46, 425, 57): 'top_indices', (426, 46, 426, 67): 'flat_top_span_indices'}, {}), '(spans, top_indices, flat_top_span_indices)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((462, 42, 463, 95), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', ({(462, 70, 462, 84): 'top_embeddings', (463, 70, 463, 94): 'valid_antecedent_indices'}, {}), '(top_embeddings, valid_antecedent_indices)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((487, 46, 489, 59), 'torch.cat', 'torch.cat', ({(487, 56, 488, 93): '[event_type_prior_scores, candidate_antecedent_mention_scores]', (489, 56, 489, 58): '-1'}, {}), '([event_type_prior_scores, candidate_antecedent_mention_scores], -1)', False, 'import torch\n'), ((606, 15, 609, 63), 'src.utils.cluster_decoding_utils.node_decode', 'node_decode', (), '', False, 'from src.utils.cluster_decoding_utils import node_decode\n'), ((647, 15, 647, 70), 'torch.cat', 'torch.cat', ({(647, 25, 647, 66): '[event_embeddings, antecedent_embeddings]', (647, 68, 647, 69): '(2)'}, {}), '([event_embeddings, antecedent_embeddings], 2)', False, 'import torch\n'), ((683, 24, 683, 108), 'allennlp.nn.util.bucket_values', 'util.bucket_values', (), '', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((702, 31, 705, 78), 'torch.cat', 'torch.cat', ({(702, 41, 705, 73): '[target_embeddings, antecedent_embeddings, antecedent_embeddings *\n target_embeddings, antecedent_distance_embeddings]', (705, 75, 705, 77): '-1'}, {}), '([target_embeddings, antecedent_embeddings, antecedent_embeddings *\n target_embeddings, antecedent_distance_embeddings], -1)', False, 'import torch\n'), ((762, 43, 762, 99), 'torch.cat', 'torch.cat', ({(762, 53, 762, 94): '[type_antecedent_labels, pairwise_labels]', (762, 96, 762, 98): '-1'}, {}), '([type_antecedent_labels, pairwise_labels], -1)', False, 'import torch\n'), ((816, 29, 816, 77), 'torch.cat', 'torch.cat', ({(816, 39, 816, 72): '[dummy_scores, antecedent_scores]', (816, 74, 816, 76): '-1'}, {}), '([dummy_scores, antecedent_scores], -1)', False, 'import torch\n'), ((105, 12, 105, 48), 'allennlp.modules.TimeDistributed', 'TimeDistributed', ({(105, 28, 105, 47): 'mention_feedforward'}, {}), '(mention_feedforward)', False, 'from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\n'), ((123, 44, 126, 104), 'allennlp.modules.span_extractors.EndpointSpanExtractor', 'EndpointSpanExtractor', (), '', False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((127, 45, 127, 111), 'allennlp.modules.span_extractors.SelfAttentiveSpanExtractor', 'SelfAttentiveSpanExtractor', (), '', False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((155, 45, 155, 111), 'allennlp.modules.span_extractors.SelfAttentiveSpanExtractor', 'SelfAttentiveSpanExtractor', (), '', False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((215, 29, 215, 64), 'torch.nn.BCEWithLogitsLoss', 'BCEWithLogitsLoss', (), '', False, 'from torch.nn import BCEWithLogitsLoss\n'), ((218, 36, 218, 71), 'torch.nn.Dropout', 'torch.nn.Dropout', (), '', False, 'import torch\n'), ((265, 47, 265, 86), 'torch.transpose', 'torch.transpose', ({(265, 63, 265, 79): 'event_embeddings', (265, 81, 265, 82): '1', (265, 84, 265, 85): '2'}, {}), '(event_embeddings, 1, 2)', False, 'import torch\n'), ((272, 20, 
272, 69), 'torch.bmm', 'torch.bmm', ({(272, 30, 272, 50): 'event_prob[:, :, 1:]', (272, 52, 272, 68): 'event_embeddings'}, {}), '(event_prob[:, :, 1:], event_embeddings)', False, 'import torch\n'), ((274, 45, 274, 87), 'torch.cat', 'torch.cat', ({(274, 55, 274, 82): '[event_rep, top_embeddings]', (274, 84, 274, 86): '-1'}, {}), '([event_rep, top_embeddings], -1)', False, 'import torch\n'), ((287, 40, 287, 95), 'torch.tril', 'torch.tril', ({(287, 51, 287, 69): 'new_attention_mask', (287, 71, 287, 94): 'self._local_window_size'}, {}), '(new_attention_mask, self._local_window_size)', False, 'import torch\n'), ((379, 30, 379, 97), 'torch.cat', 'torch.cat', ({(379, 40, 379, 92): '[endpoint_span_embeddings, attended_span_embeddings]', (379, 94, 379, 96): '-1'}, {}), '([endpoint_span_embeddings, attended_span_embeddings], -1)', False, 'import torch\n'), ((398, 30, 398, 65), 'torch.cat', 'torch.cat', ({(398, 40, 398, 60): 'span_embeddings_list', (398, 62, 398, 64): '-1'}, {}), '(span_embeddings_list, -1)', False, 'import torch\n'), ((406, 50, 406, 100), 'math.floor', 'math.floor', ({(406, 61, 406, 99): 'self._spans_per_word * document_length'}, {}), '(self._spans_per_word * document_length)', False, 'import math\n'), ((455, 40, 455, 69), 'allennlp.nn.util.get_device_of', 'util.get_device_of', ({(455, 59, 455, 68): 'text_mask'}, {}), '(text_mask)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((515, 39, 515, 86), 'torch.gather', 'torch.gather', ({(515, 52, 515, 69): 'event_type_labels', (515, 71, 515, 72): '1', (515, 74, 515, 85): 'top_indices'}, {}), '(event_type_labels, 1, top_indices)', False, 'import torch\n'), ((554, 36, 554, 89), 'allennlp.nn.util.masked_log_softmax', 'util.masked_log_softmax', ({(554, 60, 554, 78): 'coreference_scores', (554, 80, 554, 88): 'top_mask'}, {}), '(coreference_scores, top_mask)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((688, 12, 688, 62), 'torch.cat', 'torch.cat', ({(688, 22, 688, 58): '[bucket_values, label_bucket_values]', (688, 60, 688, 61): '1'}, {}), '([bucket_values, label_bucket_values], 1)', False, 'import torch\n'), ((860, 21, 860, 69), 'allennlp.nn.util.get_range_vector', 'util.get_range_vector', ({(860, 43, 860, 60): 'num_spans_to_keep', (860, 62, 860, 68): 'device'}, {}), '(num_spans_to_keep, device)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((138, 40, 142, 71), 'allennlp.modules.seq2seq_encoders.IntraSentenceAttentionEncoder', 'IntraSentenceAttentionEncoder', (), '', False, 'from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder\n'), ((148, 48, 151, 108), 'allennlp.modules.span_extractors.EndpointSpanExtractor', 'EndpointSpanExtractor', (), '', False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((165, 40, 169, 71), 'allennlp.modules.seq2seq_encoders.IntraSentenceAttentionEncoder', 'IntraSentenceAttentionEncoder', (), '', False, 'from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder\n'), ((179, 16, 179, 34), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ({}, {}), '()', False, 'import torch\n'), ((254, 53, 254, 94), 'allennlp.nn.util.get_device_of', 'util.get_device_of', ({(254, 72, 254, 93): 'top_event_type_labels'}, {}), '(top_event_type_labels)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((347, 20, 347, 50), 'allennlp.nn.util.get_text_field_mask', 'util.get_text_field_mask', 
({(347, 45, 347, 49): 'text'}, {}), '(text)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((465, 46, 466, 99), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', ({(465, 74, 465, 84): 'top_scores', (466, 74, 466, 98): 'valid_antecedent_indices'}, {}), '(top_scores, valid_antecedent_indices)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((863, 32, 863, 78), 'allennlp.nn.util.get_range_vector', 'util.get_range_vector', ({(863, 54, 863, 69): 'max_antecedents', (863, 71, 863, 77): 'device'}, {}), '(max_antecedents, device)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((134, 42, 134, 81), 'allennlp.modules.similarity_functions.DotProductSimilarity', 'DotProductSimilarity', (), '', False, 'from allennlp.modules.similarity_functions import DotProductSimilarity\n'), ((161, 42, 161, 81), 'allennlp.modules.similarity_functions.DotProductSimilarity', 'DotProductSimilarity', (), '', False, 'from allennlp.modules.similarity_functions import DotProductSimilarity\n'), ((178, 32, 178, 93), 'torch.nn.Linear', 'torch.nn.Linear', ({(178, 48, 178, 71): 'span_embedding_size * 2', (178, 73, 178, 92): 'span_embedding_size'}, {}), '(span_embedding_size * 2, span_embedding_size)', False, 'import torch\n'), ((213, 76, 213, 110), 'torch.tensor', 'torch.tensor', ({(213, 89, 213, 109): 'self._bce_pos_weight'}, {}), '(self._bce_pos_weight)', False, 'import torch\n'), ((231, 80, 231, 109), 'allennlp.nn.util.get_device_of', 'util.get_device_of', ({(231, 99, 231, 108): 'span_mask'}, {}), '(span_mask)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((523, 32, 524, 85), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', ({(523, 60, 523, 78): 'pruned_gold_labels', (524, 60, 524, 84): 'valid_antecedent_indices'}, {}), '(pruned_gold_labels, valid_antecedent_indices)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((752, 33, 752, 59), 'torch.sum', 'torch.sum', ({(752, 43, 752, 58): 'pairwise_labels'}, {}), '(pairwise_labels)', False, 'import torch\n'), ((753, 31, 753, 73), 'torch.sum', 'torch.sum', ({(753, 41, 753, 72): 'type_antecedent_labels[:, :, (0)]'}, {}), '(type_antecedent_labels[:, :, (0)])', False, 'import torch\n'), ((754, 32, 754, 105), 'torch.sum', 'torch.sum', ({(754, 42, 754, 104): 'type_antecedent_labels[:, :, 1:self._positive_label_size + 1]'}, {}), '(type_antecedent_labels[:, :, 1:self._positive_label_size + 1])', False, 'import torch\n'), ((232, 37, 232, 68), 'torch.zeros_like', 'torch.zeros_like', ({(232, 54, 232, 67): 'event_indices'}, {}), '(event_indices)', False, 'import torch\n'), ((556, 48, 556, 92), 'allennlp.nn.util.logsumexp', 'util.logsumexp', ({(556, 63, 556, 91): 'correct_antecedent_log_probs'}, {}), '(correct_antecedent_log_probs)', False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n')]
vietanhtran2710/ArtificialIntelligenceHomework
week2/7litersProblem.py
f4da761016d67477b50856cadf1e2560230d3f79
"""
Given 3 bottles of capacities 3, 5, and 9 liters,
count number of all possible solutions to get 7 liters
"""

current_path = [[0, 0, 0]]
CAPACITIES = (3, 5, 9)
solutions_count = 0


def move_to_new_state(current_state):
    global solutions_count, current_path
    if 7 in current_state:
        solutions_count += 1
    else:
        # Empty bottle
        for i in range(3):
            if current_state[i] != 0:
                new_state = list(current_state)
                new_state[i] = 0
                if new_state not in current_path:
                    current_path.append(new_state)
                    move_to_new_state(new_state)
                    current_path.pop()
        # Fill bottle
        for i in range(3):
            if current_state[i] != CAPACITIES[i]:
                new_state = list(current_state)
                new_state[i] = CAPACITIES[i]
                if new_state not in current_path:
                    current_path.append(new_state)
                    move_to_new_state(new_state)
                    current_path.pop()
        # Pour from one bottle to another
        for i in range(3):
            for j in range(3):
                if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:
                    new_state = list(current_state)
                    liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])
                    new_state[j] += liters_change
                    new_state[i] -= liters_change
                    if new_state not in current_path:
                        current_path.append(new_state)
                        move_to_new_state(new_state)
                        current_path.pop()


if __name__ == "__main__":
    try:
        current_state = [0, 0, 0]
        move_to_new_state(current_state)
        print(solutions_count)
    except KeyboardInterrupt:
        print(solutions_count)

# Result: at least 44900799 solution
[]
avezraj/st2
st2common/st2common/bootstrap/rulesregistrar.py
519c7f6819e52fb289c440bb7d1df7b558bb9ed7
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import os

import six

from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.rule import RuleAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.rule import Rule
from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count
from st2common.exceptions.db import coditationDBObjectNotFoundError
import st2common.content.utils as content_utils

__all__ = [
    'RulesRegistrar',
    'register_rules'
]

LOG = logging.getLogger(__name__)


class RulesRegistrar(ResourceRegistrar):
    ALLOWED_EXTENSIONS = ALLOWED_EXTS

    def register_from_packs(self, base_dirs):
        """
        :return: Number of rules registered.
        :rtype: ``int``
        """
        # Register packs first
        self.register_packs(base_dirs=base_dirs)

        registered_count = 0
        content = self._pack_loader.get_content(base_dirs=base_dirs,
                                                content_type='rules')

        for pack, rules_dir in six.iteritems(content):
            if not rules_dir:
                LOG.debug('Pack %s does not contain rules.', pack)
                continue
            try:
                LOG.debug('Registering rules from pack: %s', pack)
                rules = self._get_rules_from_pack(rules_dir)
                count = self._register_rules_from_pack(pack, rules)
                registered_count += count
            except Exception as e:
                if self._fail_on_failure:
                    raise e

                LOG.exception('Failed registering all rules from pack: %s', rules_dir)

        return registered_count

    def register_from_pack(self, pack_dir):
        """
        Register all the rules from the provided pack.

        :return: Number of rules registered.
        :rtype: ``int``
        """
        pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
        _, pack = os.path.split(pack_dir)
        rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
                                                            content_type='rules')

        # Register pack first
        self.register_pack(pack_name=pack, pack_dir=pack_dir)

        registered_count = 0
        if not rules_dir:
            return registered_count

        LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)

        try:
            rules = self._get_rules_from_pack(rules_dir=rules_dir)
            registered_count = self._register_rules_from_pack(pack=pack, rules=rules)
        except Exception as e:
            if self._fail_on_failure:
                raise e

            LOG.exception('Failed registering all rules from pack: %s', rules_dir)

        return registered_count

    def _get_rules_from_pack(self, rules_dir):
        return self.get_resources_from_pack(resources_dir=rules_dir)

    def _register_rules_from_pack(self, pack, rules):
        registered_count = 0

        # TODO: Refactor this monstrosity
        for rule in rules:
            LOG.debug('Loading rule from %s.', rule)
            try:
                content = self._meta_loader.load(rule)
                pack_field = content.get('pack', None)
                if not pack_field:
                    content['pack'] = pack
                    pack_field = pack
                if pack_field != pack:
                    raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
                                    (pack, pack_field))

                metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,
                                                                             file_path=rule,
                                                                             use_pack_cache=True)
                content['metadata_file'] = metadata_file

                rule_api = RuleAPI(**content)
                rule_api.validate()
                rule_db = RuleAPI.to_model(rule_api)

                # Migration from rule without pack to rule with pack.
                # There might be a rule with same name but in pack `default`
                # generated in migration script. In this case, we want to
                # delete so we don't have duplicates.
                if pack_field != DEFAULT_PACK_NAME:
                    try:
                        rule_ref = ResourceReference.to_string_reference(name=content['name'],
                                                                         pack=DEFAULT_PACK_NAME)
                        LOG.debug('Looking for rule %s in pack %s', content['name'],
                                  DEFAULT_PACK_NAME)
                        existing = Rule.get_by_ref(rule_ref)
                        LOG.debug('Existing = %s', existing)
                        if existing:
                            LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)
                            Rule.delete(existing)
                    except:
                        LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)

                try:
                    rule_ref = ResourceReference.to_string_reference(name=content['name'],
                                                                     pack=content['pack'])
                    existing = Rule.get_by_ref(rule_ref)
                    if existing:
                        rule_db.id = existing.id
                        LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)
                except coditationDBObjectNotFoundError:
                    LOG.debug('Rule %s not found. Creating new one.', rule)

                try:
                    rule_db = Rule.add_or_update(rule_db)
                    increment_trigger_ref_count(rule_api=rule_api)
                    extra = {'rule_db': rule_db}
                    LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)
                except Exception:
                    LOG.exception('Failed to create rule %s.', rule_api.name)

                # If there was an existing rule then the ref count was updated in
                # to_model so it needs to be adjusted down here. Also, update could
                # lead to removal of a Trigger so now is a good time for book-keeping.
                if existing:
                    cleanup_trigger_db_for_rule(existing)
            except Exception as e:
                if self._fail_on_failure:
                    msg = ('Failed to register rule "%s" from pack "%s": %s' % (rule, pack,
                           six.text_type(e)))
                    raise ValueError(msg)

                LOG.exception('Failed registering rule from %s.', rule)
            else:
                registered_count += 1

        return registered_count


def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
                   fail_on_failure=False):
    if packs_base_paths:
        assert isinstance(packs_base_paths, list)

    if not packs_base_paths:
        packs_base_paths = content_utils.get_packs_base_paths()

    registrar = RulesRegistrar(use_pack_cache=use_pack_cache,
                               fail_on_failure=fail_on_failure)

    if pack_dir:
        result = registrar.register_from_pack(pack_dir=pack_dir)
    else:
        result = registrar.register_from_packs(base_dirs=packs_base_paths)

    return result
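
# Registration flow (summary): register_rules() instantiates RulesRegistrar and
# delegates to register_from_pack() / register_from_packs(). Those methods load
# each rule file through the meta loader, validate it as a RuleAPI, reconcile it
# with any existing RuleDB (including legacy duplicates in the 'default' pack),
# persist it with Rule.add_or_update(), and keep Trigger reference counts in sync
# via increment_trigger_ref_count() and cleanup_trigger_db_for_rule().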
[((36, 6, 36, 33), 'st2common.log.getLogger', 'logging.getLogger', ({(36, 24, 36, 32): '__name__'}, {}), '(__name__)', True, 'from st2common import log as logging\n'), ((53, 31, 53, 53), 'six.iteritems', 'six.iteritems', ({(53, 45, 53, 52): 'content'}, {}), '(content)', False, 'import six\n'), ((78, 18, 78, 41), 'os.path.split', 'os.path.split', ({(78, 32, 78, 40): 'pack_dir'}, {}), '(pack_dir)', False, 'import os\n'), ((190, 27, 190, 63), 'st2common.content.utils.get_packs_base_paths', 'content_utils.get_packs_base_paths', ({}, {}), '()', True, 'import st2common.content.utils as content_utils\n'), ((121, 32, 123, 89), 'st2common.content.utils.get_relative_path_to_pack_file', 'content_utils.get_relative_path_to_pack_file', (), '', True, 'import st2common.content.utils as content_utils\n'), ((126, 27, 126, 45), 'st2common.models.api.rule.RuleAPI', 'RuleAPI', ({}, {}), '(**content)', False, 'from st2common.models.api.rule import RuleAPI\n'), ((128, 26, 128, 52), 'st2common.models.api.rule.RuleAPI.to_model', 'RuleAPI.to_model', ({(128, 43, 128, 51): 'rule_api'}, {}), '(rule_api)', False, 'from st2common.models.api.rule import RuleAPI\n'), ((149, 31, 150, 90), 'st2common.models.system.common.ResourceReference.to_string_reference', 'ResourceReference.to_string_reference', (), '', False, 'from st2common.models.system.common import ResourceReference\n'), ((151, 31, 151, 56), 'st2common.persistence.rule.Rule.get_by_ref', 'Rule.get_by_ref', ({(151, 47, 151, 55): 'rule_ref'}, {}), '(rule_ref)', False, 'from st2common.persistence.rule import Rule\n'), ((159, 30, 159, 57), 'st2common.persistence.rule.Rule.add_or_update', 'Rule.add_or_update', ({(159, 49, 159, 56): 'rule_db'}, {}), '(rule_db)', False, 'from st2common.persistence.rule import Rule\n'), ((160, 20, 160, 66), 'st2common.services.triggers.increment_trigger_ref_count', 'increment_trigger_ref_count', (), '', False, 'from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count\n'), ((170, 20, 170, 57), 'st2common.services.triggers.cleanup_trigger_db_for_rule', 'cleanup_trigger_db_for_rule', ({(170, 48, 170, 56): 'existing'}, {}), '(existing)', False, 'from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count\n'), ((136, 35, 137, 96), 'st2common.models.system.common.ResourceReference.to_string_reference', 'ResourceReference.to_string_reference', (), '', False, 'from st2common.models.system.common import ResourceReference\n'), ((140, 35, 140, 60), 'st2common.persistence.rule.Rule.get_by_ref', 'Rule.get_by_ref', ({(140, 51, 140, 59): 'rule_ref'}, {}), '(rule_ref)', False, 'from st2common.persistence.rule import Rule\n'), ((144, 28, 144, 49), 'st2common.persistence.rule.Rule.delete', 'Rule.delete', ({(144, 40, 144, 48): 'existing'}, {}), '(existing)', False, 'from st2common.persistence.rule import Rule\n'), ((174, 80, 174, 96), 'six.text_type', 'six.text_type', ({(174, 94, 174, 95): 'e'}, {}), '(e)', False, 'import six\n')]
aiven/azure-sdk-for-python
sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
8764dc07423beca46ed0b51212d81289d9e52c60
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._cost_management_client_enums import * class Resource(msrest.serialization.Model): """The Resource model definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.tags = None class Alert(Resource): """An individual alert. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :param definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param source: Source of alert. Possible values include: "Preset", "User". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param status: alert status. Possible values include: "None", "Active", "Overridden", "Resolved", "Dismissed". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created. :type creation_time: str :param close_time: dateTime in which alert was closed. :type close_time: str :param modification_time: dateTime in which alert was last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which the alert status was last modified. 
:type status_modification_time: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition: Optional["AlertPropertiesDefinition"] = None, description: Optional[str] = None, source: Optional[Union[str, "AlertSource"]] = None, details: Optional["AlertPropertiesDetails"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, "AlertStatus"]] = None, creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition = definition self.description = description self.source = source self.details = details self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): """defines the type of alert. :param type: type of alert. Possible values include: "Budget", "Invoice", "Credit", "Quota", "General", "xCloud", "BudgetForecast". :type type: str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible values include: "Cost", "Usage", "Billing", "System". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered alert. Possible values include: "CostThresholdExceeded", "UsageThresholdExceeded", "CreditThresholdApproaching", "CreditThresholdReached", "QuotaThresholdApproaching", "QuotaThresholdReached", "MultiCurrency", "ForecastCostThresholdExceeded", "ForecastUsageThresholdExceeded", "InvoiceDueDateApproaching", "InvoiceDueDateReached", "CrossCloudNewDataAvailable", "CrossCloudCollectionError", "GeneralThresholdError". 
:type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'category': {'key': 'category', 'type': 'str'}, 'criteria': {'key': 'criteria', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, "AlertType"]] = None, category: Optional[Union[str, "AlertCategory"]] = None, criteria: Optional[Union[str, "AlertCriteria"]] = None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category = category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): """Alert details. :param time_grain_type: Type of timegrain cadence. Possible values include: "None", "Monthly", "Quarterly", "Annually", "BillingMonth", "BillingQuarter", "BillingAnnual". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate. :type period_start_date: str :param triggered_by: notificationId that triggered this alert. :type triggered_by: str :param resource_group_filter: array of resourceGroups to filter by. :type resource_group_filter: list[object] :param resource_filter: array of resources to filter by. :type resource_filter: list[object] :param meter_filter: array of meters to filter by. :type meter_filter: list[object] :param tag_filter: tags to filter by. :type tag_filter: object :param threshold: notification threshold percentage as a decimal which activated this alert. :type threshold: float :param operator: operator used to compare currentSpend with amount. Possible values include: "None", "EqualTo", "GreaterThan", "GreaterThanOrEqualTo", "LessThan", "LessThanOrEqualTo". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount. :type amount: float :param unit: unit of currency being used. :type unit: str :param current_spend: current spend. :type current_spend: float :param contact_emails: list of emails to contact. :type contact_emails: list[str] :param contact_groups: list of action groups to broadcast to. :type contact_groups: list[str] :param contact_roles: list of contact roles. :type contact_roles: list[str] :param overriding_alert: overriding alert. 
:type overriding_alert: str """ _attribute_map = { 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold', 'type': 'float'}, 'operator': {'key': 'operator', 'type': 'str'}, 'amount': {'key': 'amount', 'type': 'float'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, } def __init__( self, *, time_grain_type: Optional[Union[str, "AlertTimeGrainType"]] = None, period_start_date: Optional[str] = None, triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]] = None, tag_filter: Optional[object] = None, threshold: Optional[float] = None, operator: Optional[Union[str, "AlertOperator"]] = None, amount: Optional[float] = None, unit: Optional[str] = None, current_spend: Optional[float] = None, contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str] = None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date = period_start_date self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter self.meter_filter = meter_filter self.tag_filter = tag_filter self.threshold = threshold self.operator = operator self.amount = amount self.unit = unit self.current_spend = current_spend self.contact_emails = contact_emails self.contact_groups = contact_groups self.contact_roles = contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): """Result of alerts. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get the next set of alerts results if there are any. :vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link = None class CommonExportProperties(msrest.serialization.Model): """The common properties of the export. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param format: The format of the export being delivered. Currently only 'Csv' is supported. Possible values include: "Csv". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for the export. 
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime """ _validation = { 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__( self, *, delivery_info: "ExportDeliveryInfo", definition: "ExportDefinition", format: Optional[Union[str, "FormatType"]] = None, run_history: Optional["ExportExecutionListResult"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate = None class Dimension(Resource): """Dimension. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :ivar description: Dimension description. :vartype description: str :ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param data: :type data: list[str] :ivar total: Total number of data for the dimension. :vartype total: int :ivar category: Dimension category. :vartype category: str :ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link: The link (url) to the next page of results. 
:vartype next_link: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def __init__( self, *, data: Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled = None self.grouping_enabled = None self.data = data self.total = None self.category = None self.usage_start = None self.usage_end = None self.next_link = None class DimensionsListResult(msrest.serialization.Model): """Result of listing dimensions. It contains a list of available dimensions. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Dimension]'}, } def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): """The request payload to update an alert. :param definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param source: Source of alert. Possible values include: "Preset", "User". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param status: alert status. Possible values include: "None", "Active", "Overridden", "Resolved", "Dismissed". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created. :type creation_time: str :param close_time: dateTime in which alert was closed. :type close_time: str :param modification_time: dateTime in which alert was last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which the alert status was last modified. 
:type status_modification_time: str """ _attribute_map = { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition: Optional["AlertPropertiesDefinition"] = None, description: Optional[str] = None, source: Optional[Union[str, "AlertSource"]] = None, details: Optional["AlertPropertiesDetails"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, "AlertStatus"]] = None, creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description = description self.source = source self.details = details self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): """The details of the error. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: Error code. :vartype code: str :ivar message: Error message indicating why the operation failed. :vartype message: str """ _validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message = None class ErrorResponse(msrest.serialization.Model): """Error response indicates that the service is not able to process the incoming request. The reason is provided in the error message. Some Error responses: * 429 TooManyRequests - Request is throttled. Retry after waiting for the time specified in the "x-ms-ratelimit-microsoft.consumption-retry-after" header. * 503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the time specified in the "Retry-After" header. :param error: The details of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails """ _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetails'}, } def __init__( self, *, error: Optional["ErrorDetails"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model): """The Resource model definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. 
:vartype type: str :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. :type e_tag: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, } def __init__( self, *, e_tag: Optional[str] = None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.e_tag = e_tag class Export(ProxyResource): """An export resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. :type e_tag: str :param format: The format of the export being delivered. Currently only 'Csv' is supported. Possible values include: "Csv". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, e_tag: Optional[str] = None, format: Optional[Union[str, "FormatType"]] = None, delivery_info: Optional["ExportDeliveryInfo"] = None, definition: Optional["ExportDefinition"] = None, run_history: Optional["ExportExecutionListResult"] = None, schedule: Optional["ExportSchedule"] = None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate = None self.schedule = schedule class ExportDataset(msrest.serialization.Model): """The definition for data in the export. :param granularity: The granularity of rows in the export. Currently only 'Daily' is supported. 
Possible values include: "Daily". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration """ _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__( self, *, granularity: Optional[Union[str, "GranularityType"]] = None, configuration: Optional["ExportDatasetConfiguration"] = None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): """The export dataset configuration. Allows columns to be selected for the export. If not provided then the export will include all available columns. :param columns: Array of column names to be included in the export. If not provided then the export will include all available columns. The available columns can vary by customer channel (see examples). :type columns: list[str] """ _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model): """The definition of an export. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the export. Note that 'Usage' is equivalent to 'ActualCost' and is applicable to exports that do not yet provide data for charges or amortization for service reservations. Possible values include: "Usage", "ActualCost", "AmortizedCost". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for pulling data for the export. If custom, then a specific time period must be provided. Possible values include: "MonthToDate", "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling data for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for data in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, } def __init__( self, *, type: Union[str, "ExportType"], timeframe: Union[str, "TimeframeType"], time_period: Optional["ExportTimePeriod"] = None, data_set: Optional["ExportDataset"] = None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): """The destination information for the delivery of the export. To allow access to a storage account, you must register the account's subscription with the Microsoft.CostManagementExports resource provider. This is required once per subscription. When creating an export in the Azure portal, it is done automatically, however API users need to register the subscription. 
For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters must be populated in order to send to Azure. :param resource_id: Required. The resource id of the storage account where exports will be delivered. :type resource_id: str :param container: Required. The name of the container where exports will be uploaded. :type container: str :param root_folder_path: The name of the directory where exports will be uploaded. :type root_folder_path: str """ _validation = { 'resource_id': {'required': True}, 'container': {'required': True}, } _attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'container': {'key': 'container', 'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'}, } def __init__( self, *, resource_id: str, container: str, root_folder_path: Optional[str] = None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id self.container = container self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): """The delivery information associated with a export. All required parameters must be populated in order to send to Azure. :param destination: Required. Has destination for the export being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination """ _validation = { 'destination': {'required': True}, } _attribute_map = { 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'}, } def __init__( self, *, destination: "ExportDeliveryDestination", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination class ExportExecution(Resource): """An export execution. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :param execution_type: The type of the export execution. Possible values include: "OnDemand", "Scheduled". :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The last known status of the export execution. Possible values include: "Queued", "InProgress", "Completed", "Failed", "Timeout", "NewDataNotAvailable", "DataNotAvailable". :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier for the entity that executed the export. For OnDemand executions it is the user email. For scheduled executions it is 'System'. :type submitted_by: str :param submitted_time: The time when export was queued to be executed. :type submitted_time: ~datetime.datetime :param processing_start_time: The time when export was picked up to be executed. :type processing_start_time: ~datetime.datetime :param processing_end_time: The time when the export execution finished. :type processing_end_time: ~datetime.datetime :param file_name: The name of the exported file. :type file_name: str :param run_settings: The export settings that were in effect for this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details of any error. 
:type error: ~azure.mgmt.costmanagement.models.ErrorDetails """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, } def __init__( self, *, execution_type: Optional[Union[str, "ExecutionType"]] = None, status: Optional[Union[str, "ExecutionStatus"]] = None, submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str] = None, run_settings: Optional["CommonExportProperties"] = None, error: Optional["ErrorDetails"] = None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status = status self.submitted_by = submitted_by self.submitted_time = submitted_time self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time self.file_name = file_name self.run_settings = run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model): """Result of listing the execution history of an export. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: A list of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[ExportExecution]'}, } def __init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model): """Result of listing exports. It contains a list of available exports in the scope provided. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Export]'}, } def __init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties): """The properties of the export. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param format: The format of the export being delivered. Currently only 'Csv' is supported. Possible values include: "Csv". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for the export. 
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule """ _validation = { 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, delivery_info: "ExportDeliveryInfo", definition: "ExportDefinition", format: Optional[Union[str, "FormatType"]] = None, run_history: Optional["ExportExecutionListResult"] = None, schedule: Optional["ExportSchedule"] = None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): """The start and end date for recurrence schedule. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date of recurrence. :type from_property: ~datetime.datetime :param to: The end date of recurrence. :type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ExportSchedule(msrest.serialization.Model): """The schedule associated with the export. All required parameters must be populated in order to send to Azure. :param status: The status of the export's schedule. If 'Inactive', the export's schedule is paused. Possible values include: "Active", "Inactive". :type status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule recurrence. Possible values include: "Daily", "Weekly", "Monthly", "Annually". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and end date of the recurrence. The start date must be in future. If present, the end date must be greater than start date. 
:type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod """ _validation = { 'recurrence': {'required': True}, } _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__( self, *, recurrence: Union[str, "RecurrenceType"], status: Optional[Union[str, "StatusType"]] = None, recurrence_period: Optional["ExportRecurrencePeriod"] = None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence = recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): """The date range for data in the export. This should only be specified with timeFrame set to 'Custom'. The maximum date range is 3 months. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date for export data. :type from_property: ~datetime.datetime :param to: Required. The end date for export data. :type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ForecastDataset(msrest.serialization.Model): """The definition of data present in the forecast. :param granularity: The granularity of rows in the forecast. Possible values include: "Daily". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the data in the export. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the forecast. The key of each item in the dictionary is the alias for the aggregated column. forecast can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression to use in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter """ _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str, "GranularityType"]] = None, configuration: Optional["QueryDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "QueryAggregation"]] = None, filter: Optional["QueryFilter"] = None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model): """The definition of a forecast. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the forecast. Possible values include: "Usage", "ActualCost", "AmortizedCost". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time frame for pulling data for the forecast. 
If custom, then a specific time period must be provided. Possible values include: "MonthToDate", "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period for pulling data for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining if actualCost will be included. :type include_actual_cost: bool :param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included. :type include_fresh_partial_cost: bool """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, } def __init__( self, *, type: Union[str, "ForecastType"], timeframe: Union[str, "ForecastTimeframeType"], time_period: Optional["QueryTimePeriod"] = None, dataset: Optional["ForecastDataset"] = None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool] = None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): """Each KPI must contain a 'type' and 'enabled' key. :param type: KPI type (Forecast, Budget). Possible values include: "Forecast", "Budget". :type type: str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource related to metric (budget). :type id: str :param enabled: show the KPI in the UI?. :type enabled: bool """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, } def __init__( self, *, type: Optional[Union[str, "KpiType"]] = None, id: Optional[str] = None, enabled: Optional[bool] = None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type = type self.id = id self.enabled = enabled class Operation(msrest.serialization.Model): """A Cost management REST API operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name: str :param display: The object that represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay """ _validation = { 'name': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self, *, display: Optional["OperationDisplay"] = None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = None self.display = display class OperationDisplay(msrest.serialization.Model): """The object that represents the operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar provider: Service provider: Microsoft.CostManagement. 
:vartype provider: str :ivar resource: Resource on which the operation is performed: Dimensions, Query. :vartype resource: str :ivar operation: Operation type: Read, write, delete, etc. :vartype operation: str """ _validation = { 'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly': True}, } _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource = None self.operation = None class OperationListResult(msrest.serialization.Model): """Result of listing cost management operations. It contains a list of operations and a URL link to get the next set of results. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: List of cost management operations supported by the Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get the next set of operation list results if there are any. :vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link = None class PivotProperties(msrest.serialization.Model): """Each pivot must contain a 'type' and 'name'. :param type: Data type to show in view. Possible values include: "Dimension", "TagKey". :type type: str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field to show in view. :type name: str """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, "PivotType"]] = None, name: Optional[str] = None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type = type self.name = name class QueryAggregation(msrest.serialization.Model): """The aggregation expression to be used in the query. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the column to aggregate. :type name: str :param function: Required. The name of the aggregation function to use. Possible values include: "Sum". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType """ _validation = { 'name': {'required': True}, 'function': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, } def __init__( self, *, name: str, function: Union[str, "FunctionType"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function = function class QueryColumn(msrest.serialization.Model): """QueryColumn. :param name: The name of column. :type name: str :param type: The type of column. :type type: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, *, name: Optional[str] = None, type: Optional[str] = None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name = name self.type = type class QueryComparisonExpression(msrest.serialization.Model): """The comparison expression to be used in the query. All required parameters must be populated in order to send to Azure. 
:param name: Required. The name of the column to use in comparison. :type name: str :param operator: Required. The operator to use for comparison. Possible values include: "In", "Contains". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to use for comparison. :type values: list[str] """ _validation = { 'name': {'required': True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def __init__( self, *, name: str, operator: Union[str, "OperatorType"], values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values = values class QueryDataset(msrest.serialization.Model): """The definition of data present in the query. :param granularity: The granularity of rows in the query. Possible values include: "Daily". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the data in the export. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the query. The key of each item in the dictionary is the alias for the aggregated column. Query can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group by expression to use in the query. Query can have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression to use in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter """ _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str, "GranularityType"]] = None, configuration: Optional["QueryDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "QueryAggregation"]] = None, grouping: Optional[List["QueryGrouping"]] = None, filter: Optional["QueryFilter"] = None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): """The configuration of dataset in the query. :param columns: Array of column names to be included in the query. Any valid query column name is allowed. If not provided, then query includes all columns. :type columns: list[str] """ _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model): """The definition of a query. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the query. 
Possible values include: "Usage", "ActualCost", "AmortizedCost". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: "MonthToDate", "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling data for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, } def __init__( self, *, type: Union[str, "ExportType"], timeframe: Union[str, "TimeframeType"], time_period: Optional["QueryTimePeriod"] = None, dataset: Optional["QueryDataset"] = None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model): """The filter expression to be used in the export. :param and_property: The logical "AND" expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical "OR" expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical "NOT" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression """ _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, } def __init__( self, *, and_property: Optional[List["QueryFilter"]] = None, or_property: Optional[List["QueryFilter"]] = None, not_property: Optional["QueryFilter"] = None, dimension: Optional["QueryComparisonExpression"] = None, tag: Optional["QueryComparisonExpression"] = None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag = tag class QueryGrouping(msrest.serialization.Model): """The group by expression to be used in the query. All required parameters must be populated in order to send to Azure. :param type: Required. Has type of the column to group. Possible values include: "Tag", "Dimension". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name of the column to group. 
:type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, type: Union[str, "QueryColumnType"], name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name = name class QueryResult(Resource): """Result of query. It contains all columns listed under groupings and aggregation. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :param next_link: The link (url) to the next page of results. :type next_link: str :param columns: Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows. :type rows: list[list[object]] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'}, } def __init__( self, *, next_link: Optional[str] = None, columns: Optional[List["QueryColumn"]] = None, rows: Optional[List[List[object]]] = None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns = columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model): """The start and end date for pulling data for the query. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date to pull data from. :type from_property: ~datetime.datetime :param to: Required. The end date to pull data to. :type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model): """The aggregation expression to be used in the report. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the column to aggregate. :type name: str :param function: Required. The name of the aggregation function to use. Possible values include: "Sum". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType """ _validation = { 'name': {'required': True}, 'function': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, } def __init__( self, *, name: str, function: Union[str, "FunctionType"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): """The comparison expression to be used in the report. All required parameters must be populated in order to send to Azure. :param name: Required. 
The name of the column to use in comparison. :type name: str :param operator: Required. The operator to use for comparison. Possible values include: "In", "Contains". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to use for comparison. :type values: list[str] """ _validation = { 'name': {'required': True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def __init__( self, *, name: str, operator: Union[str, "OperatorType"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values = values class ReportConfigDataset(msrest.serialization.Model): """The definition of data present in the report. :param granularity: The granularity of rows in the report. Possible values include: "Daily", "Monthly". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the data in the report. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the report. The key of each item in the dictionary is the alias for the aggregated column. Report can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression to use in the report. Report can have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter """ _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, } def __init__( self, *, granularity: Optional[Union[str, "ReportGranularityType"]] = None, configuration: Optional["ReportConfigDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None, grouping: Optional[List["ReportConfigGrouping"]] = None, sorting: Optional[List["ReportConfigSorting"]] = None, filter: Optional["ReportConfigFilter"] = None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): """The definition of data present in the report. :param granularity: The granularity of rows in the report. Possible values include: "Daily", "Monthly". 
:type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the data in the report. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the report. The key of each item in the dictionary is the alias for the aggregated column. Report can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression to use in the report. Report can have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated """ _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self, *, granularity: Optional[Union[str, "ReportGranularityType"]] = None, configuration: Optional["ReportConfigDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None, grouping: Optional[List["ReportConfigGrouping"]] = None, sorting: Optional[List["ReportConfigSorting"]] = None, filter: Optional["ReportConfigFilterAutoGenerated"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): """The configuration of dataset in the report. :param columns: Array of column names to be included in the report. Any valid report column name is allowed. If not provided, then report includes all columns. :type columns: list[str] """ _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): """The definition of a report config. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the report. Usage represents actual usage, forecast represents forecasted data and UsageAndForecast represents both usage and forecasted data. Actual usage and forecasted data can be differentiated based on dates. Possible values include: "Usage". :type type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time frame for pulling data for the report. If custom, then a specific time period must be provided. Possible values include: "WeekToDate", "MonthToDate", "YearToDate", "Custom". 
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *, type: Union[str, "ReportType"], timeframe: Union[str, "ReportTimeframeType"], time_period: Optional["ReportConfigTimePeriod"] = None, dataset: Optional["ReportConfigDatasetAutoGenerated"] = None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): """The filter expression to be used in the report. :param and_property: The logical "AND" expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical "OR" expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical "NOT" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression """ _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List["ReportConfigFilter"]] = None, or_property: Optional[List["ReportConfigFilter"]] = None, not_property: Optional["ReportConfigFilter"] = None, dimension: Optional["ReportConfigComparisonExpression"] = None, tag: Optional["ReportConfigComparisonExpression"] = None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): """The filter expression to be used in the report. :param and_property: The logical "AND" expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical "OR" expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical "NOT" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression for a dimension. 
:type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression """ _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None, or_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None, not_property: Optional["ReportConfigFilterAutoGenerated"] = None, dimension: Optional["ReportConfigComparisonExpression"] = None, tag: Optional["ReportConfigComparisonExpression"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): """The group by expression to be used in the report. All required parameters must be populated in order to send to Azure. :param type: Required. Has type of the column to group. Possible values include: "Tag", "Dimension". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name of the column to group. This version supports subscription lowest possible grain. :type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, type: Union[str, "ReportConfigColumnType"], name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name = name class ReportConfigSorting(msrest.serialization.Model): """The order by expression to be used in the report. All required parameters must be populated in order to send to Azure. :param direction: Direction of sort. Possible values include: "Ascending", "Descending". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name of the column to sort. :type name: str """ _validation = { 'name': {'required': True}, } _attribute_map = { 'direction': {'key': 'direction', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, name: str, direction: Optional[Union[str, "ReportConfigSortingDirection"]] = None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): """The start and end date for pulling data for the report. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date to pull data from. :type from_property: ~datetime.datetime :param to: Required. The end date to pull data to. 
:type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class View(ProxyResource): """States and configurations of Cost Analysis. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. :type e_tag: str :param display_name: User input name of the view. Required. :type display_name: str :param scope: Cost Management scope to save the view on. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope: str :ivar created_on: Date the user created this view. :vartype created_on: ~datetime.datetime :ivar modified_on: Date when the user last modified this view. :vartype modified_on: ~datetime.datetime :param chart: Chart type of the main view in Cost Analysis. Required. Possible values include: "Area", "Line", "StackedColumn", "GroupedColumn", "Table". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated over time. Possible values include: "true", "false". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use when displaying costs. Possible values include: "ActualCost", "AmortizedCost", "AHUB". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs to show in Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3 sub-views in the Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of the report. Usage represents actual usage, forecast represents forecasted data and UsageAndForecast represents both usage and forecasted data. Actual usage and forecasted data can be differentiated based on dates. Possible values include: "Usage". 
:type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame for pulling data for the report. If custom, then a specific time period must be provided. Possible values include: "WeekToDate", "MonthToDate", "YearToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__( self, *, e_tag: Optional[str] = None, display_name: Optional[str] = None, scope: Optional[str] = None, chart: Optional[Union[str, "ChartType"]] = None, accumulated: Optional[Union[str, "AccumulatedType"]] = None, metric: Optional[Union[str, "MetricType"]] = None, kpis: Optional[List["KpiProperties"]] = None, pivots: Optional[List["PivotProperties"]] = None, type_properties_query_type: Optional[Union[str, "ReportType"]] = None, timeframe: Optional[Union[str, "ReportTimeframeType"]] = None, time_period: Optional["ReportConfigTimePeriod"] = None, dataset: Optional["ReportConfigDataset"] = None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope = scope self.created_on = None self.modified_on = None self.chart = chart self.accumulated = accumulated self.metric = metric self.kpis = kpis self.pivots = pivots self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model): """Result of listing views. It contains a list of available views. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url) to the next page of results. 
:vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value = None self.next_link = None
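

# The models above are typically composed into a single QueryDefinition request body.
# A minimal sketch of that composition follows, using only classes defined in this module;
# the aggregation alias "totalCost" and the column name "PreTaxCost" are assumed example
# values, not part of the generated surface.
def _example_query_definition() -> QueryDefinition:
    return QueryDefinition(
        type="Usage",
        timeframe="MonthToDate",
        dataset=QueryDataset(
            granularity="Daily",
            aggregation={"totalCost": QueryAggregation(name="PreTaxCost", function="Sum")},
        ),
    )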
[]
jayvdb/brotlipy
test/test_simple_compression.py
ffddf2ea5adc584c8c353d246bb1077b7e781b63
# -*- coding: utf-8 -*- """ test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single chunks. """ import brotli import pytest from hypothesis import given from hypothesis.strategies import binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): """ Roundtripping data through the compressor works correctly. """ with open(simple_compressed_file[0], 'rb') as f: uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): """ Confirm that the streaming compressor works as expected. """ compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): """ Confirm that the streaming compressor works as expected, including flushes after each chunk. """ compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize( "params", [ {"mode": 52}, {"quality": 52}, {"lgwin": 52}, {"lgblock": 52}, ] ) @pytest.mark.parametrize("exception_cls", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params, exception_cls): with pytest.raises(exception_cls): brotli.Compressor(**params)
[((117, 1, 125, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(118, 4, 118, 12): '"""params"""', (119, 4, 124, 5): "[{'mode': 52}, {'quality': 52}, {'lgwin': 52}, {'lgblock': 52}]"}, {}), "('params', [{'mode': 52}, {'quality': 52}, {'lgwin':\n 52}, {'lgblock': 52}])", False, 'import pytest\n'), ((126, 1, 126, 71), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(126, 25, 126, 40): '"""exception_cls"""', (126, 42, 126, 70): '[brotli.Error, brotli.error]'}, {}), "('exception_cls', [brotli.Error, brotli.error])", False, 'import pytest\n'), ((48, 8, 50, 5), 'brotli.Compressor', 'brotli.Compressor', (), '', False, 'import brotli\n'), ((86, 8, 88, 5), 'brotli.Compressor', 'brotli.Compressor', (), '', False, 'import brotli\n'), ((104, 7, 104, 15), 'hypothesis.strategies.binary', 'binary', ({}, {}), '()', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((111, 8, 111, 39), 'brotli.Decompressor', 'brotli.Decompressor', ({(111, 28, 111, 38): 'dictionary'}, {}), '(dictionary)', False, 'import brotli\n'), ((112, 17, 112, 58), 'brotli.compress', 'brotli.compress', (), '', False, 'import brotli\n'), ((109, 7, 109, 15), 'hypothesis.strategies.binary', 'binary', ({}, {}), '()', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((109, 17, 109, 25), 'hypothesis.strategies.binary', 'binary', ({}, {}), '()', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((29, 15, 29, 53), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((31, 12, 31, 47), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((32, 10, 32, 46), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((66, 15, 66, 53), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((68, 12, 68, 47), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((69, 10, 69, 46), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((128, 9, 128, 37), 'pytest.raises', 'pytest.raises', ({(128, 23, 128, 36): 'exception_cls'}, {}), '(exception_cls)', False, 'import pytest\n'), ((129, 8, 129, 35), 'brotli.Compressor', 'brotli.Compressor', ({}, {}), '(**params)', False, 'import brotli\n'), ((24, 8, 24, 42), 'brotli.compress', 'brotli.compress', ({(24, 24, 24, 41): 'uncompressed_data'}, {}), '(uncompressed_data)', False, 'import brotli\n'), ((34, 8, 34, 42), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((35, 8, 35, 44), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((71, 8, 71, 42), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((72, 8, 72, 44), 'hypothesis.strategies.integers', 'integers', (), '', False, 'from hypothesis.strategies import binary, integers, sampled_from, one_of\n'), ((106, 29, 106, 47), 'brotli.compress', 'brotli.compress', ({(106, 
45, 106, 46): 's'}, {}), '(s)', False, 'import brotli\n')]
madmis/wexapi
wexapi/models/ticker.py
f5b1b9b566f767bca7d8fad1f08c3d1bca42355a
from decimal import Decimal class Ticker(object): def __init__( self, high: float, low: float, avg: float, vol: float, vol_cur: int, last: float, buy: float, sell: float, updated: int, ): self.high = high self.low = low self.avg = avg self.vol = vol self.vol_cur = vol_cur self.last = last self.buy = buy self.sell = sell self.updated = updated @property def high(self) -> Decimal: return self._high @high.setter def high(self, value: float): self._high = Decimal(value) @property def low(self) -> Decimal: return self._low @low.setter def low(self, value: float): self._low = Decimal(value) @property def avg(self) -> Decimal: return self._avg @avg.setter def avg(self, value: float): self._avg = Decimal(value) @property def vol(self) -> Decimal: return self._vol @vol.setter def vol(self, value: float): self._vol = Decimal(value) @property def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur = Decimal(value) @property def last(self) -> Decimal: return self._last @last.setter def last(self, value: float): self._last = Decimal(value) @property def buy(self) -> Decimal: return self._buy @buy.setter def buy(self, value: float): self._buy = Decimal(value) @property def sell(self) -> Decimal: return self._sell @sell.setter def sell(self, value: float): self._sell = Decimal(value) @property def updated(self) -> int: return self._updated @updated.setter def updated(self, value: int): self._updated = int(value)
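

# A small usage sketch: the constructor takes plain numbers and every setter above
# coerces its value to Decimal. All prices below are assumed example values.
# (Decimal(float) keeps the binary float value; strings such as "1.18" can also be
# passed to the setters for exact decimals.)
def _example_ticker() -> Ticker:
    return Ticker(
        high=1.25,
        low=1.10,
        avg=1.18,
        vol=1050.5,
        vol_cur=900,
        last=1.20,
        buy=1.19,
        sell=1.21,
        updated=1520000000,
    )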
[((33, 21, 33, 35), 'decimal.Decimal', 'Decimal', ({(33, 29, 33, 34): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((41, 20, 41, 34), 'decimal.Decimal', 'Decimal', ({(41, 28, 41, 33): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((49, 20, 49, 34), 'decimal.Decimal', 'Decimal', ({(49, 28, 49, 33): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((57, 20, 57, 34), 'decimal.Decimal', 'Decimal', ({(57, 28, 57, 33): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((65, 24, 65, 38), 'decimal.Decimal', 'Decimal', ({(65, 32, 65, 37): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((73, 21, 73, 35), 'decimal.Decimal', 'Decimal', ({(73, 29, 73, 34): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((81, 20, 81, 34), 'decimal.Decimal', 'Decimal', ({(81, 28, 81, 33): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n'), ((89, 21, 89, 35), 'decimal.Decimal', 'Decimal', ({(89, 29, 89, 34): 'value'}, {}), '(value)', False, 'from decimal import Decimal\n')]
jjhenkel/dockerizeme
hard-gists/98bb452dc14e8c40e403/snippet.py
eaa4fe5366f6b9adf74399eab01c712cacaeb279
from scryptos import * p1 = 32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n = p1*p2*p3 e = 3 c = int(open("flag.enc", "rb").read().encode("hex"), 16) # from User's Guide to PARI/GP, nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error("Impossible case in sqrtn"));if(type(x)=="t_INTMOD"||type(x)=="t_PADIC",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p1)])) c2 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p2)])) c3 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p3)])) """ c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] """ for x in c1: for y in c2: for z in c3: crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)]) d = hex(crt, 2)[2:].decode("hex") if "0ctf" in d: print d[d.find("0ctf"):].strip()
[]
ccsreenidhin/Music-Web-Django
musa/migrations/0001_initial.py
9b8286914f9099b9ed56c712c7ca384846f189d1
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-03-29 06:43 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import musa.models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[((16, 8, 16, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(16, 40, 16, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((23, 23, 23, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((24, 26, 24, 80), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((25, 29, 25, 84), 'django.db.models.FileField', 'models.FileField', (), '', False, 'from django.db import migrations, models\n'), ((26, 32, 26, 82), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((27, 25, 27, 116), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((33, 23, 33, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((34, 29, 34, 72), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((35, 36, 35, 79), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((36, 26, 36, 70), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((37, 28, 37, 112), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((38, 25, 38, 119), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import migrations, models\n')]
RESP3CT88/Nuitka
nuitka/codegen/LoopCodes.py
0fcc25d9f00c4fc78c79a863c4b7987f573962e1
# Copyright 2021, Kay Hayen, mailto:[email protected] # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Loop codes. Code generation for loops, breaking them, or continuing them. In Nuitka, there are no for-loops or while-loops at this point. They have been re-formulated in a simpler loop without a condition, and statements there-in that break under certain conditions. See Developer Manual for how the CPython loops are mapped to these nodes. """ from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context): # Functions used for generation all accept statement, but this one does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context): # Functions used for generation all accept statement, but this one does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel("loop_start") if not statement.isStatementAborting(): loop_end_label = context.allocateLabel("loop_end") else: loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition="CONSIDER_THREADING() == false", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is not None: getLabelCode(loop_end_label, emit)
[]
L4mborg1n1-D14610/Algoritms_and_DataStructure
3_module/C_BloomFilter.py
f61b7434dbc600da02e8ec38648fa84beb160f17
import math
from sys import exit

# So: n is the approximate number of elements in the set and P is the false-positive probability; then the size
# of the structure is m = -(n*log2(P)) / ln2 (log base 2), and the number of hash functions is -log2(P).
# The hash functions have the form (((i + 1)*x + p(i+1)) mod M) mod m, where x is the key, i is the hash function index,
# p(i) is the i-th prime number, and M is the 31st Mersenne number, M = 2^31 - 1, M = 2 147 483 647, which is prime.
# To evaluate the hash functions we need the first k primes. We compute them once in the BloomFilter constructor
# and keep them in the data structure.
# We also need a bit array of size m, but Python has no built-in bit array,
# so we use a bytearray instead. For convenience we implement a separate data structure with methods to: set
# a given bit to 1, check whether a given bit is 1, and print (return) the array itself.


Mersen_31 = 2147483647


class BitArray:
    def __init__(self, size):
        self.__array = bytearray(int(math.ceil(size / 8)))
        self.__size = size

    def add_bit(self, i):
        # the i-th bit lives in byte i // 8 at bit position i % 8
        self.__array[i // 8] |= 2 ** (7 - (i % 8))

    def check_bit(self, i):
        if (self.__array[i // 8] & (2 ** (7 - (i % 8)))) == 0:
            return False
        else:
            return True

    def print(self):
        array_str = ""
        for byte in self.__array:
            _line = str(bin(byte))[2:]
            if len(_line) != 8:
                _line = '0' * (8 - len(_line)) + _line
            array_str += _line
        return array_str[:self.__size]


class BloomFilter:
    def __init__(self, n: int, p: float):
        self.size = int(-round(n * math.log2(p) / math.log(2)))
        self.hash_numbers = int(-round(math.log2(p)))
        self.__prime_numbers = list()
        self.__get_prime(self.hash_numbers + 1)
        self.__bitarray = BitArray(self.size)

    def __get_prime(self, prime_size):
        # scanning every number and testing it for primality is slow,
        # so simplify a little: step by 2 starting from 3, and test each new candidate for
        # divisibility only by the primes already found (except 2, since we only consider odd numbers)
        if prime_size == 1:
            self.__prime_numbers.append(2)
            return
        self.__prime_numbers.append(2)
        i = 3
        while len(self.__prime_numbers) < prime_size:
            j = 1
            prime_flag = True
            while j < len(self.__prime_numbers):
                if (i % self.__prime_numbers[j]) == 0:
                    prime_flag = False
                    break
                j += 1
            if prime_flag:
                self.__prime_numbers.append(i)
            i += 2

    def __get_hash(self, x, i):
        return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size

    def add(self, key: int):
        i = 0
        while i < self.hash_numbers:
            self.__bitarray.add_bit(self.__get_hash(key, i))
            i += 1

    def search(self, key: int):
        i = 0
        while i < self.hash_numbers:
            if not self.__bitarray.check_bit(self.__get_hash(key, i)):
                return False
            i += 1
        return True

    def print(self):
        return self.__bitarray.print()


bloom_filter = 0

while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        else:
            if line[0] == "set":
                try:
                    elements_number = int(line[1])
                    probability = float(line[2])
                    if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
                        print("error")
                        continue
                    bloom_filter = BloomFilter(elements_number, probability)
                    if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
                        print("error")
                        continue
                    break
                except TypeError:
                    print("error")
                    continue
            else:
                print("error")
                continue
    except EOFError:
        exit()

print(bloom_filter.size, bloom_filter.hash_numbers)

while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        elif line[0] == "print":
            print(bloom_filter.print())
        elif (line[0] == "add") & (line[1].isnumeric()):
bloom_filter.add(int(line[1])) elif (line[0] == "search") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1])))) else: print("error") except EOFError: break
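
# A quick sketch of driving the classes above directly, bypassing the stdin protocol
# (the numbers follow the sizing formulas from the header comment):
#     bf = BloomFilter(100, 0.01)   # ~959 bits and 7 hash functions
#     bf.add(42)
#     bf.search(42)                 # True; keys never added may also report True (false positive)
#     bf.search(43)                 # usually False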
[((120, 8, 120, 14), 'sys.exit', 'exit', ({}, {}), '()', False, 'from sys import exit\n'), ((20, 37, 20, 56), 'math.ceil', 'math.ceil', ({(20, 47, 20, 55): 'size / 8'}, {}), '(size / 8)', False, 'import math\n'), ((46, 39, 46, 51), 'math.log2', 'math.log2', ({(46, 49, 46, 50): 'p'}, {}), '(p)', False, 'import math\n'), ((45, 50, 45, 61), 'math.log', 'math.log', ({(45, 59, 45, 60): '2'}, {}), '(2)', False, 'import math\n'), ((45, 35, 45, 47), 'math.log2', 'math.log2', ({(45, 45, 45, 46): 'p'}, {}), '(p)', False, 'import math\n')]
Surfndez/source-publish
pyzmq/examples/pubsub/subscriber.py
c3838b303c1a0806f21cd4e8d8c207015b3ce9c8
"""A test that subscribes to NumPy arrays. Uses REQ/REP (on PUB/SUB socket + 1) to synchronize """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import time import zmq import numpy def sync(connect_to): # use connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if len (sys.argv) != 3: print 'usage: subscriber <connect_to> <array-count>' sys.exit (1) try: connect_to = sys.argv[1] array_count = int (sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count must be integers' sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print "Receiving arrays..." for i in range(array_count): a = s.recv_pyobj() print " Done." end = time.clock() elapsed = (end - start) * 1000000 if elapsed == 0: elapsed = 1 throughput = (1000000.0 * float (array_count)) / float (elapsed) message_size = a.nbytes megabits = float (throughput * message_size * 8) / 1000000 print "message size: %.0f [B]" % (message_size, ) print "array count: %.0f" % (array_count, ) print "mean throughput: %.0f [msg/s]" % (throughput, ) print "mean throughput: %.3f [Mb/s]" % (megabits, ) time.sleep(1.0) if __name__ == "__main__": main()
[]
livioso/cpython
Doc/includes/sqlite3/load_extension.py
077061a7b24917aaf31057885c69919c5a553c88
import sqlite3 con = sqlite3.connect(":memory:") # enable extension loading con.enable_load_extension(True) # Load the fulltext search extension con.execute("select load_extension('./fts3.so')") # alternatively you can load the extension using an API call: # con.load_extension("./fts3.so") # disable extension loading again con.enable_load_extension(False) # example from SQLite wiki con.execute("create virtual table recipe using fts3(name, ingredients)") con.executescript(""" insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes'); insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery'); insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour'); insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter'); """) for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"): print(row)
[((3, 6, 3, 33), 'sqlite3.connect', 'sqlite3.connect', ({(3, 22, 3, 32): '""":memory:"""'}, {}), "(':memory:')", False, 'import sqlite3\n')]
RunzheYang/lingvo
lingvo/core/inference_graph_exporter.py
1291e29812f9ee9836f9cacbb05db9ec6b095234
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility for exporting an InferenceGraph proto from model params.""" import collections import contextlib import re import lingvo.compat as tf from lingvo.core import base_model from lingvo.core import bfloat16_variables from lingvo.core import inference_graph_pb2 from lingvo.core import py_utils import six from google.protobuf import text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains options to configure inference on the device. # device: Device to infer on. # retain_device_placement: If true, the specified device in the generated # inference graph nodes will be retained. Otherwise, the specified device # will be cleared, so that the runtime can choose automatically. # var_options: Options on handling variables. For TPUs, variables can be # either placed on device through 'ON_DEVICE' option, or treated as # constants with AS_CONSTANTS. # gen_init_op: Whether to serialize initialization ops for the device. For TPUs, # servers can be initialized globally once, in which case this should be # turned off to avoid tripping initialization checks. # dtype_override: Whether to override the dtype to use for activations and # weights in the model. Options supported are None or tf.bfloat16. 
InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope(): """Disallow const gauranteeing variable with-in scope.""" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks variable as constants for compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs), name=name + '/GuaranteeConst') else: return getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): """Treats all variables under this scope as constants.""" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): """Returns a dictionary with name -> tf.Variable() mapping.""" vars_dict = {} for v in variables: vars_dict[_GetVarName(v)] = v return vars_dict def IsTpu(device_options): return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): """Converts dict of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a NestedMap. Returns: Equivalent InferenceGraph. """ # Build the output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items(): fetches = tensors[0] feeds = tensors[1] # Rewrite fetches and feeds to map to their tensor name instead of # Tensor instance. named_fetches = {k: v.name for k, v in fetches.items() if v is not None} named_feeds = {k: v.name for k, v in feeds.items() if v is not None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): """Gets output op names from an inference graph. Args: graph: The tf graph. inference_graph_proto: an InferenceGraph proto. subgraphs: an optional list of subgraph names. If provided, only output ops from these subgraphs are preserved. Otherwise, all subgraphs are included. preserve_colocation_nodes: a Python bool, default to True. Preserves nodes colocating with the closure of output ops in the returned array. preserve_saver_restore_nodes: a Python bool, default to False. 
Preserves nodes for restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an optional list of extra op names to preserve as long as they present in the graph. Returns: Array of tf op names that should be preserved in the graph. """ output_op_names = set() def _GetOpName(tensor_or_op_name): """Returns the op name of the given node name.""" # Tensor names have format <op_name>:<output_index>. Some inference # graphs put tensors and others put ops in the feeds/fetches (depends # on how it is used). We differentiate here. We still do the lookup in # the graph to sanity check (versus relying on the text manipulation). # If this logic ever breaks, TensorFlow will raise a ValueError with # a description of the syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue # Sometimes feeds aren't connected to any outputs but keep them in the graph # anyways to avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes for restoring is preserved. saver_def.save_tensor_name is # skipped because it's only used for saving. saver_def = inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name) if not preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names)) # We also need to preserve any nodes that are used for colocation. # E.g., a node may have this attr: # attr { # key: "_class" # value { # list { # s: "loc:@inference/embedding_lookup/Read/ReadVariableOp" # } # } # } # # In this case, we need to make sure the node # inference/embedding_lookup/Read/ReadVariableOp is not pruned. # # TODO(zhifengc): It's possible that it's better to fix in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for node in graph_def.node] for node in graph.get_operations(): if preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:] if loc_name not in reachable_vars: # Skip nodes that cannot be reached from the pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): """Tests whether param_name is contained in param_obj.""" if not param_obj: return for k, _ in param_obj.IterParams(): if k == param_name: return True return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): """Freezes a graph from a checkpoint. Args: graph: tf.Graph. saver: The tf.Saver to use for restoration. checkpoint: The checkpoint to restore. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. 
""" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): """Default initializes a graph and freezes it. Args: graph: tf.Graph. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. """ with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: """Class for exporting inference graphs.""" @classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): """Exports a InferenceGraph proto with piecewise subgraphs. Sets FLAGS.enable_asserts to False unless user explicitly sets it to True. Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing and multi-core inference on TPUs work properly. Args: model_cfg: a Params instance as returned by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The task to generate an inference graph for. Should be None for single-task models. device_options: Device options for the accelerator used for serving. freeze_checkpoint: The checkpoint to load. Loads and freezes the model if given. freeze_defaults: Default initializes the graph and freeze. Useful for early testing of downstream tools without having a checkpoint. export_path: If not None, write the inference graph in ASCII to this path. subgraph_filter: A string or a list of subgraph names. If not None or empty, export only this list of inference subgraphs. random_seed: Fixes the random seed in the exported inference graph. disable_packed_input: Disable packed input for inference writing purposes. Returns: InferenceGraph proto. Raises: ValueError: if the model does not support the listed subgraphs. """ assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.') if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter] # Disable assertions unless user explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo): Work out how much we need to specify here in terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model. 
model_cfg.random_seed = random_seed model_cfg.is_inference = True if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for line in model_cfg.ToText().split('\n'): tf.logging.debug('%s', line) # Instantiate the graph. graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): # Do not specify devices for variables if we are marking them as # constants. device = '' tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags prior to instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu' try: mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults: # Replace variables with tensors using tf.identity in theta before # freezing to avoid the graph referencing types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are bad, however this seems to be the # easiest way to get this assets registered from # TextFileInitializer. 
assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if asset.op.type == 'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0] # Add a table init op and global variable init op to the graph. # Tables can be declared anywhere in the graph, so this op has to be # added last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags after model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops: %r', [op.name for op in graph.get_operations()]) # Collection defs if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs ' 'since operating in eager mode.') # Freezing. if freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used with device ' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph and freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune the graph to just the parts we need. # To support restoring, we have to not prune out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to output ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: # Clear the device so that the runtime can choose. tf.logging.info('Clearing device placement for: %s', device_options.device) for node in graph_def.node: node.ClearField('device') for function in graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options): """Sets cluster params. Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. 
""" def Update(p): """Update cluster params `p`.""" p.name = '/job:localhost' p.replicas = 1 p.tpus_per_replica = 1 if IsTpu(device_options) else 0 p.gpus_per_replica = 0 p.devices_per_split = 1 cluster_params.mode = 'sync' cluster_params.job = 'decoder' cluster_params.add_summary = False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def _DeviceSupportsFreezing(cls, device_options): return IsTpu(device_options)
[((45, 25, 48, 2), 'collections.namedtuple', 'collections.namedtuple', ({(45, 48, 45, 72): '"""InferenceDeviceOptions"""', (45, 74, 48, 1): "['device', 'retain_device_placement', 'var_options', 'gen_init_op',\n 'dtype_override', 'fprop_dtype_override']"}, {}), "('InferenceDeviceOptions', ['device',\n 'retain_device_placement', 'var_options', 'gen_init_op',\n 'dtype_override', 'fprop_dtype_override'])", False, 'import collections\n'), ((57, 14, 57, 37), 'lingvo.compat.get_variable_scope', 'tf.get_variable_scope', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((82, 14, 82, 37), 'lingvo.compat.get_variable_scope', 'tf.get_variable_scope', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((135, 26, 135, 62), 'lingvo.core.inference_graph_pb2.InferenceGraph', 'inference_graph_pb2.InferenceGraph', ({}, {}), '()', False, 'from lingvo.core import inference_graph_pb2\n'), ((184, 7, 184, 48), 're.search', 're.search', ({(184, 17, 184, 28): '""":[0-9]+$"""', (184, 30, 184, 47): 'tensor_or_op_name'}, {}), "(':[0-9]+$', tensor_or_op_name)", False, 'import re\n'), ((381, 4, 381, 56), 'lingvo.compat.logging.debug', 'tf.logging.debug', ({(381, 21, 381, 39): '"""Model %s params:"""', (381, 41, 381, 55): 'model_cfg.name'}, {}), "('Model %s params:', model_cfg.name)", True, 'import lingvo.compat as tf\n'), ((386, 12, 386, 22), 'lingvo.compat.Graph', 'tf.Graph', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((71, 9, 71, 38), 'lingvo.compat.control_dependencies', 'tf.control_dependencies', ({(71, 33, 71, 37): 'None'}, {}), '(None)', True, 'import lingvo.compat as tf\n'), ((194, 6, 194, 57), 'lingvo.compat.logging.info', 'tf.logging.info', ({(194, 22, 194, 41): '"""Skip subgraph %s."""', (194, 43, 194, 56): 'subgraph_name'}, {}), "('Skip subgraph %s.', subgraph_name)", True, 'import lingvo.compat as tf\n'), ((273, 40, 273, 64), 'lingvo.core.py_utils.SessionConfig', 'py_utils.SessionConfig', ({}, {}), '()', False, 'from lingvo.core import py_utils\n'), ((383, 6, 383, 34), 'lingvo.compat.logging.debug', 'tf.logging.debug', ({(383, 23, 383, 27): '"""%s"""', (383, 29, 383, 33): 'line'}, {}), "('%s', line)", True, 'import lingvo.compat as tf\n'), ((388, 6, 388, 37), 'lingvo.compat.random.set_seed', 'tf.random.set_seed', ({(388, 25, 388, 36): 'random_seed'}, {}), '(random_seed)', True, 'import lingvo.compat as tf\n'), ((488, 11, 488, 33), 'lingvo.compat.executing_eagerly', 'tf.executing_eagerly', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((489, 19, 489, 58), 'lingvo.compat.train.export_meta_graph', 'tf.train.export_meta_graph', (), '', True, 'import lingvo.compat as tf\n'), ((495, 6, 496, 58), 'lingvo.compat.logging.warning', 'tf.logging.warning', ({(495, 25, 496, 57): '"""Not exporting collection defs since operating in eager mode."""'}, {}), "(\n 'Not exporting collection defs since operating in eager mode.')", True, 'import lingvo.compat as tf\n'), ((528, 6, 528, 73), 'lingvo.compat.logging.info', 'tf.logging.info', ({(528, 22, 528, 55): '"""Pruning graph to output ops: %r"""', (528, 57, 528, 72): 'output_op_names'}, {}), "('Pruning graph to output ops: %r', output_op_names)", True, 'import lingvo.compat as tf\n'), ((529, 18, 529, 77), 'lingvo.compat.graph_util.extract_sub_graph', 'tf.graph_util.extract_sub_graph', ({(529, 50, 529, 59): 'graph_def', (529, 61, 529, 76): 'output_op_names'}, {}), '(graph_def, output_op_names)', True, 'import lingvo.compat as tf\n'), ((533, 6, 534, 44), 'lingvo.compat.logging.info', 'tf.logging.info', ({(533, 22, 533, 57): '"""Clearing device 
placement for: %s"""', (534, 22, 534, 43): 'device_options.device'}, {}), "('Clearing device placement for: %s', device_options.device)", True, 'import lingvo.compat as tf\n'), ((289, 38, 289, 62), 'lingvo.core.py_utils.SessionConfig', 'py_utils.SessionConfig', ({}, {}), '()', False, 'from lingvo.core import py_utils\n'), ((399, 20, 399, 37), 'lingvo.compat.device', 'tf.device', ({(399, 30, 399, 36): 'device'}, {}), '(device)', True, 'import lingvo.compat as tf\n'), ((491, 8, 491, 53), 'lingvo.compat.logging.info', 'tf.logging.info', ({(491, 24, 491, 47): '"""copying collection %s"""', (491, 49, 491, 52): 'key'}, {}), "('copying collection %s', key)", True, 'import lingvo.compat as tf\n'), ((509, 8, 509, 80), 'lingvo.compat.logging.info', 'tf.logging.info', ({(509, 24, 509, 60): '"""Freezing graph from checkpoint: %s"""', (509, 62, 509, 79): 'freeze_checkpoint'}, {}), "('Freezing graph from checkpoint: %s', freeze_checkpoint)", True, 'import lingvo.compat as tf\n'), ((544, 11, 544, 46), 'lingvo.compat.io.gfile.GFile', 'tf.io.gfile.GFile', ({(544, 29, 544, 40): 'export_path', (544, 42, 544, 45): '"""w"""'}, {}), "(export_path, 'w')", True, 'import lingvo.compat as tf\n'), ((210, 8, 210, 77), 'lingvo.compat.logging.info', 'tf.logging.info', ({(210, 24, 210, 76): "('Op/tensor %s not in the graph. Ignoring.' % op_name)"}, {}), "('Op/tensor %s not in the graph. Ignoring.' % op_name)", True, 'import lingvo.compat as tf\n'), ((240, 14, 240, 43), 'six.ensure_text', 'six.ensure_text', ({(240, 30, 240, 33): 'loc', (240, 35, 240, 42): '"""utf-8"""'}, {}), "(loc, 'utf-8')", False, 'import six\n'), ((405, 10, 405, 54), 'lingvo.core.py_utils.UpdateDtype', 'py_utils.UpdateDtype', ({(405, 31, 405, 40): 'model_cfg', (405, 42, 405, 53): 'tf.bfloat16'}, {}), '(model_cfg, tf.bfloat16)', False, 'from lingvo.core import py_utils\n'), ((406, 10, 406, 59), 'lingvo.core.py_utils.UpdateFpropDtype', 'py_utils.UpdateFpropDtype', ({(406, 36, 406, 45): 'model_cfg', (406, 47, 406, 58): 'tf.bfloat16'}, {}), '(model_cfg, tf.bfloat16)', False, 'from lingvo.core import py_utils\n'), ((411, 10, 411, 59), 'lingvo.core.py_utils.UpdateFpropDtype', 'py_utils.UpdateFpropDtype', ({(411, 36, 411, 45): 'model_cfg', (411, 47, 411, 58): 'tf.bfloat16'}, {}), '(model_cfg, tf.bfloat16)', False, 'from lingvo.core import py_utils\n'), ((436, 18, 436, 48), 'lingvo.compat.train.Saver', 'tf.train.Saver', ({(436, 33, 436, 47): 'saver_var_spec'}, {}), '(saver_var_spec)', True, 'import lingvo.compat as tf\n'), ((451, 34, 451, 70), 'lingvo.core.inference_graph_pb2.InferenceGraph', 'inference_graph_pb2.InferenceGraph', ({}, {}), '()', False, 'from lingvo.core import inference_graph_pb2\n'), ((462, 30, 463, 53), 'lingvo.compat.compat.v1.get_collection', 'tf.compat.v1.get_collection', ({(463, 14, 463, 52): 'tf.compat.v1.GraphKeys.ASSET_FILEPATHS'}, {}), '(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)', True, 'import lingvo.compat as tf\n'), ((478, 10, 478, 55), 'lingvo.compat.tables_initializer', 'tf.tables_initializer', (), '', True, 'import lingvo.compat as tf\n'), ((513, 8, 513, 67), 'lingvo.compat.logging.info', 'tf.logging.info', ({(513, 24, 513, 66): '"""Default initializing graph and freezing."""'}, {}), "('Default initializing graph and freezing.')", True, 'import lingvo.compat as tf\n'), ((545, 16, 545, 66), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', ({(545, 44, 545, 65): 'inference_graph_proto'}, {}), '(inference_graph_proto)', False, 'from google.protobuf import text_format\n'), ((430, 16, 432, 41), 
'lingvo.core.bfloat16_variables.get_saver_spec_for_variables_with_bf16_overrides', 'bfloat16_variables.get_saver_spec_for_variables_with_bf16_overrides', ({(432, 20, 432, 40): 'variables_to_restore'}, {}), '(\n variables_to_restore)', False, 'from lingvo.core import bfloat16_variables\n'), ((438, 14, 438, 35), 'lingvo.compat.global_variables', 'tf.global_variables', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((425, 38, 425, 59), 'lingvo.compat.global_variables', 'tf.global_variables', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((440, 21, 440, 47), 'lingvo.compat.tpu.initialize_system', 'tf.tpu.initialize_system', ({}, {}), '()', True, 'import lingvo.compat as tf\n'), ((469, 16, 470, 61), 'lingvo.compat.logging.info', 'tf.logging.info', ({(469, 32, 469, 59): '"""Found asset file_path: %s"""', (470, 32, 470, 60): 'constant_value.string_val[0]'}, {}), "('Found asset file_path: %s', constant_value.string_val[0])", True, 'import lingvo.compat as tf\n')]
VijayKalmath/USCrimeAnalysis
src/preprocessing/annual_hc_by_crime_loc.py
14c96aae52547a4f7ea140395c62a621a97def50
#! usr/env/bin python import glob import numpy as np import pandas as pd from tqdm import tqdm def main(): # Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to year file_paths.sort(key = lambda x: int(x[-8:-4])) # Create a result dataframe to store the data df_res = get_place_crime_count(file_paths[0]) # Iterate over the rest of the files for p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on = "Place", how = "left") # Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: """ Function to return """ # Extracting the table name from and year from the given file path t_name = " ".join(path[path.index("Table"):path.index("_Incidents")].split("_")) t_year = path[path.index(".xls")-4:path.index(".xls")] try: # Read the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get the start and end indices of the interested datapoints start = df.index[df[t_name] == "Total"][0] + 1 end = df.index[df[t_name] == "Multiple locations"][0] # Slice the dataset df = df.iloc[start:end,0:2] # Reset the index for the reduced dataframe df.reset_index(drop = True, inplace = True) # Rename the columns df.rename(columns={t_name: "Place", "Unnamed: 1": t_year}, inplace = True) # Return the value return df except: # If there is no such data return an empty dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if __name__ == '__main__': main()
[((11, 17, 11, 69), 'glob.glob', 'glob.glob', ({(11, 27, 11, 68): '"""./data/raw/ucr/hc_count_by_place/*.xls"""'}, {}), "('./data/raw/ucr/hc_count_by_place/*.xls')", False, 'import glob\n'), ((17, 13, 17, 33), 'tqdm.tqdm', 'tqdm', ({(17, 18, 17, 32): 'file_paths[1:]'}, {}), '(file_paths[1:])', False, 'from tqdm import tqdm\n'), ((19, 17, 19, 70), 'pandas.merge', 'pd.merge', (), '', True, 'import pandas as pd\n'), ((35, 13, 35, 50), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((50, 15, 50, 77), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n')]
ethanjperez/allennlp
allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
e520993f16f0da7e2c40f6e44b8dc56338f46b57
# pylint: disable=no-self-use,invalid-name import numpy as np from numpy.testing import assert_almost_equal import torch from allennlp.common import Params from allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace("1") self.vocab.add_token_to_namespace("2") self.vocab.add_token_to_namespace("3") self.vocab.add_token_to_namespace("4") def test_forward_calculates_bow_properly(self): params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params = Params({"projection_dim": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) assert embedder_output.shape[1] == 50
[((14, 21, 14, 33), 'allennlp.data.Vocabulary', 'Vocabulary', ({}, {}), '()', False, 'from allennlp.data import Vocabulary\n'), ((21, 17, 21, 27), 'allennlp.common.Params', 'Params', ({(21, 24, 21, 26): '{}'}, {}), '({})', False, 'from allennlp.common import Params\n'), ((22, 19, 22, 86), 'allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params', 'BagOfWordCountsTokenEmbedder.from_params', (), '', False, 'from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\n'), ((23, 23, 23, 57), 'numpy.array', 'np.array', ({(23, 32, 23, 56): '[[2, 0], [3, 0], [4, 4]]'}, {}), '([[2, 0], [3, 0], [4, 4]])', True, 'import numpy as np\n'), ((26, 23, 26, 93), 'numpy.array', 'np.array', ({(26, 32, 26, 92): '[[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]]'}, {}), '([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])', True, 'import numpy as np\n'), ((31, 17, 31, 47), 'allennlp.common.Params', 'Params', ({(31, 24, 31, 46): "{'projection_dim': 50}"}, {}), "({'projection_dim': 50})", False, 'from allennlp.common import Params\n'), ((32, 19, 32, 86), 'allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params', 'BagOfWordCountsTokenEmbedder.from_params', (), '', False, 'from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\n'), ((24, 17, 24, 47), 'torch.from_numpy', 'torch.from_numpy', ({(24, 34, 24, 46): 'numpy_tensor'}, {}), '(numpy_tensor)', False, 'import torch\n'), ((27, 24, 27, 54), 'torch.from_numpy', 'torch.from_numpy', ({(27, 41, 27, 53): 'numpy_tensor'}, {}), '(numpy_tensor)', False, 'import torch\n'), ((34, 17, 34, 47), 'torch.from_numpy', 'torch.from_numpy', ({(34, 34, 34, 46): 'numpy_tensor'}, {}), '(numpy_tensor)', False, 'import torch\n')]
hengkaiz/meshrcnn
demo/demo_shapenet.py
eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
import argparse import logging import multiprocessing as mp import logging import os from detectron2.evaluation import inference_context import torch import torch.distributed as dist import torch.multiprocessing as mp from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import setup_logger from fvcore.common.file_io import PathManager from pathlib import Path from pytorch3d.io import save_obj from shapenet.config.config import get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms as T import glob from PIL import Image import trimesh import pyvista as pv import pyacvd import numpy as np logger = logging.getLogger('demo') def setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg def get_parser(): parser = argparse.ArgumentParser(description="MeshRCNN Demo") parser.add_argument( "--config-file", default="configs/shapenet/voxmesh_R50.yaml", metavar="FILE", help="path to config file", ) parser.add_argument("--input", help="A path to an input main folder") # parser.add_argument("--output", help="A directory to save output visualizations") parser.add_argument( "--focal-length", type=float, default=20.0, help="Focal length for the image" ) parser.add_argument( "--onlyhighest", action="store_true", help="will return only the highest scoring detection" ) parser.add_argument( "opts", help="Modify model config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh) # logger.info('Original mesh:') # print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh remesh = clus.create_mesh() # verts = remesh.points # faces = remesh.faces.reshape((-1, 4))[:, 1:] return remesh if __name__ == "__main__": mp.set_start_method("spawn", force=True) args = get_parser().parse_args() device = torch.device("cuda:%d" % 0) logger = setup_logger(name="demo shapenet") logger.info("Arguments: " + str(args)) cfg = setup_cfgs(args) # load checkpoing and build model if cfg.MODEL.CHECKPOINT == "": raise ValueError("Invalid checkpoing provided") logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp["best_states"]["model"]) model = build_model(cfg) model.load_state_dict(state_dict) logger.info("Model loaded") model.to(device) sub_dir = sorted(os.listdir(args.input)) for sd in sub_dir: curr_path = os.path.join(args.input, sd) images = glob.glob(curr_path + "/*.png") for img_dir in images: # load image transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name = img_dir.split("/")[-1].split(".")[0] with PathManager.open(img_dir, "rb") as f: img = Image.open(f).convert("RGB") img = transform(img) img = img[None, :, :, :] img = img.to(device) with inference_context(model): img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img) # Save voxel_score voxel_odir = os.path.join(curr_path, "voxel_score") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, "%s.pt" % (im_name)) torch.save(voxel_scores, voxel_file) # Save image features imgfeat_odir = os.path.join(curr_path, "img_feat") 
if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, "%s.pt" % (im_name)) torch.save(img_feats, img_feat_file) # Save P p_odir = os.path.join(curr_path, "P") if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir, "%s.pt" % (im_name)) torch.save(P, p_file) # Save cubified mesh cmesh_odir = os.path.join(curr_path, "cube_mesh") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, "%s_cube.obj" % (im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) # Save predicted mesh mesh_odir = os.path.join(curr_path, "final_mesh") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, "%s.obj" % (im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info("Predictions saved for %s/%s" % (curr_path.split('/')[-1], im_name))
[((32, 9, 32, 34), 'logging.getLogger', 'logging.getLogger', ({(32, 27, 32, 33): '"""demo"""'}, {}), "('demo')", False, 'import logging\n'), ((35, 10, 35, 28), 'shapenet.config.config.get_shapenet_cfg', 'get_shapenet_cfg', ({}, {}), '()', False, 'from shapenet.config.config import get_shapenet_cfg\n'), ((42, 13, 42, 65), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((67, 14, 67, 27), 'pyvista.wrap', 'pv.wrap', ({(67, 22, 67, 26): 'mesh'}, {}), '(mesh)', True, 'import pyvista as pv\n'), ((71, 11, 71, 37), 'pyacvd.Clustering', 'pyacvd.Clustering', ({(71, 29, 71, 36): 'pv_mesh'}, {}), '(pv_mesh)', False, 'import pyacvd\n'), ((84, 4, 84, 44), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (), '', True, 'import torch.multiprocessing as mp\n'), ((87, 13, 87, 40), 'torch.device', 'torch.device', ({(87, 26, 87, 39): "'cuda:%d' % 0"}, {}), "('cuda:%d' % 0)", False, 'import torch\n'), ((89, 13, 89, 47), 'detectron2.utils.logger.setup_logger', 'setup_logger', (), '', False, 'from detectron2.utils.logger import setup_logger\n'), ((99, 17, 99, 61), 'shapenet.utils.checkpoint.clean_state_dict', 'clean_state_dict', ({(99, 34, 99, 60): "cp['best_states']['model']"}, {}), "(cp['best_states']['model'])", False, 'from shapenet.utils.checkpoint import clean_state_dict\n'), ((100, 12, 100, 28), 'shapenet.modeling.mesh_arch.build_model', 'build_model', ({(100, 24, 100, 27): 'cfg'}, {}), '(cfg)', False, 'from shapenet.modeling.mesh_arch import build_model\n'), ((98, 20, 98, 68), 'fvcore.common.file_io.PathManager.get_local_path', 'PathManager.get_local_path', ({(98, 47, 98, 67): 'cfg.MODEL.CHECKPOINT'}, {}), '(cfg.MODEL.CHECKPOINT)', False, 'from fvcore.common.file_io import PathManager\n'), ((105, 21, 105, 43), 'os.listdir', 'os.listdir', ({(105, 32, 105, 42): 'args.input'}, {}), '(args.input)', False, 'import os\n'), ((108, 20, 108, 48), 'os.path.join', 'os.path.join', ({(108, 33, 108, 43): 'args.input', (108, 45, 108, 47): 'sd'}, {}), '(args.input, sd)', False, 'import os\n'), ((109, 17, 109, 48), 'glob.glob', 'glob.glob', ({(109, 27, 109, 47): "curr_path + '/*.png'"}, {}), "(curr_path + '/*.png')", False, 'import glob\n'), ((115, 24, 115, 44), 'torchvision.transforms.Compose', 'T.Compose', ({(115, 34, 115, 43): 'transform'}, {}), '(transform)', True, 'import torchvision.transforms as T\n'), ((130, 25, 130, 63), 'os.path.join', 'os.path.join', ({(130, 38, 130, 47): 'curr_path', (130, 49, 130, 62): '"""voxel_score"""'}, {}), "(curr_path, 'voxel_score')", False, 'import os\n'), ((134, 25, 134, 70), 'os.path.join', 'os.path.join', ({(134, 38, 134, 48): 'voxel_odir', (134, 50, 134, 69): "'%s.pt' % im_name"}, {}), "(voxel_odir, '%s.pt' % im_name)", False, 'import os\n'), ((135, 12, 135, 48), 'torch.save', 'torch.save', ({(135, 23, 135, 35): 'voxel_scores', (135, 37, 135, 47): 'voxel_file'}, {}), '(voxel_scores, voxel_file)', False, 'import torch\n'), ((138, 27, 138, 62), 'os.path.join', 'os.path.join', ({(138, 40, 138, 49): 'curr_path', (138, 51, 138, 61): '"""img_feat"""'}, {}), "(curr_path, 'img_feat')", False, 'import os\n'), ((142, 28, 142, 75), 'os.path.join', 'os.path.join', ({(142, 41, 142, 53): 'imgfeat_odir', (142, 55, 142, 74): "'%s.pt' % im_name"}, {}), "(imgfeat_odir, '%s.pt' % im_name)", False, 'import os\n'), ((143, 12, 143, 48), 'torch.save', 'torch.save', ({(143, 23, 143, 32): 'img_feats', (143, 34, 143, 47): 'img_feat_file'}, {}), '(img_feats, img_feat_file)', False, 'import torch\n'), ((146, 21, 146, 49), 'os.path.join', 
'os.path.join', ({(146, 34, 146, 43): 'curr_path', (146, 45, 146, 48): '"""P"""'}, {}), "(curr_path, 'P')", False, 'import os\n'), ((150, 21, 150, 62), 'os.path.join', 'os.path.join', ({(150, 34, 150, 40): 'p_odir', (150, 42, 150, 61): "'%s.pt' % im_name"}, {}), "(p_odir, '%s.pt' % im_name)", False, 'import os\n'), ((151, 12, 151, 33), 'torch.save', 'torch.save', ({(151, 23, 151, 24): 'P', (151, 26, 151, 32): 'p_file'}, {}), '(P, p_file)', False, 'import torch\n'), ((154, 25, 154, 61), 'os.path.join', 'os.path.join', ({(154, 38, 154, 47): 'curr_path', (154, 49, 154, 60): '"""cube_mesh"""'}, {}), "(curr_path, 'cube_mesh')", False, 'import os\n'), ((158, 29, 158, 80), 'os.path.join', 'os.path.join', ({(158, 42, 158, 52): 'cmesh_odir', (158, 54, 158, 79): "'%s_cube.obj' % im_name"}, {}), "(cmesh_odir, '%s_cube.obj' % im_name)", False, 'import os\n'), ((160, 12, 160, 54), 'pytorch3d.io.save_obj', 'save_obj', ({(160, 21, 160, 35): 'cube_mesh_file', (160, 37, 160, 44): 'c_verts', (160, 46, 160, 53): 'c_faces'}, {}), '(cube_mesh_file, c_verts, c_faces)', False, 'from pytorch3d.io import save_obj\n'), ((163, 24, 163, 61), 'os.path.join', 'os.path.join', ({(163, 37, 163, 46): 'curr_path', (163, 48, 163, 60): '"""final_mesh"""'}, {}), "(curr_path, 'final_mesh')", False, 'import os\n'), ((167, 24, 167, 69), 'os.path.join', 'os.path.join', ({(167, 37, 167, 46): 'mesh_odir', (167, 48, 167, 68): "'%s.obj' % im_name"}, {}), "(mesh_odir, '%s.obj' % im_name)", False, 'import os\n'), ((169, 12, 169, 45), 'pytorch3d.io.save_obj', 'save_obj', ({(169, 21, 169, 30): 'save_file', (169, 32, 169, 37): 'verts', (169, 39, 169, 44): 'faces'}, {}), '(save_file, verts, faces)', False, 'from pytorch3d.io import save_obj\n'), ((113, 25, 113, 37), 'torchvision.transforms.ToTensor', 'T.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as T\n'), ((114, 29, 114, 50), 'shapenet.data.utils.imagenet_preprocess', 'imagenet_preprocess', ({}, {}), '()', False, 'from shapenet.data.utils import imagenet_preprocess\n'), ((119, 17, 119, 48), 'fvcore.common.file_io.PathManager.open', 'PathManager.open', ({(119, 34, 119, 41): 'img_dir', (119, 43, 119, 47): '"""rb"""'}, {}), "(img_dir, 'rb')", False, 'from fvcore.common.file_io import PathManager\n'), ((126, 17, 126, 41), 'detectron2.evaluation.inference_context', 'inference_context', ({(126, 35, 126, 40): 'model'}, {}), '(model)', False, 'from detectron2.evaluation import inference_context\n'), ((132, 16, 132, 36), 'os.mkdir', 'os.mkdir', ({(132, 25, 132, 35): 'voxel_odir'}, {}), '(voxel_odir)', False, 'import os\n'), ((140, 16, 140, 38), 'os.mkdir', 'os.mkdir', ({(140, 25, 140, 37): 'imgfeat_odir'}, {}), '(imgfeat_odir)', False, 'import os\n'), ((148, 16, 148, 32), 'os.mkdir', 'os.mkdir', ({(148, 25, 148, 31): 'p_odir'}, {}), '(p_odir)', False, 'import os\n'), ((156, 16, 156, 36), 'os.mkdir', 'os.mkdir', ({(156, 25, 156, 35): 'cmesh_odir'}, {}), '(cmesh_odir)', False, 'import os\n'), ((165, 16, 165, 35), 'os.mkdir', 'os.mkdir', ({(165, 25, 165, 34): 'mesh_odir'}, {}), '(mesh_odir)', False, 'import os\n'), ((120, 22, 120, 35), 'PIL.Image.open', 'Image.open', ({(120, 33, 120, 34): 'f'}, {}), '(f)', False, 'from PIL import Image\n'), ((131, 19, 131, 35), 'pathlib.Path', 'Path', ({(131, 24, 131, 34): 'voxel_odir'}, {}), '(voxel_odir)', False, 'from pathlib import Path\n'), ((139, 19, 139, 37), 'pathlib.Path', 'Path', ({(139, 24, 139, 36): 'imgfeat_odir'}, {}), '(imgfeat_odir)', False, 'from pathlib import Path\n'), ((147, 19, 147, 31), 'pathlib.Path', 'Path', ({(147, 24, 147, 
30): 'p_odir'}, {}), '(p_odir)', False, 'from pathlib import Path\n'), ((155, 19, 155, 35), 'pathlib.Path', 'Path', ({(155, 24, 155, 34): 'cmesh_odir'}, {}), '(cmesh_odir)', False, 'from pathlib import Path\n'), ((164, 19, 164, 34), 'pathlib.Path', 'Path', ({(164, 24, 164, 33): 'mesh_odir'}, {}), '(mesh_odir)', False, 'from pathlib import Path\n')]
jshin13/progressive-learning
proglearn/voters.py
dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
import numpy as np # from sklearn.ensemble import BaggingClassifier # from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation import ( check_X_y, check_array, NotFittedError, ) from sklearn.utils.multiclass import check_classification_targets, type_of_target from .base import BaseVoter from tensorflow import keras from keras import layers class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): """ Doc strings here. """ self.finite_sample_correction = finite_sample_correction self._is_fitted = False self.multilabel = False def fit(self, X, y): """ Doc strings here. """ check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator': # Fit multilabel binary task. self.multilabel = True return self.fit_multilabel(X, y) num_classes = len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) / num_classes self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self def fit_multilabel(self, X, y): num_labels = y.shape[1] self.uniform_posterior = y.sum(axis=0) / len(y) # Each posterior is now a num_labels size vector or binary probabilities. self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO: multilabel finite sample correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self def vote(self, X): """ Doc strings here. """ if not self.is_fitted(): msg = ( "This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this voter." ) raise NotFittedError(msg % {"name": type(self).__name__}) votes_per_example = [] for x in X: if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self): """ Doc strings here. """ return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): """ encourage posteriors to approach uniform when there is low data """ correction_constant = 1 / (num_classes * num_points_in_partition) zero_posterior_idxs = np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors /= sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}): """ Doc strings here. """ self._is_fitted = False self.k = k self.kwargs = kwargs def fit(self, X, y): """ Doc strings here. """ X, y = check_X_y(X, y) self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted = True return self def vote(self, X): """ Doc strings here. """ if not self.is_fitted(): msg = ( "This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this transformer." 
) raise NotFittedError(msg % {"name": type(self).__name__}) X = check_array(X) return self.knn.predict_proba(X) def is_fitted(self): """ Doc strings here. """ return self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25, loss="mse", epochs=100, lr=1e-4, verbose=False, ): """ Doc strings here. """ self.validation_split = validation_split self.loss = loss self.epochs = epochs self.lr = lr self.verbose = verbose self._is_fitted = False def fit(self, X, y): """ Doc strings here. """ X, y = check_X_y(X, y) self.voter = keras.Sequential() self.voter.add( layers.Dense( 1, activation="linear", input_shape=(X.shape[1],), name="transform_to_vote", ) ) self.voter.compile( loss=self.loss, metrics=["mae"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor="val_loss")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted = True return self def vote(self, X): """ Doc strings here. """ if not self.is_fitted(): msg = ( "This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this transformer." ) raise NotFittedError(msg % {"name": type(self).__name__}) X = check_array(X) return self.voter.predict(X) def is_fitted(self): """ Doc strings here. """ return self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self): """ Doc strings here. """ self._is_fitted = False def fit(self, X, y): """ Doc strings here. """ self.leaf_to_yhat = {} self.global_yhat = np.mean(y) for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return self def vote(self, X): """ Doc strings here. """ if not self.is_fitted(): msg = ( "This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this voter." ) raise NotFittedError(msg % {"name": type(self).__name__}) votes_per_example = [] for x in X: if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self): """ Doc strings here. """ return self._is_fitted
[((35, 8, 35, 39), 'sklearn.utils.multiclass.check_classification_targets', 'check_classification_targets', ({(35, 37, 35, 38): 'y'}, {}), '(y)', False, 'from sklearn.utils.multiclass import check_classification_targets, type_of_target\n'), ((47, 23, 47, 35), 'numpy.unique', 'np.unique', ({(47, 33, 47, 34): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((73, 23, 73, 35), 'numpy.unique', 'np.unique', ({(73, 33, 73, 34): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((106, 15, 106, 42), 'numpy.array', 'np.array', ({(106, 24, 106, 41): 'votes_per_example'}, {}), '(votes_per_example)', True, 'import numpy as np\n'), ((142, 15, 142, 30), 'sklearn.utils.validation.check_X_y', 'check_X_y', ({(142, 25, 142, 26): 'X', (142, 28, 142, 29): 'y'}, {}), '(X, y)', False, 'from sklearn.utils.validation import check_X_y, check_array, NotFittedError\n'), ((143, 19, 143, 62), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ({(143, 40, 143, 46): 'self.k'}, {}), '(self.k, **self.kwargs)', False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((160, 12, 160, 26), 'sklearn.utils.validation.check_array', 'check_array', ({(160, 24, 160, 25): 'X'}, {}), '(X)', False, 'from sklearn.utils.validation import check_X_y, check_array, NotFittedError\n'), ((190, 15, 190, 30), 'sklearn.utils.validation.check_X_y', 'check_X_y', ({(190, 25, 190, 26): 'X', (190, 28, 190, 29): 'y'}, {}), '(X, y)', False, 'from sklearn.utils.validation import check_X_y, check_array, NotFittedError\n'), ((192, 21, 192, 39), 'tensorflow.keras.Sequential', 'keras.Sequential', ({}, {}), '()', False, 'from tensorflow import keras\n'), ((228, 12, 228, 26), 'sklearn.utils.validation.check_array', 'check_array', ({(228, 24, 228, 25): 'X'}, {}), '(X)', False, 'from sklearn.utils.validation import check_X_y, check_array, NotFittedError\n'), ((255, 27, 255, 37), 'numpy.mean', 'np.mean', ({(255, 35, 255, 36): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((257, 23, 257, 35), 'numpy.unique', 'np.unique', ({(257, 33, 257, 34): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((285, 15, 285, 42), 'numpy.array', 'np.array', ({(285, 24, 285, 41): 'votes_per_example'}, {}), '(votes_per_example)', True, 'import numpy as np\n'), ((37, 11, 37, 28), 'sklearn.utils.multiclass.type_of_target', 'type_of_target', ({(37, 26, 37, 27): 'y'}, {}), '(y)', False, 'from sklearn.utils.multiclass import check_classification_targets, type_of_target\n'), ((42, 26, 42, 38), 'numpy.unique', 'np.unique', ({(42, 36, 42, 37): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((43, 33, 43, 53), 'numpy.ones', 'np.ones', ({(43, 41, 43, 52): 'num_classes'}, {}), '(num_classes)', True, 'import numpy as np\n'), ((121, 30, 121, 55), 'numpy.where', 'np.where', ({(121, 39, 121, 54): '(posteriors == 0)'}, {}), '(posteriors == 0)', True, 'import numpy as np\n'), ((194, 12, 199, 13), 'keras.layers.Dense', 'layers.Dense', (), '', False, 'from keras import layers\n'), ((48, 27, 48, 49), 'numpy.where', 'np.where', ({(48, 36, 48, 48): '(X == leaf_id)'}, {}), '(X == leaf_id)', True, 'import numpy as np\n'), ((74, 27, 74, 49), 'numpy.where', 'np.where', ({(74, 36, 74, 48): '(X == leaf_id)'}, {}), '(X == leaf_id)', True, 'import numpy as np\n'), ((202, 55, 202, 85), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ({(202, 77, 202, 84): 'self.lr'}, {}), '(self.lr)', False, 'from tensorflow import keras\n'), ((258, 27, 258, 49), 'numpy.where', 'np.where', ({(258, 36, 258, 48): '(X == leaf_id)'}, {}), '(X == leaf_id)', True, 'import numpy as np\n'), ((260, 55, 
260, 79), 'numpy.mean', 'np.mean', ({(260, 63, 260, 78): 'y[idxs_in_leaf]'}, {}), '(y[idxs_in_leaf])', True, 'import numpy as np\n'), ((50, 72, 50, 84), 'numpy.unique', 'np.unique', ({(50, 82, 50, 83): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((52, 39, 52, 61), 'numpy.array', 'np.array', ({(52, 48, 52, 60): 'class_counts'}, {}), '(class_counts)', True, 'import numpy as np\n'), ((52, 64, 52, 84), 'numpy.sum', 'np.sum', ({(52, 71, 52, 83): 'class_counts'}, {}), '(class_counts)', True, 'import numpy as np\n'), ((78, 39, 78, 61), 'numpy.array', 'np.array', ({(78, 48, 78, 60): 'label_counts'}, {}), '(label_counts)', True, 'import numpy as np\n'), ((78, 64, 78, 84), 'numpy.sum', 'np.sum', ({(78, 71, 78, 83): 'label_counts'}, {}), '(label_counts)', True, 'import numpy as np\n'), ((208, 23, 208, 85), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', (), '', False, 'from tensorflow import keras\n'), ((50, 20, 50, 54), 'numpy.where', 'np.where', ({(50, 29, 50, 53): '(y[idxs_in_leaf] == y_val)'}, {}), '(y[idxs_in_leaf] == y_val)', True, 'import numpy as np\n'), ((56, 55, 56, 67), 'numpy.unique', 'np.unique', ({(56, 65, 56, 66): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((76, 20, 76, 53), 'numpy.where', 'np.where', ({(76, 29, 76, 52): '(y[idxs_in_leaf, j] == 1)'}, {}), '(y[idxs_in_leaf, j] == 1)', True, 'import numpy as np\n')]
jhattat/photoBooth
config.py
f6fe3ab418bb917792e10349597401ed34078766
# Tumblr Setup # Replace the values with your information # OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info consumer_key='ShbOqI5zErQXOL7Qnd5XduXpY9XQUlBgJDpCLeq1OYqnY2KzSt' #replace with your key consumer_secret='ulZradkbJGksjpl2MMlshAfJgEW6TNeSdZucykqeTp8jvwgnhu' #replace with your secret code oath_token='uUcBuvJx8yhk4HJIZ39sfcYo0W4VoqcvUetR2EwcI5Sn8SLgNt' #replace with your oath token oath_secret='iNJlqQJI6dwhAGmdNbMtD9u7VazmX2Rk5uW0fuIozIEjk97lz4' #replace with your oath secret code tumblr_blog = 'soniaetjeremie' # replace with your tumblr account name without .tumblr.com tagsForTumblr = "photobooth" # change to tags you want, separated with commas #Config settings to change behavior of photo booth monitor_w = 800 # width of the display monitor monitor_h = 480 # height of the display monitor file_path = '/home/pi/photobooth/pics/' # path to save images clear_on_startup = False # True will clear previously stored photos as the program launches. False will leave all previous photos. debounce = 0.3 # how long to debounce the button. Add more time if the button triggers too many times. post_online = True # True to upload images. False to store locally only. capture_count_pics = True # if true, show a photo count between taking photos. If false, do not. False is faster. make_gifs = True # True to make an animated gif. False to post 4 jpgs into one post. hi_res_pics = False # True to save high res pics from camera. # If also uploading, the program will also convert each image to a smaller image before making the gif. # False to first capture low res pics. False is faster. # Careful, each photo costs against your daily Tumblr upload max. camera_iso = 400 # adjust for lighting issues. Normal is 100 or 200. Sort of dark is 400. Dark is 800 max. # available options: 100, 200, 320, 400, 500, 640, 800
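# Usage sketch (added for illustration, not part of the original config file):
# the photo booth scripts are assumed to read these settings by importing this
# module directly, e.g.
#
#   import config
#   print(config.monitor_w, config.monitor_h)   # 800 480
#   if config.post_online:
#       pass  # upload the captured images to the configured Tumblr blog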
[]
GuilhemN/site-interludes
accounts/admin.py
69873810d5b0168aa57277ba51805117e6c53874
from django.contrib import admin
from django.contrib.auth.models import Group

from accounts.models import EmailUser
from shared.admin import ExportCsvMixin

# no need for groups - we only have regular users and superusers
admin.site.unregister(Group)

@admin.register(EmailUser)
class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):
    """Display options for activities in the Django admin view"""
    filename = "export_utilisateurs.csv"
    list_display = ("email", "last_name", "first_name", "is_superuser", "is_active", "email_confirmed",)
    list_filter = ("is_superuser", "is_active", "email_confirmed",)
    fields = ("email", "last_name", "first_name", "is_superuser", "is_staff", "is_active", "email_confirmed",
        ("date_joined", "last_login",),
    )
    ordering = ("last_name", "first_name")
    readonly_fields = ("date_joined", "last_login",)
    list_per_page = 200
    csv_export_exclude = ["password"]
[((8, 0, 8, 28), 'django.contrib.admin.site.unregister', 'admin.site.unregister', ({(8, 22, 8, 27): 'Group'}, {}), '(Group)', False, 'from django.contrib import admin\n'), ((10, 1, 10, 26), 'django.contrib.admin.register', 'admin.register', ({(10, 16, 10, 25): 'EmailUser'}, {}), '(EmailUser)', False, 'from django.contrib import admin\n')]
vnavascues/rotki
rotkehlchen/exchanges/coinbase.py
8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
import hashlib import hmac import logging import time from json.decoder import JSONDecodeError from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from urllib.parse import urlencode import requests from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair, ) from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: """Turns a coinbase transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is not a trade related transaction returns None Throws: - UnknownAsset due to Asset instantiation - DeserializationError due to unexpected format of dict entries - KeyError due to dict entires missing an expected entry """ if raw_trade['status'] != 'completed': # We only want to deal with completed trades return None if raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you are buying/selling tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency rate = Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def 
first_connection(self) -> None: self.first_connection_made = True def _validate_single_api_key_action( self, method_str: str, ignore_pagination: bool = False, ) -> Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error = str(e) if 'transactions' in method_str: permission = 'wallet:transactions:read' elif 'buys' in method_str: permission = 'wallet:buys:read' elif 'sells' in method_str: permission = 'wallet:sells:read' elif 'deposits' in method_str: permission = 'wallet:deposits:read' elif 'withdrawals' in method_str: permission = 'wallet:withdrawals:read' elif 'trades' in method_str: permission = 'wallet:trades:read' # the accounts elif should be at the end since the word appears # in other endpoints elif 'accounts' in method_str: permission = 'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase method {method_str} at API key validation', ) msg = ( f'Provided Coinbase API key needs to have {permission} permission activated. ' f'Please log into your coinbase account and set all required permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return None, msg except RemoteError as e: error = str(e) if 'invalid signature' in error: return None, 'Failed to authenticate with the Provided API key/secret' elif 'invalid api key' in error: return None, 'Provided API Key is invalid' else: # any other remote error return None, error return result, '' def validate_api_key(self) -> Tuple[bool, str]: """Validates that the Coinbase API key is good for usage in Rotki Makes sure that the following permissions are given to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read """ result, msg = self._validate_single_api_key_action('accounts') if result is None: return False, msg # now get the account ids account_ids = self._get_account_ids(result) if len(account_ids) != 0: # and now try to get all transactions of an account to see if that's possible method = f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all buys of an account to see if that's possible method = f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all sells of an account to see if that's possible method = f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all deposits of an account to see if that's possible method = f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all withdrawals of an account to see if that's possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg return True, '' def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]: """Gets the account ids out of the accounts response""" account_ids = [] for account_data in accounts: if 'id' not in account_data: self.msg_aggregator.add_error( 'Found coinbase account entry without an id key. Skipping it. 
', ) continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account entry with a non string id: ' f'{account_data["id"]}. Skipping it. ', ) continue account_ids.append(account_data['id']) return account_ids def _api_query( self, endpoint: str, options: Optional[Dict[str, Any]] = None, pagination_next_uri: str = None, ignore_pagination: bool = False, ) -> List[Any]: """Performs a coinbase API Query for endpoint You can optionally provide extra arguments to the endpoint via the options argument. If this is an ongoing paginating call then provide pagination_next_uri. If you want just the first results then set ignore_pagination to True. """ request_verb = "GET" if pagination_next_uri: request_url = pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options) timestamp = str(int(time.time())) message = timestamp + request_verb + request_url signature = hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This is needed to guarantee the up to the given date # API version response. 'CB-VERSION': '2019-08-25', }) full_url = self.base_uri + request_url try: response = self.session.get(full_url) except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API request failed due to {str(e)}') if response.status_code == 403: raise CoinbasePermissionError(f'API key does not have permission for {endpoint}') if response.status_code != 200: raise RemoteError( f'Coinbase query {full_url} responded with error status code: ' f'{response.status_code} and text: {response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}') if 'data' not in json_ret: raise RemoteError(f'Coinbase json response does not contain data: {response.text}') final_data = json_ret['data'] # If we got pagination and this is the first query, gather all the subsequent queries if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination: if 'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase json response contained no "next_uri" key') next_uri = json_ret['pagination']['next_uri'] if not next_uri: # As per the docs: https://developers.coinbase.com/api/v2?python#pagination # once we get an empty next_uri we are done return final_data additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp = self._api_query('accounts') except RemoteError as e: msg = ( 'Coinbase API request failed. Could not reach coinbase due ' 'to {}'.format(e) ) log.error(msg) return None, msg returned_balances: Dict[Asset, Dict[str, Any]] = {} for account in resp: try: if not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. 
Coinbase returns zero balances for everything # a user does not own if amount == ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error( f'Error processing coinbase balance entry due to inability to ' f'query USD price: {str(e)}. Skipping balance entry', ) continue if asset in returned_balances: amount = returned_balances[asset]['amount'] + amount else: returned_balances[asset] = {} returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase account balance. Check logs ' 'for details. Ignoring it.', ) log.error( 'Error processing a coinbase account balance', account_balance=account, error=msg, ) continue return returned_balances, '' def query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[Trade]: account_data = self._api_query('accounts') # now get the account ids and for each one query buys/sells # Looking at coinbase's API no other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen would need to return in query_trade_history account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades = [] for raw_trade in raw_data: try: trade = trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase trade. Check logs ' 'for details. 
Ignoring it.', ) log.error( 'Error processing a coinbase trade', trade=raw_trade, error=msg, ) continue # limit coinbase trades in the requested time range here since there # is no argument in the API call if trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]: """Processes a single deposit/withdrawal from coinbase and deserializes it Can log error/warning and return None if something went wrong at deserialization """ try: if raw_data['status'] != 'completed': return None payout_date = raw_data.get('payout_at', None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) # Only get address/transaction id for "send" type of transactions address = None transaction_id = None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data: # Then this should be a "send" which is the way Coinbase uses to send # crypto outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non "send" type found in coinbase deposit/withdrawal processing' assert raw_data['type'] == 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't see the fee being charged from the "send" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear in the docs but from an experiment of sending ETH # to an address from coinbase there is the network fee in the response fee = Fee(ZERO) raw_network = raw_data.get('network', None) if raw_network: raw_fee = raw_network.get('transaction_fee', None) if raw_fee: # Since this is a withdrawal the fee should be the same as the moved asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we set ZERO fee and ignore log.error( f'In a coinbase withdrawal of {asset.identifier} the fee' f'is denoted in {raw_fee["currency"]}', ) else: fee = deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown asset ' f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization of a coinbase ' 'asset movement. 
Check logs for details and open a bug report.', ) log.error( f'Unexpected data encountered during deserialization of coinbase ' f'asset_movement {raw_data}. Error was: {str(e)}', ) return None def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]: account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to get the "sends", which in Coinbase is the # way to send Crypto out of the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if 'type' not in tx: continue if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements = [] for raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in the requested time range # here since there is no argument in the API call if movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts: movements.append(movement) return movements
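# Illustrative sketch (added, not part of the upstream module): the minimal
# shape of a raw Coinbase buy/sell dict that trade_from_coinbase() reads,
# inferred from the key accesses above; all values below are made up.
#
#   example_raw_trade = {
#       'id': 'abc-123',
#       'status': 'completed',
#       'instant': True,
#       'created_at': '2019-08-25T10:00:00Z',
#       'payout_at': '2019-08-27T10:00:00Z',
#       'resource': 'buy',
#       'amount': {'amount': '0.5', 'currency': 'ETH'},
#       'subtotal': {'amount': '100.0', 'currency': 'EUR'},
#       'fee': {'amount': '1.5', 'currency': 'EUR'},
#   }
#   trade = trade_from_coinbase(example_raw_trade)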
[((45, 9, 45, 36), 'logging.getLogger', 'logging.getLogger', ({(45, 27, 45, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((46, 6, 46, 36), 'rotkehlchen.logging.RotkehlchenLogsAdapter', 'RotkehlchenLogsAdapter', ({(46, 29, 46, 35): 'logger'}, {}), '(logger)', False, 'from rotkehlchen.logging import RotkehlchenLogsAdapter\n'), ((69, 16, 69, 80), 'rotkehlchen.serialization.deserialize.deserialize_timestamp_from_date', 'deserialize_timestamp_from_date', ({(69, 48, 69, 56): 'raw_time', (69, 58, 69, 67): '"""iso8601"""', (69, 69, 69, 79): '"""coinbase"""'}, {}), "(raw_time, 'iso8601', 'coinbase')", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((70, 17, 70, 62), 'rotkehlchen.serialization.deserialize.deserialize_trade_type', 'deserialize_trade_type', ({(70, 40, 70, 61): "raw_trade['resource']"}, {}), "(raw_trade['resource'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((71, 16, 71, 71), 'rotkehlchen.serialization.deserialize.deserialize_asset_amount', 'deserialize_asset_amount', ({(71, 41, 71, 70): "raw_trade['amount']['amount']"}, {}), "(raw_trade['amount']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((72, 15, 72, 83), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', (), '', False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((73, 20, 73, 77), 'rotkehlchen.serialization.deserialize.deserialize_asset_amount', 'deserialize_asset_amount', ({(73, 45, 73, 76): "raw_trade['subtotal']['amount']"}, {}), "(raw_trade['subtotal']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((74, 19, 74, 89), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', (), '', False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((76, 11, 76, 72), 'rotkehlchen.typing.TradePair', 'TradePair', ({(76, 21, 76, 71): 'f"""{tx_asset.identifier}_{native_asset.identifier}"""'}, {}), "(f'{tx_asset.identifier}_{native_asset.identifier}')", False, 'from rotkehlchen.typing import ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair\n'), ((79, 11, 79, 43), 'rotkehlchen.typing.Price', 'Price', ({(79, 17, 79, 42): 'native_amount / tx_amount'}, {}), '(native_amount / tx_amount)', False, 'from rotkehlchen.typing import ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair\n'), ((80, 17, 80, 60), 'rotkehlchen.serialization.deserialize.deserialize_fee', 'deserialize_fee', ({(80, 33, 80, 59): "raw_trade['fee']['amount']"}, {}), "(raw_trade['fee']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, 
deserialize_trade_type\n'), ((81, 16, 81, 81), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', (), '', False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((320, 5, 320, 24), 'rotkehlchen.utils.interfaces.protect_with_lock', 'protect_with_lock', ({}, {}), '()', False, 'from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock\n'), ((321, 5, 321, 30), 'rotkehlchen.utils.interfaces.cache_response_timewise', 'cache_response_timewise', ({}, {}), '()', False, 'from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock\n'), ((285, 18, 288, 13), 'rotkehlchen.errors.RemoteError', 'RemoteError', ({(286, 16, 287, 67): 'f"""Coinbase query {full_url} responded with error status code: {response.status_code} and text: {response.text}"""'}, {}), "(\n f'Coinbase query {full_url} responded with error status code: {response.status_code} and text: {response.text}'\n )", False, 'from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset\n'), ((291, 23, 291, 56), 'rotkehlchen.utils.serialization.rlk_jsonloads_dict', 'rlk_jsonloads_dict', ({(291, 42, 291, 55): 'response.text'}, {}), '(response.text)', False, 'from rotkehlchen.utils.serialization import rlk_jsonloads_dict\n'), ((296, 18, 296, 95), 'rotkehlchen.errors.RemoteError', 'RemoteError', ({(296, 30, 296, 94): 'f"""Coinbase json response does not contain data: {response.text}"""'}, {}), "(f'Coinbase json response does not contain data: {response.text}')", False, 'from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset\n'), ((255, 31, 255, 49), 'urllib.parse.urlencode', 'urlencode', ({(255, 41, 255, 48): 'options'}, {}), '(options)', False, 'from urllib.parse import urlencode\n'), ((257, 28, 257, 39), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((293, 18, 293, 90), 'rotkehlchen.errors.RemoteError', 'RemoteError', ({(293, 30, 293, 89): 'f"""Coinbase returned invalid JSON response: {response.text}"""'}, {}), "(f'Coinbase returned invalid JSON response: {response.text}')", False, 'from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset\n'), ((303, 22, 303, 87), 'rotkehlchen.errors.RemoteError', 'RemoteError', ({(303, 34, 303, 86): '"""Coinbase json response contained no "next_uri" key"""'}, {}), '(\'Coinbase json response contained no "next_uri" key\')', False, 'from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset\n'), ((339, 25, 339, 79), 'rotkehlchen.serialization.deserialize.deserialize_asset_amount', 'deserialize_asset_amount', ({(339, 50, 339, 78): "account['balance']['amount']"}, {}), "(account['balance']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((346, 24, 346, 75), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', ({(346, 44, 346, 74): "account['balance']['currency']"}, {}), "(account['balance']['currency'])", False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((462, 28, 462, 95), 'rotkehlchen.serialization.deserialize.deserialize_timestamp_from_date', 'deserialize_timestamp_from_date', ({(462, 60, 462, 71): 'payout_date', (462, 73, 462, 82): '"""iso8601"""', (462, 84, 462, 94): '"""coinbase"""'}, {}), 
"(payout_date, 'iso8601', 'coinbase')", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((464, 28, 468, 17), 'rotkehlchen.serialization.deserialize.deserialize_timestamp_from_date', 'deserialize_timestamp_from_date', ({(465, 20, 465, 42): "raw_data['created_at']", (466, 20, 466, 29): '"""iso8601"""', (467, 20, 467, 30): '"""coinbase"""'}, {}), "(raw_data['created_at'], 'iso8601', 'coinbase')", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((483, 25, 483, 94), 'rotkehlchen.serialization.deserialize.deserialize_asset_amount_force_positive', 'deserialize_asset_amount_force_positive', ({(483, 65, 483, 93): "raw_data['amount']['amount']"}, {}), "(raw_data['amount']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((484, 24, 484, 91), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', (), '', False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((487, 22, 487, 31), 'rotkehlchen.typing.Fee', 'Fee', ({(487, 26, 487, 30): 'ZERO'}, {}), '(ZERO)', False, 'from rotkehlchen.typing import ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair\n'), ((508, 36, 508, 93), 'rotkehlchen.serialization.deserialize.deserialize_asset_movement_category', 'deserialize_asset_movement_category', ({(508, 72, 508, 92): "raw_data['resource']"}, {}), "(raw_data['resource'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((509, 25, 509, 94), 'rotkehlchen.serialization.deserialize.deserialize_asset_amount_force_positive', 'deserialize_asset_amount_force_positive', ({(509, 65, 509, 93): "raw_data['amount']['amount']"}, {}), "(raw_data['amount']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((510, 22, 510, 64), 'rotkehlchen.serialization.deserialize.deserialize_fee', 'deserialize_fee', ({(510, 38, 510, 63): "raw_data['fee']['amount']"}, {}), "(raw_data['fee']['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((511, 24, 511, 91), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', (), '', False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((504, 37, 504, 84), 'rotkehlchen.exchanges.utils.get_key_if_has_val', 'get_key_if_has_val', ({(504, 56, 504, 75): "raw_data['network']", (504, 77, 504, 83): '"""hash"""'}, {}), "(raw_data['network'], 'hash')", False, 'from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, 
get_key_if_has_val\n'), ((506, 30, 506, 98), 'rotkehlchen.exchanges.utils.deserialize_asset_movement_address', 'deserialize_asset_movement_address', ({(506, 65, 506, 79): "raw_data['to']", (506, 81, 506, 90): '"""address"""', (506, 92, 506, 97): 'asset'}, {}), "(raw_data['to'], 'address', asset)", False, 'from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val\n'), ((494, 32, 494, 88), 'rotkehlchen.assets.converters.asset_from_coinbase', 'asset_from_coinbase', (), '', False, 'from rotkehlchen.assets.converters import asset_from_coinbase\n'), ((501, 30, 501, 64), 'rotkehlchen.serialization.deserialize.deserialize_fee', 'deserialize_fee', ({(501, 46, 501, 63): "raw_fee['amount']"}, {}), "(raw_fee['amount'])", False, 'from rotkehlchen.serialization.deserialize import deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type\n'), ((349, 32, 349, 42), 'rotkehlchen.inquirer.Inquirer', 'Inquirer', ({}, {}), '()', False, 'from rotkehlchen.inquirer import Inquirer\n')]
aonrobot/MSC-thug-auth-provider
lib/python3.7/site-packages/ldap/controls/deref.py
aef37ef5a000586b8502cc536244f31e08b9c2db
# -*- coding: utf-8 -*- """ ldap.controls.deref - classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project details. """ __all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request types #--------------------------------------------------------------------------- # For compatibility with ASN.1 declaration in I-D AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList() ), ) class DerefSpecs(univ.SequenceOf): componentType = DerefSpec() # Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType = AttributeValue() class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or {} def _derefSpecs(self): deref_specs = DerefSpecs() i = 0 for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes = AttributeList() for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1 return deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for deref_res in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]): [str(v) for v in tv[1]] for tv in deref_vals or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)] KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl
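# Usage sketch (added for illustration, not part of the upstream module):
# request dereferencing of the 'member' attribute during a search; the
# connection URL, base DN and filter below are placeholders.
#
#   import ldap
#   from ldap.controls.deref import DereferenceControl
#
#   conn = ldap.initialize('ldap://localhost')
#   ctrl = DereferenceControl(criticality=True,
#                             derefSpecs={'member': ['uid', 'cn']})
#   msgid = conn.search_ext('dc=example,dc=org', ldap.SCOPE_SUBTREE,
#                           '(objectClass=groupOfNames)', serverctrls=[ctrl])
#   # A decoded DereferenceControl in the server response then exposes the
#   # dereferenced attribute values via its derefRes dict.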
[((52, 20, 52, 36), 'pyasn1_modules.rfc2251.AttributeValue', 'AttributeValue', ({}, {}), '()', False, 'from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue\n'), ((65, 4, 65, 59), 'pyasn1.type.tag.Tag', 'tag.Tag', ({(65, 12, 65, 31): 'tag.tagClassContext', (65, 32, 65, 56): 'tag.tagFormatConstructed', (65, 57, 65, 58): '0'}, {}), '(tag.tagClassContext, tag.tagFormatConstructed, 0)', False, 'from pyasn1.type import namedtype, univ, tag\n'), ((85, 4, 85, 59), 'ldap.controls.LDAPControl.__init__', 'LDAPControl.__init__', ({(85, 25, 85, 29): 'self', (85, 30, 85, 46): 'self.controlType', (85, 47, 85, 58): 'criticality'}, {}), '(self, self.controlType, criticality)', False, 'from ldap.controls import LDAPControl, KNOWN_RESPONSE_CONTROLS\n'), ((36, 6, 36, 28), 'pyasn1_modules.rfc2251.AttributeDescription', 'AttributeDescription', ({}, {}), '()', False, 'from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue\n'), ((57, 32, 57, 54), 'pyasn1_modules.rfc2251.AttributeDescription', 'AttributeDescription', ({}, {}), '()', False, 'from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue\n'), ((71, 37, 71, 59), 'pyasn1_modules.rfc2251.AttributeDescription', 'AttributeDescription', ({}, {}), '()', False, 'from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue\n'), ((72, 36, 72, 44), 'pyasn1_modules.rfc2251.LDAPDN', 'LDAPDN', ({}, {}), '()', False, 'from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue\n'), ((96, 48, 96, 80), 'pyasn1_modules.rfc2251.AttributeDescription', 'AttributeDescription', ({(96, 69, 96, 79): 'deref_attr'}, {}), '(deref_attr)', False, 'from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue\n')]
notagoat/Deepmoji
emoji.py
1ab922306c3647f9c7ea98caa2660a53b18fe4b6
import requests import urllib.request import os.path import shutil import csv def main(): with open("data.csv") as i: #Open the data.csv file instances = i.readlines() #Write them into memory instances = [x.strip() for x in instances] #Strip any weird issues from writing instances.sort() #Sort them alphabetically setup(instances) #Run setup to create all the necessary files and subfolders count = len(instances) #Get the count just for fun i = 0 try: for name in instances: try: i += 1 print("-----!"+name+"!-----") print(str(i) +" of " + str(count) + " remaining!") fetch(name) #Run the fetching code except Exception as e: print(e) #Print the error. We catch errors here for pleroma instances, weirdly encoded urls, etc pass #Don't stop the beat except Exception as e: print("Instance Error") print(e) pass clone(instances) #Clone all of them into one big folder for ease of access def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard url for fetching data path = "emoji/%s/" % name #Because of the clone function we know all of these folders will exist try: for emoji in r.json(): #Emoji = the json code from the request try: if os.path.isfile(path+emoji['shortcode']+".png"): #Check to see if it exists. pass else: if "ms_" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most of them). #Mutant standard is huge and common #print(emoji['shortcode'] + " found!") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json open(path + emoji['shortcode']+".png",'wb').write(emojiimage.content) #Now save it as an image in the filesystem except Exception as e: print("Did not get: " + emoji['url']) #If somethings fucky throw a nice error then keep going. print(e) pass except Exception as e: print(e) def setup(instances): if (os.path.isdir("emoji/")): #Check to see if emoji/ exists pass else: os.mkdir("emoji/") #make it if it doesnt for name in instances: if (os.path.isdir("emoji/%s/"%name)): pass else: os.mkdir("emoji/%s/"%name) if (os.path.isdir("emoji/all")): pass else: os.mkdir("emoji/all") def clone(instances): for name in instances: print("Copying emoji for: %s"% name) path = "emoji/%s/" % name files = os.listdir(path) for name in files: #This gets alll files try: shutil.copyfile(path+name,"emoji/all/"+name) #Then copies them into the all folder except Exception as e: print(e) pass if __name__ == '__main__': main()
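# Note (added): data.csv is expected to contain one instance hostname per line
# (e.g. "mastodon.social"), since each entry is interpolated into
# https://<name>/api/v1/custom_emojis by fetch() above.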
[((34, 8, 34, 83), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((76, 16, 76, 60), 'shutil.copyfile', 'shutil.copyfile', ({(76, 32, 76, 41): '(path + name)', (76, 42, 76, 59): "('emoji/all/' + name)"}, {}), "(path + name, 'emoji/all/' + name)", False, 'import shutil\n'), ((44, 37, 44, 91), 'requests.get', 'requests.get', (), '', False, 'import requests\n')]
Zhenye-Na/LxxxCode
String/640.One Edit Distance/Solution_DP.py
afd79d790d0a7495d75e6650f80adaa99bd0ff07
class Solution:
    """
    @param s: a string
    @param t: a string
    @return: true if they are both one edit distance apart or false
    """

    def isOneEditDistance(self, s, t):
        # Equal strings are zero edits apart, not one.
        if s == t:
            return False

        # A length difference greater than one needs at least two edits.
        if abs(len(s) - len(t)) > 1:
            return False

        n, m = len(s), len(t)
        # Rolling two-row DP over the classic edit-distance recurrence;
        # f[i % 2][j] holds the edit distance between s[:i] and t[:j].
        f = [[0] * (m + 1) for _ in range(2)]

        for j in range(m + 1):
            f[0][j] = j

        for i in range(1, n + 1):
            f[i % 2][0] = i
            for j in range(1, m + 1):
                if s[i - 1] == t[j - 1]:
                    f[i % 2][j] = min(f[(i - 1) % 2][j - 1], f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)
                else:
                    f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1, f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)

        # "One edit distance" means the full edit distance is exactly 1.
        return f[n % 2][m] == 1
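
# Quick usage sketch (added for illustration; not part of the original
# solution file).
if __name__ == "__main__":
    sol = Solution()
    print(sol.isOneEditDistance("aDb", "adb"))  # True: one substitution
    print(sol.isOneEditDistance("ab", "ab"))    # False: zero edits apart
    print(sol.isOneEditDistance("a", "abc"))    # False: two insertions needed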
[]
jehiah/pynsq
nsq/__init__.py
899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96
from __future__ import absolute_import import signal import tornado.ioloop import logging from .protocol import ( Error, unpack_response, decode_message, valid_topic_name, valid_channel_name, identify, subscribe, ready, finish, touch, requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message import Message from .backoff_timer import BackoffTimer from .sync import SyncConn from .async import AsyncConn from .reader import Reader from .legacy_reader import LegacyReader from .writer import Writer from .version import __version__ # NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal handler called with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): """ Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` """ signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = "Matt Reiferson <[email protected]>" __all__ = ["Reader", "Writer", "run", "BackoffTimer", "Message", "Error", "LegacyReader", "SyncConn", "AsyncConn", "unpack_response", "decode_message", "identify", "subscribe", "ready", "finish", "touch", "requeue", "nop", "pub", "mpub", "valid_topic_name", "valid_channel_name", "FRAME_TYPE_RESPONSE", "FRAME_TYPE_ERROR", "FRAME_TYPE_MESSAGE"]
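# Usage sketch (added for illustration, not executed here): the typical
# pattern this package exposes is to instantiate a Reader with a message
# handler and then start the IOLoop via run(); the addresses, topic and
# channel below are placeholders.
#
#   import nsq
#
#   def handler(message):
#       print(message.body)
#       return True
#
#   r = nsq.Reader(message_handler=handler,
#                  lookupd_http_addresses=['http://127.0.0.1:4161'],
#                  topic='test_topic', channel='test_channel', max_in_flight=9)
#   nsq.run()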
[]
Hespian/ParFastKer
scripts/summaryPlot.py
5ddf1685c0652e73c889cfc64c7ec1fd827f905c
import get_data_ours import get_data_akiba import get_data_NearLinear import get_data_LinearTime import os import matplotlib.pyplot as plt # graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "RHG-100000000-nodes-2000000000-edges", "delaunay_n24", "del26"] graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "delaunay_n24", "del26"] linearTimeDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs" partitioningDir = "../../LinearTimeKernels/partitions" ourTimeDir = "../../results/LinearTimeKernelsScalingAll" nearLinearDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear" akibaDir = "../../akiba_vertex_cover/results" def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict() result["time"] = res["sequential_quasikernel_time"] + res["lineartime_time"] result["size"] = res["sequential_quasikernel_size"] return result def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict() result["time"] = res["parallel_quasikernel_time"] + res["lineartime_time"] + res["partitioning_time"] result["size"] = res["parallel_quasikernel_size"] return result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel, akiba, nearLinear, linearTime] # data = [oursequential, ourparallel, akiba, nearLinear] data = filter(lambda x : x >= 0, data) minimum = min(data) if minimum == 0: return 1 return minimum oursizeSequential = [] ourtimeSequential = [] oursizeParallel = [] ourtimeParallel = [] akibasize = [] akibatime = [] nearlinearsize = [] nearlineartime = [] lineartimesize = [] lineartimetime = [] for graph in graphs: minsize = getAkibaTimeAndSize(graph)["size"] mintime = getAkibaTimeAndSize(graph)["time"] oss = getOurTimeAndSizeSequential(graph)["size"] / minsize # print(graph + "(sequential): " + str(getOurTimeAndSizeSequential(graph)["size"])) ots = getOurTimeAndSizeSequential(graph)["time"] / mintime if oss > 0 and ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)["size"] / minsize # print(graph + "(parallel): " + str(getOurTimeAndSizeParallel(graph)["size"])) otp = getOurTimeAndSizeParallel(graph)["time"] / mintime if osp > 0 and otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)["size"] / minsize akt = getAkibaTimeAndSize(graph)["time"] / mintime if aks > 0 and akt > 0: akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)["size"] / minsize nlt = getNearLinearTimeAndSize(graph)["time"] / mintime if nls > 0 and nlt > 0: nearlinearsize.append(nls) 
nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)["size"] / minsize ltt = getLinearTimeTimeAndSize(graph)["time"] / mintime if nls > 0 and nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt) # print("We") # print(oursizeSequential) # print(ourtimeSequential) # print("We (parallel)") # print(oursizeParallel) # print(ourtimeParallel) # print("Akiba") # print(akibasize) # print(akibatime) # print("NearLinear") # print(nearlinearsize) # print(nearlineartime) # print("LinearTime") # print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14) fig = plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1) plt.title("Summary", fontsize=14) ax.set_yscale("log") ax.set_xscale("log") ax.scatter(ourtimeSequential, oursizeSequential, label="FastKer", marker="x", color="green") ax.scatter(ourtimeParallel, oursizeParallel, label="ParFastKer", marker="+", color="black") # ax.scatter(akibatime, akibasize, label="VCSolver", marker="^", edgecolors="blue", facecolors="none") ax.scatter(nearlineartime, nearlinearsize, label="NearLinear", marker="o", edgecolors="red", facecolors="none") ax.scatter(lineartimetime, lineartimesize, label="LinearTime", marker="^", edgecolors="magenta", facecolors="none") plt.xlabel("time / VCSolver time") plt.ylabel("size / VCSolver size") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode="expand") plt.savefig("summaryplot_vcsolver_baseline.pdf", bbox_inches="tight") # plt.show()
[((120, 0, 120, 23), 'matplotlib.pyplot.rc', 'plt.rc', (), '', True, 'import matplotlib.pyplot as plt\n'), ((121, 6, 121, 36), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((123, 0, 123, 33), 'matplotlib.pyplot.title', 'plt.title', (), '', True, 'import matplotlib.pyplot as plt\n'), ((131, 0, 131, 34), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(131, 11, 131, 33): '"""time / VCSolver time"""'}, {}), "('time / VCSolver time')", True, 'import matplotlib.pyplot as plt\n'), ((132, 0, 132, 34), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(132, 11, 132, 33): '"""size / VCSolver size"""'}, {}), "('size / VCSolver size')", True, 'import matplotlib.pyplot as plt\n'), ((133, 0, 133, 29), 'matplotlib.pyplot.xticks', 'plt.xticks', ({(133, 11, 133, 28): '[0.0001, 0.01, 1]'}, {}), '([0.0001, 0.01, 1])', True, 'import matplotlib.pyplot as plt\n'), ((135, 0, 135, 69), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n'), ((17, 10, 17, 101), 'get_data_ours.getOurTimeAndSizeUltrafast', 'get_data_ours.getOurTimeAndSizeUltrafast', ({(17, 51, 17, 56): 'graph', (17, 58, 17, 71): 'linearTimeDir', (17, 73, 17, 88): 'partitioningDir', (17, 90, 17, 100): 'ourTimeDir'}, {}), '(graph, linearTimeDir,\n partitioningDir, ourTimeDir)', False, 'import get_data_ours\n'), ((24, 10, 24, 101), 'get_data_ours.getOurTimeAndSizeUltrafast', 'get_data_ours.getOurTimeAndSizeUltrafast', ({(24, 51, 24, 56): 'graph', (24, 58, 24, 71): 'linearTimeDir', (24, 73, 24, 88): 'partitioningDir', (24, 90, 24, 100): 'ourTimeDir'}, {}), '(graph, linearTimeDir,\n partitioningDir, ourTimeDir)', False, 'import get_data_ours\n'), ((31, 11, 31, 62), 'get_data_akiba.getAkibaTimeAndSize', 'get_data_akiba.getAkibaTimeAndSize', ({(31, 46, 31, 51): 'graph', (31, 53, 31, 61): 'akibaDir'}, {}), '(graph, akibaDir)', False, 'import get_data_akiba\n'), ((34, 11, 34, 77), 'get_data_NearLinear.getNearLinearTimeAndSize', 'get_data_NearLinear.getNearLinearTimeAndSize', ({(34, 56, 34, 61): 'graph', (34, 63, 34, 76): 'nearLinearDir'}, {}), '(graph, nearLinearDir)', False, 'import get_data_NearLinear\n'), ((37, 11, 37, 77), 'get_data_LinearTime.getLinearTimeTimeAndSize', 'get_data_LinearTime.getLinearTimeTimeAndSize', ({(37, 56, 37, 61): 'graph', (37, 63, 37, 76): 'linearTimeDir'}, {}), '(graph, linearTimeDir)', False, 'import get_data_LinearTime\n')]
lrnt/git-bouncer
bouncer/cli/base.py
3015e11a5d2c90986124de73bf1fd0f5a8563360
import configparser import sys import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs): def decorator(method): if not hasattr(method, 'options'): method.options = [] method.options.append((args, kwargs)) return method return decorator def noopts(method): method.options = [] return method class HelpMixin(object): def help(self): print('available commands:') for name, command in self.commands.items(): description = str(command.__doc__ or '').strip('\n') print(' ', name.ljust(10), description) return 1 class SubParser(HelpMixin): def __init__(self, commands): self.commands = self._commands(commands) def _commands(self, commands): prog = sys.argv[0] result = {} for cmd in commands: name = getattr(cmd, '_name', None) if not name: continue cmd.prog = prog result[name] = cmd return result def run(self): args = sys.argv[1:] for index, arg in enumerate(args): if arg in self.commands.keys(): args.pop(index) return self.commands[arg](args) return self.help() class Command(HelpMixin): def __init__(self): self.global_options = [] self.commands = self._methods_with_opts() def _methods_with_opts(self): result = {} for name in dir(self): if name.startswith('__'): continue method = getattr(self, name) if not hasattr(method, 'options'): continue result[name] = method return result def _parse_args(self, method, args): prog = '{} {} {}'.format(self.prog, self._name, method.__name__) parser = ArgumentParser( prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter ) for opt in method.options + self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def _call_method(self, method, args): # Find out which arguments the method expects expected_args, _, _, _ = inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method, args) method_args = {} # Get the expected method arguments, ignore rest for name in expected_args: if name in args: method_args[name] = args.pop(name) # Put rest of the arguments in self for name, value in self_args.items(): setattr(self, name, value) self.pre_command() return method(**method_args) def __call__(self, args): for index, arg in enumerate(args): if arg in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args) return self.help() def opt(self, *args, **kwargs): self.global_options.append((args, kwargs)) def pre_command(self): pass class BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path', help='Configuration file', default='~/.test.conf' ) def pre_command(self): config = configparser.ConfigParser() config.read(self.config_path) print(config.sections())
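# Usage sketch (added for illustration, not part of the original module): a
# concrete subcommand would subclass BaseCommand, set _name, decorate its
# methods with @opt/@noopts, and be dispatched through SubParser; the 'hello'
# command below is hypothetical.
#
#   class Hello(BaseCommand):
#       _name = 'hello'
#
#       @opt('--name', dest='name', default='world')
#       def greet(self):
#           """Print a greeting."""
#           print('hello', self.name)
#
#   if __name__ == '__main__':
#       SubParser([Hello()]).run()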
[((73, 17, 77, 9), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((86, 33, 86, 59), 'inspect.getargspec', 'inspect.getargspec', ({(86, 52, 86, 58): 'method'}, {}), '(method)', False, 'import inspect\n'), ((128, 17, 128, 44), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n')]
MahdadJafarzadeh/ssccoorriinngg
Examples/ExampleCodes_ssccoorriinngg.py
63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
#%% Import libs import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_validate from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score import h5py import time from ssccoorriinngg import ssccoorriinngg import numpy as np from sklearn.model_selection import cross_validate #%% Picking featureset of interest and apply classification Object = ssccoorriinngg(filename='', channel='', fs = 200, T = 30) path = 'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train' feats = 'featureset' labels = 'labels' # Train set X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels) # Test set fname = 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels) # Define the scoring criteria: scoring = {'accuracy' : make_scorer(accuracy_score), 'precision' : make_scorer(precision_score), 'recall' : make_scorer(recall_score), 'f1_score' : make_scorer(f1_score)} # Cross-validation using logistic Random Forests y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000, cv = 10 , max_depth=3, learning_rate=.1) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome measures # Defien required metrics here: Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for metric in Metrics: #RF r1 = results_RF[metric].mean() std1 = results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}') # xgb r2 = results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}') # SVM r3 = results_SVM[metric].mean() std3 = results_SVM[metric].std() print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}') # LR r4 = results_LR[metric].mean() std4 = results_LR[metric].std() print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}') #%% Applying Randomized grid search to find the best config. 
of RF BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y, estimator = RandomForestClassifier(), scoring = scoring, n_estimators = [int(x) for x in np.arange(10, 500, 20)], max_features = ['log2', 'sqrt'], max_depth = [int(x) for x in np.arange(10, 100, 30)], min_samples_split = [2, 5, 10], min_samples_leaf = [1, 2, 4], bootstrap = [True, False], n_iter = 100, cv = 10) #%% Test feature selection methods ## # PCA PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5) # Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7) # Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1) #ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20) #### NOW TEST CLASSIFIERS WITH SELECTED FEATS results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv = 10) #%% Example save featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3') #%% Example load features: X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname = 'feat42_N3_fp2-M1', feats = 'featureset', labels = 'labels') #%% Combining some REM and SWS epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1', saving = True, fname_save = 'tr90_N3&REM_fp1-M2') #%% How to save some results? directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' fname = '42feats_N3' with h5py.File((directory+fname + '.h5'), 'w') as wf: # Accuracies dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy']) dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy']) dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy']) # Precision dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision']) dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision']) dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision']) dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision']) # Recall dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall']) dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall']) dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall']) dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall']) # f1-score dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score']) dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score']) dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score']) #%% 
Extracting features from more than one channel: tic = time.time() ########### Central electrodes ############# main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+"tr90_N3_C3-M2_C4-M1.h5") fname_C_REM = (main_path+"tr90_REM_C3-M2_C4-M1.h5") ch_C4 = 'C4-M1' ch_C3 = 'C3-M2' Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30) X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM') Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM') Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30) X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30) X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3') ########### Occipital electrodes ############# main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/" fname_O_N3 = (main_path+"tr90_N3_O1-M2_O2-M1.h5") fname_O_REM = (main_path+"tr90_REM_O1-M2_O2-M1.h5") ch_O2 = 'O2-M1' ch_O1 = 'O1-M2' Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30) X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction() Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T = 30) X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM') Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3') Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3') ########### Fp electrodes ############# main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/" fname_fp_N3 = (main_path+"tr90_N3_fp1-M2_fp2-M1.h5") fname_fp_REM = (main_path+"tr90_REM_fp1-M2_fp2-M1.h5") ch_fp2 = 'fp2-M1' ch_fp1 = 'fp1-M2' Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30) X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM') Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30) X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3') Object_fp2_N3 = 
ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30) X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3') toc = time.time() print(f'time taken: {toc - tic}') ########## Concatenate all features ######### # RIGHT hemisphere - REM X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM)) X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM)) # RIGHT hemisphere - N3 X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere - REM X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM)) X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM)) # LEFT hemisphere - N3 X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3)) # Both sides - REM X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Both sides - N3 X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) # Combine SWS and REM X_SWS_REM = np.row_stack((X_N3, X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL COMBINATIONS Object = ML_Depression(filename='', channel='', fs = 200, T = 30) # one hemisphere Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM') Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3') Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3') # Both hemisphere Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM') # Both hemispheres- SWS &REM combination Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM') #%% Load features from different brain regions, sleep stage and combine them Object = ML_Depression(filename='', channel='', fs = 200, T = 30) path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' feats = 'featureset' labels = 'labels' # Pick right hemisphere N3 fname_rh_N3 = 'feat42_rh_N3' X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) # Pick left hemisphere N3 fname_lh_N3 = 'feat42_lh_N3' X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels) # Pick right hemisphere REM fname_rh_REM = 'feat42_rh_REM' X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels) # Pick LEFT hemisphere REM fname_lh_REM = 'feat42_lh_REM' X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels) # Combine them X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Save combination Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path, filename = 'feat42_l&rh_REM')
[((14, 9, 14, 66), 'ssccoorriinngg.ssccoorriinngg', 'ssccoorriinngg', (), '', False, 'from ssccoorriinngg import ssccoorriinngg\n'), ((127, 6, 127, 17), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((197, 6, 197, 17), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((201, 11, 201, 48), 'numpy.column_stack', 'np.column_stack', ({(201, 27, 201, 47): '(X_fp2_REM, X_C4_REM)'}, {}), '((X_fp2_REM, X_C4_REM))', True, 'import numpy as np\n'), ((202, 11, 202, 47), 'numpy.column_stack', 'np.column_stack', ({(202, 27, 202, 46): '(X_rh_REM, X_O2_REM)'}, {}), '((X_rh_REM, X_O2_REM))', True, 'import numpy as np\n'), ((204, 10, 204, 45), 'numpy.column_stack', 'np.column_stack', ({(204, 26, 204, 44): '(X_fp2_N3, X_C4_N3)'}, {}), '((X_fp2_N3, X_C4_N3))', True, 'import numpy as np\n'), ((205, 10, 205, 44), 'numpy.column_stack', 'np.column_stack', ({(205, 26, 205, 43): '(X_rh_N3, X_O2_N3)'}, {}), '((X_rh_N3, X_O2_N3))', True, 'import numpy as np\n'), ((207, 11, 207, 48), 'numpy.column_stack', 'np.column_stack', ({(207, 27, 207, 47): '(X_fp1_REM, X_C3_REM)'}, {}), '((X_fp1_REM, X_C3_REM))', True, 'import numpy as np\n'), ((208, 11, 208, 47), 'numpy.column_stack', 'np.column_stack', ({(208, 27, 208, 46): '(X_lh_REM, X_O1_REM)'}, {}), '((X_lh_REM, X_O1_REM))', True, 'import numpy as np\n'), ((210, 10, 210, 45), 'numpy.column_stack', 'np.column_stack', ({(210, 26, 210, 44): '(X_fp1_N3, X_C3_N3)'}, {}), '((X_fp1_N3, X_C3_N3))', True, 'import numpy as np\n'), ((211, 10, 211, 44), 'numpy.column_stack', 'np.column_stack', ({(211, 26, 211, 43): '(X_lh_N3, X_O1_N3)'}, {}), '((X_lh_N3, X_O1_N3))', True, 'import numpy as np\n'), ((214, 8, 214, 45), 'numpy.column_stack', 'np.column_stack', ({(214, 24, 214, 44): '(X_rh_REM, X_lh_REM)'}, {}), '((X_rh_REM, X_lh_REM))', True, 'import numpy as np\n'), ((216, 7, 216, 42), 'numpy.column_stack', 'np.column_stack', ({(216, 23, 216, 41): '(X_rh_N3, X_lh_N3)'}, {}), '((X_rh_N3, X_lh_N3))', True, 'import numpy as np\n'), ((218, 12, 218, 39), 'numpy.row_stack', 'np.row_stack', ({(218, 25, 218, 38): '(X_N3, X_REM)'}, {}), '((X_N3, X_REM))', True, 'import numpy as np\n'), ((219, 12, 219, 49), 'numpy.concatenate', 'np.concatenate', ({(219, 27, 219, 48): '(y_fp2_N3, y_fp2_REM)'}, {}), '((y_fp2_N3, y_fp2_REM))', True, 'import numpy as np\n'), ((252, 7, 252, 42), 'numpy.column_stack', 'np.column_stack', ({(252, 23, 252, 41): '(X_rh_N3, X_lh_N3)'}, {}), '((X_rh_N3, X_lh_N3))', True, 'import numpy as np\n'), ((254, 8, 254, 45), 'numpy.column_stack', 'np.column_stack', ({(254, 24, 254, 44): '(X_rh_REM, X_lh_REM)'}, {}), '((X_rh_REM, X_lh_REM))', True, 'import numpy as np\n'), ((26, 24, 26, 51), 'sklearn.metrics.make_scorer', 'make_scorer', ({(26, 36, 26, 50): 'accuracy_score'}, {}), '(accuracy_score)', False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((27, 25, 27, 53), 'sklearn.metrics.make_scorer', 'make_scorer', ({(27, 37, 27, 52): 'precision_score'}, {}), '(precision_score)', False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((28, 22, 28, 47), 'sklearn.metrics.make_scorer', 'make_scorer', ({(28, 34, 28, 46): 'recall_score'}, {}), '(recall_score)', False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((29, 24, 29, 45), 'sklearn.metrics.make_scorer', 'make_scorer', ({(29, 36, 29, 44): 'f1_score'}, {}), '(f1_score)', False, 'from sklearn.metrics import make_scorer, 
accuracy_score, precision_score, recall_score, f1_score\n'), ((104, 5, 104, 46), 'h5py.File', 'h5py.File', ({(104, 16, 104, 39): "(directory + fname + '.h5')", (104, 42, 104, 45): '"""w"""'}, {}), "(directory + fname + '.h5', 'w')", False, 'import h5py\n'), ((60, 36, 60, 60), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((61, 56, 61, 78), 'numpy.arange', 'np.arange', ({(61, 66, 61, 68): '10', (61, 70, 61, 73): '500', (61, 75, 61, 77): '20'}, {}), '(10, 500, 20)', True, 'import numpy as np\n'), ((63, 53, 63, 75), 'numpy.arange', 'np.arange', ({(63, 63, 63, 65): '10', (63, 67, 63, 70): '100', (63, 72, 63, 74): '30'}, {}), '(10, 100, 30)', True, 'import numpy as np\n')]
suresh-guttikonda/iGibson
igibson/examples/behavior/behavior_demo_collection.py
a69e623058180146466cd52d4bb3c00d1facdacf
""" Main BEHAVIOR demo collection entrypoint """ import argparse import copy import datetime import os import bddl import numpy as np import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator import Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices = [ "Beechwood_0_int", "Beechwood_1_int", "Benevolence_0_int", "Benevolence_1_int", "Benevolence_2_int", "Ihlen_0_int", "Ihlen_1_int", "Merom_0_int", "Merom_1_int", "Pomaria_0_int", "Pomaria_1_int", "Pomaria_2_int", "Rs_int", "Wainscott_0_int", "Wainscott_1_int", ] task_id_choices = [0, 1] parser = argparse.ArgumentParser(description="Run and collect an ATUS demo") parser.add_argument( "--task", type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl." ) parser.add_argument( "--task_id", type=int, required=True, choices=task_id_choices, nargs="?", help="BDDL integer ID, matching suffix of bddl.", ) parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log") parser.add_argument( "--scene", type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes." ) parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.") parser.add_argument( "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches." ) parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.") parser.add_argument( "--no_vr", action="store_true", help="Whether to turn off VR recording and save random actions." 
) parser.add_argument("--max_steps", type=int, default=-1, help="Maximum number of steps to record before stopping.") return parser.parse_args() def main(): args = parse_args() bddl.set_backend("iGibson") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): # HDR files for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" ) background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") # VR rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR system settings mode = "headless" if no_vr else "vr" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling = True if not disable_scene_cache: scene_kwargs = { "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None if not disable_save: timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") if vr_log_path is None: vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps < 0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if steps < 2: action = np.zeros((28,)) action[19] = 1 action[27] = 1 else: action = np.random.uniform(-0.01, 0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if not no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"): vr_cs.toggle_show_state() if log_writer and not disable_save: 
log_writer.process_frame() if task_done: post_task_steps -= 1 if post_task_steps == 0: break steps += 1 if log_writer and not disable_save: log_writer.end_log_session() s.disconnect() if __name__ == "__main__": main()
[((44, 13, 44, 80), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((74, 4, 74, 31), 'bddl.set_backend', 'bddl.set_backend', ({(74, 21, 74, 30): '"""iGibson"""'}, {}), "('iGibson')", False, 'import bddl\n'), ((100, 18, 100, 95), 'os.path.join', 'os.path.join', ({(100, 31, 100, 54): 'igibson.ig_dataset_path', (100, 56, 100, 64): '"""scenes"""', (100, 66, 100, 78): '"""background"""', (100, 80, 100, 94): '"""probe_02.hdr"""'}, {}), "(igibson.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')", False, 'import os\n'), ((101, 19, 101, 96), 'os.path.join', 'os.path.join', ({(101, 32, 101, 55): 'igibson.ig_dataset_path', (101, 57, 101, 65): '"""scenes"""', (101, 67, 101, 79): '"""background"""', (101, 81, 101, 95): '"""probe_03.hdr"""'}, {}), "(igibson.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')", False, 'import os\n'), ((102, 36, 104, 5), 'os.path.join', 'os.path.join', ({(103, 8, 103, 31): 'igibson.ig_dataset_path', (103, 33, 103, 41): '"""scenes"""', (103, 43, 103, 51): '"""Rs_int"""', (103, 53, 103, 61): '"""layout"""', (103, 63, 103, 86): '"""floor_lighttype_0.png"""'}, {}), "(igibson.ig_dataset_path, 'scenes', 'Rs_int', 'layout',\n 'floor_lighttype_0.png')", False, 'import os\n'), ((105, 25, 105, 109), 'os.path.join', 'os.path.join', ({(105, 38, 105, 61): 'igibson.ig_dataset_path', (105, 63, 105, 71): '"""scenes"""', (105, 73, 105, 85): '"""background"""', (105, 87, 105, 108): '"""urban_street_01.jpg"""'}, {}), "(igibson.ig_dataset_path, 'scenes', 'background',\n 'urban_street_01.jpg')", False, 'import os\n'), ((108, 28, 119, 5), 'igibson.render.mesh_renderer.mesh_renderer_cpu.MeshRendererSettings', 'MeshRendererSettings', (), '', False, 'from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings\n'), ((130, 22, 130, 63), 'igibson.activity.activity_base.iGBEHAVIORActivityInstance', 'iGBEHAVIORActivityInstance', ({(130, 49, 130, 53): 'task', (130, 55, 130, 62): 'task_id'}, {}), '(task, task_id)', False, 'from igibson.activity.activity_base import iGBEHAVIORActivityInstance\n'), ((168, 22, 168, 52), 'copy.deepcopy', 'copy.deepcopy', ({(168, 36, 168, 51): 'POST_TASK_STEPS'}, {}), '(POST_TASK_STEPS)', False, 'import copy\n'), ((169, 28, 169, 64), 'copy.deepcopy', 'copy.deepcopy', ({(169, 42, 169, 63): 'PHYSICS_WARMING_STEPS'}, {}), '(PHYSICS_WARMING_STEPS)', False, 'import copy\n'), ((147, 16, 149, 9), 'igibson.render.mesh_renderer.mesh_renderer_vr.VrConditionSwitcher', 'VrConditionSwitcher', ({(148, 12, 148, 37): 'igbhvr_act_inst.simulator', (148, 39, 148, 71): 'igbhvr_act_inst.show_instruction', (148, 73, 148, 108): 'igbhvr_act_inst.iterate_instruction'}, {}), '(igbhvr_act_inst.simulator, igbhvr_act_inst.\n show_instruction, igbhvr_act_inst.iterate_instruction)', False, 'from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings\n'), ((156, 21, 164, 9), 'igibson.utils.ig_logging.IGLogWriter', 'IGLogWriter', (), '', False, 'from igibson.utils.ig_logging import IGLogWriter\n'), ((126, 20, 126, 43), 'igibson.render.mesh_renderer.mesh_renderer_vr.VrSettings', 'VrSettings', (), '', False, 'from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings\n'), ((153, 20, 153, 43), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((178, 25, 178, 40), 'numpy.zeros', 'np.zeros', ({(178, 34, 178, 39): '(28,)'}, {}), '((28,))', True, 'import numpy as np\n'), ((182, 25, 182, 67), 'numpy.random.uniform', 
'np.random.uniform', (), '', True, 'import numpy as np\n'), ((186, 25, 186, 46), 'numpy.zeros_like', 'np.zeros_like', ({(186, 39, 186, 45): 'action'}, {}), '(action)', True, 'import numpy as np\n')]
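As a hedged illustration of how the collector above might be driven without the CLI (the activity name, scene and log path are invented placeholders, and the import path is inferred from this file's repository path):

# Hypothetical invocation sketch; task/scene/log-path values are placeholders.
import bddl

from igibson.examples.behavior.behavior_demo_collection import collect_demo

bddl.set_backend("iGibson")          # same backend selection main() performs
collect_demo(
    task="cleaning_microwave_oven",  # placeholder BDDL activity name
    task_id=0,
    scene="Rs_int",
    vr_log_path="demo.hdf5",
    max_steps=500,                   # stop recording after 500 steps
    no_vr=True,                      # headless: random actions instead of VR input
)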
digitalmarmalade/wagtail
wagtail/wagtailadmin/menu.py
ac4d23172ff3f42746625630583b17d243fb9822
from django.utils.text import slugify
from django.utils.html import format_html


class MenuItem(object):
    def __init__(self, label, url, name=None, classnames='', order=1000):
        self.label = label
        self.url = url
        self.classnames = classnames
        self.name = (name or slugify(unicode(label)))
        self.order = order

    def render_html(self):
        return format_html(
            u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
            self.name, self.url, self.classnames, self.label)
[((14, 15, 16, 61), 'django.utils.html.format_html', 'format_html', ({(15, 12, 15, 78): 'u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>"""', (16, 12, 16, 21): 'self.name', (16, 23, 16, 31): 'self.url', (16, 33, 16, 48): 'self.classnames', (16, 50, 16, 60): 'self.label'}, {}), '(u\'<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>\',\n self.name, self.url, self.classnames, self.label)', False, 'from django.utils.html import format_html\n')]
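A small, hypothetical illustration of the class above (values invented; this vintage of Wagtail targets Python 2, hence the `unicode` call, so the sketch assumes a Python 2 Django environment):

# Hypothetical usage of MenuItem; label/URL/classnames are made up.
item = MenuItem(u"Explorer", "/admin/explorer/", classnames="icon icon-folder", order=100)
print(item.name)           # 'explorer' -- slugified from the label when no name is given
print(item.render_html())  # '<li class="menu-explorer"><a href="/admin/explorer/" class="icon icon-folder">Explorer</a></li>'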
timgates42/django-mfa
django_mfa/migrations/0001_initial.py
89eeb83f7da3ea24f205b40b13c7f9d33ea15b99
# Generated by Django 2.1.5 on 2019-03-26 11:35

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='U2FKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_used_at', models.DateTimeField(null=True)),
                ('public_key', models.TextField(unique=True)),
                ('key_handle', models.TextField()),
                ('app_id', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserOTP',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),
                ('secret_key', models.CharField(blank=True, max_length=100)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserRecoveryCodes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('secret_code', models.CharField(max_length=10)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),
            ],
        ),
    ]
[((13, 8, 13, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(13, 40, 13, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((20, 23, 20, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((21, 31, 21, 70), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((22, 33, 22, 64), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((23, 31, 23, 60), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((24, 31, 24, 49), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((25, 27, 25, 45), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((26, 25, 26, 141), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((32, 23, 32, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((33, 29, 33, 106), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((34, 31, 34, 75), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((35, 25, 35, 119), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import migrations, models\n'), ((41, 23, 41, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((42, 32, 42, 63), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((43, 25, 43, 112), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')]
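The migration above only declares the schema; assuming the app's `models.py` defines matching `UserOTP` and `UserRecoveryCodes` classes (not shown here) and the code runs inside a configured Django project, the tables would be used roughly like this:

# Hypothetical ORM usage mirroring the fields declared in the migration above.
from django.contrib.auth import get_user_model
from django_mfa.models import UserOTP, UserRecoveryCodes

user = get_user_model().objects.get(username="alice")   # placeholder user
otp = UserOTP.objects.create(user=user, otp_type="TOTP", secret_key="JBSWY3DPEHPK3PXP")
UserRecoveryCodes.objects.create(user=otp, secret_code="1234567890")  # FK targets UserOTP per the migration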
khanh-nguyen-code/my-collection
app/logger_example/main.py
31581ef0b1dae67aafb1f4e64b9973a38cc01edf
from my_collection import logger

if __name__ == "__main__":
    logger.now().debug("debug1")
    logger.now().debug("debug2")
    logger.now().info("hello1")
    logger.now().info("hello2")
    logger.now().with_field("key", "val").error("with field1")
    logger.now().with_field("key", "val").error("with field2")
[((4, 4, 4, 16), 'my_collection.logger.now', 'logger.now', ({}, {}), '()', False, 'from my_collection import logger\n'), ((5, 4, 5, 16), 'my_collection.logger.now', 'logger.now', ({}, {}), '()', False, 'from my_collection import logger\n'), ((6, 4, 6, 16), 'my_collection.logger.now', 'logger.now', ({}, {}), '()', False, 'from my_collection import logger\n'), ((7, 4, 7, 16), 'my_collection.logger.now', 'logger.now', ({}, {}), '()', False, 'from my_collection import logger\n'), ((8, 4, 8, 16), 'my_collection.logger.now', 'logger.now', ({}, {}), '()', False, 'from my_collection import logger\n'), ((9, 4, 9, 16), 'my_collection.logger.now', 'logger.now', ({}, {}), '()', False, 'from my_collection import logger\n')]
scathaig/robotframework-iperf3
robotframework_iperf3/__main__.py
cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
import argparse

from robotremoteserver import RobotRemoteServer

from .iperf3 import Iperf3

if __name__ == '__main__':
    # create commandline parser
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.prog = 'python3 -m robotframework_iperf3'

    # add parser options
    parser.add_argument(
        "-a", "--address",
        type=str,
        help="server listen address",
        default='0.0.0.0')

    parser.add_argument(
        "-p", "--port",
        type=int,
        help="server listen port",
        default=8270)

    args = parser.parse_args()

    server = RobotRemoteServer(
        Iperf3(),
        host=args.address,
        port=args.port
    )

    server.serve()
[((9, 13, 9, 92), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n')]
ahmedengu/h2o-3
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
df8.cbind(df9)
#      A       B       C       D      A0     B0     C0     D0
#  -----  ------  ------  ------  ------  -----  -----  -----
#  -0.09   0.944   0.160   0.271  -0.351   1.66  -2.32  -0.86
#  -0.95   0.669   0.664   1.535  -0.633  -1.78   0.32   1.27
#   0.17   0.657   0.970  -0.419  -1.413  -0.51   0.64  -1.25
#   0.58  -0.516  -1.598  -1.346   0.711   1.09   0.05   0.63
#   1.04  -0.281  -0.411   0.959  -0.009  -0.47   0.41  -0.52
#   0.49   0.170   0.124  -0.170  -0.722  -0.79  -0.91  -2.09
#   1.42  -0.409  -0.525   2.155  -0.841  -0.19   0.13   0.63
#   0.94   1.192  -1.075   0.017   0.167   0.54   0.52   1.42
#  -0.53   0.777  -1.090  -2.237  -0.693   0.24  -0.56   1.45
#   0.34  -0.456  -1.220  -0.456  -0.315   1.10   1.38  -0.05
#
#  [100 rows x 8 columns]
[]
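The snippet above presumes two 100-row, 4-column frames `df8` and `df9` already exist; a hedged sketch of building comparable inputs (column names and seed are arbitrary, so the printed values will differ from the table shown):

# Sketch: construct two random 100x4 H2OFrames so the cbind call above has inputs.
import h2o
import numpy as np

h2o.init()                                    # start or attach to a local H2O cluster
np.random.seed(0)
df8 = h2o.H2OFrame(np.random.randn(100, 4).tolist(), column_names=list("ABCD"))
df9 = h2o.H2OFrame(np.random.randn(100, 4).tolist(), column_names=["A0", "B0", "C0", "D0"])
df8.cbind(df9)                                # 100 rows x 8 columns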
xu6148152/Binea_Python_Project
FluentPython/dynamic_attr_and_prop/frozen_json.py
d943eb5f4685d08f080b372dcf1a7cbd5d63efed
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-

from collections import abc
from keyword import iskeyword


class FronzenJSON:
    def __init__(self, mapping):
        self._data = {}
        for key, value in mapping.items():
            if iskeyword(key):
                key += '_'
            # self._data = dict(mapping)
            self._data[key] = value

    def __getattr__(self, name):
        if hasattr(self._data, name):
            return getattr(self._data, name)
        else:
            # return FronzenJSON.build(self._data[name])
            return FronzenJSON(self._data[name])

    @classmethod
    def build(cls, obj):
        if isinstance(obj, abc.Mapping):
            return cls(obj)
        elif isinstance(obj, abc.MutableSequence):
            return [cls.build(item) for item in obj]
        else:
            return obj

    def __new__(cls, arg):
        if isinstance(arg, abc.Mapping):
            return super().__new__(cls)
        elif isinstance(arg, abc.MutableSequence):
            return [cls(item) for item in arg]
        else:
            return arg
[((12, 15, 12, 29), 'keyword.iskeyword', 'iskeyword', ({(12, 25, 12, 28): 'key'}, {}), '(key)', False, 'from keyword import iskeyword\n')]
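A brief usage sketch for the class above (the nested dict is invented), showing attribute-style access and the trailing-underscore treatment of Python keywords:

# Hypothetical usage of FronzenJSON: wrap a nested dict, read it via attributes.
raw = {"name": "JupyterCon", "class": 2014, "speakers": [{"name": "Ana"}, {"name": "Bo"}]}
feed = FronzenJSON(raw)
print(feed.name)              # 'JupyterCon'
print(feed.class_)            # 2014 -- the keyword 'class' was stored as 'class_'
print(feed.speakers[0].name)  # 'Ana' -- nested mappings are wrapped on the fly by __getattr__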
Semanti1/pomdp_findit
pomdp_problems/tag/models/transition_model.py
b96c1c06aab4b485fa005654cf6438ff63718083
"""The Tag problem. Implemented according to the paper `Anytime Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves deterministically. The target's movement depends on the robot; With Pr=0.8 the target moves away from the robot, and with Pr=0.2, the target stays at the same place. The target never moves closer to the robot. """ import copy import pomdp_py import pomdp_problems.util as util import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls, grid_map, position, action): if isinstance(action, MotionAction): dx, dy = action.motion next_position = (position[0] + dx, position[1] + dy) if grid_map.valid_pose(next_position): return next_position return position def probability(self, next_state, state, action, **kwargs): # Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position != next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction): if next_state.target_position == next_state.robot_position: if next_state.target_found: return 1.0 - constants.EPSILON else: return constants.EPSILON else: if next_state.target_found: return constants.EPSILON else: return 1.0 - constants.EPSILON # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state, action, argmax=False): # Robot motion next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag action if isinstance(action, TagAction): if not state.target_found: if state.robot_position == state.target_position: next_state.target_found = True return next_state # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def argmax(self, state, action, **kwargs): return self.sample(state, action, argmax=True)
[((62, 21, 62, 41), 'copy.deepcopy', 'copy.deepcopy', ({(62, 35, 62, 40): 'state'}, {}), '(state)', False, 'import copy\n')]
bocekm/packit
packit/fedpkg.py
b5da23c0fa3f205537551b9ed212d8f77d00d705
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from pathlib import Path
from typing import Optional

from packit.exceptions import PackitCommandFailedError
from packit.utils import commands  # so we can mock utils
from packit.utils.logging import logger


class FedPKG:
    """
    Part of the code is from release-bot:
    https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py
    """

    def __init__(
        self, fas_username: str = None, directory: str = None, stage: bool = False
    ):
        self.fas_username = fas_username
        self.directory = directory
        self.stage = stage
        self.fedpkg_exec = "fedpkg-stage" if stage else "fedpkg"

    def __repr__(self):
        return (
            "FedPKG("
            f"fas_username='{self.fas_username}', "
            f"directory='{self.directory}', "
            f"stage='{self.stage}')"
        )

    def new_sources(self, sources="", fail=True):
        if not Path(self.directory).is_dir():
            raise Exception("Cannot access fedpkg repository:")

        return commands.run_command_remote(
            cmd=[self.fedpkg_exec, "new-sources", sources],
            cwd=self.directory,
            error_message="Adding new sources failed:",
            fail=fail,
        )

    def build(
        self,
        scratch: bool = False,
        nowait: bool = False,
        koji_target: Optional[str] = None,
        srpm_path: Optional[Path] = None,
    ):
        """
        build in koji

        :param scratch: scratch (temporary) build or not?
        :param nowait: False == wait for the build to finish
        :param koji_target: koji target to build in (`koji list-targets`)
        :param srpm_path: use selected SRPM for build, not dist-git repo & ref
        :return:
        """
        cmd = [self.fedpkg_exec, "build"]
        if scratch:
            cmd.append("--scratch")
        if nowait:
            cmd.append("--nowait")
        if koji_target:
            cmd += ["--target", koji_target]
        if srpm_path:
            cmd += ["--srpm", str(srpm_path)]
        try:
            commands.run_command_remote(
                cmd=cmd,
                cwd=self.directory,
                error_message="Submission of build to koji failed.",
                fail=True,
            )
        except PackitCommandFailedError as ex:
            # fail on the fedpkg side, the build is triggered
            if (
                "watch_tasks() got an unexpected keyword argument 'ki_handler'"
                in ex.stderr_output
            ):
                logger.info(
                    "The 'fedpkg build' command crashed which is a known issue: "
                    "the build is submitted in koji anyway."
                )
                logger.debug(ex.stdout_output)
            else:
                raise

    def clone(self, package_name: str, target_path: str, anonymous: bool = False):
        """
        clone a dist-git repo; this has to be done in current env
        b/c we don't have the keytab in sandbox
        """
        cmd = [self.fedpkg_exec]
        if self.fas_username:
            cmd += ["--user", self.fas_username]
        cmd += ["-q", "clone"]
        if anonymous:
            cmd += ["-a"]
        cmd += [package_name, target_path]

        error_msg = (
            f"Packit failed to clone the repository {package_name}; "
            "please make sure that you are authorized to clone repositories "
            "from Fedora dist-git - this may require SSH keys set up or "
            "Kerberos ticket being active."
        )
        commands.run_command(cmd=cmd, error_message=error_msg)
[((59, 15, 64, 9), 'packit.utils.commands.run_command_remote', 'commands.run_command_remote', (), '', False, 'from packit.utils import commands\n'), ((134, 8, 134, 62), 'packit.utils.commands.run_command', 'commands.run_command', (), '', False, 'from packit.utils import commands\n'), ((93, 12, 98, 13), 'packit.utils.commands.run_command_remote', 'commands.run_command_remote', (), '', False, 'from packit.utils import commands\n'), ((56, 15, 56, 35), 'pathlib.Path', 'Path', ({(56, 20, 56, 34): 'self.directory'}, {}), '(self.directory)', False, 'from pathlib import Path\n'), ((106, 16, 109, 17), 'packit.utils.logging.logger.info', 'logger.info', ({(107, 20, 108, 60): '"""The \'fedpkg build\' command crashed which is a known issue: the build is submitted in koji anyway."""'}, {}), '(\n "The \'fedpkg build\' command crashed which is a known issue: the build is submitted in koji anyway."\n )', False, 'from packit.utils.logging import logger\n'), ((110, 16, 110, 46), 'packit.utils.logging.logger.debug', 'logger.debug', ({(110, 29, 110, 45): 'ex.stdout_output'}, {}), '(ex.stdout_output)', False, 'from packit.utils.logging import logger\n')]
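A rough usage sketch for the wrapper above (package name, paths and username are placeholders; running it for real needs the `fedpkg`/`fedpkg-stage` CLI installed and, for authenticated operations, valid Fedora credentials):

# Hypothetical driver for the FedPKG helper; all values are placeholders.
from packit.fedpkg import FedPKG

fedpkg = FedPKG(fas_username="alice", directory="/tmp/python-foo", stage=False)
fedpkg.clone("python-foo", "/tmp/python-foo", anonymous=True)  # anonymous dist-git clone
fedpkg.new_sources(sources="foo-1.0.tar.gz", fail=False)       # upload a tarball to the lookaside cache
fedpkg.build(scratch=True, nowait=True)                        # scratch build in koji, do not wait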
ahaldane/NDducktype_tests
tests/test_MaskedArrayCollection.py
4876416e5fbff7ba0d85445c0eeae432d6e80014
#!/usr/bin/env python
from ndarray_ducktypes.ArrayCollection import ArrayCollection
from ndarray_ducktypes.MaskedArray import MaskedArray
from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection

import numpy as np

# Tests for Masked ArrayCollections.
#
# First try: Simply make an arraycollection of MaskedArrays. Downside: this
# strategy does not give a "filled" method. Probably to get a masked
# ArrayCollection we should really subclass ArrayCollection to have a
# fill_value and a filled() method

#a = MaskedArray(np.arange(10), np.arange(10)%3)
#b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2)
#c = ArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))

## second try: Subclass of ArrayCollection
#c = MaskedArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))
#print(repr(c.filled()))
[]
jasonjoo2010/core
rpc/gen/core_pb2.py
7c05ddbdac2e05a3d96db28f8bdfacf661907b82
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: core.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='core.proto', package='pb', syntax='proto3', serialized_pb=_b('\n\ncore.proto\x12\x02pb\"\x07\n\x05\x45mpty\"\xb4\x01\n\x15ListContainersOptions\x12\x0f\n\x07\x61ppname\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x35\n\x06labels\x18\x04 \x03(\x0b\x32%.pb.ListContainersOptions.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"L\n\x13\x44\x65ployStatusOptions\x12\x0f\n\x07\x61ppname\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\"v\n\x13\x44\x65ployStatusMessage\x12\x0e\n\x06\x61\x63tion\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppname\x18\x02 \x01(\t\x12\x12\n\nentrypoint\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\n\n\x02id\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"0\n\x03Pod\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x02 \x01(\t\x12\r\n\x05\x66\x61vor\x18\x03 \x01(\t\"\x1d\n\x04Pods\x12\x15\n\x04pods\x18\x01 \x03(\x0b\x32\x07.pb.Pod\"\xfc\x02\n\x0bPodResource\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x03\x63pu\x18\x02 \x03(\x0b\x32\x18.pb.PodResource.CpuEntry\x12+\n\x06memory\x18\x03 \x03(\x0b\x32\x1b.pb.PodResource.MemoryEntry\x12\'\n\x04\x64iff\x18\x04 \x03(\x0b\x32\x19.pb.PodResource.DiffEntry\x12+\n\x06\x64\x65tail\x18\x05 \x03(\x0b\x32\x1b.pb.PodResource.DetailEntry\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a-\n\x0bMemoryEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a+\n\tDiffEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x1a-\n\x0b\x44\x65tailEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"5\n\x12ListNetworkOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x0e\n\x06\x64river\x18\x02 \x01(\t\"(\n\x07Network\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07subnets\x18\x02 \x03(\t\")\n\x08Networks\x12\x1d\n\x08networks\x18\x01 \x03(\x0b\x32\x0b.pb.Network\"\x9e\x03\n\x04Node\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\x1e\n\x03\x63pu\x18\x04 \x03(\x0b\x32\x11.pb.Node.CpuEntry\x12\x10\n\x08\x63pu_used\x18\x05 \x01(\x01\x12\x0e\n\x06memory\x18\x06 \x01(\x03\x12\x13\n\x0bmemory_used\x18\x07 \x01(\x03\x12\x11\n\tavailable\x18\x08 \x01(\x08\x12$\n\x06labels\x18\t \x03(\x0b\x32\x14.pb.Node.LabelsEntry\x12\x13\n\x0binit_memory\x18\n \x01(\x03\x12\'\n\x08init_cpu\x18\x0b \x03(\x0b\x32\x15.pb.Node.InitCpuEntry\x12\x0c\n\x04info\x18\x0c \x01(\t\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cInitCpuEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\" \n\x05Nodes\x12\x17\n\x05nodes\x18\x01 
\x03(\x0b\x32\x08.pb.Node\"E\n\rNodeAvailable\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\x12\x11\n\tavailable\x18\x03 \x01(\x08\"\xb8\x03\n\tContainer\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12#\n\x03\x63pu\x18\x05 \x03(\x0b\x32\x16.pb.Container.CpuEntry\x12\r\n\x05quota\x18\x06 \x01(\x01\x12\x0e\n\x06memory\x18\x07 \x01(\x03\x12\x12\n\nprivileged\x18\x08 \x01(\x08\x12)\n\x06labels\x18\t \x03(\x0b\x32\x19.pb.Container.LabelsEntry\x12+\n\x07publish\x18\n \x03(\x0b\x32\x1a.pb.Container.PublishEntry\x12\r\n\x05image\x18\x0b \x01(\t\x12\x0f\n\x07inspect\x18\x0c \x01(\x0c\x12\x13\n\x0bstatus_data\x18\r \x01(\x0c\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cPublishEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"k\n\x18\x43ontainerDeployedOptions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppname\x18\x02 \x01(\t\x12\x12\n\nentrypoint\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\"/\n\nContainers\x12!\n\ncontainers\x18\x01 \x03(\x0b\x32\r.pb.Container\"\x19\n\x0b\x43ontainerID\x12\n\n\x02id\x18\x01 \x01(\t\"\x1b\n\x0c\x43ontainerIDs\x12\x0b\n\x03ids\x18\x01 \x03(\t\"4\n\x16RemoveContainerOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\"7\n\x0eReallocOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\x0b\n\x03\x63pu\x18\x02 \x01(\x01\x12\x0b\n\x03mem\x18\x03 \x01(\x03\":\n\rAddPodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x66\x61vor\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x03 \x01(\t\" \n\x10RemovePodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\rGetPodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xf7\x01\n\x0e\x41\x64\x64NodeOptions\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\n\n\x02\x63\x61\x18\x04 \x01(\t\x12\x0c\n\x04\x63\x65rt\x18\x05 \x01(\t\x12\x0b\n\x03key\x18\x06 \x01(\t\x12\x0b\n\x03\x63pu\x18\x07 \x01(\x05\x12\r\n\x05share\x18\x08 \x01(\x05\x12\x0e\n\x06memory\x18\t \x01(\x03\x12.\n\x06labels\x18\n \x03(\x0b\x32\x1e.pb.AddNodeOptions.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"6\n\x11RemoveNodeOptions\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\"3\n\x0eGetNodeOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\"0\n\x10ListNodesOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x0b\n\x03\x61ll\x18\x02 \x01(\x08\"\x8e\x04\n\x05\x42uild\x12\x0c\n\x04\x62\x61se\x18\x01 \x01(\t\x12\x0c\n\x04repo\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x0b\n\x03\x64ir\x18\x04 \x01(\t\x12\x11\n\tsubmodule\x18\x05 \x01(\x08\x12\x10\n\x08\x63ommands\x18\x06 \x03(\t\x12!\n\x04\x65nvs\x18\x07 \x03(\x0b\x32\x13.pb.Build.EnvsEntry\x12!\n\x04\x61rgs\x18\x08 \x03(\x0b\x32\x13.pb.Build.ArgsEntry\x12%\n\x06labels\x18\t \x03(\x0b\x32\x15.pb.Build.LabelsEntry\x12+\n\tartifacts\x18\n \x03(\x0b\x32\x18.pb.Build.ArtifactsEntry\x12#\n\x05\x63\x61\x63he\x18\x0b \x03(\x0b\x32\x14.pb.Build.CacheEntry\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a,\n\nCacheEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"z\n\x06\x42uilds\x12\x0e\n\x06stages\x18\x01 \x03(\t\x12&\n\x06\x62uilds\x18\x02 \x03(\x0b\x32\x16.pb.Builds.BuildsEntry\x1a\x38\n\x0b\x42uildsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.pb.Build:\x02\x38\x01\"s\n\x11\x42uildImageOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x0b\n\x03uid\x18\x03 \x01(\x05\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12\x1a\n\x06\x62uilds\x18\x05 \x01(\x0b\x32\n.pb.Builds\x12\x0b\n\x03tar\x18\x06 \x01(\x0c\"F\n\x0bHookOptions\x12\x13\n\x0b\x61\x66ter_start\x18\x01 \x03(\t\x12\x13\n\x0b\x62\x65\x66ore_stop\x18\x02 \x03(\t\x12\r\n\x05\x66orce\x18\x03 \x01(\x08\"U\n\x12HealthCheckOptions\x12\x11\n\ttcp_ports\x18\x01 \x03(\t\x12\x11\n\thttp_port\x18\x02 \x01(\t\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\x05\"u\n\nLogOptions\x12\x0c\n\x04type\x18\x01 \x01(\t\x12*\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x1a.pb.LogOptions.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xca\x02\n\x11\x45ntrypointOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x12\n\nprivileged\x18\x03 \x01(\x08\x12\x0b\n\x03\x64ir\x18\x04 \x01(\t\x12\x1b\n\x03log\x18\x05 \x01(\x0b\x32\x0e.pb.LogOptions\x12\x0f\n\x07publish\x18\x06 \x03(\t\x12+\n\x0bhealthcheck\x18\x07 \x01(\x0b\x32\x16.pb.HealthCheckOptions\x12\x1d\n\x04hook\x18\x08 \x01(\x0b\x32\x0f.pb.HookOptions\x12\x16\n\x0erestart_policy\x18\t \x01(\t\x12\x33\n\x07sysctls\x18\n \x03(\x0b\x32\".pb.EntrypointOptions.SysctlsEntry\x1a.\n\x0cSysctlsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x88\x06\n\rDeployOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12)\n\nentrypoint\x18\x02 \x01(\x0b\x32\x15.pb.EntrypointOptions\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\r\n\x05image\x18\x05 \x01(\t\x12\x12\n\nextra_args\x18\x06 \x01(\t\x12\x11\n\tcpu_quota\x18\x07 \x01(\x01\x12\x0e\n\x06memory\x18\x08 \x01(\x03\x12\r\n\x05\x63ount\x18\t \x01(\x05\x12\x0b\n\x03\x65nv\x18\n \x03(\t\x12\x0b\n\x03\x64ns\x18\x0b \x03(\t\x12\x13\n\x0b\x65xtra_hosts\x18\x0c \x03(\t\x12\x0f\n\x07volumes\x18\r \x03(\t\x12\x31\n\x08networks\x18\x0e \x03(\x0b\x32\x1f.pb.DeployOptions.NetworksEntry\x12\x13\n\x0bnetworkmode\x18\x0f \x01(\t\x12\x0c\n\x04user\x18\x10 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x11 \x01(\x08\x12\x11\n\topenStdin\x18\x12 \x01(\x08\x12-\n\x06labels\x18\x13 \x03(\x0b\x32\x1d.pb.DeployOptions.LabelsEntry\x12\x35\n\nnodelabels\x18\x14 \x03(\x0b\x32!.pb.DeployOptions.NodelabelsEntry\x12\x15\n\rdeploy_method\x18\x15 \x01(\t\x12)\n\x04\x64\x61ta\x18\x16 \x03(\x0b\x32\x1b.pb.DeployOptions.DataEntry\x12\x11\n\tsoftlimit\x18\x17 \x01(\x08\x12\x13\n\x0bnodes_limit\x18\x18 \x01(\x05\x1a/\n\rNetworksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fNodelabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x0c:\x02\x38\x01\"\xb5\x02\n\x0eReplaceOptions\x12$\n\tdeployOpt\x18\x01 \x01(\x0b\x32\x11.pb.DeployOptions\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12;\n\rfilter_labels\x18\x03 \x03(\x0b\x32$.pb.ReplaceOptions.FilterLabelsEntry\x12*\n\x04\x63opy\x18\x04 \x03(\x0b\x32\x1c.pb.ReplaceOptions.CopyEntry\x12\x0b\n\x03ids\x18\x05 \x03(\t\x12\x16\n\x0enetworkinherit\x18\x06 \x01(\x08\x1a\x33\n\x11\x46ilterLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tCopyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"T\n\x11\x43\x61\x63heImageOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\"d\n\x12RemoveImageOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\x12\r\n\x05prune\x18\x05 \x01(\x08\"\x1a\n\tCopyPaths\x12\r\n\x05paths\x18\x01 \x03(\t\"{\n\x0b\x43opyOptions\x12-\n\x07targets\x18\x01 \x03(\x0b\x32\x1c.pb.CopyOptions.TargetsEntry\x1a=\n\x0cTargetsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.pb.CopyPaths:\x02\x38\x01\",\n\x0b\x45rrorDetail\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x87\x01\n\x11\x42uildImageMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x10\n\x08progress\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\x12\x0e\n\x06stream\x18\x05 \x01(\t\x12%\n\x0c\x65rror_detail\x18\x06 \x01(\x0b\x32\x0f.pb.ErrorDetail\"\xea\x02\n\x16\x43reateContainerMessage\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0f\n\x07success\x18\x06 \x01(\x08\x12\x30\n\x03\x63pu\x18\x07 \x03(\x0b\x32#.pb.CreateContainerMessage.CpuEntry\x12\r\n\x05quota\x18\x08 \x01(\x01\x12\x0e\n\x06memory\x18\t \x01(\x03\x12\x38\n\x07publish\x18\n \x03(\x0b\x32\'.pb.CreateContainerMessage.PublishEntry\x12\x0c\n\x04hook\x18\x0b \x01(\x0c\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a.\n\x0cPublishEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x80\x01\n\x17ReplaceContainerMessage\x12*\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x1a.pb.CreateContainerMessage\x12*\n\x06remove\x18\x02 \x01(\x0b\x32\x1a.pb.RemoveContainerMessage\x12\r\n\x05\x65rror\x18\x03 \x01(\t\"7\n\x11RunAndWaitMessage\x12\x14\n\x0c\x63ontainer_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"V\n\x11\x43\x61\x63heImageMessage\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"F\n\x12RemoveImageMessage\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08messages\x18\x03 \x03(\t\"C\n\x16RemoveContainerMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0c\n\x04hook\x18\x03 \x01(\t\"5\n\x16ReallocResourceMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\"b\n\x0b\x43opyMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0c\n\x04path\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"J\n\x11RunAndWaitOptions\x12(\n\rDeployOptions\x18\x01 
\x01(\x0b\x32\x11.pb.DeployOptions\x12\x0b\n\x03\x43md\x18\x02 \x01(\x0c\"4\n\x17\x43ontrolContainerOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"B\n\x17\x43ontrolContainerMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0c\n\x04hook\x18\x03 \x01(\x0c\x32\xcb\x0c\n\x07\x43oreRPC\x12!\n\x08ListPods\x12\t.pb.Empty\x1a\x08.pb.Pods\"\x00\x12&\n\x06\x41\x64\x64Pod\x12\x11.pb.AddPodOptions\x1a\x07.pb.Pod\"\x00\x12.\n\tRemovePod\x12\x14.pb.RemovePodOptions\x1a\t.pb.Empty\"\x00\x12&\n\x06GetPod\x12\x11.pb.GetPodOptions\x1a\x07.pb.Pod\"\x00\x12\x36\n\x0eGetPodResource\x12\x11.pb.GetPodOptions\x1a\x0f.pb.PodResource\"\x00\x12)\n\x07\x41\x64\x64Node\x12\x12.pb.AddNodeOptions\x1a\x08.pb.Node\"\x00\x12.\n\nRemoveNode\x12\x15.pb.RemoveNodeOptions\x1a\x07.pb.Pod\"\x00\x12\x31\n\x10SetNodeAvailable\x12\x11.pb.NodeAvailable\x1a\x08.pb.Node\"\x00\x12)\n\x07GetNode\x12\x12.pb.GetNodeOptions\x1a\x08.pb.Node\"\x00\x12\x30\n\x0cGetContainer\x12\x0f.pb.ContainerID\x1a\r.pb.Container\"\x00\x12\x33\n\rGetContainers\x12\x10.pb.ContainerIDs\x1a\x0e.pb.Containers\"\x00\x12/\n\rGetNodeByName\x12\x12.pb.GetNodeOptions\x1a\x08.pb.Node\"\x00\x12\x31\n\x0cListPodNodes\x12\x14.pb.ListNodesOptions\x1a\t.pb.Nodes\"\x00\x12\x36\n\x0cListNetworks\x12\x16.pb.ListNetworkOptions\x1a\x0c.pb.Networks\"\x00\x12=\n\x0eListContainers\x12\x19.pb.ListContainersOptions\x1a\x0e.pb.Containers\"\x00\x12:\n\x12ListNodeContainers\x12\x12.pb.GetNodeOptions\x1a\x0e.pb.Containers\"\x00\x12>\n\x11\x43ontainerDeployed\x12\x1c.pb.ContainerDeployedOptions\x1a\t.pb.Empty\"\x00\x12,\n\x04\x43opy\x12\x0f.pb.CopyOptions\x1a\x0f.pb.CopyMessage\"\x00\x30\x01\x12>\n\nBuildImage\x12\x15.pb.BuildImageOptions\x1a\x15.pb.BuildImageMessage\"\x00\x30\x01\x12>\n\nCacheImage\x12\x15.pb.CacheImageOptions\x1a\x15.pb.CacheImageMessage\"\x00\x30\x01\x12\x41\n\x0bRemoveImage\x12\x16.pb.RemoveImageOptions\x1a\x16.pb.RemoveImageMessage\"\x00\x30\x01\x12\x44\n\x0c\x44\x65ployStatus\x12\x17.pb.DeployStatusOptions\x1a\x17.pb.DeployStatusMessage\"\x00\x30\x01\x12@\n\nRunAndWait\x12\x15.pb.RunAndWaitOptions\x1a\x15.pb.RunAndWaitMessage\"\x00(\x01\x30\x01\x12\x44\n\x0f\x43reateContainer\x12\x11.pb.DeployOptions\x1a\x1a.pb.CreateContainerMessage\"\x00\x30\x01\x12G\n\x10ReplaceContainer\x12\x12.pb.ReplaceOptions\x1a\x1b.pb.ReplaceContainerMessage\"\x00\x30\x01\x12M\n\x0fRemoveContainer\x12\x1a.pb.RemoveContainerOptions\x1a\x1a.pb.RemoveContainerMessage\"\x00\x30\x01\x12P\n\x10\x43ontrolContainer\x12\x1b.pb.ControlContainerOptions\x1a\x1b.pb.ControlContainerMessage\"\x00\x30\x01\x12\x45\n\x0fReallocResource\x12\x12.pb.ReallocOptions\x1a\x1a.pb.ReallocResourceMessage\"\x00\x30\x01\x62\x06proto3') ) _EMPTY = _descriptor.Descriptor( name='Empty', full_name='pb.Empty', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=18, serialized_end=25, ) _LISTCONTAINERSOPTIONS_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.ListContainersOptions.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.ListContainersOptions.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, 
file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.ListContainersOptions.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _LISTCONTAINERSOPTIONS = _descriptor.Descriptor( name='ListContainersOptions', full_name='pb.ListContainersOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='appname', full_name='pb.ListContainersOptions.appname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.ListContainersOptions.entrypoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.ListContainersOptions.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.ListContainersOptions.labels', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_LISTCONTAINERSOPTIONS_LABELSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=28, serialized_end=208, ) _DEPLOYSTATUSOPTIONS = _descriptor.Descriptor( name='DeployStatusOptions', full_name='pb.DeployStatusOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='appname', full_name='pb.DeployStatusOptions.appname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.DeployStatusOptions.entrypoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.DeployStatusOptions.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], 
options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=210, serialized_end=286, ) _DEPLOYSTATUSMESSAGE = _descriptor.Descriptor( name='DeployStatusMessage', full_name='pb.DeployStatusMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='action', full_name='pb.DeployStatusMessage.action', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='appname', full_name='pb.DeployStatusMessage.appname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.DeployStatusMessage.entrypoint', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.DeployStatusMessage.nodename', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='id', full_name='pb.DeployStatusMessage.id', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.DeployStatusMessage.data', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=288, serialized_end=406, ) _POD = _descriptor.Descriptor( name='Pod', full_name='pb.Pod', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.Pod.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='desc', full_name='pb.Pod.desc', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='favor', full_name='pb.Pod.favor', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], 
options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=408, serialized_end=456, ) _PODS = _descriptor.Descriptor( name='Pods', full_name='pb.Pods', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pods', full_name='pb.Pods.pods', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=458, serialized_end=487, ) _PODRESOURCE_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.PodResource.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.CpuEntry.value', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=689, serialized_end=731, ) _PODRESOURCE_MEMORYENTRY = _descriptor.Descriptor( name='MemoryEntry', full_name='pb.PodResource.MemoryEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.MemoryEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.MemoryEntry.value', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=733, serialized_end=778, ) _PODRESOURCE_DIFFENTRY = _descriptor.Descriptor( name='DiffEntry', full_name='pb.PodResource.DiffEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.DiffEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.DiffEntry.value', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=780, serialized_end=823, ) _PODRESOURCE_DETAILENTRY = _descriptor.Descriptor( name='DetailEntry', full_name='pb.PodResource.DetailEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.DetailEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.DetailEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=825, serialized_end=870, ) _PODRESOURCE = _descriptor.Descriptor( name='PodResource', full_name='pb.PodResource', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.PodResource.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.PodResource.cpu', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.PodResource.memory', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='diff', full_name='pb.PodResource.diff', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='detail', full_name='pb.PodResource.detail', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_PODRESOURCE_CPUENTRY, _PODRESOURCE_MEMORYENTRY, _PODRESOURCE_DIFFENTRY, _PODRESOURCE_DETAILENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=490, serialized_end=870, ) _LISTNETWORKOPTIONS = _descriptor.Descriptor( name='ListNetworkOptions', full_name='pb.ListNetworkOptions', filename=None, file=DESCRIPTOR, containing_type=None, 
fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.ListNetworkOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='driver', full_name='pb.ListNetworkOptions.driver', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=872, serialized_end=925, ) _NETWORK = _descriptor.Descriptor( name='Network', full_name='pb.Network', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.Network.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='subnets', full_name='pb.Network.subnets', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=927, serialized_end=967, ) _NETWORKS = _descriptor.Descriptor( name='Networks', full_name='pb.Networks', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='networks', full_name='pb.Networks.networks', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=969, serialized_end=1010, ) _NODE_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.Node.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Node.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Node.CpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1290, serialized_end=1332, ) _NODE_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.Node.LabelsEntry', filename=None, file=DESCRIPTOR, 
containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Node.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Node.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _NODE_INITCPUENTRY = _descriptor.Descriptor( name='InitCpuEntry', full_name='pb.Node.InitCpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Node.InitCpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Node.InitCpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1381, serialized_end=1427, ) _NODE = _descriptor.Descriptor( name='Node', full_name='pb.Node', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.Node.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='endpoint', full_name='pb.Node.endpoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.Node.podname', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.Node.cpu', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu_used', full_name='pb.Node.cpu_used', index=4, number=5, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.Node.memory', index=5, number=6, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory_used', full_name='pb.Node.memory_used', index=6, number=7, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='available', full_name='pb.Node.available', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.Node.labels', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='init_memory', full_name='pb.Node.init_memory', index=9, number=10, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='init_cpu', full_name='pb.Node.init_cpu', index=10, number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='info', full_name='pb.Node.info', index=11, number=12, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_NODE_CPUENTRY, _NODE_LABELSENTRY, _NODE_INITCPUENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1013, serialized_end=1427, ) _NODES = _descriptor.Descriptor( name='Nodes', full_name='pb.Nodes', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodes', full_name='pb.Nodes.nodes', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1429, serialized_end=1461, ) _NODEAVAILABLE = _descriptor.Descriptor( name='NodeAvailable', full_name='pb.NodeAvailable', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodename', full_name='pb.NodeAvailable.nodename', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='podname', full_name='pb.NodeAvailable.podname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='available', full_name='pb.NodeAvailable.available', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1463, serialized_end=1532, ) _CONTAINER_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.Container.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Container.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Container.CpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1290, serialized_end=1332, ) _CONTAINER_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.Container.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Container.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Container.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _CONTAINER_PUBLISHENTRY = _descriptor.Descriptor( name='PublishEntry', full_name='pb.Container.PublishEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Container.PublishEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Container.PublishEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1929, serialized_end=1975, ) _CONTAINER = _descriptor.Descriptor( name='Container', full_name='pb.Container', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.Container.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.Container.podname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.Container.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='pb.Container.name', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.Container.cpu', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='quota', full_name='pb.Container.quota', index=5, number=6, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.Container.memory', index=6, number=7, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='privileged', full_name='pb.Container.privileged', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.Container.labels', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='publish', full_name='pb.Container.publish', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='image', full_name='pb.Container.image', index=10, number=11, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='inspect', full_name='pb.Container.inspect', index=11, number=12, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status_data', full_name='pb.Container.status_data', index=12, number=13, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_CONTAINER_CPUENTRY, _CONTAINER_LABELSENTRY, _CONTAINER_PUBLISHENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1535, serialized_end=1975, ) _CONTAINERDEPLOYEDOPTIONS = _descriptor.Descriptor( name='ContainerDeployedOptions', full_name='pb.ContainerDeployedOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ContainerDeployedOptions.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='appname', full_name='pb.ContainerDeployedOptions.appname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.ContainerDeployedOptions.entrypoint', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.ContainerDeployedOptions.nodename', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.ContainerDeployedOptions.data', index=4, number=5, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1977, serialized_end=2084, ) _CONTAINERS = _descriptor.Descriptor( name='Containers', full_name='pb.Containers', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='containers', 
full_name='pb.Containers.containers', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2086, serialized_end=2133, ) _CONTAINERID = _descriptor.Descriptor( name='ContainerID', full_name='pb.ContainerID', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ContainerID.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2135, serialized_end=2160, ) _CONTAINERIDS = _descriptor.Descriptor( name='ContainerIDs', full_name='pb.ContainerIDs', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.ContainerIDs.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2162, serialized_end=2189, ) _REMOVECONTAINEROPTIONS = _descriptor.Descriptor( name='RemoveContainerOptions', full_name='pb.RemoveContainerOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.RemoveContainerOptions.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='force', full_name='pb.RemoveContainerOptions.force', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2191, serialized_end=2243, ) _REALLOCOPTIONS = _descriptor.Descriptor( name='ReallocOptions', full_name='pb.ReallocOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.ReallocOptions.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.ReallocOptions.cpu', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mem', full_name='pb.ReallocOptions.mem', index=2, 
number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2245, serialized_end=2300, ) _ADDPODOPTIONS = _descriptor.Descriptor( name='AddPodOptions', full_name='pb.AddPodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.AddPodOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='favor', full_name='pb.AddPodOptions.favor', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='desc', full_name='pb.AddPodOptions.desc', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2302, serialized_end=2360, ) _REMOVEPODOPTIONS = _descriptor.Descriptor( name='RemovePodOptions', full_name='pb.RemovePodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.RemovePodOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2362, serialized_end=2394, ) _GETPODOPTIONS = _descriptor.Descriptor( name='GetPodOptions', full_name='pb.GetPodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.GetPodOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2396, serialized_end=2425, ) _ADDNODEOPTIONS_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.AddNodeOptions.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.AddNodeOptions.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='value', full_name='pb.AddNodeOptions.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _ADDNODEOPTIONS = _descriptor.Descriptor( name='AddNodeOptions', full_name='pb.AddNodeOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodename', full_name='pb.AddNodeOptions.nodename', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='endpoint', full_name='pb.AddNodeOptions.endpoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.AddNodeOptions.podname', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='ca', full_name='pb.AddNodeOptions.ca', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cert', full_name='pb.AddNodeOptions.cert', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key', full_name='pb.AddNodeOptions.key', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.AddNodeOptions.cpu', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='share', full_name='pb.AddNodeOptions.share', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.AddNodeOptions.memory', index=8, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.AddNodeOptions.labels', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_ADDNODEOPTIONS_LABELSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2428, serialized_end=2675, ) _REMOVENODEOPTIONS = _descriptor.Descriptor( name='RemoveNodeOptions', full_name='pb.RemoveNodeOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodename', full_name='pb.RemoveNodeOptions.nodename', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.RemoveNodeOptions.podname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2677, serialized_end=2731, ) _GETNODEOPTIONS = _descriptor.Descriptor( name='GetNodeOptions', full_name='pb.GetNodeOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.GetNodeOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.GetNodeOptions.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2733, serialized_end=2784, ) _LISTNODESOPTIONS = _descriptor.Descriptor( name='ListNodesOptions', full_name='pb.ListNodesOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.ListNodesOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='all', full_name='pb.ListNodesOptions.all', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2786, serialized_end=2834, ) _BUILD_ENVSENTRY 
= _descriptor.Descriptor( name='EnvsEntry', full_name='pb.Build.EnvsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.EnvsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.EnvsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3132, serialized_end=3175, ) _BUILD_ARGSENTRY = _descriptor.Descriptor( name='ArgsEntry', full_name='pb.Build.ArgsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.ArgsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.ArgsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3177, serialized_end=3220, ) _BUILD_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.Build.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _BUILD_ARTIFACTSENTRY = _descriptor.Descriptor( name='ArtifactsEntry', full_name='pb.Build.ArtifactsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.ArtifactsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.ArtifactsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3269, serialized_end=3317, ) _BUILD_CACHEENTRY = _descriptor.Descriptor( name='CacheEntry', full_name='pb.Build.CacheEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.CacheEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.CacheEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3319, serialized_end=3363, ) _BUILD = _descriptor.Descriptor( name='Build', full_name='pb.Build', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='base', full_name='pb.Build.base', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='repo', full_name='pb.Build.repo', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='version', full_name='pb.Build.version', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dir', full_name='pb.Build.dir', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='submodule', full_name='pb.Build.submodule', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='commands', full_name='pb.Build.commands', index=5, number=6, type=9, cpp_type=9, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='envs', full_name='pb.Build.envs', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='args', full_name='pb.Build.args', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.Build.labels', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='artifacts', full_name='pb.Build.artifacts', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cache', full_name='pb.Build.cache', index=10, number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_BUILD_ENVSENTRY, _BUILD_ARGSENTRY, _BUILD_LABELSENTRY, _BUILD_ARTIFACTSENTRY, _BUILD_CACHEENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2837, serialized_end=3363, ) _BUILDS_BUILDSENTRY = _descriptor.Descriptor( name='BuildsEntry', full_name='pb.Builds.BuildsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Builds.BuildsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Builds.BuildsEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3431, serialized_end=3487, ) _BUILDS = _descriptor.Descriptor( name='Builds', full_name='pb.Builds', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='stages', full_name='pb.Builds.stages', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='builds', full_name='pb.Builds.builds', index=1, number=2, type=11, cpp_type=10, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_BUILDS_BUILDSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3365, serialized_end=3487, ) _BUILDIMAGEOPTIONS = _descriptor.Descriptor( name='BuildImageOptions', full_name='pb.BuildImageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.BuildImageOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='user', full_name='pb.BuildImageOptions.user', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='uid', full_name='pb.BuildImageOptions.uid', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='pb.BuildImageOptions.tags', index=3, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='builds', full_name='pb.BuildImageOptions.builds', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tar', full_name='pb.BuildImageOptions.tar', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3489, serialized_end=3604, ) _HOOKOPTIONS = _descriptor.Descriptor( name='HookOptions', full_name='pb.HookOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='after_start', full_name='pb.HookOptions.after_start', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='before_stop', full_name='pb.HookOptions.before_stop', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='force', full_name='pb.HookOptions.force', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3606, serialized_end=3676, ) _HEALTHCHECKOPTIONS = _descriptor.Descriptor( name='HealthCheckOptions', full_name='pb.HealthCheckOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='tcp_ports', full_name='pb.HealthCheckOptions.tcp_ports', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='http_port', full_name='pb.HealthCheckOptions.http_port', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='url', full_name='pb.HealthCheckOptions.url', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='code', full_name='pb.HealthCheckOptions.code', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3678, serialized_end=3763, ) _LOGOPTIONS_CONFIGENTRY = _descriptor.Descriptor( name='ConfigEntry', full_name='pb.LogOptions.ConfigEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.LogOptions.ConfigEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.LogOptions.ConfigEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3837, serialized_end=3882, ) _LOGOPTIONS = _descriptor.Descriptor( name='LogOptions', full_name='pb.LogOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='type', full_name='pb.LogOptions.type', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='config', 
full_name='pb.LogOptions.config', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_LOGOPTIONS_CONFIGENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3765, serialized_end=3882, ) _ENTRYPOINTOPTIONS_SYSCTLSENTRY = _descriptor.Descriptor( name='SysctlsEntry', full_name='pb.EntrypointOptions.SysctlsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.EntrypointOptions.SysctlsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.EntrypointOptions.SysctlsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4169, serialized_end=4215, ) _ENTRYPOINTOPTIONS = _descriptor.Descriptor( name='EntrypointOptions', full_name='pb.EntrypointOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.EntrypointOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='command', full_name='pb.EntrypointOptions.command', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='privileged', full_name='pb.EntrypointOptions.privileged', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dir', full_name='pb.EntrypointOptions.dir', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='log', full_name='pb.EntrypointOptions.log', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='publish', full_name='pb.EntrypointOptions.publish', index=5, number=6, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='healthcheck', full_name='pb.EntrypointOptions.healthcheck', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.EntrypointOptions.hook', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='restart_policy', full_name='pb.EntrypointOptions.restart_policy', index=8, number=9, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sysctls', full_name='pb.EntrypointOptions.sysctls', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_ENTRYPOINTOPTIONS_SYSCTLSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3885, serialized_end=4215, ) _DEPLOYOPTIONS_NETWORKSENTRY = _descriptor.Descriptor( name='NetworksEntry', full_name='pb.DeployOptions.NetworksEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.NetworksEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.NetworksEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4804, serialized_end=4851, ) _DEPLOYOPTIONS_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.DeployOptions.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], 
nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _DEPLOYOPTIONS_NODELABELSENTRY = _descriptor.Descriptor( name='NodelabelsEntry', full_name='pb.DeployOptions.NodelabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.NodelabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.NodelabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4900, serialized_end=4949, ) _DEPLOYOPTIONS_DATAENTRY = _descriptor.Descriptor( name='DataEntry', full_name='pb.DeployOptions.DataEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.DataEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.DataEntry.value', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4951, serialized_end=4994, ) _DEPLOYOPTIONS = _descriptor.Descriptor( name='DeployOptions', full_name='pb.DeployOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.DeployOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.DeployOptions.entrypoint', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.DeployOptions.podname', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='nodename', full_name='pb.DeployOptions.nodename', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='image', full_name='pb.DeployOptions.image', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='extra_args', full_name='pb.DeployOptions.extra_args', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu_quota', full_name='pb.DeployOptions.cpu_quota', index=6, number=7, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.DeployOptions.memory', index=7, number=8, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='count', full_name='pb.DeployOptions.count', index=8, number=9, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='env', full_name='pb.DeployOptions.env', index=9, number=10, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dns', full_name='pb.DeployOptions.dns', index=10, number=11, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='extra_hosts', full_name='pb.DeployOptions.extra_hosts', index=11, number=12, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='volumes', full_name='pb.DeployOptions.volumes', index=12, number=13, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='networks', full_name='pb.DeployOptions.networks', index=13, number=14, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='networkmode', full_name='pb.DeployOptions.networkmode', index=14, number=15, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='user', full_name='pb.DeployOptions.user', index=15, number=16, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='debug', full_name='pb.DeployOptions.debug', index=16, number=17, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='openStdin', full_name='pb.DeployOptions.openStdin', index=17, number=18, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.DeployOptions.labels', index=18, number=19, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodelabels', full_name='pb.DeployOptions.nodelabels', index=19, number=20, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='deploy_method', full_name='pb.DeployOptions.deploy_method', index=20, number=21, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.DeployOptions.data', index=21, number=22, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='softlimit', full_name='pb.DeployOptions.softlimit', index=22, number=23, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodes_limit', full_name='pb.DeployOptions.nodes_limit', index=23, number=24, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_DEPLOYOPTIONS_NETWORKSENTRY, _DEPLOYOPTIONS_LABELSENTRY, _DEPLOYOPTIONS_NODELABELSENTRY, _DEPLOYOPTIONS_DATAENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4218, serialized_end=4994, ) _REPLACEOPTIONS_FILTERLABELSENTRY = _descriptor.Descriptor( name='FilterLabelsEntry', full_name='pb.ReplaceOptions.FilterLabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='key', full_name='pb.ReplaceOptions.FilterLabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.ReplaceOptions.FilterLabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5210, serialized_end=5261, ) _REPLACEOPTIONS_COPYENTRY = _descriptor.Descriptor( name='CopyEntry', full_name='pb.ReplaceOptions.CopyEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.ReplaceOptions.CopyEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.ReplaceOptions.CopyEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5263, serialized_end=5306, ) _REPLACEOPTIONS = _descriptor.Descriptor( name='ReplaceOptions', full_name='pb.ReplaceOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='deployOpt', full_name='pb.ReplaceOptions.deployOpt', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='force', full_name='pb.ReplaceOptions.force', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filter_labels', full_name='pb.ReplaceOptions.filter_labels', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='copy', full_name='pb.ReplaceOptions.copy', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='ids', full_name='pb.ReplaceOptions.ids', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='networkinherit', full_name='pb.ReplaceOptions.networkinherit', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_REPLACEOPTIONS_FILTERLABELSENTRY, _REPLACEOPTIONS_COPYENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4997, serialized_end=5306, ) _CACHEIMAGEOPTIONS = _descriptor.Descriptor( name='CacheImageOptions', full_name='pb.CacheImageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.CacheImageOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.CacheImageOptions.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='images', full_name='pb.CacheImageOptions.images', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='step', full_name='pb.CacheImageOptions.step', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5308, serialized_end=5392, ) _REMOVEIMAGEOPTIONS = _descriptor.Descriptor( name='RemoveImageOptions', full_name='pb.RemoveImageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.RemoveImageOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.RemoveImageOptions.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='images', full_name='pb.RemoveImageOptions.images', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='step', full_name='pb.RemoveImageOptions.step', index=3, 
number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='prune', full_name='pb.RemoveImageOptions.prune', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5394, serialized_end=5494, ) _COPYPATHS = _descriptor.Descriptor( name='CopyPaths', full_name='pb.CopyPaths', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='paths', full_name='pb.CopyPaths.paths', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5496, serialized_end=5522, ) _COPYOPTIONS_TARGETSENTRY = _descriptor.Descriptor( name='TargetsEntry', full_name='pb.CopyOptions.TargetsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.CopyOptions.TargetsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.CopyOptions.TargetsEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5586, serialized_end=5647, ) _COPYOPTIONS = _descriptor.Descriptor( name='CopyOptions', full_name='pb.CopyOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='targets', full_name='pb.CopyOptions.targets', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_COPYOPTIONS_TARGETSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5524, serialized_end=5647, ) _ERRORDETAIL = _descriptor.Descriptor( name='ErrorDetail', full_name='pb.ErrorDetail', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='pb.ErrorDetail.code', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='message', 
full_name='pb.ErrorDetail.message', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5649, serialized_end=5693, ) _BUILDIMAGEMESSAGE = _descriptor.Descriptor( name='BuildImageMessage', full_name='pb.BuildImageMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.BuildImageMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status', full_name='pb.BuildImageMessage.status', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='progress', full_name='pb.BuildImageMessage.progress', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.BuildImageMessage.error', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='stream', full_name='pb.BuildImageMessage.stream', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error_detail', full_name='pb.BuildImageMessage.error_detail', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5696, serialized_end=5831, ) _CREATECONTAINERMESSAGE_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.CreateContainerMessage.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.CreateContainerMessage.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.CreateContainerMessage.CpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1290, serialized_end=1332, ) _CREATECONTAINERMESSAGE_PUBLISHENTRY = _descriptor.Descriptor( name='PublishEntry', full_name='pb.CreateContainerMessage.PublishEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.CreateContainerMessage.PublishEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.CreateContainerMessage.PublishEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1929, serialized_end=1975, ) _CREATECONTAINERMESSAGE = _descriptor.Descriptor( name='CreateContainerMessage', full_name='pb.CreateContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.CreateContainerMessage.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.CreateContainerMessage.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='id', full_name='pb.CreateContainerMessage.id', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='pb.CreateContainerMessage.name', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.CreateContainerMessage.error', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.CreateContainerMessage.success', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.CreateContainerMessage.cpu', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='quota', full_name='pb.CreateContainerMessage.quota', index=7, number=8, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.CreateContainerMessage.memory', index=8, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='publish', full_name='pb.CreateContainerMessage.publish', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.CreateContainerMessage.hook', index=10, number=11, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_CREATECONTAINERMESSAGE_CPUENTRY, _CREATECONTAINERMESSAGE_PUBLISHENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5834, serialized_end=6196, ) _REPLACECONTAINERMESSAGE = _descriptor.Descriptor( name='ReplaceContainerMessage', full_name='pb.ReplaceContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='create', full_name='pb.ReplaceContainerMessage.create', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='remove', full_name='pb.ReplaceContainerMessage.remove', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.ReplaceContainerMessage.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6199, serialized_end=6327, ) _RUNANDWAITMESSAGE = _descriptor.Descriptor( name='RunAndWaitMessage', full_name='pb.RunAndWaitMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='container_id', full_name='pb.RunAndWaitMessage.container_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.RunAndWaitMessage.data', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6329, serialized_end=6384, ) _CACHEIMAGEMESSAGE = _descriptor.Descriptor( name='CacheImageMessage', full_name='pb.CacheImageMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='pb.CacheImageMessage.image', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.CacheImageMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.CacheImageMessage.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='message', full_name='pb.CacheImageMessage.message', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6386, serialized_end=6472, ) _REMOVEIMAGEMESSAGE = _descriptor.Descriptor( name='RemoveImageMessage', full_name='pb.RemoveImageMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='pb.RemoveImageMessage.image', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.RemoveImageMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='messages', full_name='pb.RemoveImageMessage.messages', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], 
oneofs=[ ], serialized_start=6474, serialized_end=6544, ) _REMOVECONTAINERMESSAGE = _descriptor.Descriptor( name='RemoveContainerMessage', full_name='pb.RemoveContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.RemoveContainerMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.RemoveContainerMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.RemoveContainerMessage.hook', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6546, serialized_end=6613, ) _REALLOCRESOURCEMESSAGE = _descriptor.Descriptor( name='ReallocResourceMessage', full_name='pb.ReallocResourceMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ReallocResourceMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.ReallocResourceMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6615, serialized_end=6668, ) _COPYMESSAGE = _descriptor.Descriptor( name='CopyMessage', full_name='pb.CopyMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.CopyMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status', full_name='pb.CopyMessage.status', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='pb.CopyMessage.name', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='path', 
full_name='pb.CopyMessage.path', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.CopyMessage.error', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.CopyMessage.data', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6670, serialized_end=6768, ) _RUNANDWAITOPTIONS = _descriptor.Descriptor( name='RunAndWaitOptions', full_name='pb.RunAndWaitOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='DeployOptions', full_name='pb.RunAndWaitOptions.DeployOptions', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='Cmd', full_name='pb.RunAndWaitOptions.Cmd', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6770, serialized_end=6844, ) _CONTROLCONTAINEROPTIONS = _descriptor.Descriptor( name='ControlContainerOptions', full_name='pb.ControlContainerOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.ControlContainerOptions.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='pb.ControlContainerOptions.type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6846, serialized_end=6898, ) _CONTROLCONTAINERMESSAGE = _descriptor.Descriptor( name='ControlContainerMessage', full_name='pb.ControlContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ControlContainerMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
  options=None, file=DESCRIPTOR),
  _descriptor.FieldDescriptor(
    name='error', full_name='pb.ControlContainerMessage.error', index=1,
    number=2, type=9, cpp_type=9, label=1,
    has_default_value=False, default_value=_b("").decode('utf-8'),
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None, file=DESCRIPTOR),
  _descriptor.FieldDescriptor(
    name='hook', full_name='pb.ControlContainerMessage.hook', index=2,
    number=3, type=12, cpp_type=9, label=1,
    has_default_value=False, default_value=_b(""),
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6900,
  serialized_end=6966,
)

_LISTCONTAINERSOPTIONS_LABELSENTRY.containing_type = _LISTCONTAINERSOPTIONS
_LISTCONTAINERSOPTIONS.fields_by_name['labels'].message_type = _LISTCONTAINERSOPTIONS_LABELSENTRY
_PODS.fields_by_name['pods'].message_type = _POD
_PODRESOURCE_CPUENTRY.containing_type = _PODRESOURCE
_PODRESOURCE_MEMORYENTRY.containing_type = _PODRESOURCE
_PODRESOURCE_DIFFENTRY.containing_type = _PODRESOURCE
_PODRESOURCE_DETAILENTRY.containing_type = _PODRESOURCE
_PODRESOURCE.fields_by_name['cpu'].message_type = _PODRESOURCE_CPUENTRY
_PODRESOURCE.fields_by_name['memory'].message_type = _PODRESOURCE_MEMORYENTRY
_PODRESOURCE.fields_by_name['diff'].message_type = _PODRESOURCE_DIFFENTRY
_PODRESOURCE.fields_by_name['detail'].message_type = _PODRESOURCE_DETAILENTRY
_NETWORKS.fields_by_name['networks'].message_type = _NETWORK
_NODE_CPUENTRY.containing_type = _NODE
_NODE_LABELSENTRY.containing_type = _NODE
_NODE_INITCPUENTRY.containing_type = _NODE
_NODE.fields_by_name['cpu'].message_type = _NODE_CPUENTRY
_NODE.fields_by_name['labels'].message_type = _NODE_LABELSENTRY
_NODE.fields_by_name['init_cpu'].message_type = _NODE_INITCPUENTRY
_NODES.fields_by_name['nodes'].message_type = _NODE
_CONTAINER_CPUENTRY.containing_type = _CONTAINER
_CONTAINER_LABELSENTRY.containing_type = _CONTAINER
_CONTAINER_PUBLISHENTRY.containing_type = _CONTAINER
_CONTAINER.fields_by_name['cpu'].message_type = _CONTAINER_CPUENTRY
_CONTAINER.fields_by_name['labels'].message_type = _CONTAINER_LABELSENTRY
_CONTAINER.fields_by_name['publish'].message_type = _CONTAINER_PUBLISHENTRY
_CONTAINERS.fields_by_name['containers'].message_type = _CONTAINER
_ADDNODEOPTIONS_LABELSENTRY.containing_type = _ADDNODEOPTIONS
_ADDNODEOPTIONS.fields_by_name['labels'].message_type = _ADDNODEOPTIONS_LABELSENTRY
_BUILD_ENVSENTRY.containing_type = _BUILD
_BUILD_ARGSENTRY.containing_type = _BUILD
_BUILD_LABELSENTRY.containing_type = _BUILD
_BUILD_ARTIFACTSENTRY.containing_type = _BUILD
_BUILD_CACHEENTRY.containing_type = _BUILD
_BUILD.fields_by_name['envs'].message_type = _BUILD_ENVSENTRY
_BUILD.fields_by_name['args'].message_type = _BUILD_ARGSENTRY
_BUILD.fields_by_name['labels'].message_type = _BUILD_LABELSENTRY
_BUILD.fields_by_name['artifacts'].message_type = _BUILD_ARTIFACTSENTRY
_BUILD.fields_by_name['cache'].message_type = _BUILD_CACHEENTRY
_BUILDS_BUILDSENTRY.fields_by_name['value'].message_type = _BUILD
_BUILDS_BUILDSENTRY.containing_type = _BUILDS
_BUILDS.fields_by_name['builds'].message_type = _BUILDS_BUILDSENTRY
_BUILDIMAGEOPTIONS.fields_by_name['builds'].message_type = _BUILDS
_LOGOPTIONS_CONFIGENTRY.containing_type = _LOGOPTIONS
_LOGOPTIONS.fields_by_name['config'].message_type = _LOGOPTIONS_CONFIGENTRY
_ENTRYPOINTOPTIONS_SYSCTLSENTRY.containing_type = _ENTRYPOINTOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['log'].message_type = _LOGOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['healthcheck'].message_type = _HEALTHCHECKOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['hook'].message_type = _HOOKOPTIONS
_ENTRYPOINTOPTIONS.fields_by_name['sysctls'].message_type = _ENTRYPOINTOPTIONS_SYSCTLSENTRY
_DEPLOYOPTIONS_NETWORKSENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS_LABELSENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS_NODELABELSENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS_DATAENTRY.containing_type = _DEPLOYOPTIONS
_DEPLOYOPTIONS.fields_by_name['entrypoint'].message_type = _ENTRYPOINTOPTIONS
_DEPLOYOPTIONS.fields_by_name['networks'].message_type = _DEPLOYOPTIONS_NETWORKSENTRY
_DEPLOYOPTIONS.fields_by_name['labels'].message_type = _DEPLOYOPTIONS_LABELSENTRY
_DEPLOYOPTIONS.fields_by_name['nodelabels'].message_type = _DEPLOYOPTIONS_NODELABELSENTRY
_DEPLOYOPTIONS.fields_by_name['data'].message_type = _DEPLOYOPTIONS_DATAENTRY
_REPLACEOPTIONS_FILTERLABELSENTRY.containing_type = _REPLACEOPTIONS
_REPLACEOPTIONS_COPYENTRY.containing_type = _REPLACEOPTIONS
_REPLACEOPTIONS.fields_by_name['deployOpt'].message_type = _DEPLOYOPTIONS
_REPLACEOPTIONS.fields_by_name['filter_labels'].message_type = _REPLACEOPTIONS_FILTERLABELSENTRY
_REPLACEOPTIONS.fields_by_name['copy'].message_type = _REPLACEOPTIONS_COPYENTRY
_COPYOPTIONS_TARGETSENTRY.fields_by_name['value'].message_type = _COPYPATHS
_COPYOPTIONS_TARGETSENTRY.containing_type = _COPYOPTIONS
_COPYOPTIONS.fields_by_name['targets'].message_type = _COPYOPTIONS_TARGETSENTRY
_BUILDIMAGEMESSAGE.fields_by_name['error_detail'].message_type = _ERRORDETAIL
_CREATECONTAINERMESSAGE_CPUENTRY.containing_type = _CREATECONTAINERMESSAGE
_CREATECONTAINERMESSAGE_PUBLISHENTRY.containing_type = _CREATECONTAINERMESSAGE
_CREATECONTAINERMESSAGE.fields_by_name['cpu'].message_type = _CREATECONTAINERMESSAGE_CPUENTRY
_CREATECONTAINERMESSAGE.fields_by_name['publish'].message_type = _CREATECONTAINERMESSAGE_PUBLISHENTRY
_REPLACECONTAINERMESSAGE.fields_by_name['create'].message_type = _CREATECONTAINERMESSAGE
_REPLACECONTAINERMESSAGE.fields_by_name['remove'].message_type = _REMOVECONTAINERMESSAGE
_RUNANDWAITOPTIONS.fields_by_name['DeployOptions'].message_type = _DEPLOYOPTIONS
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
DESCRIPTOR.message_types_by_name['ListContainersOptions'] = _LISTCONTAINERSOPTIONS
DESCRIPTOR.message_types_by_name['DeployStatusOptions'] = _DEPLOYSTATUSOPTIONS
DESCRIPTOR.message_types_by_name['DeployStatusMessage'] = _DEPLOYSTATUSMESSAGE
DESCRIPTOR.message_types_by_name['Pod'] = _POD
DESCRIPTOR.message_types_by_name['Pods'] = _PODS
DESCRIPTOR.message_types_by_name['PodResource'] = _PODRESOURCE
DESCRIPTOR.message_types_by_name['ListNetworkOptions'] = _LISTNETWORKOPTIONS
DESCRIPTOR.message_types_by_name['Network'] = _NETWORK
DESCRIPTOR.message_types_by_name['Networks'] = _NETWORKS
DESCRIPTOR.message_types_by_name['Node'] = _NODE
DESCRIPTOR.message_types_by_name['Nodes'] = _NODES
DESCRIPTOR.message_types_by_name['NodeAvailable'] = _NODEAVAILABLE
DESCRIPTOR.message_types_by_name['Container'] = _CONTAINER
DESCRIPTOR.message_types_by_name['ContainerDeployedOptions'] = _CONTAINERDEPLOYEDOPTIONS
DESCRIPTOR.message_types_by_name['Containers'] = _CONTAINERS
DESCRIPTOR.message_types_by_name['ContainerID'] = _CONTAINERID
DESCRIPTOR.message_types_by_name['ContainerIDs'] = _CONTAINERIDS
DESCRIPTOR.message_types_by_name['RemoveContainerOptions'] = _REMOVECONTAINEROPTIONS
DESCRIPTOR.message_types_by_name['ReallocOptions'] = _REALLOCOPTIONS DESCRIPTOR.message_types_by_name['AddPodOptions'] = _ADDPODOPTIONS DESCRIPTOR.message_types_by_name['RemovePodOptions'] = _REMOVEPODOPTIONS DESCRIPTOR.message_types_by_name['GetPodOptions'] = _GETPODOPTIONS DESCRIPTOR.message_types_by_name['AddNodeOptions'] = _ADDNODEOPTIONS DESCRIPTOR.message_types_by_name['RemoveNodeOptions'] = _REMOVENODEOPTIONS DESCRIPTOR.message_types_by_name['GetNodeOptions'] = _GETNODEOPTIONS DESCRIPTOR.message_types_by_name['ListNodesOptions'] = _LISTNODESOPTIONS DESCRIPTOR.message_types_by_name['Build'] = _BUILD DESCRIPTOR.message_types_by_name['Builds'] = _BUILDS DESCRIPTOR.message_types_by_name['BuildImageOptions'] = _BUILDIMAGEOPTIONS DESCRIPTOR.message_types_by_name['HookOptions'] = _HOOKOPTIONS DESCRIPTOR.message_types_by_name['HealthCheckOptions'] = _HEALTHCHECKOPTIONS DESCRIPTOR.message_types_by_name['LogOptions'] = _LOGOPTIONS DESCRIPTOR.message_types_by_name['EntrypointOptions'] = _ENTRYPOINTOPTIONS DESCRIPTOR.message_types_by_name['DeployOptions'] = _DEPLOYOPTIONS DESCRIPTOR.message_types_by_name['ReplaceOptions'] = _REPLACEOPTIONS DESCRIPTOR.message_types_by_name['CacheImageOptions'] = _CACHEIMAGEOPTIONS DESCRIPTOR.message_types_by_name['RemoveImageOptions'] = _REMOVEIMAGEOPTIONS DESCRIPTOR.message_types_by_name['CopyPaths'] = _COPYPATHS DESCRIPTOR.message_types_by_name['CopyOptions'] = _COPYOPTIONS DESCRIPTOR.message_types_by_name['ErrorDetail'] = _ERRORDETAIL DESCRIPTOR.message_types_by_name['BuildImageMessage'] = _BUILDIMAGEMESSAGE DESCRIPTOR.message_types_by_name['CreateContainerMessage'] = _CREATECONTAINERMESSAGE DESCRIPTOR.message_types_by_name['ReplaceContainerMessage'] = _REPLACECONTAINERMESSAGE DESCRIPTOR.message_types_by_name['RunAndWaitMessage'] = _RUNANDWAITMESSAGE DESCRIPTOR.message_types_by_name['CacheImageMessage'] = _CACHEIMAGEMESSAGE DESCRIPTOR.message_types_by_name['RemoveImageMessage'] = _REMOVEIMAGEMESSAGE DESCRIPTOR.message_types_by_name['RemoveContainerMessage'] = _REMOVECONTAINERMESSAGE DESCRIPTOR.message_types_by_name['ReallocResourceMessage'] = _REALLOCRESOURCEMESSAGE DESCRIPTOR.message_types_by_name['CopyMessage'] = _COPYMESSAGE DESCRIPTOR.message_types_by_name['RunAndWaitOptions'] = _RUNANDWAITOPTIONS DESCRIPTOR.message_types_by_name['ControlContainerOptions'] = _CONTROLCONTAINEROPTIONS DESCRIPTOR.message_types_by_name['ControlContainerMessage'] = _CONTROLCONTAINERMESSAGE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict( DESCRIPTOR = _EMPTY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Empty) )) _sym_db.RegisterMessage(Empty) ListContainersOptions = _reflection.GeneratedProtocolMessageType('ListContainersOptions', (_message.Message,), dict( LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _LISTCONTAINERSOPTIONS_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListContainersOptions.LabelsEntry) )) , DESCRIPTOR = _LISTCONTAINERSOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListContainersOptions) )) _sym_db.RegisterMessage(ListContainersOptions) _sym_db.RegisterMessage(ListContainersOptions.LabelsEntry) DeployStatusOptions = _reflection.GeneratedProtocolMessageType('DeployStatusOptions', (_message.Message,), dict( DESCRIPTOR = _DEPLOYSTATUSOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployStatusOptions) )) 
_sym_db.RegisterMessage(DeployStatusOptions) DeployStatusMessage = _reflection.GeneratedProtocolMessageType('DeployStatusMessage', (_message.Message,), dict( DESCRIPTOR = _DEPLOYSTATUSMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployStatusMessage) )) _sym_db.RegisterMessage(DeployStatusMessage) Pod = _reflection.GeneratedProtocolMessageType('Pod', (_message.Message,), dict( DESCRIPTOR = _POD, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Pod) )) _sym_db.RegisterMessage(Pod) Pods = _reflection.GeneratedProtocolMessageType('Pods', (_message.Message,), dict( DESCRIPTOR = _PODS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Pods) )) _sym_db.RegisterMessage(Pods) PodResource = _reflection.GeneratedProtocolMessageType('PodResource', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.CpuEntry) )) , MemoryEntry = _reflection.GeneratedProtocolMessageType('MemoryEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_MEMORYENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.MemoryEntry) )) , DiffEntry = _reflection.GeneratedProtocolMessageType('DiffEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_DIFFENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.DiffEntry) )) , DetailEntry = _reflection.GeneratedProtocolMessageType('DetailEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_DETAILENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.DetailEntry) )) , DESCRIPTOR = _PODRESOURCE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource) )) _sym_db.RegisterMessage(PodResource) _sym_db.RegisterMessage(PodResource.CpuEntry) _sym_db.RegisterMessage(PodResource.MemoryEntry) _sym_db.RegisterMessage(PodResource.DiffEntry) _sym_db.RegisterMessage(PodResource.DetailEntry) ListNetworkOptions = _reflection.GeneratedProtocolMessageType('ListNetworkOptions', (_message.Message,), dict( DESCRIPTOR = _LISTNETWORKOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListNetworkOptions) )) _sym_db.RegisterMessage(ListNetworkOptions) Network = _reflection.GeneratedProtocolMessageType('Network', (_message.Message,), dict( DESCRIPTOR = _NETWORK, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Network) )) _sym_db.RegisterMessage(Network) Networks = _reflection.GeneratedProtocolMessageType('Networks', (_message.Message,), dict( DESCRIPTOR = _NETWORKS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Networks) )) _sym_db.RegisterMessage(Networks) Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _NODE_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node.CpuEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _NODE_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node.LabelsEntry) )) , InitCpuEntry = _reflection.GeneratedProtocolMessageType('InitCpuEntry', (_message.Message,), dict( DESCRIPTOR = _NODE_INITCPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node.InitCpuEntry) )) , DESCRIPTOR = 
_NODE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node) )) _sym_db.RegisterMessage(Node) _sym_db.RegisterMessage(Node.CpuEntry) _sym_db.RegisterMessage(Node.LabelsEntry) _sym_db.RegisterMessage(Node.InitCpuEntry) Nodes = _reflection.GeneratedProtocolMessageType('Nodes', (_message.Message,), dict( DESCRIPTOR = _NODES, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Nodes) )) _sym_db.RegisterMessage(Nodes) NodeAvailable = _reflection.GeneratedProtocolMessageType('NodeAvailable', (_message.Message,), dict( DESCRIPTOR = _NODEAVAILABLE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.NodeAvailable) )) _sym_db.RegisterMessage(NodeAvailable) Container = _reflection.GeneratedProtocolMessageType('Container', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _CONTAINER_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container.CpuEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _CONTAINER_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container.LabelsEntry) )) , PublishEntry = _reflection.GeneratedProtocolMessageType('PublishEntry', (_message.Message,), dict( DESCRIPTOR = _CONTAINER_PUBLISHENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container.PublishEntry) )) , DESCRIPTOR = _CONTAINER, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container) )) _sym_db.RegisterMessage(Container) _sym_db.RegisterMessage(Container.CpuEntry) _sym_db.RegisterMessage(Container.LabelsEntry) _sym_db.RegisterMessage(Container.PublishEntry) ContainerDeployedOptions = _reflection.GeneratedProtocolMessageType('ContainerDeployedOptions', (_message.Message,), dict( DESCRIPTOR = _CONTAINERDEPLOYEDOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ContainerDeployedOptions) )) _sym_db.RegisterMessage(ContainerDeployedOptions) Containers = _reflection.GeneratedProtocolMessageType('Containers', (_message.Message,), dict( DESCRIPTOR = _CONTAINERS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Containers) )) _sym_db.RegisterMessage(Containers) ContainerID = _reflection.GeneratedProtocolMessageType('ContainerID', (_message.Message,), dict( DESCRIPTOR = _CONTAINERID, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ContainerID) )) _sym_db.RegisterMessage(ContainerID) ContainerIDs = _reflection.GeneratedProtocolMessageType('ContainerIDs', (_message.Message,), dict( DESCRIPTOR = _CONTAINERIDS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ContainerIDs) )) _sym_db.RegisterMessage(ContainerIDs) RemoveContainerOptions = _reflection.GeneratedProtocolMessageType('RemoveContainerOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVECONTAINEROPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveContainerOptions) )) _sym_db.RegisterMessage(RemoveContainerOptions) ReallocOptions = _reflection.GeneratedProtocolMessageType('ReallocOptions', (_message.Message,), dict( DESCRIPTOR = _REALLOCOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReallocOptions) )) _sym_db.RegisterMessage(ReallocOptions) AddPodOptions = _reflection.GeneratedProtocolMessageType('AddPodOptions', (_message.Message,), dict( DESCRIPTOR = _ADDPODOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.AddPodOptions) 
)) _sym_db.RegisterMessage(AddPodOptions) RemovePodOptions = _reflection.GeneratedProtocolMessageType('RemovePodOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVEPODOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemovePodOptions) )) _sym_db.RegisterMessage(RemovePodOptions) GetPodOptions = _reflection.GeneratedProtocolMessageType('GetPodOptions', (_message.Message,), dict( DESCRIPTOR = _GETPODOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.GetPodOptions) )) _sym_db.RegisterMessage(GetPodOptions) AddNodeOptions = _reflection.GeneratedProtocolMessageType('AddNodeOptions', (_message.Message,), dict( LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _ADDNODEOPTIONS_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.AddNodeOptions.LabelsEntry) )) , DESCRIPTOR = _ADDNODEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.AddNodeOptions) )) _sym_db.RegisterMessage(AddNodeOptions) _sym_db.RegisterMessage(AddNodeOptions.LabelsEntry) RemoveNodeOptions = _reflection.GeneratedProtocolMessageType('RemoveNodeOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVENODEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveNodeOptions) )) _sym_db.RegisterMessage(RemoveNodeOptions) GetNodeOptions = _reflection.GeneratedProtocolMessageType('GetNodeOptions', (_message.Message,), dict( DESCRIPTOR = _GETNODEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.GetNodeOptions) )) _sym_db.RegisterMessage(GetNodeOptions) ListNodesOptions = _reflection.GeneratedProtocolMessageType('ListNodesOptions', (_message.Message,), dict( DESCRIPTOR = _LISTNODESOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListNodesOptions) )) _sym_db.RegisterMessage(ListNodesOptions) Build = _reflection.GeneratedProtocolMessageType('Build', (_message.Message,), dict( EnvsEntry = _reflection.GeneratedProtocolMessageType('EnvsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_ENVSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.EnvsEntry) )) , ArgsEntry = _reflection.GeneratedProtocolMessageType('ArgsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_ARGSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.ArgsEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.LabelsEntry) )) , ArtifactsEntry = _reflection.GeneratedProtocolMessageType('ArtifactsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_ARTIFACTSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.ArtifactsEntry) )) , CacheEntry = _reflection.GeneratedProtocolMessageType('CacheEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_CACHEENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.CacheEntry) )) , DESCRIPTOR = _BUILD, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build) )) _sym_db.RegisterMessage(Build) _sym_db.RegisterMessage(Build.EnvsEntry) _sym_db.RegisterMessage(Build.ArgsEntry) _sym_db.RegisterMessage(Build.LabelsEntry) _sym_db.RegisterMessage(Build.ArtifactsEntry) _sym_db.RegisterMessage(Build.CacheEntry) Builds = _reflection.GeneratedProtocolMessageType('Builds', (_message.Message,), dict( BuildsEntry = 
_reflection.GeneratedProtocolMessageType('BuildsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILDS_BUILDSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Builds.BuildsEntry) )) , DESCRIPTOR = _BUILDS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Builds) )) _sym_db.RegisterMessage(Builds) _sym_db.RegisterMessage(Builds.BuildsEntry) BuildImageOptions = _reflection.GeneratedProtocolMessageType('BuildImageOptions', (_message.Message,), dict( DESCRIPTOR = _BUILDIMAGEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.BuildImageOptions) )) _sym_db.RegisterMessage(BuildImageOptions) HookOptions = _reflection.GeneratedProtocolMessageType('HookOptions', (_message.Message,), dict( DESCRIPTOR = _HOOKOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.HookOptions) )) _sym_db.RegisterMessage(HookOptions) HealthCheckOptions = _reflection.GeneratedProtocolMessageType('HealthCheckOptions', (_message.Message,), dict( DESCRIPTOR = _HEALTHCHECKOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.HealthCheckOptions) )) _sym_db.RegisterMessage(HealthCheckOptions) LogOptions = _reflection.GeneratedProtocolMessageType('LogOptions', (_message.Message,), dict( ConfigEntry = _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), dict( DESCRIPTOR = _LOGOPTIONS_CONFIGENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.LogOptions.ConfigEntry) )) , DESCRIPTOR = _LOGOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.LogOptions) )) _sym_db.RegisterMessage(LogOptions) _sym_db.RegisterMessage(LogOptions.ConfigEntry) EntrypointOptions = _reflection.GeneratedProtocolMessageType('EntrypointOptions', (_message.Message,), dict( SysctlsEntry = _reflection.GeneratedProtocolMessageType('SysctlsEntry', (_message.Message,), dict( DESCRIPTOR = _ENTRYPOINTOPTIONS_SYSCTLSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.EntrypointOptions.SysctlsEntry) )) , DESCRIPTOR = _ENTRYPOINTOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.EntrypointOptions) )) _sym_db.RegisterMessage(EntrypointOptions) _sym_db.RegisterMessage(EntrypointOptions.SysctlsEntry) DeployOptions = _reflection.GeneratedProtocolMessageType('DeployOptions', (_message.Message,), dict( NetworksEntry = _reflection.GeneratedProtocolMessageType('NetworksEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_NETWORKSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.NetworksEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.LabelsEntry) )) , NodelabelsEntry = _reflection.GeneratedProtocolMessageType('NodelabelsEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_NODELABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.NodelabelsEntry) )) , DataEntry = _reflection.GeneratedProtocolMessageType('DataEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_DATAENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.DataEntry) )) , DESCRIPTOR = _DEPLOYOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions) )) _sym_db.RegisterMessage(DeployOptions) _sym_db.RegisterMessage(DeployOptions.NetworksEntry) 
_sym_db.RegisterMessage(DeployOptions.LabelsEntry) _sym_db.RegisterMessage(DeployOptions.NodelabelsEntry) _sym_db.RegisterMessage(DeployOptions.DataEntry) ReplaceOptions = _reflection.GeneratedProtocolMessageType('ReplaceOptions', (_message.Message,), dict( FilterLabelsEntry = _reflection.GeneratedProtocolMessageType('FilterLabelsEntry', (_message.Message,), dict( DESCRIPTOR = _REPLACEOPTIONS_FILTERLABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceOptions.FilterLabelsEntry) )) , CopyEntry = _reflection.GeneratedProtocolMessageType('CopyEntry', (_message.Message,), dict( DESCRIPTOR = _REPLACEOPTIONS_COPYENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceOptions.CopyEntry) )) , DESCRIPTOR = _REPLACEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceOptions) )) _sym_db.RegisterMessage(ReplaceOptions) _sym_db.RegisterMessage(ReplaceOptions.FilterLabelsEntry) _sym_db.RegisterMessage(ReplaceOptions.CopyEntry) CacheImageOptions = _reflection.GeneratedProtocolMessageType('CacheImageOptions', (_message.Message,), dict( DESCRIPTOR = _CACHEIMAGEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CacheImageOptions) )) _sym_db.RegisterMessage(CacheImageOptions) RemoveImageOptions = _reflection.GeneratedProtocolMessageType('RemoveImageOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVEIMAGEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveImageOptions) )) _sym_db.RegisterMessage(RemoveImageOptions) CopyPaths = _reflection.GeneratedProtocolMessageType('CopyPaths', (_message.Message,), dict( DESCRIPTOR = _COPYPATHS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyPaths) )) _sym_db.RegisterMessage(CopyPaths) CopyOptions = _reflection.GeneratedProtocolMessageType('CopyOptions', (_message.Message,), dict( TargetsEntry = _reflection.GeneratedProtocolMessageType('TargetsEntry', (_message.Message,), dict( DESCRIPTOR = _COPYOPTIONS_TARGETSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyOptions.TargetsEntry) )) , DESCRIPTOR = _COPYOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyOptions) )) _sym_db.RegisterMessage(CopyOptions) _sym_db.RegisterMessage(CopyOptions.TargetsEntry) ErrorDetail = _reflection.GeneratedProtocolMessageType('ErrorDetail', (_message.Message,), dict( DESCRIPTOR = _ERRORDETAIL, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ErrorDetail) )) _sym_db.RegisterMessage(ErrorDetail) BuildImageMessage = _reflection.GeneratedProtocolMessageType('BuildImageMessage', (_message.Message,), dict( DESCRIPTOR = _BUILDIMAGEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.BuildImageMessage) )) _sym_db.RegisterMessage(BuildImageMessage) CreateContainerMessage = _reflection.GeneratedProtocolMessageType('CreateContainerMessage', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _CREATECONTAINERMESSAGE_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CreateContainerMessage.CpuEntry) )) , PublishEntry = _reflection.GeneratedProtocolMessageType('PublishEntry', (_message.Message,), dict( DESCRIPTOR = _CREATECONTAINERMESSAGE_PUBLISHENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CreateContainerMessage.PublishEntry) )) , DESCRIPTOR = _CREATECONTAINERMESSAGE, __module__ = 'core_pb2' # 
@@protoc_insertion_point(class_scope:pb.CreateContainerMessage) )) _sym_db.RegisterMessage(CreateContainerMessage) _sym_db.RegisterMessage(CreateContainerMessage.CpuEntry) _sym_db.RegisterMessage(CreateContainerMessage.PublishEntry) ReplaceContainerMessage = _reflection.GeneratedProtocolMessageType('ReplaceContainerMessage', (_message.Message,), dict( DESCRIPTOR = _REPLACECONTAINERMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceContainerMessage) )) _sym_db.RegisterMessage(ReplaceContainerMessage) RunAndWaitMessage = _reflection.GeneratedProtocolMessageType('RunAndWaitMessage', (_message.Message,), dict( DESCRIPTOR = _RUNANDWAITMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RunAndWaitMessage) )) _sym_db.RegisterMessage(RunAndWaitMessage) CacheImageMessage = _reflection.GeneratedProtocolMessageType('CacheImageMessage', (_message.Message,), dict( DESCRIPTOR = _CACHEIMAGEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CacheImageMessage) )) _sym_db.RegisterMessage(CacheImageMessage) RemoveImageMessage = _reflection.GeneratedProtocolMessageType('RemoveImageMessage', (_message.Message,), dict( DESCRIPTOR = _REMOVEIMAGEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveImageMessage) )) _sym_db.RegisterMessage(RemoveImageMessage) RemoveContainerMessage = _reflection.GeneratedProtocolMessageType('RemoveContainerMessage', (_message.Message,), dict( DESCRIPTOR = _REMOVECONTAINERMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveContainerMessage) )) _sym_db.RegisterMessage(RemoveContainerMessage) ReallocResourceMessage = _reflection.GeneratedProtocolMessageType('ReallocResourceMessage', (_message.Message,), dict( DESCRIPTOR = _REALLOCRESOURCEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReallocResourceMessage) )) _sym_db.RegisterMessage(ReallocResourceMessage) CopyMessage = _reflection.GeneratedProtocolMessageType('CopyMessage', (_message.Message,), dict( DESCRIPTOR = _COPYMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyMessage) )) _sym_db.RegisterMessage(CopyMessage) RunAndWaitOptions = _reflection.GeneratedProtocolMessageType('RunAndWaitOptions', (_message.Message,), dict( DESCRIPTOR = _RUNANDWAITOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RunAndWaitOptions) )) _sym_db.RegisterMessage(RunAndWaitOptions) ControlContainerOptions = _reflection.GeneratedProtocolMessageType('ControlContainerOptions', (_message.Message,), dict( DESCRIPTOR = _CONTROLCONTAINEROPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ControlContainerOptions) )) _sym_db.RegisterMessage(ControlContainerOptions) ControlContainerMessage = _reflection.GeneratedProtocolMessageType('ControlContainerMessage', (_message.Message,), dict( DESCRIPTOR = _CONTROLCONTAINERMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ControlContainerMessage) )) _sym_db.RegisterMessage(ControlContainerMessage) _LISTCONTAINERSOPTIONS_LABELSENTRY.has_options = True _LISTCONTAINERSOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PODRESOURCE_CPUENTRY.has_options = True _PODRESOURCE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PODRESOURCE_MEMORYENTRY.has_options = True _PODRESOURCE_MEMORYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) 
_PODRESOURCE_DIFFENTRY.has_options = True _PODRESOURCE_DIFFENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PODRESOURCE_DETAILENTRY.has_options = True _PODRESOURCE_DETAILENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _NODE_CPUENTRY.has_options = True _NODE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _NODE_LABELSENTRY.has_options = True _NODE_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _NODE_INITCPUENTRY.has_options = True _NODE_INITCPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CONTAINER_CPUENTRY.has_options = True _CONTAINER_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CONTAINER_LABELSENTRY.has_options = True _CONTAINER_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CONTAINER_PUBLISHENTRY.has_options = True _CONTAINER_PUBLISHENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _ADDNODEOPTIONS_LABELSENTRY.has_options = True _ADDNODEOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_ENVSENTRY.has_options = True _BUILD_ENVSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_ARGSENTRY.has_options = True _BUILD_ARGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_LABELSENTRY.has_options = True _BUILD_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_ARTIFACTSENTRY.has_options = True _BUILD_ARTIFACTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_CACHEENTRY.has_options = True _BUILD_CACHEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILDS_BUILDSENTRY.has_options = True _BUILDS_BUILDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _LOGOPTIONS_CONFIGENTRY.has_options = True _LOGOPTIONS_CONFIGENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _ENTRYPOINTOPTIONS_SYSCTLSENTRY.has_options = True _ENTRYPOINTOPTIONS_SYSCTLSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_NETWORKSENTRY.has_options = True _DEPLOYOPTIONS_NETWORKSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_LABELSENTRY.has_options = True _DEPLOYOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_NODELABELSENTRY.has_options = True _DEPLOYOPTIONS_NODELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_DATAENTRY.has_options = True _DEPLOYOPTIONS_DATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _REPLACEOPTIONS_FILTERLABELSENTRY.has_options = True _REPLACEOPTIONS_FILTERLABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _REPLACEOPTIONS_COPYENTRY.has_options = True _REPLACEOPTIONS_COPYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _COPYOPTIONS_TARGETSENTRY.has_options = True _COPYOPTIONS_TARGETSENTRY._options = 
_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CREATECONTAINERMESSAGE_CPUENTRY.has_options = True _CREATECONTAINERMESSAGE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CREATECONTAINERMESSAGE_PUBLISHENTRY.has_options = True _CREATECONTAINERMESSAGE_PUBLISHENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CORERPC = _descriptor.ServiceDescriptor( name='CoreRPC', full_name='pb.CoreRPC', file=DESCRIPTOR, index=0, options=None, serialized_start=6969, serialized_end=8580, methods=[ _descriptor.MethodDescriptor( name='ListPods', full_name='pb.CoreRPC.ListPods', index=0, containing_service=None, input_type=_EMPTY, output_type=_PODS, options=None, ), _descriptor.MethodDescriptor( name='AddPod', full_name='pb.CoreRPC.AddPod', index=1, containing_service=None, input_type=_ADDPODOPTIONS, output_type=_POD, options=None, ), _descriptor.MethodDescriptor( name='RemovePod', full_name='pb.CoreRPC.RemovePod', index=2, containing_service=None, input_type=_REMOVEPODOPTIONS, output_type=_EMPTY, options=None, ), _descriptor.MethodDescriptor( name='GetPod', full_name='pb.CoreRPC.GetPod', index=3, containing_service=None, input_type=_GETPODOPTIONS, output_type=_POD, options=None, ), _descriptor.MethodDescriptor( name='GetPodResource', full_name='pb.CoreRPC.GetPodResource', index=4, containing_service=None, input_type=_GETPODOPTIONS, output_type=_PODRESOURCE, options=None, ), _descriptor.MethodDescriptor( name='AddNode', full_name='pb.CoreRPC.AddNode', index=5, containing_service=None, input_type=_ADDNODEOPTIONS, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='RemoveNode', full_name='pb.CoreRPC.RemoveNode', index=6, containing_service=None, input_type=_REMOVENODEOPTIONS, output_type=_POD, options=None, ), _descriptor.MethodDescriptor( name='SetNodeAvailable', full_name='pb.CoreRPC.SetNodeAvailable', index=7, containing_service=None, input_type=_NODEAVAILABLE, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='GetNode', full_name='pb.CoreRPC.GetNode', index=8, containing_service=None, input_type=_GETNODEOPTIONS, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='GetContainer', full_name='pb.CoreRPC.GetContainer', index=9, containing_service=None, input_type=_CONTAINERID, output_type=_CONTAINER, options=None, ), _descriptor.MethodDescriptor( name='GetContainers', full_name='pb.CoreRPC.GetContainers', index=10, containing_service=None, input_type=_CONTAINERIDS, output_type=_CONTAINERS, options=None, ), _descriptor.MethodDescriptor( name='GetNodeByName', full_name='pb.CoreRPC.GetNodeByName', index=11, containing_service=None, input_type=_GETNODEOPTIONS, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='ListPodNodes', full_name='pb.CoreRPC.ListPodNodes', index=12, containing_service=None, input_type=_LISTNODESOPTIONS, output_type=_NODES, options=None, ), _descriptor.MethodDescriptor( name='ListNetworks', full_name='pb.CoreRPC.ListNetworks', index=13, containing_service=None, input_type=_LISTNETWORKOPTIONS, output_type=_NETWORKS, options=None, ), _descriptor.MethodDescriptor( name='ListContainers', full_name='pb.CoreRPC.ListContainers', index=14, containing_service=None, input_type=_LISTCONTAINERSOPTIONS, output_type=_CONTAINERS, options=None, ), _descriptor.MethodDescriptor( name='ListNodeContainers', full_name='pb.CoreRPC.ListNodeContainers', index=15, containing_service=None, input_type=_GETNODEOPTIONS, 
output_type=_CONTAINERS, options=None, ), _descriptor.MethodDescriptor( name='ContainerDeployed', full_name='pb.CoreRPC.ContainerDeployed', index=16, containing_service=None, input_type=_CONTAINERDEPLOYEDOPTIONS, output_type=_EMPTY, options=None, ), _descriptor.MethodDescriptor( name='Copy', full_name='pb.CoreRPC.Copy', index=17, containing_service=None, input_type=_COPYOPTIONS, output_type=_COPYMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='BuildImage', full_name='pb.CoreRPC.BuildImage', index=18, containing_service=None, input_type=_BUILDIMAGEOPTIONS, output_type=_BUILDIMAGEMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='CacheImage', full_name='pb.CoreRPC.CacheImage', index=19, containing_service=None, input_type=_CACHEIMAGEOPTIONS, output_type=_CACHEIMAGEMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='RemoveImage', full_name='pb.CoreRPC.RemoveImage', index=20, containing_service=None, input_type=_REMOVEIMAGEOPTIONS, output_type=_REMOVEIMAGEMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='DeployStatus', full_name='pb.CoreRPC.DeployStatus', index=21, containing_service=None, input_type=_DEPLOYSTATUSOPTIONS, output_type=_DEPLOYSTATUSMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='RunAndWait', full_name='pb.CoreRPC.RunAndWait', index=22, containing_service=None, input_type=_RUNANDWAITOPTIONS, output_type=_RUNANDWAITMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='CreateContainer', full_name='pb.CoreRPC.CreateContainer', index=23, containing_service=None, input_type=_DEPLOYOPTIONS, output_type=_CREATECONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='ReplaceContainer', full_name='pb.CoreRPC.ReplaceContainer', index=24, containing_service=None, input_type=_REPLACEOPTIONS, output_type=_REPLACECONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='RemoveContainer', full_name='pb.CoreRPC.RemoveContainer', index=25, containing_service=None, input_type=_REMOVECONTAINEROPTIONS, output_type=_REMOVECONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='ControlContainer', full_name='pb.CoreRPC.ControlContainer', index=26, containing_service=None, input_type=_CONTROLCONTAINEROPTIONS, output_type=_CONTROLCONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='ReallocResource', full_name='pb.CoreRPC.ReallocResource', index=27, containing_service=None, input_type=_REALLOCOPTIONS, output_type=_REALLOCRESOURCEMESSAGE, options=None, ), ]) _sym_db.RegisterServiceDescriptor(_CORERPC) DESCRIPTOR.services_by_name['CoreRPC'] = _CORERPC # @@protoc_insertion_point(module_scope)
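
The content above is the stock protoc output for core.proto: it only builds descriptors, registers the generated message classes with the symbol database, and registers the CoreRPC service descriptor. As a quick orientation aid (an illustrative sketch, not part of the generated file), the snippet below shows how those registered classes might be used. It assumes this module is importable as core_pb2; the field names used (error, hook) are taken from the ControlContainerMessage descriptor above, and everything else is the stock protobuf Python API.

# Illustrative sketch only -- not part of the generated core_pb2.py.
# Assumes core_pb2 (this module) is importable from the Python path.
import core_pb2 as pb

# Round-trip a message whose fields are visible in the descriptors above:
# ControlContainerMessage declares a string field 'error' and a bytes field 'hook'.
msg = pb.ControlContainerMessage()
msg.error = "hook failed"
msg.hook = b"stdout captured from the hook"
wire = msg.SerializeToString()          # serialize to wire-format bytes

parsed = pb.ControlContainerMessage()
parsed.ParseFromString(wire)            # parse back from the bytes
assert parsed.error == "hook failed"

# The CoreRPC service descriptor registered above can be inspected directly;
# the callable RPC stubs themselves would normally live in a separate
# core_pb2_grpc.py produced by the gRPC protoc plugin.
service = pb.DESCRIPTOR.services_by_name['CoreRPC']
print([m.name for m in service.methods])  # e.g. ['ListPods', 'AddPod', 'RemovePod', ...]
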
[((13, 10, 13, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((28, 9, 49, 1), 'google.protobuf.descriptor.Descriptor', '_descriptor.Descriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4632, 72, 4632, 103), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4634, 59, 4634, 90), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4636, 62, 4636, 93), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4638, 60, 4638, 91), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4640, 62, 4640, 93), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4642, 52, 4642, 83), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4644, 55, 4644, 86), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4646, 56, 4646, 87), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4648, 57, 4648, 88), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4650, 60, 4650, 91), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4652, 61, 4652, 92), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4654, 65, 4654, 96), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4656, 54, 4656, 85), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4658, 54, 4658, 85), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4660, 56, 4660, 87), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4662, 59, 4662, 90), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4664, 55, 4664, 86), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4666, 57, 4666, 88), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4668, 61, 4668, 92), 
'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4670, 69, 4670, 100), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4672, 66, 4672, 97), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4674, 64, 4674, 95), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4676, 68, 4676, 99), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4678, 62, 4678, 93), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4680, 71, 4680, 102), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4682, 63, 4682, 94), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4684, 63, 4684, 94), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4686, 70, 4686, 101), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((4688, 74, 4688, 105), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((79, 36, 79, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((117, 4, 123, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((304, 4, 310, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((355, 36, 355, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((392, 36, 392, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((416, 4, 422, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((429, 36, 429, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((466, 36, 466, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((490, 4, 496, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((497, 4, 503, 36), 'google.protobuf.descriptor.FieldDescriptor', 
'_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((504, 4, 510, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((511, 4, 517, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((587, 4, 593, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((618, 4, 624, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((656, 4, 662, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((669, 36, 669, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((706, 36, 706, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((730, 4, 736, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((743, 36, 743, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((781, 4, 787, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((795, 4, 801, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((802, 4, 808, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((809, 4, 815, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((816, 4, 822, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((823, 4, 829, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((830, 4, 836, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((868, 4, 874, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((913, 4, 919, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((951, 4, 957, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((964, 36, 964, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1001, 36, 
1001, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1038, 36, 1038, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1083, 4, 1089, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1097, 4, 1103, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1104, 4, 1110, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1111, 4, 1117, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1118, 4, 1124, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1229, 4, 1235, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1291, 4, 1297, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1322, 4, 1328, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1329, 4, 1335, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1360, 4, 1366, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1374, 4, 1380, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1532, 36, 1532, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1591, 4, 1597, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1598, 4, 1604, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1605, 4, 1611, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1612, 4, 1618, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1726, 4, 1732, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1777, 36, 1777, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1814, 36, 1814, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 
'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1851, 36, 1851, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1888, 36, 1888, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1925, 36, 1925, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((1970, 4, 1976, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1977, 4, 1983, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1984, 4, 1990, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1991, 4, 1997, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((1998, 4, 2004, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2005, 4, 2011, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2012, 4, 2018, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2050, 4, 2056, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2063, 36, 2063, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2080, 4, 2086, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2087, 4, 2093, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2132, 4, 2138, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2139, 4, 2145, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2146, 4, 2152, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2184, 4, 2190, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2191, 4, 2197, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2198, 4, 2204, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import 
descriptor as _descriptor\n'), ((2229, 4, 2235, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2250, 4, 2256, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2301, 36, 2301, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2325, 4, 2331, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2376, 36, 2376, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2407, 4, 2413, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2421, 4, 2427, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2428, 4, 2434, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2435, 4, 2441, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2442, 4, 2448, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2456, 4, 2462, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2507, 36, 2507, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2544, 36, 2544, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2581, 36, 2581, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2618, 36, 2618, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2642, 4, 2648, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2684, 4, 2690, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2691, 4, 2697, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2698, 4, 2704, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2705, 4, 2711, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2712, 4, 2718, 36), 
'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2719, 4, 2725, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2726, 4, 2732, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2747, 4, 2753, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2754, 4, 2760, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2761, 4, 2767, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2768, 4, 2774, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2782, 4, 2788, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2789, 4, 2795, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2796, 4, 2802, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2847, 36, 2847, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2884, 36, 2884, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((2901, 4, 2907, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2908, 4, 2914, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2915, 4, 2921, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2922, 4, 2928, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2929, 4, 2935, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2936, 4, 2942, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2981, 4, 2987, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((2988, 4, 2994, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3033, 4, 3039, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', 
True, 'from google.protobuf import descriptor as _descriptor\n'), ((3040, 4, 3046, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3047, 4, 3053, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3078, 4, 3084, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3116, 4, 3122, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3129, 36, 3129, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((3146, 4, 3152, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3177, 4, 3183, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3250, 4, 3256, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3288, 4, 3294, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3301, 36, 3301, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((3338, 36, 3338, 67), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ({}, {}), '()', False, 'from google.protobuf import descriptor_pb2\n'), ((3390, 4, 3396, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3397, 4, 3403, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3411, 4, 3417, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3418, 4, 3424, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3456, 4, 3462, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3463, 4, 3469, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3546, 4, 3552, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3598, 4, 3604, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3605, 4, 3611, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3643, 4, 3649, 
36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3688, 4, 3694, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3785, 4, 3791, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((3823, 4, 3829, 36), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4699, 2, 4707, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4708, 2, 4716, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4717, 2, 4725, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4726, 2, 4734, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4735, 2, 4743, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4744, 2, 4752, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4753, 2, 4761, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4762, 2, 4770, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4771, 2, 4779, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4780, 2, 4788, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4789, 2, 4797, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4798, 2, 4806, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4807, 2, 4815, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4816, 2, 4824, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4825, 2, 4833, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4834, 2, 4842, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4843, 2, 4851, 3), 'google.protobuf.descriptor.MethodDescriptor', 
'_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4852, 2, 4860, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4861, 2, 4869, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4870, 2, 4878, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4879, 2, 4887, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4888, 2, 4896, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4897, 2, 4905, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4906, 2, 4914, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4915, 2, 4923, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4924, 2, 4932, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4933, 2, 4941, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((4942, 2, 4950, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n')]
moevm/nosql1h19-text-graph
src/models/text_node.py
410f156ad4f232f8aa060d43692ab020610ddfd4
from neomodel import StructuredNode, StringProperty, JSONProperty, \
        Relationship, IntegerProperty
import numpy as np
import re

from models.text_relation import TextRelation


__all__ = ['TextNode']


class TextNode(StructuredNode):
    """A text fragment stored as a graph node; fragments are linked to
    each other through the ALG relationship (TextRelation)."""

    order_id = IntegerProperty(required=True, unique_index=True)
    label = StringProperty(required=True)
    text = StringProperty(required=True)
    alg_results = JSONProperty()

    link = Relationship('TextNode', 'ALG', model=TextRelation)

    def short(self):
        """Return the first five words of the fragment's text."""
        res = ''.join([word.strip() + ' '
                       for word in re.split(r'[\n ]', self.text, 5)[:5]])
        return res

    def describe(self):
        """Return an HTML summary table for this fragment
        (character, word, sentence and relation counts; headings in Russian)."""
        return f"""
            <h1>Фрагмент: {self.order_id} </h1>
            <table border="1" width=100%>
                <caption>
                    Информация о вершине
                </caption>
                <tr>
                    <th>Количество символов</th>
                    <td>{self.character_num()}</td>
                </tr>
                <tr>
                    <th>Количество слов</th>
                    <td>{self.words_num()}</td>
                </tr>
                <tr>
                    <th>Количество предложений</th>
                    <td>{self.sentences_num()}</td>
                </tr>
                <tr>
                    <th>Количество связей</th>
                    <td>{len(self.link)}</td>
                </tr>
            </table>
        """

    def preview(self, frag_num=0):
        """Return a one-line preview, zero-padding the fragment number so
        previews align when listed together."""
        leading = 3
        if frag_num > 0:
            # Pad to the number of digits in the largest fragment number.
            leading = int(np.floor(np.log10(frag_num))) + 1
        return f"{str(self.order_id).zfill(leading)}: " \
               + f"[{self.label}] {self.short()}..."

    def words_num(self):
        return len(self.text.split())

    def character_num(self):
        return len(self.text)

    def sentences_num(self):
        # Count '.'-separated chunks, ignoring fragments of one or two characters.
        return len([s for s in self.text.split('.') if len(s) > 2])
[((13, 15, 13, 64), 'neomodel.IntegerProperty', 'IntegerProperty', (), '', False, 'from neomodel import StructuredNode, StringProperty, JSONProperty, Relationship, IntegerProperty\n'), ((14, 12, 14, 41), 'neomodel.StringProperty', 'StringProperty', (), '', False, 'from neomodel import StructuredNode, StringProperty, JSONProperty, Relationship, IntegerProperty\n'), ((15, 11, 15, 40), 'neomodel.StringProperty', 'StringProperty', (), '', False, 'from neomodel import StructuredNode, StringProperty, JSONProperty, Relationship, IntegerProperty\n'), ((16, 18, 16, 32), 'neomodel.JSONProperty', 'JSONProperty', ({}, {}), '()', False, 'from neomodel import StructuredNode, StringProperty, JSONProperty, Relationship, IntegerProperty\n'), ((17, 11, 17, 62), 'neomodel.Relationship', 'Relationship', (), '', False, 'from neomodel import StructuredNode, StringProperty, JSONProperty, Relationship, IntegerProperty\n'), ((21, 35, 21, 67), 're.split', 're.split', ({(21, 44, 21, 52): '"""[\\\\n ]"""', (21, 54, 21, 63): 'self.text', (21, 65, 21, 66): '5'}, {}), "('[\\\\n ]', self.text, 5)", False, 'import re\n'), ((53, 35, 53, 53), 'numpy.log10', 'np.log10', ({(53, 44, 53, 52): 'frag_num'}, {}), '(frag_num)', True, 'import numpy as np\n')]
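A minimal usage sketch for the TextNode model above — the connection URL, credentials, sample texts, and import path are placeholders/assumptions (a running Neo4j instance, the project's src/ directory on sys.path, and a TextRelation with no required properties on connect()):

from neomodel import config

from models.text_node import TextNode

# Placeholder credentials — point this at your own Neo4j instance.
config.DATABASE_URL = 'bolt://neo4j:password@localhost:7687'

# Create two fragment nodes and persist them.
first = TextNode(order_id=1, label='1', text='First fragment. Some text.').save()
second = TextNode(order_id=2, label='2', text='Second fragment. More text.').save()

# Link them through the ALG relationship; pass a properties dict here
# if TextRelation (not shown in this record) defines required fields.
first.link.connect(second)

print(first.preview(frag_num=2))   # -> "1: [1] First fragment. Some text. ..."
print(first.words_num(), first.sentences_num(), first.character_num())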
otaviocarvalho/chess-negamax
tests/test_bishop_generate.py
21f1066611e581dac3257d3f46c71ca2b09b5964
import unittest from .helpers import StubBoard, StubPiece, C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team, position): from chess.models import Bishop return Bishop(board, team, position) def compare_list(self, expected, results): compared = [] for e in expected: for r in results: if e[0] == r[0] and e[1] == r[1]: compared.append(True) break else: compared.append(False) return compared def test_generate_topright(self): board = StubBoard() board[C('h7')] = StubPiece(board, BLACK, C('h7')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('f5'), C('g6'), C('h7')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self): board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('d5')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('c6')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board = StubBoard() board[C('c2')] = StubPiece(board, BLACK, C('c2')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('d3'), C('c2')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('b1')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('f3'), C('g2'), C('h1')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self): board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 13) board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 10) if __name__ == '__main__': unittest.main()
[((86, 4, 86, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((7, 15, 7, 44), 'chess.models.Bishop', 'Bishop', ({(7, 22, 7, 27): 'board', (7, 29, 7, 33): 'team', (7, 35, 7, 43): 'position'}, {}), '(board, team, position)', False, 'from chess.models import Bishop\n')]
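A small sketch of how the bishop tests above might be run programmatically — it assumes the run happens from the chess-negamax repository root and that tests/ is an importable package containing the .helpers module the tests rely on:

# Equivalent to `python -m unittest tests.test_bishop_generate -v`
# executed from the repository root.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName('tests.test_bishop_generate')
unittest.TextTestRunner(verbosity=2).run(suite)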