Dataset columns (each row below lists its fields in this order):
- query: string, lengths 9 to 3.4k
- document: string, lengths 9 to 87.4k
- metadata: dict
- negatives: sequence, lengths 4 to 101
- negative_scores: sequence, lengths 4 to 101
- document_score: string, lengths 3 to 10
- document_rank: string, 102 distinct classes
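The rows shown below can be consumed directly with the `datasets` library. This is a minimal loading sketch under stated assumptions: the repository id is a placeholder (the dataset's actual location is not given here), and a single `train` split is assumed.

# Minimal loading sketch -- the repository id below is a placeholder, not the
# actual location of this dataset.
from datasets import load_dataset

ds = load_dataset("your-namespace/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                  # natural-language description of the code
print(row["document"][:200])         # the matching code snippet (positive)
print(len(row["negatives"]))         # number of mined negative snippets
print(row["document_score"], row["document_rank"])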
Transforms message into PlatformMessage object
def parse(cls, message):
    if isinstance(message, PlatformMessage):
        inst = PlatformMessage.parse(message.serialize())
        return inst
    inst = PlatformMessage()
    if message is not None:
        assert isinstance(message, (list, tuple)), "Message is expected to be a list or a tuple"
        assert len(message) >= 4, "Message's length expected to be at least 4"
        assert message[0] == PlatformMessage._signature, "Message's signature is incorrect"
        inst.sender = message[1]
        inst.interface = message[2]
        inst.method = message[3]
        if len(message) > 4:
            assert isinstance(message[4], (list, tuple)), "Message's args expected to be list or tuple"
            inst.args = copy.deepcopy(message[4])
        if len(message) > 5:
            assert isinstance(message[5], dict), "Message's kwargs expected to be a dict"
            inst.kwargs = copy.deepcopy(message[5])
    return inst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def message_to_python(self, raw_message):\n return self.Message(self, raw_message)", "def get_interface(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.interface\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n return message[2]\r\n return None", "def _ConvertGenericMessage(self, value, message, path):\n # Duration, Timestamp, FieldMask have a FromJsonString method to do the\n # conversion. Users can also call the method directly.\n try:\n message.FromJsonString(value)\n except ValueError as e:\n raise ParseError('{0} at {1}'.format(e, path))", "def message_to_type(raw: bytes) -> Message:\n try:\n return __type_to_message[raw[0]]\n except KeyError:\n raise InvalidType()", "def from_message(cls, template_message):\n new_equivalent_message = cls(\n template_message.env,\n template_message.source,\n template_message.destination,\n template_message.size_bytes,\n template_message.message_type,\n template_message.data)\n return new_equivalent_message", "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def get_args(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.args\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n if len(message) > 4:\r\n return copy.deepcopy(message[4])\r\n else:\r\n return None\r\n return None", "def ConvertMessage(self, value, message, path):\n self.recursion_depth += 1\n if self.recursion_depth > self.max_recursion_depth:\n raise ParseError('Message too deep. 
Max recursion depth is {0}'.format(\n self.max_recursion_depth))\n message_descriptor = message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if not path:\n path = message_descriptor.name\n if _IsWrapperMessage(message_descriptor):\n self._ConvertWrapperMessage(value, message, path)\n elif full_name in _WKTJSONMETHODS:\n methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self)\n else:\n self._ConvertFieldValuePair(value, message, path)\n self.recursion_depth -= 1", "def value_from_message(self, message):\n if not isinstance(message, self.message_type):\n raise DecodeError('Expected type %s, got %s: %r' %\n (self.message_type.__name__,\n type(message).__name__,\n message))\n return message", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def from_handover_message(cls, msg):\n return cls.from_items(msg.to_match().items())", "def unpack(self, raw_message):\n return self._msg_struct.unpack(raw_message)", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def getNativeMessage(self) -> unicode:\n ...", "def process_message(self, message: Message[TPayload]) -> Optional[TResult]:\n pass", "def _on_platforms_messsage(self, peer, sender, bus, topic, headers,\n message):\n topicsplit = topic.split('/')\n if len(topicsplit) < 2:\n _log.error('Invalid topic length published to volttron central')\n return\n\n # Topic is platforms/<platform_uuid>/otherdata\n topicsplit = topic.split('/')\n\n if len(topicsplit) < 3:\n _log.warn(\"Invalid topic length no operation or datatype.\")\n return\n\n _, platform_uuid, op_or_datatype, other = topicsplit[0], \\\n topicsplit[1], \\\n topicsplit[2], topicsplit[3:]\n\n if len(platform_uuid) != 36:\n _log.error('Invalid platform id detected {}'\n .format(platform_uuid))\n return\n\n platform = self._registered_platforms.get(platform_uuid)\n if platform is None:\n _log.warn('Platform {} is not registered but sent message {}'\n .format(platform_uuid, message))\n return\n\n _log.debug('Doing operation: {}'.format(op_or_datatype))\n _log.debug('Topic was: {}'.format(topic))\n _log.debug('Message was: {}'.format(message))\n\n if op_or_datatype == 'devices':\n md5hash = message.get('md5hash')\n if md5hash is None:\n _log.error('Invalid topic for devices datatype. 
Must contain '\n 'md5hash in message.')\n if message['md5hash'] not in self._hash_to_topic:\n devices = platform.get(\"devices\", {})\n lookup_topic = '/'.join(other)\n _log.debug(\"Lookup topic is: {}\".format(lookup_topic))\n vcp = self._get_connection(platform_uuid)\n device_node = vcp.call(\"get_device\", lookup_topic)\n if device_node is not None:\n devices[lookup_topic] = device_node\n self._hash_to_topic[md5hash] = lookup_topic\n else:\n _log.error(\"Couldn't retrive device topic {} from platform \"\n \"{}\".format(lookup_topic, platform_uuid))", "def transformMessage(self):\n\n message = json.loads(self.message)\n\n call_data = {\n 'call_id': message.get('call_id')\n }\n\n if message.get('type') == 'start':\n call_data['start_timestamp'] = message.get('timestamp')\n call_data['source'] = message.get('source')\n call_data['destination'] = message.get('destination')\n else:\n call_data['stop_timestamp'] = message.get('timestamp')\n\n self.data = call_data\n return self.data", "def _AnyMessageToJsonObject(self, message):\n if not message.ListFields():\n return {}\n # Must print @type first, use OrderedDict instead of {}\n js = OrderedDict()\n type_url = message.type_url\n js['@type'] = type_url\n sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)\n sub_message.ParseFromString(message.value)\n message_descriptor = sub_message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n js['value'] = self._WrapperMessageToJsonObject(sub_message)\n return js\n if full_name in _WKTJSONMETHODS:\n js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],\n sub_message)(self)\n return js\n return self._RegularMessageToJsonObject(sub_message, js)", "def _process_message(self, obj):\n pass", "def parse_message(self, message):\n pass", "def _transform_message(self, message):\n serialized = ev_envelope.serialize_envelope(message)\n return encodeutils.safe_encode(serialized, 'utf-8')", "def handle_message(self, message):", "def FromRpcMessage(self, message):\n self.content = message.content\n self.completed = message.completed", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def _ConvertAnyMessage(self, value, message, path):\n if isinstance(value, dict) and not value:\n return\n try:\n type_url = value['@type']\n except KeyError:\n raise ParseError(\n '@type is missing when parsing any message at {0}'.format(path))\n\n try:\n sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)\n except TypeError as e:\n raise ParseError('{0} at {1}'.format(e, path))\n message_descriptor = sub_message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n self._ConvertWrapperMessage(value['value'], sub_message,\n '{0}.value'.format(path))\n elif full_name in _WKTJSONMETHODS:\n methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message,\n '{0}.value'.format(path))(\n self)\n else:\n del value['@type']\n self._ConvertFieldValuePair(value, sub_message, path)\n value['@type'] = type_url\n # Sets Any message\n message.value = sub_message.SerializeToString()\n message.type_url = type_url", "def _GenericMessageToJsonObject(self, message):\n # Duration, Timestamp and FieldMask have ToJsonString method to do the\n # convert. 
Users can also call the method directly.\n return message.ToJsonString()", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def validate_new_message(payload):\n serialized_message = json.loads(payload)\n message = Message.make(serialized_message)\n print(\"Message ({0}) contents: {1}\".format(message.type, message))\n return message", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def _ConvertValueMessage(self, value, message, path):\n if isinstance(value, dict):\n self._ConvertStructMessage(value, message.struct_value, path)\n elif isinstance(value, list):\n self._ConvertListValueMessage(value, message.list_value, path)\n elif value is None:\n message.null_value = 0\n elif isinstance(value, bool):\n message.bool_value = value\n elif isinstance(value, str):\n message.string_value = value\n elif isinstance(value, _INT_OR_FLOAT):\n message.number_value = value\n else:\n raise ParseError('Value {0} has unexpected type {1} at {2}'.format(\n value, type(value), path))", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def from_msg(cls, msg):\n if cls._debug:\n log.debug('msg=%s', msg)\n key, seq_s, uuid, prop_s, body = msg\n key = key if key else None\n seq = struct.unpack('!q', seq_s)[0]\n body = body if body else None\n if body:\n body = pipeline.load(body)\n #body = json.loads(body_s)\n #prop = json.loads(prop_s)\n prop = pipeline.load(prop_s)\n return cls(seq, uuid=uuid, key=key, properties=prop, body=body)", "def decode(obj: bytes) -> Message:\n signing_pb = signing_pb2.SigningMessage()\n signing_pb.ParseFromString(obj)\n message_id = signing_pb.message_id\n dialogue_reference = (\n signing_pb.dialogue_starter_reference,\n signing_pb.dialogue_responder_reference,\n )\n target = signing_pb.target\n\n performative = signing_pb.WhichOneof(\"performative\")\n performative_id = SigningMessage.Performative(str(performative))\n performative_content = dict() # type: Dict[str, Any]\n if performative_id == SigningMessage.Performative.SIGN_TRANSACTION:\n skill_callback_ids = signing_pb.sign_transaction.skill_callback_ids\n skill_callback_ids_tuple = tuple(skill_callback_ids)\n performative_content[\"skill_callback_ids\"] = skill_callback_ids_tuple\n skill_callback_info = signing_pb.sign_transaction.skill_callback_info\n skill_callback_info_dict = dict(skill_callback_info)\n performative_content[\"skill_callback_info\"] = skill_callback_info_dict\n pb2_terms = signing_pb.sign_transaction.terms\n terms = Terms.decode(pb2_terms)\n performative_content[\"terms\"] = terms\n pb2_raw_transaction = signing_pb.sign_transaction.raw_transaction\n raw_transaction = RawTransaction.decode(pb2_raw_transaction)\n performative_content[\"raw_transaction\"] = raw_transaction\n elif performative_id == SigningMessage.Performative.SIGN_MESSAGE:\n skill_callback_ids = signing_pb.sign_message.skill_callback_ids\n skill_callback_ids_tuple = tuple(skill_callback_ids)\n performative_content[\"skill_callback_ids\"] = skill_callback_ids_tuple\n skill_callback_info = signing_pb.sign_message.skill_callback_info\n skill_callback_info_dict = 
dict(skill_callback_info)\n performative_content[\"skill_callback_info\"] = skill_callback_info_dict\n pb2_terms = signing_pb.sign_message.terms\n terms = Terms.decode(pb2_terms)\n performative_content[\"terms\"] = terms\n pb2_raw_message = signing_pb.sign_message.raw_message\n raw_message = RawMessage.decode(pb2_raw_message)\n performative_content[\"raw_message\"] = raw_message\n elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION:\n skill_callback_ids = signing_pb.signed_transaction.skill_callback_ids\n skill_callback_ids_tuple = tuple(skill_callback_ids)\n performative_content[\"skill_callback_ids\"] = skill_callback_ids_tuple\n skill_callback_info = signing_pb.signed_transaction.skill_callback_info\n skill_callback_info_dict = dict(skill_callback_info)\n performative_content[\"skill_callback_info\"] = skill_callback_info_dict\n pb2_signed_transaction = signing_pb.signed_transaction.signed_transaction\n signed_transaction = SignedTransaction.decode(pb2_signed_transaction)\n performative_content[\"signed_transaction\"] = signed_transaction\n elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE:\n skill_callback_ids = signing_pb.signed_message.skill_callback_ids\n skill_callback_ids_tuple = tuple(skill_callback_ids)\n performative_content[\"skill_callback_ids\"] = skill_callback_ids_tuple\n skill_callback_info = signing_pb.signed_message.skill_callback_info\n skill_callback_info_dict = dict(skill_callback_info)\n performative_content[\"skill_callback_info\"] = skill_callback_info_dict\n pb2_signed_message = signing_pb.signed_message.signed_message\n signed_message = SignedMessage.decode(pb2_signed_message)\n performative_content[\"signed_message\"] = signed_message\n elif performative_id == SigningMessage.Performative.ERROR:\n skill_callback_ids = signing_pb.error.skill_callback_ids\n skill_callback_ids_tuple = tuple(skill_callback_ids)\n performative_content[\"skill_callback_ids\"] = skill_callback_ids_tuple\n skill_callback_info = signing_pb.error.skill_callback_info\n skill_callback_info_dict = dict(skill_callback_info)\n performative_content[\"skill_callback_info\"] = skill_callback_info_dict\n pb2_error_code = signing_pb.error.error_code\n error_code = ErrorCode.decode(pb2_error_code)\n performative_content[\"error_code\"] = error_code\n else:\n raise ValueError(\"Performative not valid: {}.\".format(performative_id))\n\n return SigningMessage(\n message_id=message_id,\n dialogue_reference=dialogue_reference,\n target=target,\n performative=performative,\n **performative_content\n )", "def _ConvertWrapperMessage(self, value, message, path):\n field = message.DESCRIPTOR.fields_by_name['value']\n setattr(\n message, 'value',\n _ConvertScalarFieldValue(value, field, path='{0}.value'.format(path)))", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. 
Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)", "def _create_message_from_packed_message(\n packed_message,\n envelope,\n force_payload_decoding,\n kafka_position_info=None,\n reader_schema_id=None\n):\n unpacked_message = envelope.unpack(packed_message.value)\n message_class = _message_type_to_class_map[unpacked_message['message_type']]\n message = message_class.create_from_unpacked_message(\n unpacked_message=unpacked_message,\n kafka_position_info=kafka_position_info,\n reader_schema_id=reader_schema_id\n )\n if force_payload_decoding:\n # Access the cached, but lazily-calculated, properties\n message.reload_data()\n return message", "def convert(self,message):\n \n content_type = message.get('content',{}).get('@type','')\n if content_type in self.supported:\n result = getattr(self.tconv, content_type)(message)\n else:\n return False\n \n return result", "def __message_content__(self) -> MessageContent:", "def handle_message(self, validated_message: dict):\n pass", "def create_message(message):\n return {\n \"id\": message.id,\n \"from\": message.sender,\n \"preview\": create_preview(message),\n \"subject\": message.subject,\n \"date\": message.date_created,\n }", "def _unwrap(self, msg):\n return msg['content']['data']", "def message_to_dict(message):\n return json.loads(protojson.encode_message(message))", "def _StructMessageToJsonObject(self, message):\n fields = message.fields\n ret = {}\n for key in fields:\n ret[key] = self._ValueMessageToJsonObject(fields[key])\n return ret", "def message(self, *args, **kwargs) -> Message:\n return Message(self.handle, *args, **kwargs)", "def _parse_message(self, string, protocol):\n #print(\"Parsing message: %s\" % string)\n msg = parse_message_string(string)\n result = MessageResult(original_message=msg)\n\n if isinstance(msg, MethodCallMessage):\n # Handle method call\n res = self._method_call(msg)\n response_msg = ResponseMessage(result_code=0, result=res, response_to=msg.id)\n result.response = create_message_string(response_msg)\n elif isinstance(msg, SubscribeMessage):\n # Handle subscription to event\n response_msg = ResponseMessage(result_code=0, result=None, response_to=msg.id)\n result.response = create_message_string(response_msg)\n else:\n raise MessageHandleError(MessageHandleError.RESULT_UNEXPECTED_MESSAGE, msg)\n\n return result", "def br_msg_to(msg_body):\r\n msg = Message()\r\n msg.body = msg_body\r\n msg.set_metadata(\"performative\", \"inform\")\r\n return msg", "def message_to_dict(message):\n return json.loads(protojson.encode_message(message))", "def _get_message_from_proto(self, message) -> dict: \n result = { 'transcript' : '' , 'confidence' : 0.0 }\n try: \n result = MessageToDict(message._pb)['results'][0]['alternatives'][0]\n except:\n result['transcript'] = ''\n result['confidence'] = 0.0\n\n return result", "def new_message(self):\n msg = {}\n msg['data'] = []\n msg['type'] = self.plugin_type\n msg['source'] = self.name\n msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()\n return msg", "def decode(obj: bytes) -> Message:\n message_pb = ProtobufMessage()\n ml_trade_pb = ml_trade_pb2.MlTradeMessage()\n message_pb.ParseFromString(obj)\n message_id = message_pb.dialogue_message.message_id\n dialogue_reference 
= (\n message_pb.dialogue_message.dialogue_starter_reference,\n message_pb.dialogue_message.dialogue_responder_reference,\n )\n target = message_pb.dialogue_message.target\n\n ml_trade_pb.ParseFromString(message_pb.dialogue_message.content)\n performative = ml_trade_pb.WhichOneof(\"performative\")\n performative_id = MlTradeMessage.Performative(str(performative))\n performative_content = {} # type: Dict[str, Any]\n if performative_id == MlTradeMessage.Performative.CFP:\n pb2_query = ml_trade_pb.cfp.query\n query = Query.decode(pb2_query)\n performative_content[\"query\"] = query\n elif performative_id == MlTradeMessage.Performative.TERMS:\n pb2_terms = ml_trade_pb.terms.terms\n terms = Description.decode(pb2_terms)\n performative_content[\"terms\"] = terms\n elif performative_id == MlTradeMessage.Performative.ACCEPT:\n pb2_terms = ml_trade_pb.accept.terms\n terms = Description.decode(pb2_terms)\n performative_content[\"terms\"] = terms\n tx_digest = ml_trade_pb.accept.tx_digest\n performative_content[\"tx_digest\"] = tx_digest\n elif performative_id == MlTradeMessage.Performative.DATA:\n pb2_terms = ml_trade_pb.data.terms\n terms = Description.decode(pb2_terms)\n performative_content[\"terms\"] = terms\n payload = ml_trade_pb.data.payload\n performative_content[\"payload\"] = payload\n else:\n raise ValueError(\"Performative not valid: {}.\".format(performative_id))\n\n return MlTradeMessage(\n message_id=message_id,\n dialogue_reference=dialogue_reference,\n target=target,\n performative=performative,\n **performative_content\n )", "def _process_msg(cls, msg):\n raise NotImplementedError", "def _construct_message(self):\n self.message = {\"token\": self._auth, \"channel\": self.channel}\n super()._construct_message()", "def handleMessage(msg):", "def encode(msg: Message) -> bytes:\n msg = cast(MlTradeMessage, msg)\n message_pb = ProtobufMessage()\n dialogue_message_pb = DialogueMessage()\n ml_trade_msg = ml_trade_pb2.MlTradeMessage()\n\n dialogue_message_pb.message_id = msg.message_id\n dialogue_reference = msg.dialogue_reference\n dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]\n dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]\n dialogue_message_pb.target = msg.target\n\n performative_id = msg.performative\n if performative_id == MlTradeMessage.Performative.CFP:\n performative = ml_trade_pb2.MlTradeMessage.Cfp_Performative() # type: ignore\n query = msg.query\n Query.encode(performative.query, query)\n ml_trade_msg.cfp.CopyFrom(performative)\n elif performative_id == MlTradeMessage.Performative.TERMS:\n performative = ml_trade_pb2.MlTradeMessage.Terms_Performative() # type: ignore\n terms = msg.terms\n Description.encode(performative.terms, terms)\n ml_trade_msg.terms.CopyFrom(performative)\n elif performative_id == MlTradeMessage.Performative.ACCEPT:\n performative = ml_trade_pb2.MlTradeMessage.Accept_Performative() # type: ignore\n terms = msg.terms\n Description.encode(performative.terms, terms)\n tx_digest = msg.tx_digest\n performative.tx_digest = tx_digest\n ml_trade_msg.accept.CopyFrom(performative)\n elif performative_id == MlTradeMessage.Performative.DATA:\n performative = ml_trade_pb2.MlTradeMessage.Data_Performative() # type: ignore\n terms = msg.terms\n Description.encode(performative.terms, terms)\n payload = msg.payload\n performative.payload = payload\n ml_trade_msg.data.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n dialogue_message_pb.content = 
ml_trade_msg.SerializeToString()\n\n message_pb.dialogue_message.CopyFrom(dialogue_message_pb)\n message_bytes = message_pb.SerializeToString()\n return message_bytes", "def process_message(message: Message) -> spacy.tokens.doc.Doc:\n return spacy_pipeline()(str(message))", "def parse(self, message: Message):\n\t\tpass", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result = msg.deserialize(str_msg)\n return result", "def __parse_message_as(msg_type: type, msg_str: str) -> Any:\n # parse the message\n msg_dict = json.loads(msg_str)\n\n # the type specified in the message needs to match\n # the type we are parsing as\n assert msg_dict[MSG_TYPE_NAME] == msg_type.__name__, \\\n f\"Message type did not match the parsing type,\" \\\n f\"parsing the message as type {msg_type.__name__},\" \\\n f\"but get a message of type {msg_dict[MSG_TYPE_NAME]}\"\n\n # remove the message type information, and create the object\n del msg_dict[MSG_TYPE_NAME]\n return msg_type(**msg_dict)", "def get_localized_message(message, user_locale):\r\n if isinstance(message, Message):\r\n if user_locale:\r\n message.locale = user_locale\r\n return unicode(message)\r\n else:\r\n return message", "def get_message(self) -> Union[\"Message\", None]:\n raw_data = (\n self.raw_data.get(\"message\") or\n self.raw_data.get(\"edited_message\")\n )\n\n if raw_data:\n return Message(raw_data)\n\n return None", "def __format_message(message, as_json=False):\n formatted_message = None\n if as_json:\n formatted_message = json_format.MessageToJson(message, sort_keys=True)\n else:\n formatted_message = text_format.MessageToString(message)\n return formatted_message", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def value_to_message(self, value):\n if not isinstance(value, self.type):\n raise EncodeError('Expected type %s, got %s: %r' %\n (self.type.__name__,\n type(value).__name__,\n value))\n return value", "def raw_message(self) -> CustomRawMessage:\n enforce(self.is_set(\"raw_message\"), \"'raw_message' content is not set.\")\n return cast(CustomRawMessage, self.get(\"raw_message\"))", "def parse(msg):\n msg = msg.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\b\", \"\")\n pseudo = user_account = ip = msg_type = content = target = \"\"\n msg_parsed = message_regex.search(msg)\n if msg_parsed:\n data = msg_parsed.groups()\n if len(data) >= 6:\n pseudo = data[0]\n user_account = data[1]\n ip = data[2]\n msg_type = data[3]\n target = data[4]\n content = data[5]\n if target.startswith(\"#\") and msg_type == \"PRIVMSG\":\n msg_type = \"PUBMSG\"\n return Message(pseudo, user_account, ip, msg_type, content, target)", "def ParseMessage(message: pubsub_v1.types.PubsubMessage,\n path_type: dicom_path.Type) -> ParsedMessage:\n input_path_str = message.data.decode()\n # Support both 'True' and 'true' for user convenience with manual invocation.\n test_attr = (message.attributes.get('test') in ['True', 'true'])\n conflict_attr = message.attributes.get('conflict')\n if conflict_attr and conflict_attr not in CONFLICT_MAP:\n raise exception.CustomExceptionError(\n 'Unexpected value for conflict attribute: %s. 
Must be one of the '\n 'following values: %s' % (conflict_attr, CONFLICT_MAP.keys()),\n code_pb2.Code.INVALID_ARGUMENT)\n conflict = CONFLICT_MAP[conflict_attr] if conflict_attr else DEFAULT_CONFLICT\n\n try:\n input_path = dicom_path.FromString(input_path_str, path_type)\n parsed_message = ParsedMessage(\n input_path=input_path, conflict=conflict, test=test_attr)\n\n # Set the output DICOM store path, if available.\n output_store_path_str = message.attributes.get('output_dicom_store_path')\n if output_store_path_str is not None:\n output_store_path = dicom_path.FromString(output_store_path_str,\n dicom_path.Type.STORE)\n parsed_message.output_dicom_store_path = output_store_path\n return parsed_message\n except ValueError as e:\n traceback.print_exc()\n raise exception.CustomExceptionError(str(e), code_pb2.Code.INVALID_ARGUMENT)", "def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))", "def GetAioMessageStruct(message_type_name):\n try:\n if message_type_name == 'kMessageTypeControlTelemetry':\n return getattr(pack_control_telemetry, 'ControlTelemetry')\n elif message_type_name == 'kMessageTypeControlSlowTelemetry':\n return getattr(pack_control_telemetry, 'ControlSlowTelemetry')\n elif message_type_name == 'kMessageTypeControlDebug':\n return getattr(pack_control_telemetry, 'ControlDebugMessage')\n elif message_type_name == 'kMessageTypeSimTelemetry':\n return getattr(pack_sim_telemetry, 'SimTelemetry')\n elif message_type_name == 'kMessageTypeGroundTelemetry':\n return getattr(pack_ground_telemetry, 'GroundTelemetry')\n elif message_type_name in ('kMessageTypeDynamicsReplay',\n 'kMessageTypeEstimatorReplay',\n 'kMessageTypeSimCommand',\n 'kMessageTypeSimSensor',\n 'kMessageTypeSimTetherDown'):\n return getattr(pack_sim_messages,\n message_type_name[len('kMessageType'):] + 'Message')\n else:\n return getattr(pack_avionics_messages,\n message_type_name[len('kMessageType'):] + 'Message')\n except AttributeError:\n raise AioClientException(\n 'No struct for AIO message type: ' + message_type_name)", "def _MessageToJsonObject(self, message):\n message_descriptor = message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n return self._WrapperMessageToJsonObject(message)\n if full_name in _WKTJSONMETHODS:\n return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)\n js = {}\n return self._RegularMessageToJsonObject(message, js)", "def from_msg(cls, msg: Msg) -> Message:\n tokens = msg.reply.split(\".\")\n if len(tokens) != 9 or tokens[0] != \"$JS\" or tokens[1] != \"ACK\":\n raise ValueError(\n \"Failed to parse message. 
Message is not a valid JetStream message\"\n )\n message = Message(\n subject=msg.subject,\n seq=tokens[6],\n data=msg.data,\n time=datetime.fromtimestamp(\n int(tokens[7]) / 1_000_000_000.0, tz=timezone.utc\n ),\n hdrs=msg.headers,\n )\n message._msg = msg\n return message", "def UnpackMessage(swig_obj_pointer, msg_name):\n\n ptr = int(swig_obj_pointer)\n c_array = ctypes.c_char * aio.GetPackMessageSize(msg_name)\n received = c_array.from_address(ptr)\n\n msg_type = MESSAGE_TYPE_HELPER.Value(msg_name)\n return c_helpers.Unpack(received[:], MESSAGE_STRUCTS[msg_type])", "def makeMessage( name, *structure ):\n return X12Message( name, *structure )", "def get_method(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.method\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n return message[3]\r\n return None", "def _message_to_percept(message: GymMessage) -> Feedback:\n observation = cast(Any, message.observation.any)\n reward = cast(float, message.reward)\n done = cast(bool, message.done)\n info = cast(dict, message.info.any)\n\n return observation, reward, done, info", "def test_from_message_to_dto(self, mock_factory):\n message = Message({'class': 'FooClass', 'data': {'foo': 'bar'}})\n from_message_to_dto(message)\n\n assert mock_factory.call_args[0][0].__name__ == 'FooClass'\n assert mock_factory.call_args[0][0]._fields == ('foo', 'Meta')", "def encode(msg: Message) -> bytes:\n msg = cast(SigningMessage, msg)\n signing_msg = signing_pb2.SigningMessage()\n signing_msg.message_id = msg.message_id\n dialogue_reference = msg.dialogue_reference\n signing_msg.dialogue_starter_reference = dialogue_reference[0]\n signing_msg.dialogue_responder_reference = dialogue_reference[1]\n signing_msg.target = msg.target\n\n performative_id = msg.performative\n if performative_id == SigningMessage.Performative.SIGN_TRANSACTION:\n performative = signing_pb2.SigningMessage.Sign_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_transaction = msg.raw_transaction\n RawTransaction.encode(performative.raw_transaction, raw_transaction)\n signing_msg.sign_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGN_MESSAGE:\n performative = signing_pb2.SigningMessage.Sign_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n terms = msg.terms\n Terms.encode(performative.terms, terms)\n raw_message = msg.raw_message\n RawMessage.encode(performative.raw_message, raw_message)\n signing_msg.sign_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_TRANSACTION:\n performative = signing_pb2.SigningMessage.Signed_Transaction_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n 
performative.skill_callback_info.update(skill_callback_info)\n signed_transaction = msg.signed_transaction\n SignedTransaction.encode(\n performative.signed_transaction, signed_transaction\n )\n signing_msg.signed_transaction.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.SIGNED_MESSAGE:\n performative = signing_pb2.SigningMessage.Signed_Message_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n signed_message = msg.signed_message\n SignedMessage.encode(performative.signed_message, signed_message)\n signing_msg.signed_message.CopyFrom(performative)\n elif performative_id == SigningMessage.Performative.ERROR:\n performative = signing_pb2.SigningMessage.Error_Performative() # type: ignore\n skill_callback_ids = msg.skill_callback_ids\n performative.skill_callback_ids.extend(skill_callback_ids)\n skill_callback_info = msg.skill_callback_info\n performative.skill_callback_info.update(skill_callback_info)\n error_code = msg.error_code\n ErrorCode.encode(performative.error_code, error_code)\n signing_msg.error.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n signing_bytes = signing_msg.SerializeToString()\n return signing_bytes", "def get_kwargs(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.kwargs\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n if len(message) > 5:\r\n return copy.deepcopy(message[5])\r\n else:\r\n return None\r\n return None", "def test_to_message_from_dto(self):\n fields = [('id', None)]\n FooEvent = message_factory(NamedTuple('FooEvent', fields))\n dto = FooEvent(id=1)\n message = to_message_from_dto(dto)\n\n assert message['class'] == 'FooEvent'\n assert message['data']['id'] == 1", "def _to_message_record(parsed):\n return MessageRecord(record_type=parsed.record_type,\n transaction_sequence_n=parsed.transaction_sequence_n,\n record_sequence_n=parsed.record_sequence_n,\n message_type=parsed.message_type,\n message_text=parsed.message_text,\n original_record_sequence_n=parsed.sequence_n,\n message_record_type=parsed.message_record_type,\n message_level=parsed.message_level,\n validation_n=parsed.validation)", "def handle_message(self, msg):\n pass", "def _wrap(self, value, msg):\n if value is not None:\n answer = Message(msg)\n answer[RESPONSE_ID] = answer[MSG_ID]\n # answer[SENDER_ID] = self.uid # already done in send()\n answer.pop(FULL_MSG, None)\n answer.pop(COMMAND, None)\n answer[BODY] = value\n return answer", "def decode(obj: bytes) -> Message:\n default_pb = default_pb2.DefaultMessage()\n default_pb.ParseFromString(obj)\n message_id = default_pb.message_id\n dialogue_reference = (\n default_pb.dialogue_starter_reference,\n default_pb.dialogue_responder_reference,\n )\n target = default_pb.target\n\n performative = default_pb.WhichOneof(\"performative\")\n performative_id = DefaultMessage.Performative(str(performative))\n performative_content = dict() # type: Dict[str, Any]\n if performative_id == DefaultMessage.Performative.BYTES:\n content = default_pb.bytes.content\n performative_content[\"content\"] = 
content\n elif performative_id == DefaultMessage.Performative.ERROR:\n pb2_error_code = default_pb.error.error_code\n error_code = ErrorCode.decode(pb2_error_code)\n performative_content[\"error_code\"] = error_code\n error_msg = default_pb.error.error_msg\n performative_content[\"error_msg\"] = error_msg\n error_data = default_pb.error.error_data\n error_data_dict = dict(error_data)\n performative_content[\"error_data\"] = error_data_dict\n else:\n raise ValueError(\"Performative not valid: {}.\".format(performative_id))\n\n return DefaultMessage(\n message_id=message_id,\n dialogue_reference=dialogue_reference,\n target=target,\n performative=performative,\n **performative_content\n )", "def raw_message(self) -> RawMessage:\n return self.__raw_message", "def message_to_objects(message):\n doc = etree.fromstring(message)\n if doc[0].tag == \"post\":\n # Skip the top <post> element if it exists\n doc = doc[0]\n entities = element_to_objects(doc)\n return entities", "def get_sender(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.sender\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n return message[1]\r\n return None", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _build_standard_payload(self, message):\n recipients_list = [\n sanitize_address(addr, message.encoding)\n for addr in message.recipients()]\n recipients = [\n {\"email\": e, \"name\": n}\n for n, e in [parseaddr(r) for r in recipients_list]]\n\n sender = sanitize_address(message.from_email, message.encoding)\n name, email = parseaddr(sender)\n\n payload = {\n 'key': self.api_key,\n 'message': {\n 'text': message.body,\n 'subject': message.subject,\n 'from_email': email,\n 'from_name': getattr(message, 'from_name', None) or name,\n 'to': recipients,\n },\n }\n\n if message.attachments:\n payload['message']['attachments'] = []\n for attachment in message.attachments:\n # django supports two types of attachements:\n # * a subclass of email.mime.base.MIMEBase\n # * a tuple of (filename, content[, mimetype])\n if isinstance(attachment, MIMEBase):\n filename = attachment.get_filename()\n content = attachment.get_payload(decode=True)\n mimetype = attachment.get_content_type()\n else:\n filename = attachment[0]\n content = attachment[1]\n mimetype = (\n attachment[2]\n if len(attachment) > 2 and attachment[2]\n else mimetypes.guess_type(filename)[0]\n )\n payload['message']['attachments'].append({\n 'type': mimetype,\n 'name': str(filename),\n 'content': base64.b64encode(content),\n })\n return payload", "def _lwm2m_protocol(data):\n\n try:\n load_payload = json.loads(data['payload'])\n except Exception:\n raise FormInvalid(field='payload')\n # build emqx lwm2m protocol require payload\n handled_payload = _validate_lwm2m_topic(data['topic'])\n if data['topic'] == '/19/1/0':\n handled_payload['msgType'] = 'write'\n handled_payload['value'] = load_payload\n else:\n msg_type = load_payload.get('msgType')\n if msg_type == 'read':\n # {'msgType': 'read', 'path': xx}\n handled_payload['msgType'] = 'read'\n elif msg_type == 'write' and load_payload.get('value'):\n # {'msgType': 'write', 'path': xx, 
'value': xx, 'type': xx}\n handled_payload['msgType'] = 'write'\n handled_payload['value'] = load_payload['value']\n elif msg_type == 'execute':\n # {'msgType': 'execute', 'path': xx, 'args'}\n handled_payload['msgType'] = 'execute'\n if load_payload.get('args'):\n handled_payload['args'] = load_payload['args']\n else:\n raise FormInvalid(field='payload')\n data['payload'] = json.dumps(handled_payload)\n return data", "def _ConvertStructMessage(self, value, message, path):\n if not isinstance(value, dict):\n raise ParseError('Struct must be in a dict which is {0} at {1}'.format(\n value, path))\n # Clear will mark the struct as modified so it will be created even if\n # there are no values.\n message.Clear()\n for key in value:\n self._ConvertValueMessage(value[key], message.fields[key],\n '{0}.{1}'.format(path, key))\n return", "def message(*args, **kwargs):\n version = kwargs.get('version', NATIVE_HEADER_VERSION)\n order = kwargs.get('order', '<')\n\n if len(order) == 0:\n order = \"<\"\n\n if version == 4:\n m = message4(*args)\n elif version == 3:\n m = message3(*args)\n elif version == 2:\n m = message2(*args)\n else:\n m = anymessage(*args) # only for reading message from stream\n\n m.packedHeaderDataFormat=order+m.packedHeaderDataFormat[1:]\n\n return m", "def convert_message_to_command(message):\n\n split_message = message.split(',')\n pinValue = int(split_message[2]) # split_message[1] in ('True','true')\n user = split_message[0]\n\n ## We will use MESSAGE_PINVALUE_0+pinNumber for HIGH signals,\n ## and MESSAGE_PINVALUE_0-pinNumber-1 for LOW signals\n ## e.g. pin 0 LOW corresponds to 64 and pin 1 HIGH corresponds to 66\n if pinValue:\n pinNumber = chr(int(split_message[1]) + MESSAGE_PINVALUE_0)\n else:\n pinNumber = chr(MESSAGE_PINVALUE_0 - int(split_message[1]) - 1)\n\n return (user, pinValue, pinNumber)", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg", "def hl7_message_to_dict(message, use_long_name=True):\n lib = load_library(message.version)\n base_datatypes = lib.get_base_datatypes()\n return _hl7_message_to_dict(message, set(base_datatypes), use_long_name=use_long_name)", "def process_message_object(self, message, client):\n if message['destination'] is None:\n raise BunnyMessageCancel('The received message is not from a valid conversation')\n\n messages = {\n 'image': {\n 'en': u\"Image\".format(**message),\n 'es': u\"Imagen\".format(**message),\n 'ca': u\"Imatge\".format(**message),\n }\n }\n\n if message['data']['text'] == u'Add image':\n message['data']['text'] = messages['image'][client.metadata['language']]\n message['data']['alert'] = u'{user[displayname]}: '.format(**message)\n else:\n message.setdefault('data', {})\n message['data']['alert'] = u'{user[displayname]}: '.format(**message)\n\n tokens = client.conversations[message['destination']].tokens.get()\n return message, tokens", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # 
\"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def process_message(self, msg, src):", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))" ]
[ "0.66961044", "0.61725485", "0.61444753", "0.6085341", "0.60676634", "0.59354925", "0.5832414", "0.58319014", "0.58083576", "0.57996356", "0.5798074", "0.5764045", "0.57599103", "0.5742993", "0.5738603", "0.57220566", "0.5719687", "0.5718132", "0.57123685", "0.5708723", "0.57086015", "0.56855506", "0.56669366", "0.5660865", "0.5651575", "0.5630863", "0.5623708", "0.56211376", "0.5608459", "0.55996764", "0.55987716", "0.55811137", "0.55748063", "0.55704737", "0.5532425", "0.5517895", "0.55055237", "0.55039495", "0.55025494", "0.5487237", "0.5485923", "0.5480985", "0.54795706", "0.5469439", "0.5455208", "0.5453504", "0.545256", "0.5444361", "0.54433715", "0.5440211", "0.54337245", "0.5413015", "0.54111814", "0.54111326", "0.5402718", "0.53988487", "0.5397661", "0.5382191", "0.5379571", "0.5369933", "0.53650373", "0.53469783", "0.5343495", "0.53426117", "0.53384024", "0.5323125", "0.53226346", "0.5321155", "0.53186464", "0.53181833", "0.53157204", "0.53141403", "0.53057563", "0.53036875", "0.5302226", "0.5298012", "0.52945155", "0.5292677", "0.5282649", "0.5278718", "0.52745765", "0.52730316", "0.52710885", "0.5268785", "0.526485", "0.526417", "0.52626777", "0.52458054", "0.5244067", "0.524306", "0.52429307", "0.5239855", "0.52355146", "0.523298", "0.52328706", "0.5227116", "0.52200025", "0.52195245", "0.521837", "0.52178013" ]
0.7441321
0
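Per the row metadata above, the only declared objective is the triplet one over ("query", "document", "negatives"). A small, framework-free sketch of expanding a row into training triplets follows; the helper name is illustrative and not part of the dataset.

# Hypothetical helper: expand one dataset row into (anchor, positive, negative)
# triplets, following the ("query", "document", "negatives") objective declared
# in the row's metadata.
def row_to_triplets(row):
    anchor = row["query"]        # natural-language description of the code
    positive = row["document"]   # the code snippet that matches the query
    return [(anchor, positive, negative) for negative in row["negatives"]]

Each resulting triplet can then be fed to a triplet-style contrastive loss (for example, a margin-based triplet loss) when training a retrieval model.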
Return sender of serialized message
def get_sender(cls, message):
    if message is not None:
        if isinstance(message, PlatformMessage):
            return message.sender
        assert isinstance(message, (list, tuple)), "Message is expected to be a list or a tuple"
        assert len(message) >= 4, "Message's length expected to be at least 4"
        assert message[0] == PlatformMessage._signature, "Message's signature is incorrect"
        return message[1]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_sender_id(self):\n return self.__sender_id", "def get_sender_email(message):\r\n message_headers = message['payload']['headers']\r\n for header in message_headers:\r\n if header['name'] == 'From':\r\n return header['value']", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def _get_sender_key(self, outer_message, aad, plaintext, request_id):\n return self.sender_key", "def _get_sender(self, sender):\n if isinstance(sender, tuple):\n return \"%s <%s>\" % sender, sender[0], sender[1]\n else:\n return sender, sender, sender", "def SenderId(self):\n return self._sender_id", "def sender(self):\n l = self.link\n if l and l.is_sender:\n return l\n else:\n return None", "def get_sender_username(self, mess):\n jid = mess.getFrom()\n typ = mess.getType()\n username = jid.getNode()\n domain = jid.getDomain()\n if typ == \"chat\":\n return \"%s@%s\" %(username, domain)\n else:\n return \"\"", "def getSender(self):\n\n if self in WebSocketRouter.nodemap:\n return WebSocketRouter.nodemap[self]\n elif self not in WebSocketRouter.usermap:\n WebSocketRouter.usermap[self] = str(uuid4())\n debug(\"Added user py id: %s uuid: %s\" % \\\n (str(id(self)), WebSocketRouter.usermap[self]))\n return WebSocketRouter.usermap[self]", "def get_message(self, sender=None):\n if sender == None:\n if self.public_key == None:\n return None\n participant = self.public_key\n else:\n participant = sender\n following = [tx.follow for block in self.__chain for tx in block.chipsactions if tx.sender == participant] \n tx_recipient2 = [tx.message for block in self.__chain for tx in block.messsactions if tx.follower in following]\n print(\"tx_recipient2\")\n print(tx_recipient2)\n return tx_recipient2", "def sender(self) -> Address:\n return self._sender", "def sender(self) -> str:\n return self._sender", "def showsender(self):\n return self.sender", "def _get_message(self):\n return self.__message", "def sender(self):\n return self._sender", "def clean_sender(sender):\n if sender in _sender_map:\n return _sender_map[sender]\n return ''", "def whoami( self, mess, args):\n return mess.getFrom()", "def envelope_sender(self):\n envelope_sender = None\n # TODO: Make this check better as soon as SMTP from and sender are \n # Addresses, not AddressLists anymore.\n if self.smtp_from != None and len(self.smtp_from) > 0:\n envelope_sender = self.smtp_from\n elif self.sender != None and len(self.sender) > 0:\n envelope_sender = self.sender\n else:\n envelope_sender = self.author\n return Address(envelope_sender)", "def get_message(self):\n return self.message", "def get_message(self):\n return self.message", "def SenderScreenName(self):\n return self._sender_screen_name", "def whoami(self, mess, args):\n return mess.getFrom().getStripped()", "def msg_to_sender(received_msg):\r\n msg_reply = Message()\r\n msg_reply.to = str(received_msg.sender)\r\n msg_reply.set_metadata(\"performative\", \"inform\")\r\n return msg_reply", "def get_message(self):\n return self.msg", "def get_message (self) :\n return self._message", "def UserMessage(self):\n return self._usermessage", "def get_message(self):\n return self.__mess", "def owner(self):\n \n if not self.logMessage is None:\n return self.logMessage[\"author\"]", "def get_sender_email(mail: Message) -> str:\n sender_pattern = re.compile(\"^(?P<name>.*)\\s<(?P<email>.*)>$\")\n from_header = mail['From'] # type: str\n\n sender = 
sender_pattern.match(from_header)\n if not sender:\n raise KeyError(\"Invalid From header on email\")\n\n return sender.group('email')", "def message(self):\n return self._message", "def get_incoming_message(self):\n\n if self.incoming_message['is_used']:\n return {}\n\n self.incoming_message['is_used'] = True\n return self.incoming_message['message']", "def _get_message(self, sender_message):\n # type: (str) -> Message or None\n st_re = self.SENDER_TEXT.search(sender_message)\n if st_re is None:\n return None\n else:\n return Message(speaker=st_re.group(1), text=st_re.group(2).strip())", "def extract_sender(emailstr):\n msg = email.message_from_string(emailstr)\n sender = msg['From']\n if sender is None:\n raise IOError(\"Parsing email failed. Aborting.\")\n\n if \"(\" in emailstr:\n sender = remove_comments_from_sender(sender)\n\n if \"<\" not in sender:\n raise KeyError(\"Parsing email sender failed. No address found.\")\n\n _name, _, _email = sender.partition(\"<\")\n _name = _name.strip()\n _email = _email.rstrip(\">\").strip()\n return _name, _email", "def get_sender(doc):\n key_id = doc.find(\".//{%s}sig\" % NAMESPACE).get(\"key_id\")\n return urlsafe_b64decode(key_id).decode(\"utf-8\")", "def get_receiver(self):\n return self.receiver", "def get_from(self, data):\n from_data = data['from'][0]\n return from_data", "def raw_message(self) -> RawMessage:\n return self.__raw_message", "def message(self):\n return self.args[0]", "def user_message(self):\n return str(self)", "def message(self):\n return self._message", "def message(self):\n return self._message", "def message(self):\n return self._message", "def message(self):\n return self._message", "def message(self):\n return self._message", "def invoker(self) -> User:\n return self.msg.source", "def message(self) -> str:\n return self[\"Sns\"][\"Message\"]", "def message_id(self) -> str:\n return self[\"Sns\"][\"MessageId\"]", "def getMessageID(self):\n return self._payload[1]", "def message(self) -> \"str\":\n return self._attrs.get(\"message\")", "def message(self) -> \"str\":\n return self._attrs.get(\"message\")", "def message(self):\n if not hasattr(self, '_message'):\n self._message = email.message_from_string(self.data)\n return self._message", "def sender_to_nick(self, sender):\n regex = re.compile('^@(.*):.*$')\n nick = regex.match(sender).group(1)\n return nick", "def VendorMessage(self):\n\t\treturn self._get_attribute('vendorMessage')", "def _get_recipient_key(self, protected_message):\n return self.recipient_key", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n\n return self._message", "def message(self):\n\n return self._message", "def message(self):\n\n return self._message", "def _get_send_community(self):\n return self.__send_community", "def get_sender_params(self):\n return self.get_section_config('sender')", "def messier_name(self):\n return self._messier_name", "def message(self) -> AgentMessage:\n return self._message", "def __str__(self):\n return self.receiver_name", "def __str__(self):\n return self.receiver_name", "async def _return_exported_sender(self: 'TelegramClient', sender):\n async with self._borrow_sender_lock:\n self._log[__name__].debug('Returning borrowed sender for dc_id %d', sender.dc_id)\n state, _ = 
self._borrowed_senders[sender.dc_id]\n state.add_return()", "def receiver(self) -> str:\n return self._receiver", "def message_id(self):\n return self._message_id", "def recipient(self):\n return self._recipient", "def _get_plain_message (self) :\n return self._message", "def get_message(self, user):\n return None", "def get_message():\n\tincoming_message = conn.recv(1024)\n\tincoming_message = incoming_message.decode()\n\treturn incoming_message", "def getpeername(self):\r\n return self.__proxypeername", "def getpeername(self):\r\n return self.__proxypeername", "def message(self) -> str:\n return self.fields.get('msg', self.raw_string)", "def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg", "def get_message_source_from_event(event):\n return event.message.annotations[\"iothub-message-source\".encode()].decode()", "def reply_to(self):\n return self.receiver.remote_source.address", "def getpeername(self):\r\n return self.__proxypeername", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message", "def sender(self, addr,name):\n self.s[name] = (addr,self.ssn.sender(addr)) \n return self.s[name]", "def sender(self, sender: Address) -> None:\n enforce(\n isinstance(sender, str), f\"Sender must be string. Found '{type(sender)}'\"\n )\n self._sender = sender", "def source(self) -> Union[User, Channel]:\n return self.msg.destination", "def func_from(self, data, get_recv):\n if get_recv:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode()\n else:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[0]\n data_list = checking.split(':')\n remove_bracket = str(data_list[1])\n remove_bracket = remove_bracket[2:-1]\n data_list[1] = remove_bracket\n check = data_list[0].lower().rstrip()\n if check == 'mail from':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n return True", "def get_message(self):\n return super().get_message()", "def message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"message\")", "def get_message(self):\n data = self.socket.recv(1024)\n if not data:\n logging.error('Failed to read data from socket')\n return\n\n return self.decode_message(data)", "def sender(self) -> str:", "async def get_msg(self):\n try:\n # 2^8 bytes at a time. I just like it, no special reason\n data = await self.reader.read(256)\n msg = data.decode()\n addr = writer.get_extra_info(\"peername\")\n logging.info(\"Received %s from %s\", (msg, addr))\n\n except Exception as e:\n logging.error(\"Command could not be decoded; %s\", e)\n\n return msg", "def message(self) -> str:\n return self._message", "def message(self) -> str:\n return self._message", "def getToUser(self):\n return self.toUser", "def getFromUser(self):\n return self.fromUser", "async def uid(message):\n return \"your user id is: {}\".format(message.user_id)", "def get_message(self):\n client_message = self.connection_with_client.recv(1024)\n client_message_decoded = client_message.decode()\n\n if \"quit\" in client_message_decoded.lower():\n self.end()\n return client_message_decoded", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg" ]
[ "0.7114796", "0.6647468", "0.6598164", "0.6566656", "0.65436935", "0.65187633", "0.65110844", "0.65012693", "0.6501094", "0.64227396", "0.640637", "0.6369679", "0.6141742", "0.6132606", "0.61057776", "0.6090527", "0.6054466", "0.6002109", "0.5983544", "0.5983544", "0.59530574", "0.58648294", "0.58505464", "0.58297336", "0.58288604", "0.58285195", "0.5804105", "0.5787212", "0.57627654", "0.5759763", "0.5747662", "0.57208025", "0.570599", "0.5699676", "0.569852", "0.56847054", "0.5660228", "0.56318176", "0.5613624", "0.5613491", "0.5613491", "0.5613491", "0.5613491", "0.5613491", "0.5599808", "0.5579097", "0.5578167", "0.5551678", "0.554575", "0.554575", "0.5519272", "0.5508399", "0.55043536", "0.55034614", "0.54897195", "0.54897195", "0.54897195", "0.54897195", "0.54897195", "0.54897195", "0.5478403", "0.5478403", "0.5478403", "0.54680127", "0.5458729", "0.545211", "0.54400355", "0.54374206", "0.54374206", "0.54269314", "0.5425643", "0.54250324", "0.54111946", "0.5394468", "0.53944355", "0.53907377", "0.53867984", "0.53867984", "0.53827775", "0.53824925", "0.53768706", "0.53764194", "0.5371237", "0.5368752", "0.53685397", "0.5367024", "0.53541344", "0.53334045", "0.53269994", "0.53231716", "0.531519", "0.5313469", "0.5309217", "0.5294771", "0.5294771", "0.5293988", "0.52928185", "0.5281239", "0.5265911", "0.5258302" ]
0.7315821
0
Return interface of serialized message
def get_interface(cls, message):
    if message is not None:
        if isinstance(message, PlatformMessage):
            return message.interface
        assert isinstance(message, (list, tuple)), "Message is expected to be a list or a tuple"
        assert len(message) >= 4, "Message's length expected to be at least 4"
        assert message[0] == PlatformMessage._signature, "Message's signature is incorrect"
        return message[2]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intf_get_notif_serializer():\n serializer = sl_interface_pb2.SLInterfaceGetNotifMsg()\n return serializer", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg", "def _stringify_proto(obj):\n if isinstance(obj, str): return obj\n elif isinstance(obj, Message): return obj.SerializeToString()\n else: raise TypeError('Object can not be serialized as a string.')", "def _serialize(\n obj: object,\n to_proto: bool = True,\n to_bytes: bool = False,\n) -> Union[str, bytes, Message]:\n\n is_serializable: Serializable\n if not isinstance(obj, Serializable):\n if hasattr(obj, \"serializable_wrapper_type\"):\n is_serializable = obj.serializable_wrapper_type(value=obj) # type: ignore\n else:\n traceback_and_raise(\n Exception(f\"Object {type(obj)} has no serializable_wrapper_type\")\n )\n else:\n is_serializable = obj\n\n serialize_method = getattr(is_serializable, \"sy_serialize\", None)\n if serialize_method is None:\n serialize_method = getattr(is_serializable, \"serialize\", None)\n if serialize_method is None:\n raise Exception(f\"Object {type(obj)} has no serialize method\")\n\n return serialize_method(to_proto=to_proto, to_bytes=to_bytes)", "def serialize_message(self) -> bytes:\n return self.compile_message().serialize()", "def parse(self, serialized):\n raise NotImplementedError(\"Calling an abstract method.\")", "def serialize(self):\n messageLen = len(self._messageBuf) + 1 # 1 byte for the message type\n header = pack(self.headerFormat, messageLen)\n msgType = pack(self.messageTypeFormat, self.messageType)\n payload = bytes(self._messageBuf)\n return header + msgType + payload", "def get_proto_serializer():\n def _serialize_proto(proto):\n return proto.SerializeToString()\n return _serialize_proto", "def intf_globals_get_serializer():\n serializer = sl_interface_pb2.SLInterfaceGlobalsGetMsg()\n return serializer", "def serialize(self):\n\n # The len must be multiple of 4 bits to convert unambiguously\n\n id_len = self.id.bit_length()\n while (id_len % 4)!= 0:\n id_len += 1\n if self.payload:\n pay_len = self.payload.bit_length()\n while (pay_len % 4)!= 0:\n pay_len += 1\n else: pay_len = 0\n if self.command:\n com_len = self.command.bit_length()\n while (com_len % 4)!= 0:\n com_len += 1\n else: com_len = 0\n\n values = {\n \"id\": self.id,\n \"id_len\": id_len,\n \"payload\": self.payload,\n \"payload_len\": pay_len,\n \"command\": self.command,\n \"command_len\": com_len\n }\n\n\n if self.id == Message.MEASURE or self.id == Message.SINGLE_MEASURE:\n serial_format = (\n \"uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n else:\n serial_format = (\n \"0x23, uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n\n message = bitstring.pack(serial_format, **values)\n\n rospy.logdebug(\"Sent command '0x%s'\", message.hex)\n\n return message.tobytes()", "def serialize(self, name, *args, **kwargs): \n if '.' 
in name:\n unspec = self._unspecify_name(name)\n if not unspec or not (repr(unspec) in self.messages):\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n name = unspec\n elif name in self.message_rhashes:\n name = self.message_rhashes[name]\n else:\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n struct = self.messages[repr(name)]()\n index = 0\n for field in struct.DESCRIPTOR.fields:\n # Loop through the fields in order of definition\n # If we can't, the fields have to be initialized by the\n # keyword arguments\n value = args[index] if index < len(args) else kwargs.get(field.name)\n # dict.get() returns None if the entry was not found\n if value == None:\n # If a field is optional, it can be skipped\n if field.label == field.LABEL_OPTIONAL:\n continue\n raise FieldNotDefinedException(\"The field '\" + field.name +\n \"' was not defined when serializing a '\" +\n self.message_hashes[repr(name)] + \"'\")\n try:\n r = self._map_onto(getattr(struct, field.name), value, self._get_options(struct, field.name))\n if r:\n self._checked_set(struct, field.name, r[0])\n except TypeError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' to \" + str(e).replace('has type', 'which has the type'))\n except ValueError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' but \" + str(e))\n index += 1\n return pack(str(self.header_size) + 's', name) + struct.SerializePartialToString()", "def deserialize(self, payload: bytes) -> object:\n raise NotImplementedError()", "def serialize(self):\n raise NotImplementedError(\n \"Subclasses of Serializable must implement serialize\"\n )", "def getDeserializer():", "def serialize(self, data):", "def serialize(self, data):\n raise NotImplementedError", "def deserialize(self, payload: str) -> object:\n raise NotImplementedError()", "async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def serialize(msg) -> str:\n try:\n return json.dumps(msg, separators=(',', ':'))\n except json.JSONDecodeError as err:\n return err.msg", "def bfd_get_serializer():\n serializer = sl_bfd_common_pb2.SLBfdGetMsg()\n return serializer", "def deserialize(self, data):\n return NotImplementedError", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.graspable_objects is None:\n self.graspable_objects = None\n if self.image is None:\n self.image = sensor_msgs.msg.Image()\n if self.camera_info is None:\n self.camera_info = sensor_msgs.msg.CameraInfo()\n if self.meshes is None:\n self.meshes = None\n if self.reference_to_camera is None:\n self.reference_to_camera = geometry_msgs.msg.Pose()\n end = 0\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.graspable_objects = []\n for i in range(0, length):\n val1 = manipulation_msgs.msg.GraspableObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.reference_frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.reference_frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.potential_models = []\n for i in range(0, length):\n val2 = household_objects_database_msgs.msg.DatabaseModelPose()\n start = end\n end += 4\n (val2.model_id,) = _get_struct_i().unpack(str[start:end])\n _v32 = val2.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.key = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.db = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.db = str[start:end]\n _v33 = val2.pose\n _v34 = _v33.header\n start = end\n end += 4\n (_v34.seq,) = _get_struct_I().unpack(str[start:end])\n _v35 = _v34.stamp\n _x = _v35\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v34.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v34.frame_id = str[start:end]\n _v36 = _v33.pose\n _v37 = _v36.position\n _x = _v37\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v38 = _v36.orientation\n _x = _v38\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.detector_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2.detector_name = str[start:end]\n val1.potential_models.append(val2)\n _v39 = val1.cluster\n _v40 = _v39.header\n start = end\n end += 4\n (_v40.seq,) = _get_struct_I().unpack(str[start:end])\n _v41 = _v40.stamp\n _x = _v41\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v40.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v40.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.points = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point32()\n _x = val3\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])\n _v39.points.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.channels = []\n for i in range(0, length):\n val3 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val3.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n val3.values = s.unpack(str[start:end])\n _v39.channels.append(val3)\n _v42 = val1.region\n _v43 = 
_v42.cloud\n _v44 = _v43.header\n start = end\n end += 4\n (_v44.seq,) = _get_struct_I().unpack(str[start:end])\n _v45 = _v44.stamp\n _x = _v45\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v44.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v44.frame_id = str[start:end]\n _x = _v43\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v43.fields = []\n for i in range(0, length):\n val4 = sensor_msgs.msg.PointField()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val4.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val4.name = str[start:end]\n _x = val4\n start = end\n end += 9\n (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])\n _v43.fields.append(val4)\n _x = _v43\n start = end\n end += 9\n (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])\n _v43.is_bigendian = bool(_v43.is_bigendian)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v43.data = str[start:end]\n start = end\n end += 1\n (_v43.is_dense,) = _get_struct_B().unpack(str[start:end])\n _v43.is_dense = bool(_v43.is_dense)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v42.mask = s.unpack(str[start:end])\n _v46 = _v42.image\n _v47 = _v46.header\n start = end\n end += 4\n (_v47.seq,) = _get_struct_I().unpack(str[start:end])\n _v48 = _v47.stamp\n _x = _v48\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v47.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v47.frame_id = str[start:end]\n _x = _v46\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v46.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v46.encoding = str[start:end]\n _x = _v46\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v46.data = str[start:end]\n _v49 = _v42.disparity_image\n _v50 = _v49.header\n start = end\n end += 4\n (_v50.seq,) = _get_struct_I().unpack(str[start:end])\n _v51 = _v50.stamp\n _x = _v51\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v50.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v50.frame_id = str[start:end]\n _x = _v49\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v49.encoding = str[start:end]\n _x = _v49\n start = end\n end += 5\n (_x.is_bigendian, 
_x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v49.data = str[start:end]\n _v52 = _v42.cam_info\n _v53 = _v52.header\n start = end\n end += 4\n (_v53.seq,) = _get_struct_I().unpack(str[start:end])\n _v54 = _v53.stamp\n _x = _v54\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v53.frame_id = str[start:end]\n _x = _v52\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v52.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v52.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v52.D = s.unpack(str[start:end])\n start = end\n end += 72\n _v52.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n _v52.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n _v52.P = _get_struct_12d().unpack(str[start:end])\n _x = _v52\n start = end\n end += 8\n (_x.binning_x, _x.binning_y,) = _get_struct_2I().unpack(str[start:end])\n _v55 = _v52.roi\n _x = _v55\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _get_struct_4IB().unpack(str[start:end])\n _v55.do_rectify = bool(_v55.do_rectify)\n _v56 = _v42.roi_box_pose\n _v57 = _v56.header\n start = end\n end += 4\n (_v57.seq,) = _get_struct_I().unpack(str[start:end])\n _v58 = _v57.stamp\n _x = _v58\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v57.frame_id = str[start:end]\n _v59 = _v56.pose\n _v60 = _v59.position\n _x = _v60\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v61 = _v59.orientation\n _x = _v61\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v62 = _v42.roi_box_dims\n _x = _v62\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.collision_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.collision_name = str[start:end]\n self.graspable_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.image.height, _x.image.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8', 
'rosmsg')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.camera_info.height, _x.camera_info.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.camera_info.D = s.unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n self.camera_info.P = _get_struct_12d().unpack(str[start:end])\n _x = self\n start = end\n end += 25\n (_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify,) = _get_struct_6IB().unpack(str[start:end])\n self.camera_info.roi.do_rectify = bool(self.camera_info.roi.do_rectify)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.meshes = []\n for i in range(0, length):\n val1 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.triangles = []\n for i in range(0, length):\n val2 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val2.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val1.triangles.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.vertices = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Point()\n _x = val2\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val1.vertices.append(val2)\n self.meshes.append(val1)\n _x = self\n start = end\n end += 56\n (_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def _AnyMessageToJsonObject(self, message):\n if not message.ListFields():\n return {}\n # Must print @type first, use OrderedDict instead of {}\n js = OrderedDict()\n type_url = message.type_url\n js['@type'] = type_url\n sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)\n sub_message.ParseFromString(message.value)\n message_descriptor = sub_message.DESCRIPTOR\n 
full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n js['value'] = self._WrapperMessageToJsonObject(sub_message)\n return js\n if full_name in _WKTJSONMETHODS:\n js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],\n sub_message)(self)\n return js\n return self._RegularMessageToJsonObject(sub_message, js)", "def deserialize(self, data):", "def read_message(self):\n def read_data(lnth):\n data = self.pipe_in.read(lnth)\n if len(data) < lnth:\n raise EofError\n return data\n \n data = read_data(struct.calcsize(\"i\"))\n msgLnth = struct.unpack(\"i\", data)[0]\n data = read_data(msgLnth)\n\n # Ack\n try: self.pipe_out.write('a')\n except IOError: pass\n\n import cPickle\n obj = cPickle.loads(data)\n return obj", "def serialize(self):", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def deserialize(self, str):\n try:\n if self.icon is None:\n self.icon = rocon_std_msgs.msg.Icon()\n if self.remappings is None:\n self.remappings = None\n if self.pairing is None:\n self.pairing = rocon_interaction_msgs.msg.Pairing()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.compatibility = str[start:end].decode('utf-8')\n else:\n self.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.display_name = str[start:end].decode('utf-8')\n else:\n self.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.description = str[start:end].decode('utf-8')\n else:\n self.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.resource_name = str[start:end].decode('utf-8')\n else:\n self.icon.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.format = str[start:end].decode('utf-8')\n else:\n self.icon.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.icon.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parameters = str[start:end].decode('utf-8')\n else:\n 
self.parameters = str[start:end]\n start = end\n end += 4\n (self.max,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pairing.rapp = str[start:end].decode('utf-8')\n else:\n self.pairing.rapp = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.pairing.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.parameters = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.key = str[start:end].decode('utf-8')\n else:\n val1.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.pairing.parameters.append(val1)\n start = end\n end += 4\n (self.hash,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.role = str[start:end].decode('utf-8')\n else:\n self.role = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message", "def serialize(self):\n raise Exception(\"Unimplemented!\")", "def bfd_get_notif_serializer():\n serializer = sl_bfd_common_pb2.SLBfdGetNotifMsg()\n return serializer", "def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n 
address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result = msg.deserialize(str_msg)\n return result", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def read(cls, proto):\n pass", "def serialize(self, data):\n return data", "def serialize(self):\n pass", "def intf_notif_op_serializer(batch):\n serializer = sl_interface_pb2.SLInterfaceNotifMsg()\n if 'interfaces' in batch:\n interfaces = []\n for interface in batch['interfaces']:\n entry = sl_common_types_pb2.SLInterface()\n if 'if_name' in interface:\n entry.Name = interface['if_name']\n interfaces.append(entry)\n serializer.Entries.extend(interfaces)\n return serializer", "def deserialize(self, obj):\n raise NotImplementedError", "def type(self) -> MessageType:\n raise NotImplementedError", "def serialize(self):\n raise NotImplementedError(\"Abstract class, implemented in sub class\")", "def _decode(self, msgCls, data):\r\n rosMsg = msgCls()\r\n\r\n for (slotName, slotType) in zip(rosMsg.__slots__, rosMsg._slot_types):\r\n if slotName not in data:\r\n continue\r\n\r\n if '[]' == slotType[-2:]:\r\n listBool = True\r\n slotType = slotType[:-2]\r\n else:\r\n listBool = False\r\n\r\n field = data[slotName]\r\n\r\n if listBool and not isinstance(field, (list, tuple)):\r\n raise TypeError('Given data does not match the definition of '\r\n 'the ROS message.')\r\n\r\n if slotType == 'string':\r\n convFunc = _stringify\r\n elif slotType in self._BASE_TYPES:\r\n convFunc = self._BASE_TYPES[slotType]\r\n elif slotType in self._SPECIAL_TYPES:\r\n convFunc = self._SPECIAL_TYPES[slotType]().decode\r\n elif slotType in self._customTypes and _checkIsStringIO(field):\r\n convFunc = self._customTypes[slotType][0]().decode\r\n else:\r\n convFunc = partial(self._decode,\r\n self._loader.loadMsg(*slotType.split('/')))\r\n\r\n if listBool:\r\n convFunc = partial(map, convFunc)\r\n\r\n setattr(rosMsg, slotName, convFunc(field))\r\n\r\n return rosMsg", "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def bfd_session_get_serializer(get_info, af):\n Message = collections.namedtuple('Message', [\n 'af',\n 'serializer',\n ])\n if af == 4:\n # IPv4 message types.\n ipv4_or_ipv6 = Message(\n af,\n sl_bfd_ipv4_pb2.SLBfdv4GetMsg\n )\n elif af == 6:\n # IPv6 message types.\n ipv4_or_ipv6 = Message(\n af,\n sl_bfd_ipv6_pb2.SLBfdv6GetMsg\n )\n # Create a SLBfdv4GetMsg or SLBfdv6GetMsg message \n serializer = ipv4_or_ipv6.serializer()\n if ipv4_or_ipv6.af == 4:\n if 'v4_nbr' in get_info:\n serializer.Key.NbrAddr = (\n int(ipaddress.ip_address(get_info['v4_nbr']))\n )\n if 'v4_src' in get_info:\n serializer.Key.SourceAddr = (\n int(ipaddress.ip_address(get_info['v4_src']))\n )\n elif ipv4_or_ipv6.af == 6:\n if 'v6_nbr' in get_info:\n serializer.Key.NbrAddr = (\n ipaddress.ip_address(get_info['v6_nbr']).packed\n )\n if 'v6_src' in get_info:\n serializer.Key.SourceAddr = (\n ipaddress.ip_address(get_info['v6_src']).packed\n )\n if 'if_name' in get_info:\n 
serializer.Key.Interface.Name = get_info['if_name']\n if 'vrf_name' in get_info:\n serializer.Key.VrfName = get_info['vrf_name']\n if 'type' in get_info:\n serializer.Key.Type = get_info['type']\n if 'type' in get_info:\n serializer.Key.Type = get_info['type']\n if 'count' in get_info:\n serializer.EntriesCount = get_info['count']\n if 'seq_num' in get_info:\n serializer.SeqNum = get_info['seq_num']\n if 'get_next' in get_info:\n serializer.GetNext = get_info['get_next']\n return serializer", "def input_message_type(self) -> type:\n raise NotImplementedError()", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def global_get_serializer():\n serializer = sl_global_pb2.SLGlobalsGetMsg()\n return serializer", "def _deserialize(self, handle):\n raise NotImplementedError", "def _GenericMessageToJsonObject(self, message):\n # Duration, Timestamp and FieldMask have ToJsonString method to do the\n # convert. Users can also call the method directly.\n return message.ToJsonString()", "def serialize(self, obj):\n pass", "def serialize(self, data, content_type):\r\n return self._get_serialize_handler(content_type).serialize(data)", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def message(self) -> Union[Message, bytes]:\n return self._message", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.cond_transition is None:\n self.cond_transition = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_path = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.state_path = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_class = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.state_class = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.initial_state_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.initial_state_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.input_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.input_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.output_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.output_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.cond_outcome = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.cond_outcome.append(val1)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.cond_transition = []\n for i in range(0, length):\n val1 = flexbe_msgs.msg.OutcomeCondition()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.state_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2 = str[start:end]\n val1.state_name.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.state_outcome = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2 = str[start:end]\n val1.state_outcome.append(val2)\n self.cond_transition.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.behavior_class = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.behavior_class = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.parameter_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.parameter_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.parameter_values = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.parameter_values.append(val1)\n start = end\n end += 8\n self.position = _get_struct_2f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.outcomes = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.outcomes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.transitions = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.transitions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sb'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.autonomy = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.userdata_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.userdata_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.userdata_remapping = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.userdata_remapping.append(val1)\n 
return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.type is None:\n self.type = std_msgs.msg.String()\n if self.parent_name is None:\n self.parent_name = std_msgs.msg.String()\n if self.name is None:\n self.name = std_msgs.msg.String()\n if self.pose is None:\n self.pose = geometry_msgs.msg.Pose()\n if self.sensed_objects is None:\n self.sensed_objects = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.sim_step,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.type.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parent_name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.parent_name.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.name.data = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.wall_time, _x.sim_time, _x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.count,) = _get_struct_2f7dI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.triggered = s.unpack(str[start:end])\n self.triggered = list(map(bool, self.triggered))\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.range = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.measurement = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sensed_objects = []\n for i in range(0, length):\n val1 = std_msgs.msg.String()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.data = str[start:end]\n self.sensed_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
start = end\n end += length\n self.sensed_objects_map = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def serialize(self, obj):\n return obj", "def deserialize(self, str):\n try:\n if self.objects is None:\n self.objects = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = vision_msgs.msg.ClassifiedObject()\n _v4 = val1.header\n start = end\n end += 4\n (_v4.seq,) = _struct_I.unpack(str[start:end])\n _v5 = _v4.stamp\n _x = _v5\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v4.frame_id = str[start:end].decode('utf-8')\n else:\n _v4.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object_class = str[start:end].decode('utf-8')\n else:\n val1.object_class = str[start:end]\n start = end\n end += 4\n (val1.confidence,) = _struct_f.unpack(str[start:end])\n _v6 = val1.roi\n _x = _v6\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _struct_4IB.unpack(str[start:end])\n _v6.do_rectify = bool(_v6.do_rectify)\n self.objects.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def remote_pushSerialized(pNamespace):", "def serialize(self):\n raise NotImplemented()", "def deserializer():\n return bytes.decode", "def parse(cls, message):\r\n if isinstance(message, PlatformMessage):\r\n inst = PlatformMessage.parse(message.serialize())\r\n return inst\r\n inst = PlatformMessage()\r\n if message is not None:\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n inst.sender = message[1]\r\n inst.interface = message[2]\r\n inst.method = message[3]\r\n if len(message) > 4:\r\n assert isinstance(message[4], (list, tuple)), \"Message's args expected to be list or tuple\"\r\n inst.args = copy.deepcopy(message[4])\r\n if len(message) > 5:\r\n assert isinstance(message[5], dict), \"Message's kwargs expected to be a dict\"\r\n inst.kwargs = copy.deepcopy(message[5])\r\n return inst", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return StorableObject_PB", "def serialize(cls, *args, **kwargs):\n return serialize_cls(cls)(*args, **kwargs)", "def deserialize(self, value):\n raise NotImplementedError", "def message_to_python(self, raw_message):\n return self.Message(self, raw_message)", "def serialize(self):\r\n return [self._signature, self.sender, self.interface, self.method, self.args, self.kwargs]", "def _serialise(self):\n # TODO (M Foley)\n pass", "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_2d2q14dq.pack(_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, 
_x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret))\n _x = self.msg\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _serialize(self, state, handle):\n raise NotImplementedError", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def from_serializable(self, _):\n\n assert False, \"Not implemented\"", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.Header is None:\n self.Header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.Header.seq, _x.Header.stamp.secs, _x.Header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.Header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.Header.frame_id = str[start:end]\n _x = self\n start = end\n end += 11\n (_x.x_pos, _x.y_pos, _x.angle, _x.code_type, _x.code_num,) = 
_get_struct_2hHBI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def mpls_get_serializer():\n serializer = sl_mpls_pb2.SLMplsGetMsg()\n return serializer", "def deserialize(cls, payload):\n return operations_pb2.Operation.FromString(payload)", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)", "def to_proto(self) -> None:\n\n pass", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.vehicle_id is None:\n self.vehicle_id = opil_v2.msg.Id()\n if self.action_capability is None:\n self.action_capability = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.vehicle_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vehicle_id.description = str[start:end].decode('utf-8')\n else:\n self.vehicle_id.description = str[start:end]\n _x = self\n start = end\n end += 84\n (_x.left_size, _x.right_size, _x.front_size, _x.rear_size, _x.min_height, _x.max_height, _x.payload, _x.max_pos_x_vel, _x.max_neg_x_vel, _x.max_pos_x_acc, _x.max_neg_x_acc, _x.max_pos_y_vel, _x.max_neg_y_vel, _x.max_pos_y_acc, _x.max_neg_y_acc, _x.max_pos_ang_vel, _x.max_neg_ang_vel, _x.velocity_control_sensitivity, _x.min_turning_radius, _x.batt_capacity, _x.batt_max_voltage,) = _get_struct_21f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vehicle_type = str[start:end].decode('utf-8')\n else:\n self.vehicle_type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vendor = str[start:end].decode('utf-8')\n else:\n self.vendor = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.action_capability = []\n for i in range(0, length):\n val1 = opil_v2.msg.RobotAction()\n _x = val1\n start = end\n end += 2\n (_x.category, _x.action,) = _get_struct_2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.attributes = []\n for i in range(0, length):\n val2 = opil_v2.msg.Tuple()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.type = str[start:end].decode('utf-8')\n else:\n val2.type = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.value = str[start:end].decode('utf-8')\n else:\n val2.value = str[start:end]\n val1.attributes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.description = str[start:end].decode('utf-8')\n else:\n val1.description = str[start:end]\n self.action_capability.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def __parse_message_as(msg_type: type, msg_str: str) -> Any:\n # parse the message\n msg_dict = json.loads(msg_str)\n\n # the type specified in the message needs to match\n # the type we are parsing as\n assert msg_dict[MSG_TYPE_NAME] == msg_type.__name__, \\\n f\"Message type did not match the parsing type,\" \\\n f\"parsing the message as type {msg_type.__name__},\" \\\n f\"but get a message of type {msg_dict[MSG_TYPE_NAME]}\"\n\n # remove the message type information, and create the object\n del msg_dict[MSG_TYPE_NAME]\n return msg_type(**msg_dict)", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def gen_message(msg: Message) -> str:\n msg_dict = msg._asdict()\n msg_dict.update({MSG_TYPE_NAME: type(msg).__name__})\n\n return json.dumps(msg_dict)", "def from_serializable(self, _):\n\n assert False, \"not implemented\"", "def _MessageToJsonObject(self, message):\n message_descriptor = message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n return self._WrapperMessageToJsonObject(message)\n if full_name in _WKTJSONMETHODS:\n return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)\n js = {}\n return self._RegularMessageToJsonObject(message, js)", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.type))\n _x = self.model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.head_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.body_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.arm_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2B3i.pack(_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, 
_x.number_of_hands))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def message_to_dict(message):\n return json.loads(protojson.encode_message(message))", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def receive(self) -> Message[ValueType]:", "def serialize(self, media: object) -> Union[bytes, bytearray, memoryview]:\n raise NotImplementedError()" ]
[ "0.6683183", "0.6607191", "0.6602124", "0.6388921", "0.6382543", "0.63370854", "0.6267809", "0.61996573", "0.6124349", "0.6091265", "0.6090545", "0.607747", "0.60588247", "0.6057081", "0.6040386", "0.6030836", "0.60173637", "0.5962537", "0.59547883", "0.5940483", "0.5939426", "0.59295857", "0.59241295", "0.5921641", "0.5885608", "0.58672863", "0.585991", "0.5847694", "0.58432466", "0.5834165", "0.58261895", "0.58096063", "0.58084995", "0.58052784", "0.5805053", "0.580324", "0.57846314", "0.57800484", "0.5764839", "0.5757419", "0.5745767", "0.57430387", "0.57411784", "0.5733398", "0.5730547", "0.5715915", "0.5709973", "0.5691898", "0.5691274", "0.56773096", "0.56706846", "0.5659353", "0.5656527", "0.5636598", "0.56322664", "0.5629066", "0.5618456", "0.56164485", "0.56159806", "0.5614858", "0.5614858", "0.56020683", "0.5596792", "0.5595191", "0.55825394", "0.5573597", "0.5573166", "0.5565371", "0.55512404", "0.5550836", "0.5548785", "0.55417275", "0.5537196", "0.5531674", "0.5529945", "0.5524592", "0.55222404", "0.5522222", "0.55021304", "0.5500594", "0.54915124", "0.54889065", "0.5469772", "0.54692304", "0.5460448", "0.54591537", "0.5458234", "0.5454788", "0.54543877", "0.5453518", "0.5452653", "0.54389626", "0.5432516", "0.5428034", "0.5425532", "0.54229563", "0.54188734", "0.541778", "0.54171985", "0.5414966" ]
0.58528733
27
Return method of serialized message
def get_method(cls, message):
        if message is not None:
            if isinstance(message, PlatformMessage):
                return message.method
            assert isinstance(message, (list, tuple)), "Message is expected to be a list or a tuple"
            assert len(message) >= 4, "Message's length expected to be at least 4"
            assert message[0] == PlatformMessage._signature, "Message's signature is incorrect"
            return message[3]
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get( self ):\n return self.__to_message_function( self.__raw_payload )", "def serialize_message(self) -> bytes:\n return self.compile_message().serialize()", "def __message_content__(self) -> MessageContent:", "def parse(self, serialized):\n raise NotImplementedError(\"Calling an abstract method.\")", "def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg", "def message(self):\n return self._message", "def _get_message(self):\n return self.__message", "def __str__(self):\n return self.message.as_string()", "def serialize(self):", "def serialize(self):\r\n return [self._signature, self.sender, self.interface, self.method, self.args, self.kwargs]", "def __repr__(self):\n return self.message", "def message(self) -> Union[Message, bytes]:\n return self._message", "def getDeserializer():", "def get_proto_serializer():\n def _serialize_proto(proto):\n return proto.SerializeToString()\n return _serialize_proto", "def _serialize(\n obj: object,\n to_proto: bool = True,\n to_bytes: bool = False,\n) -> Union[str, bytes, Message]:\n\n is_serializable: Serializable\n if not isinstance(obj, Serializable):\n if hasattr(obj, \"serializable_wrapper_type\"):\n is_serializable = obj.serializable_wrapper_type(value=obj) # type: ignore\n else:\n traceback_and_raise(\n Exception(f\"Object {type(obj)} has no serializable_wrapper_type\")\n )\n else:\n is_serializable = obj\n\n serialize_method = getattr(is_serializable, \"sy_serialize\", None)\n if serialize_method is None:\n serialize_method = getattr(is_serializable, \"serialize\", None)\n if serialize_method is None:\n raise Exception(f\"Object {type(obj)} has no serialize method\")\n\n return serialize_method(to_proto=to_proto, to_bytes=to_bytes)", "def serialize(self, data):", "def serialize(msg) -> str:\n try:\n return json.dumps(msg, separators=(',', ':'))\n except json.JSONDecodeError as err:\n return err.msg", "def gen_message(msg: Message) -> str:\n msg_dict = msg._asdict()\n msg_dict.update({MSG_TYPE_NAME: type(msg).__name__})\n\n return json.dumps(msg_dict)", "def _stringify_proto(obj):\n if isinstance(obj, str): return obj\n elif isinstance(obj, Message): return obj.SerializeToString()\n else: raise TypeError('Object can not be serialized as a string.')", "def serialize(self):\n\n # The len must be multiple of 4 bits to convert unambiguously\n\n id_len = self.id.bit_length()\n while (id_len % 4)!= 0:\n id_len += 1\n if self.payload:\n pay_len = self.payload.bit_length()\n while (pay_len % 4)!= 0:\n pay_len += 1\n else: pay_len = 0\n if self.command:\n com_len = self.command.bit_length()\n while (com_len % 4)!= 0:\n com_len += 1\n else: com_len = 0\n\n values = {\n \"id\": self.id,\n \"id_len\": id_len,\n \"payload\": self.payload,\n \"payload_len\": pay_len,\n \"command\": self.command,\n \"command_len\": com_len\n }\n\n\n if self.id == Message.MEASURE or self.id == Message.SINGLE_MEASURE:\n serial_format = (\n \"uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n else:\n serial_format = (\n \"0x23, uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n\n message = bitstring.pack(serial_format, **values)\n\n rospy.logdebug(\"Sent command '0x%s'\", message.hex)\n\n return message.tobytes()", "def _func_serialize(self, args):\n return args", "def string_conversion_method(self) -> tp.Callable:\n\n # First, look for a 
method defined for this specific type.\n try:\n field_type_name = self.field_type.__name__\n except AttributeError:\n raise AttributeError(f\"Could not detect name of field type {self.field_type}.\")\n try:\n return getattr(self, f\"_string_to_{field_type_name}\")\n except AttributeError:\n pass\n\n # Try a super-type method.\n if issubclass(self.field_type, str):\n return lambda value: value\n if issubclass(self.field_type, GameObjectSequence):\n return self._string_to_GameObjectSequence\n if issubclass(self.field_type, GameObject):\n return self._string_to_GameObject\n if issubclass(self.field_type, IntEnum):\n raise NotImplementedError\n\n raise AttributeError(f\"Could not find field update method '_string_to_{field_type_name}' or a superclass.\")", "def message_to_python(self, raw_message):\n return self.Message(self, raw_message)", "def serialize(cls, *args, **kwargs):\n return serialize_cls(cls)(*args, **kwargs)", "def _MessageToJsonObject(self, message):\n message_descriptor = message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n return self._WrapperMessageToJsonObject(message)\n if full_name in _WKTJSONMETHODS:\n return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)\n js = {}\n return self._RegularMessageToJsonObject(message, js)", "def _AnyMessageToJsonObject(self, message):\n if not message.ListFields():\n return {}\n # Must print @type first, use OrderedDict instead of {}\n js = OrderedDict()\n type_url = message.type_url\n js['@type'] = type_url\n sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)\n sub_message.ParseFromString(message.value)\n message_descriptor = sub_message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n js['value'] = self._WrapperMessageToJsonObject(sub_message)\n return js\n if full_name in _WKTJSONMETHODS:\n js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],\n sub_message)(self)\n return js\n return self._RegularMessageToJsonObject(sub_message, js)", "def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def message(self):\n \n return(self.__message)", "def serialize(self):\n pass", "def get_message(self):\n return super().get_message()", "def message_bytes(self) -> bytes:\n if isinstance(self._message, Message):\n return self._message.encode()\n return self._message", "def get_message(self):\n return self.msg", "def _encode_message(self, bound_obj: Any, message: Message) -> str:\n msg_dict = self.protocol.message_to_dict(message)\n if self._encode_filter_call is not None:\n self._encode_filter_call(bound_obj, message, msg_dict)\n return self.protocol.encode_dict(msg_dict)", "def serialize(self):\n raise Exception(\"Unimplemented!\")", "def payload(self):", "def _process_msg(cls, msg):\n raise NotImplementedError", "def serialize(self):\n 
messageLen = len(self._messageBuf) + 1 # 1 byte for the message type\n header = pack(self.headerFormat, messageLen)\n msgType = pack(self.messageTypeFormat, self.messageType)\n payload = bytes(self._messageBuf)\n return header + msgType + payload", "def __getstate__(self):\n return self.message", "def __nanojsonrpc_unpack(self, msg):\n try:\n pack = json.loads(msg)\n if 'method' not in pack:\n return None\n else:\n return pack\n except:\n traceback.print_exc()\n return None", "def _process_message(self, obj):\n pass", "def _decode(self, msgCls, data):\r\n rosMsg = msgCls()\r\n\r\n for (slotName, slotType) in zip(rosMsg.__slots__, rosMsg._slot_types):\r\n if slotName not in data:\r\n continue\r\n\r\n if '[]' == slotType[-2:]:\r\n listBool = True\r\n slotType = slotType[:-2]\r\n else:\r\n listBool = False\r\n\r\n field = data[slotName]\r\n\r\n if listBool and not isinstance(field, (list, tuple)):\r\n raise TypeError('Given data does not match the definition of '\r\n 'the ROS message.')\r\n\r\n if slotType == 'string':\r\n convFunc = _stringify\r\n elif slotType in self._BASE_TYPES:\r\n convFunc = self._BASE_TYPES[slotType]\r\n elif slotType in self._SPECIAL_TYPES:\r\n convFunc = self._SPECIAL_TYPES[slotType]().decode\r\n elif slotType in self._customTypes and _checkIsStringIO(field):\r\n convFunc = self._customTypes[slotType][0]().decode\r\n else:\r\n convFunc = partial(self._decode,\r\n self._loader.loadMsg(*slotType.split('/')))\r\n\r\n if listBool:\r\n convFunc = partial(map, convFunc)\r\n\r\n setattr(rosMsg, slotName, convFunc(field))\r\n\r\n return rosMsg", "def _object2proto(self) -> RunFunctionOrConstructorAction_PB:\n return RunFunctionOrConstructorAction_PB(\n path=self.path,\n args=[serialize(x) for x in self.args],\n kwargs={k: serialize(v) for k, v in self.kwargs.items()},\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(self.id),\n )", "def bfd_get_serializer():\n serializer = sl_bfd_common_pb2.SLBfdGetMsg()\n return serializer", "def getSerializer():", "def __repr__(self):\n return f'Message: {vars(self)}'", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def message_type(self):\n return self.type", "def get_message(self):\n return self.message", "def get_message(self):\n return self.message", "def getMessage():\n return message", "def test_serialize_through_message(self):\n s1 = StatsEntry(self.stats, \"test\", \"GET\")\n s1.log(10, 0)\n s1.log(20, 0)\n s1.log(40, 0)\n u1 = StatsEntry.unserialize(s1.serialize())\n\n data = Message.unserialize(\n Message(\"dummy\", s1.serialize(), \"none\").serialize()\n ).data\n u1 = StatsEntry.unserialize(data)\n\n self.assertEqual(20, u1.median_response_time)", "def get_message(self):\n data = self.socket.recv(1024)\n if not data:\n logging.error('Failed to read data from socket')\n return\n\n return self.decode_message(data)", "def _GenericMessageToJsonObject(self, message):\n # Duration, Timestamp and FieldMask have ToJsonString method to do the\n # convert. 
Users can also call the method directly.\n return message.ToJsonString()", "def raw_message(self) -> RawMessage:\n return self.__raw_message", "def serialize(self, data):\n return data", "def msg(self):\n\t\treturn self.message", "def serialize(self) -> str:\n pass", "def Method(self, default=None):\n return self.data.get('method', default)", "def onMessage(self, payload, isBinary):", "def get_message (self) :\n return self._message", "def encode(rosMsg): #@NoSelf", "def serialize(self, name, *args, **kwargs): \n if '.' in name:\n unspec = self._unspecify_name(name)\n if not unspec or not (repr(unspec) in self.messages):\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n name = unspec\n elif name in self.message_rhashes:\n name = self.message_rhashes[name]\n else:\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n struct = self.messages[repr(name)]()\n index = 0\n for field in struct.DESCRIPTOR.fields:\n # Loop through the fields in order of definition\n # If we can't, the fields have to be initialized by the\n # keyword arguments\n value = args[index] if index < len(args) else kwargs.get(field.name)\n # dict.get() returns None if the entry was not found\n if value == None:\n # If a field is optional, it can be skipped\n if field.label == field.LABEL_OPTIONAL:\n continue\n raise FieldNotDefinedException(\"The field '\" + field.name +\n \"' was not defined when serializing a '\" +\n self.message_hashes[repr(name)] + \"'\")\n try:\n r = self._map_onto(getattr(struct, field.name), value, self._get_options(struct, field.name))\n if r:\n self._checked_set(struct, field.name, r[0])\n except TypeError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' to \" + str(e).replace('has type', 'which has the type'))\n except ValueError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' but \" + str(e))\n index += 1\n return pack(str(self.header_size) + 's', name) + struct.SerializePartialToString()", "def serialize(self, data):\n raise NotImplementedError", "def __str__(self):\n return str(self.serialize())", "def receive(self) -> Message[ValueType]:", "def __call__(self):\n return self.method.hexdigest()", "def getMessage(self):\n return self.message", "def getMessage(self):\n return self.message", "def receive_message(self, message):", "def __str__(self):\n return self.message", "def __str__(self):\n return self.message", "def __str__(self):\n return self.message", "def type(self) -> MessageType:\n raise NotImplementedError", "def serialize(self):\n raise NotImplementedError(\"Abstract class, implemented in sub class\")", "def _post_deserialize (self):\n pass", "def get_method(self) -> MethodStr:\n return METHODS.inverse[self.method()]", "def serialize(self):\n\n\t\treturn str(self)", "def callable(func, message):\n return func, message", "def _process_method(self, method):\n return method", "def serialize(self) -> typing.Any:\n return self._serialize(self.__dict__)", "def send_jsonified(self, msg, stats=True):\n raise NotImplemented()", "def _simple_object_recv(self, txn, actionName, request, method, transform=None):\n\n storeObject, classObject = yield self._getStoreObjectForRequest(txn, request)\n if classObject is not None:\n value = yield getattr(classObject, method)(storeObject, *request.get(\"arguments\", ()), **request.get(\"keywords\", {}))\n else:\n value = yield getattr(storeObject, 
method)(*request.get(\"arguments\", ()), **request.get(\"keywords\", {}))\n\n returnValue(transform(value) if transform is not None else value)", "def serialize(self, message):\n value = bytes(json.dumps(message, cls=DjangoJSONEncoder), encoding=\"utf-8\")\n if self.crypter:\n value = self.crypter.encrypt(value)\n\n # As we use an sorted set to expire messages we need to guarantee uniqueness,\n # with 12 bytes.\n random_prefix = random.getrandbits(8 * 12).to_bytes(12, \"big\")\n return random_prefix + value", "def _parse_message(self, string, protocol):\n #print(\"Parsing message: %s\" % string)\n msg = parse_message_string(string)\n result = MessageResult(original_message=msg)\n\n if isinstance(msg, MethodCallMessage):\n # Handle method call\n res = self._method_call(msg)\n response_msg = ResponseMessage(result_code=0, result=res, response_to=msg.id)\n result.response = create_message_string(response_msg)\n elif isinstance(msg, SubscribeMessage):\n # Handle subscription to event\n response_msg = ResponseMessage(result_code=0, result=None, response_to=msg.id)\n result.response = create_message_string(response_msg)\n else:\n raise MessageHandleError(MessageHandleError.RESULT_UNEXPECTED_MESSAGE, msg)\n\n return result", "def encode_message(self, message):\n assert isinstance(message, Message.Implementation)\n tmp_member = self._community._my_member\n self._community._my_member = self._my_member\n try:\n return self._community.get_conversion_for_message(message).encode_message(message)\n finally:\n self._community._my_member = tmp_member", "def message(self) -> str:\n return self.fields.get('msg', self.raw_string)", "def _get_payload(self, method, **params):\n try:\n payload = params['data']['payload']\n if self.prettyprint:\n payload = \"\\n\" + json.dumps(json.loads(payload),\n indent=self.indent)\n except KeyError:\n payload = \"N/A\" if method == \"Event Channel Message\" else \"None\"\n return payload", "def message(self):\n\n if self.op_return is None:\n return None\n\n return bytearray.fromhex(self.op_return).decode('utf-8')", "def get_message_type(self):\n return self.message_type", "def __next__(self):\n return self.read_message()", "def msg(self):\n if \"msg\" in self._json:\n return self._json[\"msg\"]\n elif \"detail\" in self._json:\n return self._json[\"detail\"]\n else:\n return self._json", "def message(self):\n if not python_utils.is_string(self._message):\n raise NotImplementedError(\n 'self.message must be assigned a value in __init__')\n return self._message" ]
[ "0.64785546", "0.6399759", "0.6097286", "0.6095025", "0.6048653", "0.5926648", "0.5798314", "0.5763225", "0.5758533", "0.5757418", "0.5754897", "0.57526416", "0.5739287", "0.5705692", "0.570088", "0.5696248", "0.5656703", "0.5653811", "0.56207484", "0.56186205", "0.5613904", "0.56124", "0.5609937", "0.55895746", "0.5559307", "0.55566204", "0.5541941", "0.5541668", "0.5534327", "0.5529613", "0.5529613", "0.5529613", "0.5529613", "0.5529613", "0.5529613", "0.5529174", "0.5522952", "0.55143327", "0.5507726", "0.54924434", "0.5492428", "0.5490815", "0.5487275", "0.5481626", "0.54755807", "0.54633373", "0.54631114", "0.54615974", "0.5448301", "0.54045105", "0.5396525", "0.5392231", "0.53878367", "0.53878367", "0.538397", "0.53828543", "0.53828543", "0.5382504", "0.5379553", "0.53745276", "0.5368407", "0.53681046", "0.5363945", "0.5356522", "0.5345367", "0.5337585", "0.53358984", "0.53176355", "0.53154224", "0.5315335", "0.53144395", "0.5305667", "0.52990216", "0.52946484", "0.5293267", "0.5293267", "0.5292108", "0.52911085", "0.52911085", "0.52911085", "0.52878916", "0.5278204", "0.5269622", "0.52662665", "0.52646303", "0.5259261", "0.52530855", "0.525209", "0.5250271", "0.52500683", "0.52490264", "0.52455676", "0.52333236", "0.5233179", "0.5232071", "0.52284425", "0.5222197", "0.5216327", "0.52162725", "0.5215183" ]
0.6116328
2
Return args of serialized message
def get_args(cls, message):
        if message is not None:
            if isinstance(message, PlatformMessage):
                return message.args
            assert isinstance(message, (list, tuple)), "Message is expected to be a list or a tuple"
            assert len(message) >= 4, "Message's length expected to be at least 4"
            assert message[0] == PlatformMessage._signature, "Message's signature is incorrect"
            if len(message) > 4:
                return copy.deepcopy(message[4])
            else:
                return None
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _func_serialize(self, args):\n return args", "def func_deserialize(self, args): # pragma: no cover\n if len(args) == 0:\n return []\n x = eval(args.decode(\"utf-8\"))\n return x", "def toArgs(self):\n # FIXME - undocumented exception\n post_args = self.toPostArgs()\n kvargs = {}\n for k, v in post_args.items():\n if not k.startswith('openid.'):\n raise ValueError(\n 'This message can only be encoded as a POST, because it '\n 'contains arguments that are not prefixed with \"openid.\"')\n else:\n kvargs[k[7:]] = v\n\n return kvargs", "def get_args(self):\r\n return self.args", "def parse_args(self):\n return Args(self.args)", "def get_args(self) -> List[str]:\n return self.content.split()[1:]", "def serialize(self):\r\n return [self._signature, self.sender, self.interface, self.method, self.args, self.kwargs]", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "async def dict(self):\n\n #pylint disable=no-member\n\n args = {}\n for item, value in self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def args(self) -> List[str]:\n return self.__args", "def get_args(msg):\n match = re.match(Controller.KEY_REGEX, msg.content)\n full_command = msg.content[match.end(1):].strip()\n\n full_command = re.findall(r'(?:\")[^\"]+(?:\")|[^\" ]+', full_command)\n full_command = [token.strip('\"') for token in full_command]\n if len(full_command) == 0:\n command = 'help'\n args = []\n elif len(full_command) == 1:\n command = full_command[0]\n args = []\n else:\n command = full_command[0]\n args = full_command[1:]\n return command, args", "def arguments(self):\n return parse_arguments(self['data'])", "def _parse_msg(msg):\n split_args_regex = \"(.*?)\\:(.*)\"\n args_split_regex = \"\\,\"\n match = re.match(split_args_regex, msg)\n if match is not None:\n message = match.group(1)\n arg_str = match.group(2)\n arg_iter = re.finditer(args_split_regex, args)\n args = []\n for arg in arg_iter:\n args.append(arg) \n return None", "def args(self):\n return self._parse_args", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def parse_args(self):\n\n # Parse the arguments themselves.\n args = vars( self.parser.parse_args() )\n\n return args", "def process(self, msg, kwargs) -> Tuple[str, Dict]:\n return msg, kwargs", "def args(self):\n return self._args.copy()", "def dataargs(self):\n return self.argsbytype(Data)", "def arguments(self, args=[]):\n if args is None:\n args = []\n\n if not isinstance(args, (list, tuple)):\n args = (args,)\n\n serialize_me = []\n\n for arg in args:\n if isinstance(arg, str):\n serialize_me.append({'str': arg})\n else:\n serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))\n\n self._arguments = json.dumps(serialize_me)", "def _get_args(self):\n return (self.http_status, self.code, self.message)", "def get_args(self):\n req_argv = self._ptr.contents.argv\n args = []\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args", "def punkte(self):\n return self.args", "async def prepare_message(self, **args):\n await self.set_args(**args)\n await self.validate()\n return await self.dict()", "def _sanitize_args(self, args_msg):\n if isinstance(args_msg, (list, tuple)):\n args = args_msg\n else:\n args = None\n return args", 
"def args(self):\n return self.cmd_args", "def sync_dict(self):\n #pylint disable=no-member\n\n args = {}\n for item, value in self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def getArgs(self, namespace):\n namespace = self._fixNS(namespace)\n args = []\n for ((pair_ns, ns_key), value) in self.args.items():\n if pair_ns == namespace:\n if isinstance(ns_key, bytes):\n k = str(ns_key, encoding=\"utf-8\")\n else:\n k = ns_key\n if isinstance(value, bytes):\n v = str(value, encoding=\"utf-8\")\n else:\n v = value\n args.append((k, v))\n return dict(args)", "def get_kwargs(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.kwargs\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n if len(message) > 5:\r\n return copy.deepcopy(message[5])\r\n else:\r\n return None\r\n return None", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def args_str(self):", "def get_args(msg_or_text, return_string=False, **kwargs):\n\n if \"as_string\" in kwargs:\n logger.warning(\n \"as_string is being depreciated, please use return_string.\")\n return_string = kwargs[\"as_string\"]\n\n try:\n text = msg_or_text['text']\n except TypeError:\n text = msg_or_text\n\n if return_string:\n return \" \".join(text.split(\" \")[1:])\n else:\n return shlex.split(text)[1:]", "def get_arguments_string(self):\n result = self.__get_client_server_arg_string('')\n result = self.__get_x_args_string(result)\n result = self.__get_xx_args_string(result)\n result = self.__get_system_property_args_string(result)\n result = self.__get_unsorted_args_string(result)\n return result", "def args(self):\n\t\tret = []\n\t\tfor argname in self._arg_names:\n\t\t\tret += [self._args[argname]]\n\t\treturn ret", "def _func_serialize(self, args): # pragma: no cover\n return repr(args).encode(\"utf-8\")", "def func_serialize(self, args): # pragma: no cover\n return repr(args).encode(\"utf-8\")", "def args(self) -> typing.Tuple[str, typing.List[str]]:\n func = inspect.stack()[1][3]\n command = func[len(self.CMD_PREFIX):]\n return ('{} {}'.format(sys.argv[0], command),\n sys.argv[2:])", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return 
pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def _get_args(item):\n args = item.get('args')\n if args and not isinstance(args, collections.Sequence):\n args = (args,)\n return args", "def __prepare_args(self, args):\n ret = []\n for a in args:\n if isinstance(a, bytes):\n if self.__size_expr.match(a):\n ret += [a]\n else:\n ret += [b'\"' + a + b'\"']\n continue\n ret += [bytes(str(a).encode(\"utf-8\"))]\n return ret", "def get_command_args(self, skip_serialized_namedtuple: bool = False) -> Sequence[str]:\n return (\n _get_entry_point(self.job_origin)\n + [\"api\", \"execute_step\"]\n + (\n [\"--compressed-input-json\", self._get_compressed_args()]\n if not skip_serialized_namedtuple\n else []\n )\n )", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def decode_args(args, encoding=None):\n return [\n arg.decode(encoding)\n if type(arg) == bytes else arg\n for arg in args\n ]", "def get_args(self):\n return {\n 'contents': self.get_formatted_code()\n }", "def __parse_function_args(self, buffer):\n\t\targs = []\n\t\ttoken = buffer.read(1)\n\t\twhile token != \"(\": # FIXME don't duplicate code with __read_block\n\t\t\ttoken = buffer.read(1)\n\t\t\tassert token\n\t\tcount = 1\n\t\t\n\t\tdef flusharg(arg, args):\n\t\t\targ = \"\".join(arg)\n\t\t\targ = SpellString(arg).format(self.obj, proxy=self.proxy)\n\t\t\targs.append(arg)\n\t\t\treturn []\n\t\t\n\t\t_arg = []\n\t\twhile count:\n\t\t\ttoken = buffer.read(1)\n\t\t\tif token == \"(\":\n\t\t\t\tcount += 1\n\t\t\telif token == \")\":\n\t\t\t\tcount -= 1\n\t\t\tif not count or not token:\n\t\t\t\t_arg = flusharg(_arg, args)\n\t\t\t\tbreak\n\t\t\t\n\t\t\tif token == \",\" and count == 1:\n\t\t\t\t_arg = flusharg(_arg, args)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t_arg.append(token)\n\t\t\n\t\treturn args", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg", "def get_message(self, *args, **kwargs):\n\n message = ''\n message += ', '.join([str(key) + ': ' + str(val) for key, val in kwargs.items()]) + '; ' if kwargs else ''\n message += ', '.join(str(val) for val in args) if args else ''\n\n return message", "def message(self):\n return self.args[0]", "def decode_raw_args(\n\targs: List[Union[str, bytes]],\n\tstdin_encoding: str\n) -> List[str]:\n\treturn [\n\t\targ.decode(stdin_encoding)\n\t\tif type(arg) == bytes else arg\n\t\tfor arg in args\n\t]", "def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"args\")", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def pack(self, *args):\n return self._msg_struct.pack(self.message_id, *args)", "def params(self):\n if isinstance(self.request, list):\n return 
unmunchify(self.request)\n (params, _) = xmlrpc.loads(self.request)\n return params", "def unpack_args(kwargs):\n return [v for p in zip(list(kwargs.keys()), list(kwargs.values())) for v in p]", "def test_args(self):\n args = forge.args\n assert isinstance(args, forge._signature.VarPositional)\n assert args.name == 'args'\n assert args.converter is None\n assert args.validator is None", "def read_sockeye_args(params_path):\n with open(params_path) as f:\n content = f.readlines()\n\n res = []\n for line in content:\n res += line.split()\n return res", "def argdict(self):\n return dict((arg.name, val) for arg, val in zip(self.sig, self))", "def args_extract(self, args, kwargs):\n # make popable (can't pop tuple of args)\n args = list(args)\n\n def getarg(name, num):\n if args and len(args) > num:\n return args.pop(num)\n elif kwargs.get('files'):\n return kwargs.pop('files')\n return None\n\n # First to not affect data = args.pop(0)\n files = getarg('files', 1)\n data = getarg('data', 0)\n\n # make mutable if something\n if files:\n files = MultiValueDict(files)\n if data:\n data = MultiValueDict(data)\n\n return data, files, args, kwargs", "def get_x_args_dict(self):\n return self.__x_args", "def parse_arguments(args):", "def _func_deserialize(self, args): # pragma: no cover\n if len(args) == 0:\n return self.testing_options['empty']\n x = eval(args.decode(\"utf-8\"))\n return x", "def _collect_repr_args(self, poargs, kwargs):", "def get_display_message_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n duration = params.get('P', 0)\n args = [duration]\n return args", "def getPositionalArgs():", "def args(self):\n return self._args_[: self.nargs()]", "def parse(self,message):\n\t\tprefix = ''\n\t\ttrailing = []\n\t\tif not message:\n\t\t\treturn '','',''\n\t\tif message[0] == ':':\n\t\t prefix, message = message[1:].split(' ', 1)\n\t\tif message.find(' :') != -1:\n\t\t message, trailing = message.split(' :', 1)\n\t\t args = message.split()\n\t\t args.append(trailing)\n\t\telse:\n\t\t args = message.split()\n\t\tcommand = args.pop(0)\n\t\treturn prefix, command, args", "def unpack(self, raw_message):\n return self._msg_struct.unpack(raw_message)", "def unpack_message(line):\n if not line:\n return None\n\n prefix = None\n trailing = []\n\n line = line.rstrip()\n\n if line[0] == ':':\n prefix, line = line[1:].split(' ', 1)\n prefix = unpack_prefix(prefix)\n if ' :' in line:\n line, trailing = line.split(' :', 1)\n args = line.split()\n args.append(trailing)\n else:\n args = line.split()\n\n try:\n command = args.pop(0)\n except IndexError:\n command = ''\n\n return prefix, command.upper(), args", "def command_args(self):\n return self._command_args", "def __init__(self, message: str = None, **kwargs):\n self.message = message\n self.kwargs = kwargs\n self.args = {}\n self.args['message'] = self.message\n self.args['args'] = self.kwargs", "def normalize_args(self, args):\n return args", "def parse_args(self):\n return self.__process_args__(self.parser.parse_args())", "def parse_args(self, argv=None):\n return vars(self.parser.parse_args(argv))", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def get_queue_song_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [params.get('P', 1)]\n return args", "def parse_args_dict(args=None):\n return vars(parse_args(args))", "def parseArgs(self, args, **vars):\n argList = []\n for token in self.argLexer.finditer(args):\n for tokenType, tokenValue in list(token.groupdict().items()):\n if tokenValue 
is not None:\n argList.append(getattr(self, 'argtoken_' +\n tokenType)(tokenValue, vars))\n return argList" ]
[ "0.6681042", "0.6476981", "0.6381497", "0.6343124", "0.63172007", "0.6315931", "0.6190981", "0.61659044", "0.61323375", "0.61323375", "0.61323375", "0.6127745", "0.60635227", "0.6060811", "0.5984353", "0.59649163", "0.5958156", "0.593882", "0.59370756", "0.5936084", "0.5916326", "0.5907584", "0.58706874", "0.5865834", "0.58502877", "0.5849781", "0.5848143", "0.5842201", "0.5839241", "0.5835915", "0.58154577", "0.58051676", "0.57938457", "0.57661", "0.5727334", "0.56973684", "0.56764865", "0.56686014", "0.5664379", "0.56548613", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56411046", "0.56395626", "0.56365967", "0.5631321", "0.55979973", "0.5563975", "0.55578107", "0.5527194", "0.5517265", "0.5481201", "0.5464863", "0.54260963", "0.5417341", "0.5417341", "0.53930783", "0.53875726", "0.5374801", "0.5374605", "0.5359152", "0.53458536", "0.5343083", "0.53340805", "0.53251195", "0.53178346", "0.5315504", "0.5314628", "0.5302508", "0.52998286", "0.5291001", "0.52760154", "0.5270627", "0.52602893", "0.5244269", "0.52442104", "0.5243175", "0.5238925", "0.52313447", "0.5229267", "0.5220046", "0.52152556", "0.5203864" ]
0.7136301
0
Return kwargs of serialized message
def get_kwargs(cls, message):
        if message is not None:
            if isinstance(message, PlatformMessage):
                return message.kwargs
            assert isinstance(message, (list, tuple)), "Message is expected to be a list or a tuple"
            assert len(message) >= 4, "Message's length expected to be at least 4"
            assert message[0] == PlatformMessage._signature, "Message's signature is incorrect"
            if len(message) > 5:
                return copy.deepcopy(message[5])
            else:
                return None
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toArgs(self):\n # FIXME - undocumented exception\n post_args = self.toPostArgs()\n kvargs = {}\n for k, v in post_args.items():\n if not k.startswith('openid.'):\n raise ValueError(\n 'This message can only be encoded as a POST, because it '\n 'contains arguments that are not prefixed with \"openid.\"')\n else:\n kvargs[k[7:]] = v\n\n return kvargs", "def process(self, msg, kwargs) -> Tuple[str, Dict]:\n return msg, kwargs", "def get_kwargs(self):\n return {}", "def json(self):\n return self.kwargs", "def kwargs(self):\n return self._kwargs", "def kwargs(self):\n return self._kwargs", "def get_message_dict(self):\n if not self.is_valid():\n raise ValueError(\"Message cannot be sent from invalid contact form\")\n message_dict = {}\n for message_part in ('from_email', 'message', 'recipient_list', 'subject'):\n attr = getattr(self, message_part)\n message_dict[message_part] = callable(attr) and attr() or attr\n return message_dict", "def get_message(self, **kwargs):\n message = dovesoft.Sms()\n\n for key in kwargs.keys():\n if key not in kwargs.get(\"ignore\", smsconf.IGNORE_KEYS):\n fun = getattr(message, \"set_{key}\".format(key=key))\n fun(kwargs.get(key, \"\"))\n\n return message", "def serialize(self):\r\n return [self._signature, self.sender, self.interface, self.method, self.args, self.kwargs]", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def __init__(self, message: str = None, **kwargs):\n self.message = message\n self.kwargs = kwargs\n self.args = {}\n self.args['message'] = self.message\n self.args['args'] = self.kwargs", "def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:\n if self._constructed_manually:\n raise UnsupportedError(\n \"Surrogates constructed manually (ie Surrogate.from_botorch) may not \"\n \"be serialized. 
If serialization is necessary please initialize from \"\n \"the constructor.\"\n )\n\n return {\n \"botorch_model_class\": self.botorch_model_class,\n \"model_options\": self.model_options,\n \"mll_class\": self.mll_class,\n \"mll_options\": self.mll_options,\n \"outcome_transform\": self.outcome_transform,\n \"input_transform\": self.input_transform,\n \"covar_module_class\": self.covar_module_class,\n \"covar_module_options\": self.covar_module_options,\n \"likelihood_class\": self.likelihood_class,\n \"likelihood_options\": self.likelihood_options,\n \"allow_batched_models\": self.allow_batched_models,\n }", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]", "def get_request_kwargs(self, api_params, *args, **kwargs):\n serialized = self.serialize_data(kwargs.get(\"data\"))\n kwargs[\"data\"] = self.format_data_to_request(serialized)\n return kwargs", "def params(self, **kwargs):\n return kwargs", "def format_arguments(self, **kwargs):\n return kwargs", "def _kwargs(self):\n dict = {\"name\":self.name}\n return dict", "def _func_serialize(self, args):\n return args", "def get_kwargs(d):\n raise NotImplementedError(\"subclass must implement get_kwargs()\")", "def test_kwargs(self):\n def f(**kwargs):\n self.assertEqual(kwargs, {'spam': 'eggs'})\n\n kwargs = self.decode('\\n\\x0b\\x01\\tspam\\x06\\teggs\\x01')\n\n f(**kwargs)", "async def prepare_message(self, **args):\n await self.set_args(**args)\n await self.validate()\n return await self.dict()", "def get_kwargs(self):\n return {\n 'user': self.user,\n }", "def serialize(self) -> dict:\n return {\n 'type': self.type,\n **self.args,\n }", "def get_kwargs(d):\n return {\"values\": d.get(\"values\", None)}", "def get_message(self, *args, **kwargs):\n\n message = ''\n message += ', '.join([str(key) + ': ' + str(val) for key, val in kwargs.items()]) + '; ' if kwargs else ''\n message += ', '.join(str(val) for val in args) if args else ''\n\n return message", "async def dict(self):\n\n #pylint disable=no-member\n\n args = {}\n for item, value in self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def _get_kwargs_for_backend(self):\n return dict()", "def serialize(self):\n return {\n 'special_messages': self.special_messages,\n 'description': self.description,\n 'name': self.name,\n 'id': self.id,\n }", "def get_event_params(self) -> dict[str, Any]:\n\n return json.loads( # type: ignore[no-any-return]\n json.dumps(self.__dict__, default=lambda o: o.__dict__),\n object_pairs_hook=self._dict_clean,\n )", "def get_dict(**kwargs):\n return kwargs", "def get_args(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.args\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n if len(message) > 4:\r\n return copy.deepcopy(message[4])\r\n else:\r\n return None\r\n return None", "def serialize(self, name, *args, **kwargs): \n if '.' 
in name:\n unspec = self._unspecify_name(name)\n if not unspec or not (repr(unspec) in self.messages):\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n name = unspec\n elif name in self.message_rhashes:\n name = self.message_rhashes[name]\n else:\n raise UnknownMessageException(\"Tried to provide serialization for \" + \n \"unknown message '\" + name + \"'\")\n struct = self.messages[repr(name)]()\n index = 0\n for field in struct.DESCRIPTOR.fields:\n # Loop through the fields in order of definition\n # If we can't, the fields have to be initialized by the\n # keyword arguments\n value = args[index] if index < len(args) else kwargs.get(field.name)\n # dict.get() returns None if the entry was not found\n if value == None:\n # If a field is optional, it can be skipped\n if field.label == field.LABEL_OPTIONAL:\n continue\n raise FieldNotDefinedException(\"The field '\" + field.name +\n \"' was not defined when serializing a '\" +\n self.message_hashes[repr(name)] + \"'\")\n try:\n r = self._map_onto(getattr(struct, field.name), value, self._get_options(struct, field.name))\n if r:\n self._checked_set(struct, field.name, r[0])\n except TypeError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' to \" + str(e).replace('has type', 'which has the type'))\n except ValueError, e:\n raise FieldWrongTypeException(\"Tried to set the field '\" + field.name +\n \"' but \" + str(e))\n index += 1\n return pack(str(self.header_size) + 's', name) + struct.SerializePartialToString()", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def transformMessage(self):\n\n message = json.loads(self.message)\n\n call_data = {\n 'call_id': message.get('call_id')\n }\n\n if message.get('type') == 'start':\n call_data['start_timestamp'] = message.get('timestamp')\n call_data['source'] = message.get('source')\n call_data['destination'] = message.get('destination')\n else:\n call_data['stop_timestamp'] = message.get('timestamp')\n\n self.data = call_data\n return self.data", "def prepare_message(self, message_data, delivery_mode,\n content_type, content_encoding, **kwargs):\n return (message_data, content_type, content_encoding)", "def __init__(self, data, attributes=None, *args, **kwargs):\n\n super(PublisherMessage, self).__init__(*args, **kwargs)\n\n self.data = data\n self.attributes = attributes or {}", "def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict", "def prepare_message(self, cmd: Dict, value: Any) -> Any:\n\n message = {}\n message[\"endpoint\"] = cmd[\"endpoint\"]\n message[\"method\"] = cmd[\"method\"]\n\n # Prepare payload\n payload = None\n # Check that value is empty for GET requests\n if cmd[\"method\"] == \"GET\":\n if value is not None:\n self.logger.warning(\"Trying to send GET request with non-empty payload <%s>\", value)\n else:\n path_to_payload = cmd[\"path\"].copy()\n parameter = path_to_payload.pop()\n payload = {parameter: value}\n # The easiest way to build the rest of the nested dict we need\n # is to start bottom up\n path_to_payload.reverse()\n # Wrap the rest of stuff around\n for item in path_to_payload:\n payload = {item: payload}\n payload = json.dumps(payload)\n message[\"data\"] = payload\n 
self.logger.debug(\"prepare_message()::constructed payload <%s>\", payload)\n return message", "def prepare_message(self, cmd: Dict, value: Any) -> Any:\n\n message = {}\n message[\"endpoint\"] = cmd[\"endpoint\"]\n message[\"method\"] = cmd[\"method\"]\n\n # Prepare payload\n payload = None\n # Check that value is empty for GET requests\n if cmd[\"method\"] == \"GET\":\n if value is not None:\n self.logger.warning(\"Trying to send GET request with non-empty payload <%s>\", value)\n else:\n path_to_payload = cmd[\"path\"].copy()\n parameter = path_to_payload.pop()\n payload = {parameter: value}\n # The easiest way to build the rest of the nested dict we need\n # is to start bottom up\n path_to_payload.reverse()\n # Wrap the rest of stuff around\n for item in path_to_payload:\n payload = {item: payload}\n payload = json.dumps(payload)\n message[\"data\"] = payload\n self.logger.debug(\"prepare_message()::constructed payload <%s>\", payload)\n return message", "def construct_payload(self, **kwargs):\r\n \r\n payload = kwargs.get('parse')\r\n excude = kwargs.get('dele')\r\n\r\n if payload and excude:\r\n payload.pop(excude, None)\r\n return payload", "def kwargs (self):\n return copy.deepcopy (self._kwargs)", "def sync_dict(self):\n #pylint disable=no-member\n\n args = {}\n for item, value in self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def payload(cls, **kwargs):\n payload = {}\n\n # If no lists, bools, or dicts have been defind\n if cls.lists is None:\n cls.lists = []\n if cls.bools is None:\n cls.bools = []\n if cls.dicts is None:\n cls.dicts = []\n if cls.keyword_arguments is None:\n cls.keyword_arguments = []\n\n # If nothing has been passed in\n if not kwargs:\n kwargs = {}\n\n for key, value in kwargs.items():\n if key not in cls.keyword_arguments:\n raise KeywordArgument('The keyword argument', key, 'is not supported')\n continue\n\n # Change strings to lists, bools, or dicts based on a provided list\n if key in cls.lists:\n if isinstance(value, str):\n list_values = []\n [list_values.append(value.strip(',')) for value in value.splitlines()]\n else:\n list_values = [value]\n payload[key] = list_values\n elif key in cls.bools:\n payload[key] = ast.literal_eval(value)\n elif key in cls.dicts:\n payload[key] = json.loads(value)\n elif key == 'passwd':\n payload[key] = hashlib.md5(str.encode(value, 'utf-8')).hexdigest()\n else:\n payload[key] = value\n\n return payload", "def transformer_arguments(self) -> Dict[str, Any]:\n return {**self.transformer_required_arguments(), **self.transformer_optional_arguments()}", "def _kwargs(self):\n dict = DAG._kwargs(self) \n dict[\"inputpaths\"] = self.inputpaths\n dict[\"outputpath\"] = self.outputpath\n dict[\"query\"] = self.query\n return dict", "def msg_info_multiple_dict(self):\n return self._msg_info_multiple_dict", "def get_form_kwargs(self):\n kwargs = super(msgWriteView, self).get_form_kwargs()\n if hasattr(self, 'object'):\n kwargs.update({'instance': self.object})\n kwargs.update({'user': self.request.user})\n return kwargs", "def _collect_repr_args(self, poargs, kwargs):", "def test_kwargs():\n client, server = make_queue_pairs('localhost')\n client.send_inputs(1, input_kwargs={'hello': 'world'})\n _, task = server.get_task()\n assert task.args == (1,)\n assert task.kwargs == {'hello': 'world'}", "def kwargs_to_json(self):\n result = dict()\n for keyword, argument in 
self.kwargs.items():\n to_json_method = getattr(argument, 'to_json', None)\n\n if to_json_method:\n result[keyword] = argument.to_json()\n\n else:\n try:\n argument_ = json.loads(json.dumps(argument))\n result[keyword] = argument_\n except (TypeError, OverflowError):\n logging.warning(\"Object of type %s cannot be JSON serialized. Skipping...\" % type(argument))\n\n return result", "def _AnyMessageToJsonObject(self, message):\n if not message.ListFields():\n return {}\n # Must print @type first, use OrderedDict instead of {}\n js = OrderedDict()\n type_url = message.type_url\n js['@type'] = type_url\n sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)\n sub_message.ParseFromString(message.value)\n message_descriptor = sub_message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n js['value'] = self._WrapperMessageToJsonObject(sub_message)\n return js\n if full_name in _WKTJSONMETHODS:\n js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],\n sub_message)(self)\n return js\n return self._RegularMessageToJsonObject(sub_message, js)", "def serialize(self):\n return {\n\n\n }", "def interpolator_kwargs(self):\n return self._interpolator_kwargs", "def interpolator_kwargs(self) -> dict:\n\n return self._interpolator_kwargs", "def encode(self, *args, **kwargs):\n return self.serializer.serialize(args), \\\n self.serializer.serialize(kwargs)", "def _log_kwargs(msg='', **kwargs):\n kwarg_msg = ' '.join([('%s: |%s|' % (str(key), kwargs[key]))\n for key in kwargs])\n return \"%s %s\" % (msg, kwarg_msg)", "def testConstructorKwargs(self):\n class SomeMessage(messages.Message):\n name = messages.StringField(1)\n number = messages.IntegerField(2)\n\n expected = SomeMessage()\n expected.name = 'my name'\n expected.number = 200\n self.assertEquals(expected, SomeMessage(name='my name', number=200))", "def get_message_payload(self):\n return {\n 'ts': self.timestamp,\n 'channel': self.channel,\n 'username': self.username,\n 'icon_emoji': self.icon_emoji,\n 'blocks': [self._get_message_block()],\n }", "def test_parameters_message(self):\n expected_topic = self.factory.common_topic + WAPMF.PARAMETERS\n values = {\n \"bool_parameter\": False,\n \"int_parameter\": 1,\n \"float_parameter\": 13.37,\n \"string_parameter\": \"foo\",\n }\n expected_payload = json.dumps(values)\n expected_message = Message(expected_topic, expected_payload)\n\n serialized_message = self.factory.make_from_parameters(values)\n\n self.assertEqual(expected_message, serialized_message)", "def as_dict(self):\n return {'message':self.message, 'line': self.line}", "def _check_serialize(self, kwargs):\n for k in kwargs:\n if k in self.backend.TO_SERIALIZE:\n if isinstance(kwargs[k], dict):\n kwargs[k] = {j: self.backend.serialize(kwargs[k][j])\n for j in kwargs[k]}\n elif isinstance(kwargs[k], list):\n kwargs[k] = [self.backend.serialize(j)\n for j in kwargs[k]]\n else:\n raise TypeError('Your iterable should be a dict or a list')\n return kwargs", "def message_to_dict(message):\n return json.loads(protojson.encode_message(message))", "def decode(self, *payload):\n if not payload:\n return (), {}\n args, kwargs = payload\n return self.serializer.deserialize(args), \\\n self.serializer.deserialize(kwargs)", "def _dispatch_kwargs(self, **kwargs) -> Tuple[Dict, Dict, Dict, Dict]:\n # Ensure each argument only matches one function\n method_kwargs = self.preprocess_kwargs | self.forward_kwargs | \\\n self.visualize_kwargs | self.postprocess_kwargs\n\n union_kwargs = 
method_kwargs | set(kwargs.keys())\n if union_kwargs != method_kwargs:\n unknown_kwargs = union_kwargs - method_kwargs\n raise ValueError(\n f'unknown argument {unknown_kwargs} for `preprocess`, '\n '`forward`, `visualize` and `postprocess`')\n\n preprocess_kwargs = {}\n forward_kwargs = {}\n visualize_kwargs = {}\n postprocess_kwargs = {}\n\n for key, value in kwargs.items():\n if key in self.preprocess_kwargs:\n preprocess_kwargs[key] = value\n elif key in self.forward_kwargs:\n forward_kwargs[key] = value\n elif key in self.visualize_kwargs:\n visualize_kwargs[key] = value\n else:\n postprocess_kwargs[key] = value\n\n return (\n preprocess_kwargs,\n forward_kwargs,\n visualize_kwargs,\n postprocess_kwargs,\n )", "def serialize(self) -> dict:\n return {\n \"input_idx\": self.input_idx,\n \"body_parameter_idx\": self.body_parameter_idx,\n }", "def as_dict(self):\n d = {}\n for k in self._kwargs:\n d[k] = getattr(self, k)\n return d", "def _to_dict(self, **kwargs):\n pass", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def message_to_dict(message):\n return json.loads(protojson.encode_message(message))", "def test_kwargs(self):\n kwargs = forge.kwargs\n assert isinstance(kwargs, forge._signature.VarKeyword)\n assert kwargs.name == 'kwargs'\n assert kwargs.converter is None\n assert kwargs.validator is None", "def messages(self) -> dict:\n raise NotImplementedError", "def unpack_args(kwargs):\n return [v for p in zip(list(kwargs.keys()), list(kwargs.values())) for v in p]", "def _GenericMessageToJsonObject(self, message):\n # Duration, Timestamp and FieldMask have ToJsonString method to do the\n # convert. Users can also call the method directly.\n return message.ToJsonString()", "def to_kwargs(f, *args, **kwargs):\n\n s = inspect.getargspec(f)\n defaults = s.defaults or []\n default_args = s.args[-len(defaults):]\n\n kw = {}\n kw.update(zip(default_args, defaults))\n kw.update(kwargs)\n kw.update(zip(s.args, args))\n return kw", "async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj", "def serialize(cls, *args, **kwargs):\n return serialize_cls(cls)(*args, **kwargs)", "def _submit_unpack_kwargs(self, params):\n return self._submit(**params)", "def process_message(message):\n return {\n \"subject\": message.subject,\n \"sender\": message.sender_name,\n \"header\": message.transport_headers,\n \"body\": message.plain_text_body,\n \"creation_time\": message.creation_time,\n \"submit_time\": message.client_submit_time,\n \"delivery_time\": message.delivery_time,\n \"attachment_count\": message.number_of_attachments,\n }", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg", "def get_form_kwargs(self):\n kwargs = {\n 'initial': self.get_initial(),\n 'prefix': self.get_prefix(),\n }\n\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({\n 'data': self.request.POST,\n 'files': self.request.FILES,\n })\n #print('kwargs',kwargs)\n return kwargs", "def _get_method_kwargs(self, kwargs, method_args):\n if isinstance(method_args, str):\n method_args = getattr(self.instance, method_args)()\n\n method_kwargs = dict()\n for arg in method_args:\n name = arg['name']\n keyword = arg.get('keyword', name)\n\n if name in kwargs:\n value = kwargs[name]\n elif 'default' in arg:\n value = arg['default']\n elif 
arg.get('required', True):\n raise TypeError(\"missing expected argument '{}'\".format(name))\n\n method_kwargs[keyword] = value\n\n return method_kwargs", "def extrapolator_kwargs(self) -> dict:\n\n return self._extrapolator_kwargs", "def dumps(self) -> Dict[str, Any]:\n return {\n \"commitId\": self.commit_id,\n \"parentCommitId\": self.parent_commit_id,\n \"message\": self.message,\n \"committer\": self.committer.dumps(),\n }", "def data(self, **kw):\n return dict(params=kw)", "def data(self, **kw):\n return dict(params=kw)", "def create_message(message):\n return {\n \"id\": message.id,\n \"from\": message.sender,\n \"preview\": create_preview(message),\n \"subject\": message.subject,\n \"date\": message.date_created,\n }", "def params(self):\n if isinstance(self.request, list):\n return unmunchify(self.request)\n (params, _) = xmlrpc.loads(self.request)\n return params", "def get_partial_arguments(self):\n return (), {}", "def serialize_object(\n self, data: Dict[str, Any], many: bool, **kwargs\n ) -> JobFormData:\n return self.__model__(**data) # type: ignore", "def uncall(self):\n args = {}\n for name in self.schema:\n args[name] = serialize(getattr(self, name))\n return (), args", "def get_form_kwargs(self):\n self.object = self.get_object()\n kwargs = super().get_form_kwargs()\n return kwargs", "def kwargs(self):\n return self.environ.get('router.kwargs', {})", "def _RegularMessageToJsonObject(self, message, js):\n fields = message.ListFields()\n\n try:\n for field, value in fields:\n if self.preserving_proto_field_name:\n name = field.name\n else:\n name = field.json_name\n if _IsMapEntry(field):\n # Convert a map field.\n v_field = field.message_type.fields_by_name['value']\n js_map = {}\n for key in value:\n if isinstance(key, bool):\n if key:\n recorded_key = 'true'\n else:\n recorded_key = 'false'\n else:\n recorded_key = str(key)\n js_map[recorded_key] = self._FieldToJsonObject(\n v_field, value[key])\n js[name] = js_map\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n # Convert a repeated field.\n js[name] = [self._FieldToJsonObject(field, k)\n for k in value]\n elif field.is_extension:\n name = '[%s]' % field.full_name\n js[name] = self._FieldToJsonObject(field, value)\n else:\n js[name] = self._FieldToJsonObject(field, value)\n\n # Serialize default value if including_default_value_fields is True.\n if self.including_default_value_fields:\n message_descriptor = message.DESCRIPTOR\n for field in message_descriptor.fields:\n # Singular message fields and oneof fields will not be affected.\n if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and\n field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or\n field.containing_oneof):\n continue\n if self.preserving_proto_field_name:\n name = field.name\n else:\n name = field.json_name\n if name in js:\n # Skip the field which has been serialized already.\n continue\n if _IsMapEntry(field):\n js[name] = {}\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n js[name] = []\n else:\n js[name] = self._FieldToJsonObject(field, field.default_value)\n\n except ValueError as e:\n raise SerializeToJsonError(\n 'Failed to serialize {0} field: {1}.'.format(field.name, e))\n\n return js", "def serialize_message(self) -> bytes:\n return self.compile_message().serialize()", "def _deserialize_data(self):\n try:\n self._func_name, self._instance, self._args, self._kwargs = self.serializer.loads(self.data)\n except Exception as e:\n raise DeserializationError() from e", "def toData(self):\n\n 
lines = []\n # 1. Request and protocol version\n lines.append(self.request + \" \" + BANNER)\n # 2. Request arguments\n lines.extend(['%s: %s' % (arg, self.args[arg]) for arg in self.args])\n # 3. End of message (double CR-LF)\n data = \"\\r\\n\".join(lines) + \"\\r\\n\\r\\n\"\n # In debug mode, parse our own message to check it is well-formed\n assert checkMessage(data), \"Bad generated message: \" + data\n return data", "def __init__(self, msg: dict):\n\n for key, value in msg.items():\n setattr(self, key, value)\n self.data = msg\n self.dt = datetime.fromisoformat(self.timestamp)", "def getCloneArgs(self):\n\n values = {\n \"value\": self.subnode_value.makeClone(),\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def get_validator_kwargs(self):\n return {\n 'schema': self.get_validation_schema(),\n }" ]
[ "0.6532804", "0.63777333", "0.6250696", "0.61953866", "0.60052204", "0.60052204", "0.59744835", "0.59685344", "0.59614235", "0.59334373", "0.59222966", "0.5830837", "0.5793464", "0.579085", "0.57832605", "0.57062554", "0.5704295", "0.57024705", "0.56911284", "0.5642286", "0.56362814", "0.56006736", "0.5589026", "0.55782497", "0.5555141", "0.5540734", "0.5522632", "0.5508533", "0.55057347", "0.5504289", "0.5485831", "0.54782736", "0.5471479", "0.546942", "0.54423237", "0.54344875", "0.5401513", "0.5388383", "0.5380725", "0.5380725", "0.5353606", "0.53355986", "0.5334412", "0.53057504", "0.5302655", "0.5300884", "0.5292084", "0.5276258", "0.5270313", "0.52493846", "0.5244377", "0.52188456", "0.5217586", "0.5214994", "0.52146137", "0.5201164", "0.51975614", "0.51963633", "0.5193952", "0.518208", "0.51805526", "0.5151271", "0.5150318", "0.5146759", "0.5132974", "0.5125673", "0.50944316", "0.5093211", "0.5086964", "0.50844914", "0.50827456", "0.507971", "0.50755537", "0.50729376", "0.50718904", "0.5062219", "0.50496316", "0.5044703", "0.50418997", "0.50397456", "0.5029442", "0.5025873", "0.5025663", "0.5023089", "0.50176835", "0.50176835", "0.50173295", "0.50056607", "0.5001773", "0.5000981", "0.49978068", "0.49955288", "0.499438", "0.49856248", "0.49845368", "0.49813986", "0.49797314", "0.49724296", "0.49695352", "0.49677092" ]
0.721355
0
Transforms self into a list of key field values
def serialize(self): return [self._signature, self.sender, self.interface, self.method, self.args, self.kwargs]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def values(self):\r\n return [self[k] for k in self]", "def items(self):\r\n return [(k, self[k]) for k in self]", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def to_list(self) -> List[Tuple[keyType, valueType]]:\n key_count, value_count = self.size()\n if (key_count == 0) and (value_count == 0):\n return []\n keys = [] # type: List[keyType]\n values = [] # type: List[List]\n for head_node in self.hashTable:\n if head_node.count != 0:\n for node in head_node.singlyLinkedList:\n keys.append(node.key)\n keys.sort(key=functools.cmp_to_key(self.compare_for_key))\n values.insert(keys.index(node.key), node.values)\n result = []\n for index in range(0, len(keys)):\n for value in values[index]:\n result.append((keys[index], value))\n return sorted(result, key=functools.cmp_to_key(self.compare_for_list_key_value))", "def items(self):\n x = []\n for k in list(self.keys()):\n x.append((k, self[k]))\n return x", "def keys(self):\r\n return [k for k in self]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[k] for k in self.keys()]", "def items(self):\n return [(k, self[k]) for k in self.keys()]", "def values(self):\n return [self[name] for name in self.keys()]", "def get_for_key(self, key) -> list:\n return [res[key] for res in self.list]", "def get_key_values(self):\n return self.key_values", "def list(self) -> List:\n return list(self.values())", "def keys(self):\n return [ x for x in self ]", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def values(self):\n x = []\n for k in list(self.keys()):\n x.append(self[k])\n return x", "def list_ids (self, key):\n\n list_of_key_values = [str(x[key]) for x in self.result]\n\n self.result = list(dict.fromkeys([re.findall(r'\\b\\d+\\b', x)[0] for x in list_of_key_values if len(re.findall(r'\\b\\d+\\b', x)) !=0]))\n\n return self", "def items(self):\n return [ (x, self[x]) for x in self ]", "def __dic2list(self,dic):\n\treturn map(lambda x:[x,dic[x]],dic)", "def getlist(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n return []", "def values(self, *keys: _K) -> t.List[t.Any]: # type: ignore[override]\n if keys:\n d: t.List[t.Any] = []\n for key in keys:\n try:\n i = self.index(key)\n except KeyError:\n d.append(None)\n else:\n d.append(self[i])\n return d\n return list(self)", "def values(self):\n return [ self[x] for x in self ]", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def keys(self) -> List:\n pass", "def items(self):\n return list(zip(self.keys(), self.values()))", "def items(self):\n return list(zip(self.keys(), self.values()))", "def as_list(self, keys=None) -> Sequence:\n lst = self.list\n\n if keys is not None:\n lst = [{key: res[key] for key in keys} for res in lst]\n\n return lst", "def list_values(key):\n return meta.list_values(key=key)", "def transform_to_key_value(self, source):\n return [{\"key\": key, \"value\": source[key]} for key in source]", "def _force_key_as_list(self, key):\r\n 
return [key] if isinstance(key, (str, unicode)) else key", "def getlist(self, key, type=None):\n if key not in self:\n return []\n values = super().__getitem__(key)\n if type is not None:\n values = [type(value) for value in values]\n return values", "def __dic2list(self,dic):\n return [(x,dic[x]) for x in dic]", "def lists(self):\n return dict.items(self)", "def values(self):\n values = []\n for key in self.keys():\n values.append(self[key])\n return values", "def key_pairs(self) -> List[Tuple[int, str]]:\n return [\n (timestamp, sensor_id)\n for timestamp, sensors in self.items()\n for sensor_id in sensors.keys()\n ]", "def key_attributes(self):\n\n return [level.key for level in self.levels]", "def items(self):\n return ((key, value) for (key, value) in zip(self.__keys, self.__vals))", "def _as_dict(self):\r\n local = dict((key, value) for key, value in self)\r\n joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)\r\n if not k[0] == '_'])\r\n local.update(joined)\r\n return local", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def _as_dict(self):\n local = dict((key, value) for key, value in self)\n joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])\n local.update(joined)\n return local", "def items(self):\n acc = []\n for k in self.keys():\n pm = self._maps[k]\n acc.append((k,pm))\n return acc", "def keys(self):\n return list(self.__iter__())", "def obtain(self, key):\n if key in self:\n vals = self[key]\n else:\n vals = []\n dict.__setitem__(self, key, vals)\n return vals", "def keys(self):\n return [key for key, value in self.items()]", "def dic2list(self, X: numpy.ndarray):\n raise NotImplementedError()", "def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def keys(self):\n for ts in self:\n yield ts", "def secondary_keys_dicts(self):", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def data(self, *keys: _TResultKey) -> t.List[t.Dict[str, t.Any]]:\n return [record.data(*keys) for record in self]", "def get_values(self) -> list:\r\n values = []\r\n for key, value in self._items:\r\n values.append(value)\r\n return values", "def keyify(self):\n return keyify_obj(self)", "def _modified(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))", "def keyrefs(self):\n return list(self.data)", "def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list", "def Dictify(self):\n\t\tnew_self = copy.deepcopy(self.__dict__)\n\n\t\t#TODO: add in support for non built in classes?\n\t\t#fields = list(self.__dict__.keys())\n\t\t#for fieldname in fields:\n\t\t#\tif type(new_self[fieldname]).__module__ == 'numpy':\n\t\t#\t\tselecting[fieldname] = getattr(self,fieldname).tolist()[0]\n\n\t\treturn new_self", "def keys(self):\n return list(self.token2id.values())", "def getkeys(self):\n return list(self.keys)", "def _sparse2seq(self, key):\n seq = []\n for (d,v) in key:\n seq.append(d)\n seq.append(v)\n return seq", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not 
None:\r\n L.append(o)\r\n return L", "def getlist(self, key):\n try:\n vals = _dict_getitem(self, key.lower())\n except KeyError:\n return []\n else:\n if isinstance(vals, tuple):\n return [vals[1]]\n else:\n return vals[1:]", "def items(self, *args, **kwargs):\n return [ (key, self._get(key, *args, **kwargs),) for key in self.keys(*args, **kwargs) ]", "def _key(self):\n return (self.name, self.array_type.upper(), self.values)", "def values(\n self, *keys: _TResultKey\n ) -> t.List[t.List[t.Any]]:\n return [record.values(*keys) for record in self]", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def key_attributes(self):\n\n return [level.key for level in self._levels.values()]", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def __init__(self):\n self.keys = []\n self.values = []", "def get_items(self, value, key=None):\n if key is None:\n return self.dicts(value)\n else:\n items = self.dicts(value)\n return [item[key] for item in items]", "def _list_fields(self):\n return list(self._state.keys())", "def _getKeyList(self):\n return LinkedList(InternalRack(self, 1))", "def __iter__(self) -> (str, np.ndarray):\n for k, v in self.fields.items():\n yield k, v", "def values(self):\n return [_ for _ in self._dict.values()]", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def _convert(self, dictlike):\n for incoming_key, valuelist in util.dictlike_iteritems(dictlike):\n for value in valuelist:\n new_key = self.keyfunc(value)\n if incoming_key != new_key:\n raise TypeError(\n \"Found incompatible key %r for value %r; this \"\n \"collection's \"\n \"keying function requires a key of %r for this value.\" % (\n incoming_key, value, new_key))\n yield value", "def items(self):\n with self.__plock:\n return map(lambda key: (key, self[key]), self._keys)", "def asPyDict(self):\n fieldDict = dict()\n for kvp in self.keyvaluepair_set.all():\n fieldDict[kvp.key] = kvp.value\n return fieldDict", "def _key(self):\n return (self.name, self.type_.upper(), self.value)", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def keys(self):\n return list(self.iterkeys())", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def collect_keys(self, list_):\n keys = list_.keys[:]\n todo = [list_]\n while 1:\n node = todo.pop()\n refs = []\n for ch in node.children:\n if ch.name == \"ref\": refs.append(ch)\n elif ch.name == \"element\" and ch.attr[\"name\"] in keys:\n k = ch.attr[\"name\"]\n list_.keymap[k] = ch\n keys.remove(k)\n if not keys: break\n for r in refs:\n d = self.defs[r.attr[\"name\"]]\n d.ref = r\n todo.append(d)\n for k in list_.keymap:\n out = list_.keymap[k]\n in_ = []\n while out.parent != list_:\n chs = out.parent.children[:]\n pos = chs.index(out)\n chs[pos:pos+1] = in_\n in_ = chs\n out = out.parent.ref\n pos = list_.children.index(out)\n list_.children[pos:pos+1] = in_", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def hash_fieldlist(cls):\n for field in cls.fieldlist:\n cls.fieldhash[field.id] = field", "def registered_fields(self):\n return {key for mapping in self for key in mapping.mapping.keys()}", "def get_list(self):\n return sorted(self.__entries.keys())", "def _get_keys(self, listOfKeys):\n return self._keys", "def __iter__(self):\n return self.ordered_keys.__iter__()", "def __iter__(self):\n return self.keys()", "def keys(self) -> tuple[Hashable, ...]:\n return 
tuple([self._hashify(item = c) for c in self.contents])", "def to_list(hash):\r\n list = []\r\n if hash is None:\r\n return list\r\n for i, key in enumerate(hash.keyset):\r\n list.append(get(hash, key))\r\n return list", "def items(self):\n return zip(self._keys, self._values)", "def __init__(self, key):\n self.key = [int_mapping(k) for k in key]" ]
[ "0.67881566", "0.6781893", "0.6743758", "0.6743758", "0.6675744", "0.6648665", "0.66283107", "0.6555832", "0.65546185", "0.65546185", "0.65546185", "0.6533801", "0.6497453", "0.64785767", "0.6443632", "0.6415694", "0.64099073", "0.64046633", "0.63925886", "0.63925886", "0.63813394", "0.63793266", "0.63657945", "0.63535494", "0.63390595", "0.63316196", "0.6329521", "0.6327017", "0.63226646", "0.6299535", "0.6299535", "0.629845", "0.6281282", "0.62691975", "0.625697", "0.6218899", "0.617572", "0.6158653", "0.6148685", "0.6138199", "0.6107172", "0.6106927", "0.60973036", "0.6086754", "0.6079411", "0.60736585", "0.6057584", "0.60449576", "0.6013738", "0.5995565", "0.5980472", "0.5977473", "0.5969719", "0.59658605", "0.59638655", "0.59459174", "0.5930188", "0.59235245", "0.59234875", "0.59163237", "0.5907416", "0.5907101", "0.5903933", "0.5888352", "0.5887693", "0.58781797", "0.58781797", "0.58671296", "0.5862658", "0.58472633", "0.5837718", "0.58283633", "0.58178", "0.5788608", "0.5788608", "0.5781724", "0.57804996", "0.5778275", "0.57704103", "0.5770136", "0.5763618", "0.57621425", "0.57541996", "0.57529277", "0.5739047", "0.5736961", "0.57350993", "0.5727933", "0.57145447", "0.570743", "0.5704559", "0.57031685", "0.5701236", "0.5696794", "0.568406", "0.5682659", "0.568167", "0.5680783", "0.56788766", "0.56740713", "0.5666682" ]
0.0
-1
Creates message with reply indicating successful ending of method call
def success(cls, retval, retvalname='value'):
    if isinstance(retval, dict) and retvalname is None:
        retval["__result__"] = "success"  # TODO: right here just modified input dict. That's not good
    else:
        retval = {"__result__": "success", retvalname: retval}
    return PlatformMessage(method="__reply__", kwargs=retval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finished(self, reply):\n pass", "def endMessage(self):", "def end():\n return say()", "def finish(self, message):\n self.stdout = message\n self.returncode = 0", "def reply_object():\n reply_object = {\"code\": \"\"}\n return reply_object", "def acknowledgement(self, message: Message[ValueType]):", "def success(self, msg):\n print \"comm succeded\"\n return msg", "def _success(self, msg=\"\"):\n if msg:\n self.result[\"message\"] = msg\n self.module.exit_json(**self.result)", "def end(self, text):\n if text is None:\n string = self._build_string(\"Completed!\")\n else:\n string = self._build_string(text)\n out.write(\"\\r\" + string + \"\\n\")\n out.flush()", "def sendEnd(self):\n self._sendCommonMessage(self.SONY_MSG_Common_Bye, self.ThreeValueMsg.pack(a=0, b=0, c=0))", "def reply(self, result):\n if self._reply_channel is None:\n assert False, \"can only reply to a synchronous message, e.g. somebody must be calling us with 'call'\"\n else:\n self._reply_channel.send(result)", "def end(update, context) -> int:\n update.callback_query.edit_message_text(\n 'Bye! I hope we can talk again some day.')\n\n logger.info(\"User [%s] exited the conversation, [Exit], from [Main Menu / Results State].\",\n update.callback_query.message.chat.first_name)\n return ConversationHandler.END", "def end_message(self):\r\n if self.cur_message is not None:\r\n self.cur_message.end_time = datetime.now()\r\n self.wait_message()", "def _response_success(self, msg, msgID):\r\n if not self._status:\r\n # Can not help it if the response takes some time and in the mean\r\n # time the interface is disabled; therefore, don't raise an error\r\n # instead just skip sending the response\r\n return\r\n\r\n self._conn.sendMessage(self._iTag, self._clsName, msg, msgID)", "def finish(text):\n finish.has_been_called = True\n print(\"{}{}\".format(success, text))", "def succeeded(self):\n self.did_end = True", "def finish(self):\n self.logger.debug(\"%s -> finish()\" % self)\n self.lines = ''.join(self.response_data).split(CRLF)\n\n if len(self.lines) < 1:\n raise nntplib.NNTPDataError(\"No data received\")\n\n self.response_code, self.response_message = self.lines[0][:3], \\\n self.lines[0][3:].strip()\n\n self.logger.debug(\"code = %s\" % self.response_code)\n self.logger.debug(\"msg = %s\" % self.response_message)", "def response(self, context, message):\r\n return True", "def end(update: Update, context: CallbackContext) -> int:\n update.callback_query.answer()\n\n text = \"See you around!\"\n update.callback_query.edit_message_text(text=text)\n\n return END", "def _response(self, *lines):\n for line in lines:\n self.client.dataReceived(line + b'\\r\\n')\n self.client.dataReceived(\n b'0001 OK [READ-ONLY] ' + self.command + b' completed\\r\\n')", "def Finish(self, message=\"\"):\n delta = int(self.length - self.nbits)\n sys.stdout.write(self.char * delta + \"] \" + message + \"\\n\")", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def send_message(self):\r\n return \"success\"", "def finish(self):\n self.connection.reset_arguments()\n self.connection.write_ok()", "def test_tls_finished_message_symmetry(self):\n server, client = loopback()\n\n assert server.get_finished() == client.get_peer_finished()\n assert client.get_finished() == server.get_peer_finished()", "def tellIfEnded(self):\n self.congratulate()", "def testTerminateResponseWithServerClose(self):\n self.client_connect()\n self.client_send('set chopped 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set chopped 0 0 1\\r\\n1\\r\\n\")\n 
self.mock_close()\n self.client_recv('.*ERROR .*\\r\\n')", "def reply_with_code(self, code: int) -> None:", "def send_finish_event(self):\n self.status['type'] = '__end__'\n self._send()", "def notify_end(self, status, objective):\n pass # pragma: no cover", "def reply(cls, user, context, message, reply_message):\r\n pass", "def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))", "def _reply(self, success: bool, error_message: str, result: dict, **kwargs):\n return rest_response(\n success=success,\n message=error_message,\n result=result,\n convert_google_style=False,\n **kwargs,\n )", "def start_finish(self):\r\n self.send_queue.put(('finish',))", "def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)", "def append_message_footer(msg, messagebody):\n footer = \"\\n\\n_____________________\\n\\n^^I ^^am ^^a ^^Bot. ^^| [^^How ^^to ^^use](https://github.com/Rafficer/ProtonStatusBot/tree/master#how-to-use) ^^| ^^Made ^^with ^^🖤 ^^by ^^/u/Rafficer ^^| ^^[Source](https://github.com/rafficer/ProtonStatusBot) ^^| [^^Report ^^a ^^Bug](https://github.com/Rafficer/ProtonStatusBot/issues)\"\n full_message = messagebody + footer\n logger.debug(\"Replying...\")\n try:\n msg.reply(full_message)\n except prawcore.exceptions.RequestException:\n logger.debug(\"Replying failed. Checking for connectivity and trying again.\")\n connectivity_check()\n msg.reply(full_message)\n logger.debug(\"Replied!\")", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def end_call(self, request, identifier=None):\n call = self.get_object()\n timestamp = request.data.get('timestamp', None)\n\n try:\n if not call.has_ended:\n call.end_call(timestamp=timestamp)\n except ValueError as exc:\n raise serializers.ValidationError(str(exc))\n\n serializer = self.serializer_class(instance=call)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def _respond_message(self, msg):\n self.set_status(200)\n self.set_header(\"Content-Type\", \"application/x-mplane+json\")\n self.write(mplane.model.unparse_json(msg))\n self.finish()", "def InvocationEnd(builder):\n return End(builder)", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_make_reply(self):\n msg_helper = MessageHelper()\n msg = msg_helper.make_inbound('inbound')\n reply = msg_helper.make_reply(msg, 'reply content')\n self.assert_message_fields(reply, {\n 'content': 'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def reply(self, request, *args, **kwargs):\n context = {\n 'request': request,\n 'conversation': self.get_object()\n }\n serializer = MessageSerializer(data=request.data, partial=True, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n headers = self.get_success_message_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def outgoing(self, msg):\n self.outgoing_serial += 1\n msg.header.serial = self.outgoing_serial\n\n if msg.header.message_type is 
MessageType.method_call:\n self.awaiting_reply[msg.header.serial] = handle = self.handle_factory()\n return handle", "def exit_success(message: Optional[str] = None) -> NoReturn:\n\n\tif (message != None):\n\t\tprint(message)\n\tsys.exit(EXIT_SUCCESS)", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__end_msg__)", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def exit(self):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"exit()\"\n\n # exit() functionality is implemented with a special dst.\n exit_msg = Msg(\n dst = DST_EXIT,\n x = randint(0, UINT32_MAX),\n y = randint(0, UINT32_MAX),\n op = randint(0, UINT8_MAX),\n result = randint(0, UINT64_MAX))\n\n # First, bury a REQUEST.\n self.read(length=SZ_MSG)\n\n # Then, write the exit packet to TAP.\n self.write(str(exit_msg))\n\n # Exit the poller.\n return -1", "def process_quit(message):\n try:\n Resident.objects.get(phone_number=message.sender).delete()\n except Resident.DoesNotExist:\n pass\n \n # TODO - wording...\n message.respond('You have been removed from our system and will no longer get text messages.')\n \n return TropoOkResponse()", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def make_reply(msg):\n reply = ''\n if msg is not None:\n for i in range(len(messages)):\n if msg == message[i]:\n reply = m_responses[i]\n return reply", "async def end(self, roles, dialogs):\n self.ended = True", "def finish_goal(self, goal_handle, result, text=\"\"):\n goal_handle.set_succeeded(result, text)\n self.set_goal_done()", "def P_SendCloseResult(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass", "def MessageAck(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def generate_message(self, mtu):\r\n raise GeneratorExit(\"No more message to send\")", "def writeMethodEnd(self): # CMETHODS DONE\n\n self.emit('CMETHODS DONE')", "def testTerminateResponseWithServerCloseIn2ndValue(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def reply(cls, user, context, message, reply_message):\n pass", "def end(update, context):\n query = update.callback_query\n bot = context.bot\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=\"See you next time!\"\n )\n return ConversationHandler.END", "def end(self):\n self.logger.info(self.result)\n return self.result", "def writeMethodEnd(self): # SMETHODS DONE\n\n self.emit('SMETHODS DONE')", "def send_reply(self, (request, result)):\n if not 'callback' in request.args:\n return self.send_json(request, result)\n else:\n return self.send_jsonp(request.args['callback'][0], request, result)", "def test_endFunctionKEy(self):\n return self._endTest(ServerProtocol.END)", "def tellDone(self, success, originatorId):\n self.jobSender.send(self.jobSender.createJobDoneEvent(self.name,\n success,\n 
originatorId))", "def process(self):\n # hello_message = HelloMessage(*self.message.value)\n # TODO: assert realm is in allowed list\n welcome_message = WelcomeMessage()\n self.answer_message = welcome_message", "def reply(self, msg_id, response):\n return self.hub.reply(self.get_private_key(), msg_id, response)", "async def reply(self, ctx, *, m):\n await ctx.message.delete()\n await ctx.send(m)", "def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! fix it!", "def action(self):\n\n success = None\n msg = self.incoming_message_text\n\n\n if msg == '/start':\n text = 'Welcome to SimpleBook_Bot!\\nI can help you book an venue.\\\n \\n\\nYou can control me using these commands:\\n\\\n /start-to start chatting with the bot\\n\\\n /book-to make a booking\\n\\\n /cancel-to stop chatting with the bot.\\n\\\n For more information please contact [email protected]'\n\n if self.last_name == None:\n self.outgoing_message_text = \"Hello {}! \".format(self.first_name) + text\n else :\n self.outgoing_message_text = \"Hello {} {}! \".format(self.first_name, self.last_name) + text\n elif msg == '/book':\n self.outgoing_message_text = \"Please enter a date in this format: YYYY-MM-DD\"\n \n elif msg == '/cancel':\n self.outgoing_message_text = \"See you again!\"\n\n else:\n try:\n datetime.strptime(msg, '%Y-%m-%d')\n self.outgoing_message_text = 'Please enter a start time in the format: HH-MM'\n except:\n try:\n datetime.strptime(msg, '%H:%M')\n self.outgoing_message_text = 'Start time: ' + msg + ' Please enter an end time in the format: HH-MM'\n except:\n self.outgoing_message_text = 'Invalid format, please try again'\n return False\n \n success = self.send_message()\n return success", "def FinishSequence(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def end_object(self):", "def create_reply(self):\n return MessageCreateReplyRequestBuilder(self.append_to_request_url(\"createReply\"), self._client)", "def set_reply(msg):\n \n result = Message(msg.content, correlation_id=msg.correlation_id ) \n return result", "def finish(self):\n self.body.finish()", "def onMessageEnd(self):", "def print_ok(msg):\n print('OK - %s' % (msg))\n sys.exit(0)", "def sms_ahoy_reply():\n # Start our response\n resp = MessagingResponse()\n if request.method == 'POST':\n msg = request.form['Body']\n joke = re.search(r'(.*)joke(.*)', msg, re.I)\n greet = re.search(r'(.*)[hi|hey|hello](.*)', msg, re.I)\n quote = re.search(r'(.*)quote(.*)', msg, re.I)\n # joke = re.search(r'(.*)joke(.*)', msg, re.I)\n\n if joke: resp.message(\"I wanted to look for my watch but I couldn't find the time!\")\n elif quote: resp.message(\"A great player is the one who makes the game look easy!\")\n elif greet: resp.message(\"Greetings! I am your assistant!\")\n\n # Add a message\n else: resp.message(\"Ahoy! You said, '\" + msg + \"'\")\n print(request.form)\n\n else: resp.message(\"Greetings! 
I am your assistant!\") \n\n return str(resp)", "def message_with_reply(cmd, name, data, version = NATIVE_HEADER_VERSION, order=\"<\"):\n\n new_reply = SpecReply()\n reply_id = new_reply.id\n\n msg = message(reply_id, cmd, name, data, version = version, order=order)\n\n return (new_reply, msg)", "def EndSession( self ):\r\n\r\n self._socket.write( 'X' ) \r\n # self._connection.write( 'X' ).flush() \r\n\r\n return self.GetServerResponse()", "def _send_abort_reply(self, stream, msg, idents):\n # FIXME: forward-port ipython/ipykernel#684\n self.log.info(\n f\"Aborting {msg['header']['msg_id']}: {msg['header']['msg_type']}\"\n )\n reply_type = msg[\"header\"][\"msg_type\"].rsplit(\"_\", 1)[0] + \"_reply\"\n status = {\"status\": \"aborted\"}\n md = self.init_metadata(msg)\n md = self.finish_metadata(msg, md, status)\n md.update(status)\n\n self.session.send(\n stream,\n reply_type,\n metadata=md,\n content=status,\n parent=msg,\n ident=idents,\n )", "def finish ( self ) :\n raise AbstractMethodException( self , \"finish\" )", "def testTerminateResponseWithServerCloseInValue(self):\n self.client_connect()\n self.client_send('get someChoppedVal\\r\\n')\n self.mock_recv(\"get someChoppedVal\\r\\n\")\n self.mock_send('VALUE someChoppedVal 0 10\\r\\n')\n self.mock_send('012345')\n self.mock_close()\n self.client_recv('END\\r\\n')", "def answer(self, msg, klass=Message):\n _msg = msg.get(FULL_MSG, msg)\n answer = klass(_msg)\n answer[RESPONSE_ID] = _msg[MSG_ID]\n # answer[SENDER_ID] = self.uid # already done in send()\n answer.pop(FULL_MSG, None)\n answer.pop(COMMAND)\n return answer", "def get_async_response(self,message): \n index = self.async_query_buffer.index(message)\n #print('**********')\n #print ('requesting ' + message + ' at index ' + str(index))\n b = True\n try:\n response = self.async_reply_buffer[index]\n if response.endswith('\\n'):\n response = self.async_reply_buffer.pop(index)\n else:\n b = False\n response = 'EMPTY'\n except IndexError: \n #print('response not available yet!!')\n response = 'EMPTY'\n b = False\n if b: \n #print('got reply:')\n #print(response)\n query = self.async_query_buffer.pop(index)\n #print('for query:')\n #print(query)\n #print('Buffers:')\n #print(self.async_reply_buffer)\n #print(self.async_query_buffer)\n #print('_________________')\n\n return response", "def finish(ch, method, properties, body) -> Union[Job, None]:\n del ch, method, properties\n # todo: add error handling\n found_job = db.Jobs().get_by_id(body)\n if not found_job:\n return\n found_job.status = \"done\"\n return db.Jobs().update(found_job)", "def handleresponse():\n if str(request.json['from']) == EXAMPLE_APPOINTMENT['contactNumber'] \\\n and 'YES' in str(request.json['body']).upper():\n msg = Message(\n to=request.json['from'],\n from_=ORIGINATING_NUMBER,\n content='Your appointment has been confirmed')\n response = controller.create_message(msg)\n print response\n EXAMPLE_APPOINTMENT['status'] = 'confirmed'\n return \"Appointment status: \" + EXAMPLE_APPOINTMENT['status']\n elif str(request.json['from']) == EXAMPLE_APPOINTMENT['contactNumber'] \\\n and 'NO' in str(request.json['body']).upper():\n msg = Message(\n to=request.json['from'],\n from_=ORIGINATING_NUMBER,\n content=(\"Your appointment has been cancelled. 
Please call {} to\"\n \"reschedule\").format(ORIGINATING_NUMBER))\n response = controller.create_message(msg)\n print response\n EXAMPLE_APPOINTMENT['status'] = 'cancelled'\n return \"Appointment status: \" + EXAMPLE_APPOINTMENT['status']\n else:\n msg = Message(\n to=request.json['from'],\n from_=ORIGINATING_NUMBER,\n content='Please respond with either \"Yes\" or \"No\"')\n response = controller.create_message(msg)\n print response\n return \"Appointment status: \" + EXAMPLE_APPOINTMENT['status']", "def write_ok(self, message='Ok', callback=None, read_until_delimiter=CRLF):\n self.write('%s %s' % (250, message), callback=callback,\n read_until_delimiter=read_until_delimiter)", "def callback(ch, method, properties, body):\n print(f\"[X] Received %r\" % body)\n\n # wait for certain time until task is completed\n time.sleep(body.count(b'.'))\n print(\"[X] Done\")\n\n \"\"\"Acknowledge after completing task this prevents message\n message loss when the worker dies. And when worker\n dies message will be passes to another online worker.\n Caution: We are not talking about worker node of RabbitMQ.\n \"\"\"\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def task_done(self):\n if self.message is None:\n raise Exception('no message to acknowledge')\n self.handle.delete_message(self.message)\n self.message = None", "def trial_end(self, parameter_id, success, **kwargs):", "def handle_message(self, session, message):\n # Handle an RPC call\n # Reason should come from inform call.\n response = {}\n if message['method'] == 'done' and message['id'] is None:\n # Here we switch roles, becoming RPC Client\n next_state, response = RPCS.SendingRpc, None\n else:\n # We have a valid method.\n # (VALID_METHODS checked in rpcsd:parse_message)\n next_state = RPCS.ExpectRpc\n response['error'] = {'code': -31998, 'message': 'Wrong request'}\n response['id'] = message['id']\n\n return next_state, response", "def respond(cmd,t,p):\n\tt.write(cmd)\n\treturn wait(t,p)", "def answer_waiting_call(self) -> None:", "def response(self):\n return self._send(bytes([0xef,0xfe,0x02,0x0,0x0,0x0,0x0,0x0]))", "def onFinished( self, resultLine ):\n\t\treturn self.agi.finish()", "def _sendVersion_result (self, (code, data)) :\n\n assert code == \"REPLY_HELLO\"\n\n return data" ]
[ "0.69738877", "0.67656934", "0.6293983", "0.6158718", "0.6092211", "0.60331434", "0.59224075", "0.59184587", "0.5894392", "0.58468145", "0.58453137", "0.5838691", "0.58309144", "0.58224344", "0.581687", "0.5788007", "0.57852334", "0.57048154", "0.5697229", "0.56179005", "0.56067485", "0.5585295", "0.5576961", "0.55617464", "0.5556173", "0.5556091", "0.554542", "0.5528975", "0.55249536", "0.55192155", "0.5516767", "0.5515826", "0.55003387", "0.5497252", "0.54874414", "0.5482735", "0.548267", "0.5480871", "0.5480376", "0.547987", "0.5473972", "0.5473972", "0.5471445", "0.54521", "0.5439292", "0.54391056", "0.543797", "0.5428284", "0.54258144", "0.542162", "0.54145133", "0.54145133", "0.540322", "0.54008055", "0.5396608", "0.5389401", "0.5381728", "0.5380107", "0.53746706", "0.5372484", "0.5330931", "0.5327262", "0.53250575", "0.53165215", "0.53138036", "0.53074783", "0.52963895", "0.5295177", "0.5291541", "0.52843094", "0.5281361", "0.52807313", "0.52788377", "0.5274933", "0.52724105", "0.52714086", "0.52685064", "0.52673984", "0.5266384", "0.5262075", "0.5253319", "0.5252486", "0.5249856", "0.52446115", "0.52436614", "0.52370965", "0.522999", "0.52292037", "0.5229053", "0.5227862", "0.5222447", "0.5213604", "0.5212824", "0.5206776", "0.5206472", "0.52055836", "0.52053106", "0.5202127", "0.519479", "0.51941955" ]
0.55243546
29
Creates message with reply indicating failing ending of method call
def failure(cls, state, errcode=-1): return PlatformMessage(method="__reply__", kwargs={"__result__": "fail", "state": state, "errcode": errcode})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def endMessage(self):", "def finished(self, reply):\n pass", "def end():\n return say()", "def end(update, context) -> int:\n update.callback_query.edit_message_text(\n 'Bye! I hope we can talk again some day.')\n\n logger.info(\"User [%s] exited the conversation, [Exit], from [Main Menu / Results State].\",\n update.callback_query.message.chat.first_name)\n return ConversationHandler.END", "def end_message(self):\r\n if self.cur_message is not None:\r\n self.cur_message.end_time = datetime.now()\r\n self.wait_message()", "def sendEnd(self):\n self._sendCommonMessage(self.SONY_MSG_Common_Bye, self.ThreeValueMsg.pack(a=0, b=0, c=0))", "def end(update: Update, context: CallbackContext) -> int:\n update.callback_query.answer()\n\n text = \"See you around!\"\n update.callback_query.edit_message_text(text=text)\n\n return END", "def end(self, text):\n if text is None:\n string = self._build_string(\"Completed!\")\n else:\n string = self._build_string(text)\n out.write(\"\\r\" + string + \"\\n\")\n out.flush()", "def InvocationEnd(builder):\n return End(builder)", "def finish(self, message):\n self.stdout = message\n self.returncode = 0", "def Finish(self, message=\"\"):\n delta = int(self.length - self.nbits)\n sys.stdout.write(self.char * delta + \"] \" + message + \"\\n\")", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def finish(self):\n self.logger.debug(\"%s -> finish()\" % self)\n self.lines = ''.join(self.response_data).split(CRLF)\n\n if len(self.lines) < 1:\n raise nntplib.NNTPDataError(\"No data received\")\n\n self.response_code, self.response_message = self.lines[0][:3], \\\n self.lines[0][3:].strip()\n\n self.logger.debug(\"code = %s\" % self.response_code)\n self.logger.debug(\"msg = %s\" % self.response_message)", "def RespEnd(builder):\n return End(builder)", "def append_message_footer(msg, messagebody):\n footer = \"\\n\\n_____________________\\n\\n^^I ^^am ^^a ^^Bot. ^^| [^^How ^^to ^^use](https://github.com/Rafficer/ProtonStatusBot/tree/master#how-to-use) ^^| ^^Made ^^with ^^🖤 ^^by ^^/u/Rafficer ^^| ^^[Source](https://github.com/rafficer/ProtonStatusBot) ^^| [^^Report ^^a ^^Bug](https://github.com/Rafficer/ProtonStatusBot/issues)\"\n full_message = messagebody + footer\n logger.debug(\"Replying...\")\n try:\n msg.reply(full_message)\n except prawcore.exceptions.RequestException:\n logger.debug(\"Replying failed. 
Checking for connectivity and trying again.\")\n connectivity_check()\n msg.reply(full_message)\n logger.debug(\"Replied!\")", "def writeMethodEnd(self): # CMETHODS DONE\n\n self.emit('CMETHODS DONE')", "def send_finish_event(self):\n self.status['type'] = '__end__'\n self._send()", "def reply_object():\n reply_object = {\"code\": \"\"}\n return reply_object", "def end(update, context):\n query = update.callback_query\n bot = context.bot\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=\"See you next time!\"\n )\n return ConversationHandler.END", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def end_call(self, request, identifier=None):\n call = self.get_object()\n timestamp = request.data.get('timestamp', None)\n\n try:\n if not call.has_ended:\n call.end_call(timestamp=timestamp)\n except ValueError as exc:\n raise serializers.ValidationError(str(exc))\n\n serializer = self.serializer_class(instance=call)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def end(self):\n ...", "def onMessageEnd(self):", "def end():\n return EndBlock()", "def exit(self):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"exit()\"\n\n # exit() functionality is implemented with a special dst.\n exit_msg = Msg(\n dst = DST_EXIT,\n x = randint(0, UINT32_MAX),\n y = randint(0, UINT32_MAX),\n op = randint(0, UINT8_MAX),\n result = randint(0, UINT64_MAX))\n\n # First, bury a REQUEST.\n self.read(length=SZ_MSG)\n\n # Then, write the exit packet to TAP.\n self.write(str(exit_msg))\n\n # Exit the poller.\n return -1", "def acknowledgement(self, message: Message[ValueType]):", "def finish(self):\n self.body.finish()", "def finish(text):\n finish.has_been_called = True\n print(\"{}{}\".format(success, text))", "def test_endFunctionKEy(self):\n return self._endTest(ServerProtocol.END)", "def end(self) -> None:", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__end_msg__)", "async def end_object(self):", "def End(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"End\", cmd)", "def writeMethodEnd(self): # SMETHODS DONE\n\n self.emit('SMETHODS DONE')", "def generate_message(self, mtu):\r\n raise GeneratorExit(\"No more message to send\")", "def endRep(self, rep):\n \n pass", "def start_finish(self):\r\n self.send_queue.put(('finish',))", "def outgoing(self, msg):\n self.outgoing_serial += 1\n msg.header.serial = self.outgoing_serial\n\n if msg.header.message_type is MessageType.method_call:\n self.awaiting_reply[msg.header.serial] = handle = self.handle_factory()\n return handle", "def EndMelting(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def process_quit(message):\n try:\n Resident.objects.get(phone_number=message.sender).delete()\n except Resident.DoesNotExist:\n pass\n \n # TODO - wording...\n message.respond('You have been removed from our system and will no longer get text messages.')\n \n return TropoOkResponse()", "def testTerminateResponseWithServerClose(self):\n self.client_connect()\n self.client_send('set chopped 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set chopped 0 0 1\\r\\n1\\r\\n\")\n self.mock_close()\n self.client_recv('.*ERROR .*\\r\\n')", "def msg_close(version = 
NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CLOSE, \"\", \"\", version, order)", "def sendSslEnd(self, req):\n self._sendTcpMessage(self.SONY_MSG_Tcp_ProxyEnd, req, self.ThreeValueMsg.pack(a=1, b=1, c=0))", "def end(self):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def quit_cmd(self):\n print_debug(\"Executing QUIT\")\n command = \"QUIT\\r\\n\"\n msg_rec = self.send_and_log(self.s, command)\n self.close_socket(self.s) # Close socket since we're done.\n return msg_rec", "def notify_end(self, status, objective):\n pass # pragma: no cover", "async def end(self, roles, dialogs):\n self.ended = True", "def end(response):\n if isinstance(response.response, ClosingIterator):\n return response\n\n diff = time.time() - request.start\n del request.start\n\n if response.response:\n response.response[0] = response.response[0].replace('__EXECUTION_TIME__', '{:.3}'.format(diff))\n response.headers[\"content-length\"] = len(response.response[0])\n\n return response", "def sendClose(self, code=None, reason=None):", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def FinishSequence(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def tellIfEnded(self):\n self.congratulate()", "def EndSession( self ):\r\n\r\n self._socket.write( 'X' ) \r\n # self._connection.write( 'X' ).flush() \r\n\r\n return self.GetServerResponse()", "def reply(cls, user, context, message, reply_message):\r\n pass", "def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))", "def __exit__(self, *args, **kwargs):\n\n self.ctx.driver.socket_send(\n socket=self.socket,\n msg_id=self.job_id,\n control=self.job_state,\n command=self.command,\n data=self.data,\n info=self.info,\n stderr=self.stderr,\n stdout=self.stdout,\n )", "def found_terminator(self):\r\n self.msg = ''.join(self.msg_buffer)\r\n self.msg_split = self.msg.split(client_api[\"delimiter\"])\r\n cmd = self.msg_split[0]\r\n try:\r\n self.msg_handler[cmd]()\r\n except KeyError as e:\r\n server_log.info('Unhandled command received from client id {}: {}'.format(self.client_id, cmd))\r\n except Exception as e:\r\n server_log.info('Exception raised in server when receiving message from client: {!r}'.format(e))\r\n raise e\r\n finally:\r\n self.msg_buffer = []\r\n self.msg = ''\r\n self.msg_split = []", "def finish ( self ) :\n raise AbstractMethodException( self , \"finish\" )", "def finish(self):\n self.connection.reset_arguments()\n self.connection.write_ok()", "def testTerminateResponseWithServerCloseIn2ndValue(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def end():\n\tdata = bottle.request.json\n\t#print(\"END:\", json.dumps(data))\n\treturn HTTPResponse(status=200)", "def end():\n 
logging.info(\"Execution Ended\")", "async def reply(self, ctx, *, m):\n await ctx.message.delete()\n await ctx.send(m)", "def command_quit(self, arg):\n self.write('221 Bye', self.finish)", "def end(self):\n winners = mafia.str_player_list(self.game.winners())\n logging.info(\"Game over! Winners: %s\" % winners)\n\n subject = \"%s: The End\" % self.name\n body = \"Game over!\\n\\nCongratulations to %s for a well \" \\\n \"(or poorly; I can't tell) played game!\" % winners\n self.send_message(mafia.events.PUBLIC, subject, body)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.queue.channel.rpc(self._basic_cancel)\n self.queue.consuming = False", "def finish():", "def finish():", "def finish():", "def finish():", "def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass", "def close(self):\n self._udp_handler.send('exit'.encode(encoding='utf-8'))", "def send_close_request(self):\n\tdata = struct.pack(\"!4I\", 0b1101, 0b1001, self.epoch_no, self.handle_no)\n\tself.client_socket.sendto(data, self.address)\n\tself.client_socket.close()\t\n return", "def create_reply(self):\n return MessageCreateReplyRequestBuilder(self.append_to_request_url(\"createReply\"), self._client)", "def StopRecording( self ): \r\n\r\n self._socket.write( 'E' ) \r\n \r\n return self.GetServerResponse()", "def _response(self, *lines):\n for line in lines:\n self.client.dataReceived(line + b'\\r\\n')\n self.client.dataReceived(\n b'0001 OK [READ-ONLY] ' + self.command + b' completed\\r\\n')", "def end_run(*args, **kwargs):\n return fluent.end_run(*args, **kwargs)", "async def end(self, ctx, message: discord.Message):\n\n if message.guild != ctx.guild:\n return await send_embed(ctx, \"You do not have permission to do that.\", negative=True)\n\n embed = message.embeds[0]\n host = embed.description.split('\\n')\n host = host[2]\n\n async def _end(description: str):\n new_embed = discord.Embed(\n colour=discord.Colour.dark_grey(),\n description=description\n )\n\n new_embed.set_author(name=embed.author.name)\n\n new_embed.set_footer(text=f\"Ended at • {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n\n await message.edit(text=\"<:tada:740055373926367383> **GIVEAWAY ENDED** <:tada:740055373926367383>\",\n embed=new_embed)\n\n await db.execute(\"Update Giveaway set Ended = ? where MessageID = ?\", (True, message.id,))\n await db.commit()\n\n cursor = await db.execute(\"Select Members, Ended from Giveaway where MessageID = ? 
and GuildID = ?\",\n (message.id, ctx.guild.id))\n result = await cursor.fetchone()\n\n if not result:\n return await send_embed(ctx, \"Giveaway with given message ID does not exist.\", negative=True)\n\n if result[1]:\n return await send_embed(ctx, \"Giveaway has already ended.\", negative=True)\n\n for reaction in message.reactions:\n if str(reaction) == \"<:tada:740055373926367383>\":\n thisreaction = reaction\n\n try:\n a = thisreaction\n except:\n await _end(f\"Could not determine winner!\\n{host}\")\n\n members = [i for i in await thisreaction.users().flatten() if not i.bot and i in ctx.guild.members]\n winners = []\n\n if not members:\n return await _end(f\"Could not determine winner!\\n{host}\")\n\n for i in range(result[0]):\n if not members:\n break\n member = random.choice(members)\n winners.append(member)\n members.remove(member)\n\n if len(winners) == 1:\n await _end(f\"Winner: {winners[0].mention}\\n{host}\")\n await ctx.send(f\"Congratulations {winners[0].mention}! You won the **{embed.author.name}**!\\n\"\n f\"{message.jump_url}\")\n\n else:\n await _end(f\"Winners: {', '.join([i.mention for i in winners])}\\n{host}\")\n await ctx.send(f\"Congratulations {', '.join([i.mention for i in winners[:-1]])}, \"\n f\"and {winners[-1].mention}! You won the **{embed.author.name}**!\\n\"\n f\"{message.jump_url}\")", "def _send_abort_reply(self, stream, msg, idents):\n # FIXME: forward-port ipython/ipykernel#684\n self.log.info(\n f\"Aborting {msg['header']['msg_id']}: {msg['header']['msg_type']}\"\n )\n reply_type = msg[\"header\"][\"msg_type\"].rsplit(\"_\", 1)[0] + \"_reply\"\n status = {\"status\": \"aborted\"}\n md = self.init_metadata(msg)\n md = self.finish_metadata(msg, md, status)\n md.update(status)\n\n self.session.send(\n stream,\n reply_type,\n metadata=md,\n content=status,\n parent=msg,\n ident=idents,\n )", "def end(self, won, reason):\n pass\n # replace with your end logic", "def exitWithMsg(msg):\n\tprint(msg + \" -- quitting\")\n\tsys.exit(0)", "def _bye(self):\n self.get(\"BYE\",'')\n self.send()", "def exit_with_message(message: str) -> NoReturn:\n raise CallParseError(input_string, message)", "def testTerminateResponseWithServerCloseIn2ndValueData(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0 10\\r\\n')\n self.mock_send('012345')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def test_make_reply(self):\n msg_helper = MessageHelper()\n msg = msg_helper.make_inbound('inbound')\n reply = msg_helper.make_reply(msg, 'reply content')\n self.assert_message_fields(reply, {\n 'content': 'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def goodbye(self, args):\n\t\tself.write_line(\"GOODBYE\")\n\t\tself.close();", "def test_tls_finished_message_symmetry(self):\n server, client = loopback()\n\n assert server.get_finished() == client.get_peer_finished()\n assert client.get_finished() == server.get_peer_finished()", "def end(self):\n return self._args[1]", "def endElement(self, name):\n if name == 'Status':\n self.message = self.curr_text\n else:\n self.afterEndElement(name)\n self.curr_text = ''", "def _finish(self, context):\n if self._on_arrive is not None:\n context.change_state(self._on_arrive)\n else:\n 
context.revert_state()\n\n arrive_msg = Telegram(context.agent_id, None, MessageTypes.MSG_ARRIVAL, context.location)\n context.world.dispatch(arrive_msg)", "def reply(cls, user, context, message, reply_message):\n pass", "def do_end(end):\n if end:\n do_action(end)", "def get_async_response(self,message): \n index = self.async_query_buffer.index(message)\n #print('**********')\n #print ('requesting ' + message + ' at index ' + str(index))\n b = True\n try:\n response = self.async_reply_buffer[index]\n if response.endswith('\\n'):\n response = self.async_reply_buffer.pop(index)\n else:\n b = False\n response = 'EMPTY'\n except IndexError: \n #print('response not available yet!!')\n response = 'EMPTY'\n b = False\n if b: \n #print('got reply:')\n #print(response)\n query = self.async_query_buffer.pop(index)\n #print('for query:')\n #print(query)\n #print('Buffers:')\n #print(self.async_reply_buffer)\n #print(self.async_query_buffer)\n #print('_________________')\n\n return response", "def end(self, *args):\n return _ida_hexrays.hexwarns_t_end(self, *args)", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.7095744", "0.69297856", "0.66436666", "0.62102324", "0.6166731", "0.6165642", "0.60455865", "0.5994747", "0.5920864", "0.5850099", "0.5848156", "0.5846051", "0.5840882", "0.5801606", "0.5791126", "0.57714516", "0.5769898", "0.5752875", "0.5749393", "0.5738365", "0.5730379", "0.5716588", "0.57073826", "0.56565017", "0.56251675", "0.56187546", "0.5618431", "0.5589373", "0.5586673", "0.55847424", "0.5582944", "0.55686134", "0.5568254", "0.5564753", "0.5560452", "0.55583423", "0.55499095", "0.55301154", "0.551627", "0.5508882", "0.54922056", "0.54527205", "0.5450895", "0.544974", "0.5448149", "0.5448149", "0.5448149", "0.544798", "0.54444396", "0.5424992", "0.5397898", "0.5396737", "0.5393987", "0.5393987", "0.5371503", "0.5370455", "0.5365958", "0.5362923", "0.53558975", "0.53522927", "0.53521496", "0.534716", "0.53366005", "0.53139573", "0.5307667", "0.5300905", "0.530025", "0.5291668", "0.5287351", "0.5283819", "0.5283819", "0.52748406", "0.52660465", "0.52660465", "0.52660465", "0.52660465", "0.5265319", "0.52408713", "0.52336913", "0.52281827", "0.5220763", "0.5215861", "0.5207829", "0.52052426", "0.52013016", "0.519762", "0.51968557", "0.5194009", "0.51938653", "0.5193606", "0.51934594", "0.5187755", "0.5182903", "0.51802284", "0.51771003", "0.5176078", "0.5174096", "0.5172008", "0.5171008", "0.51631546", "0.5162966" ]
0.0
-1
Creates message with reply indicating failing ending of method call by exception
def failure_exception(cls, state, exception): return PlatformMessage(method="__reply__", kwargs={"__result__": "fail", "state": state, "errcode": -2, "e": exception})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def endMessage(self):", "def end():\n return say()", "def exit_with_message(message: str) -> NoReturn:\n raise CallParseError(input_string, message)", "def finished(self, reply):\n pass", "def sendErrorMessage(msg): #@NoSelf", "def whenException(self, channel, call):", "def abort(self, errtype, message):\n self.log += \"\\n{0}: {1}\\n\".format(errtype.__name__, message)\n self.endtime = timenow(False)\n self.log += \"\\nSimulation terminated {}.\".format(timenow())\n self.logsave()\n raise errtype(message)", "def InvocationEnd(builder):\n return End(builder)", "def write_error_message(self, message: str):\n\n return sys.exit(message)", "def sendEnd(self):\n self._sendCommonMessage(self.SONY_MSG_Common_Bye, self.ThreeValueMsg.pack(a=0, b=0, c=0))", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__end_msg__)", "def error_exit(self, msg):\n wrappedmsg = textwrap.fill(msg, 78)\n fullmsg = \"%s\\n%s\" % (wrappedmsg, self.get_usage_command())\n raise SBToolError(fullmsg, True)", "async def __aexit__(self, err_type, err_value, err_t):\n if err_type and err_type != asyncio.CancelledError:\n self.logger.exception(\"Exception in outbound transport\")\n await self.stop()", "def get_end_reason(self):\n # Someday I will have real end reasons\n raise IllegalState()", "def create_exception(self, msg: str):", "def sendClose(self, code=None, reason=None):", "def exitWithMsg(msg):\n\tprint(msg + \" -- quitting\")\n\tsys.exit(0)", "def _on_invalid_call(self, msg):\r\n # Workaround: Maybe a bug in their server software,\r\n # I don't know what's missing. Its all poorly documented :-(\r\n # Sometimes some API calls fail the first time for no reason,\r\n # if this happens just send them again. This happens only\r\n # somtimes (10%) and sending them again will eventually succeed.\r\n\r\n if msg[\"id\"] == \"idkey\":\r\n self.debug(\"### resending private/idkey\")\r\n self.client.send_signed_call(\r\n \"private/idkey\", {}, \"idkey\")\r\n\r\n elif msg[\"id\"] == \"info\":\r\n self.debug(\"### resending private/info\")\r\n self.client.send_signed_call(\r\n \"private/info\", {}, \"info\")\r\n\r\n elif msg[\"id\"] == \"orders\":\r\n self.debug(\"### resending private/orders\")\r\n self.client.send_signed_call(\r\n \"private/orders\", {}, \"orders\")\r\n\r\n elif \"order_add:\" in msg[\"id\"]:\r\n parts = msg[\"id\"].split(\":\")\r\n typ = parts[1]\r\n price = int(parts[2])\r\n volume = int(parts[3])\r\n self.debug(\"### resending failed\", msg[\"id\"])\r\n self.client.send_order_add(typ, price, volume)\r\n\r\n elif \"order_cancel:\" in msg[\"id\"]:\r\n parts = msg[\"id\"].split(\":\")\r\n oid = parts[1]\r\n self.debug(\"### resending failed\", msg[\"id\"])\r\n self.client.send_order_cancel(oid)\r\n\r\n else:\r\n self.debug(\"### _on_invalid_call() ignoring:\", msg)", "def finish_interrupted_request(self, exc):\n if not self.config.display_exceptions and exc.private_msg:\n exc.private_msg = None # hide it\n request = get_request()\n request.response = HTTPResponse(status=exc.status_code)\n output = self.format_publish_error(exc)\n self.session_manager.finish_successful_request()\n return output", "def exit_message(message):\n\tprint(yellow(message))\n\tsys.exit(1)", "def EndMelting(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def acknowledgement(self, message: Message[ValueType]):", "def msg_abort(version = NATIVE_HEADER_VERSION, order=\"<\"):\n return 
message_no_reply(ABORT, \"\", \"\", version, order)", "def generate_message(self, mtu):\r\n raise GeneratorExit(\"No more message to send\")", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.queue.channel.rpc(self._basic_cancel)\n self.queue.consuming = False", "def _exit(msg):\n __exit(msg)", "def writeMethodError(self, name, message): # CMETHOD-ERROR\n\n self.emit('CMETHOD-ERROR %s %s' % (name, message))", "def reply_object():\n reply_object = {\"code\": \"\"}\n return reply_object", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__exit_msg__)", "def __exit__(\n self,\n exc_type: Optional[typing.Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[python_types.TracebackType],\n ) -> None:\n\n self.end()", "def exexit(ex: BaseException, exit_code: int = 1, autre_message: str = None) -> NoReturn:\n print(Fore.YELLOW, \"[XG] \",\n Fore.RED, ex.__class__.__name__,\n Fore.YELLOW, \": \", ex,\n file=sys.stderr, sep='')\n if autre_message is not None:\n print(autre_message)\n sys.exit(exit_code)", "def end(update, context) -> int:\n update.callback_query.edit_message_text(\n 'Bye! I hope we can talk again some day.')\n\n logger.info(\"User [%s] exited the conversation, [Exit], from [Main Menu / Results State].\",\n update.callback_query.message.chat.first_name)\n return ConversationHandler.END", "def die(self, msg, code=1):\n self.feedback.error(msg)\n exit(code)", "def _send_abort_reply(self, stream, msg, idents):\n # FIXME: forward-port ipython/ipykernel#684\n self.log.info(\n f\"Aborting {msg['header']['msg_id']}: {msg['header']['msg_type']}\"\n )\n reply_type = msg[\"header\"][\"msg_type\"].rsplit(\"_\", 1)[0] + \"_reply\"\n status = {\"status\": \"aborted\"}\n md = self.init_metadata(msg)\n md = self.finish_metadata(msg, md, status)\n md.update(status)\n\n self.session.send(\n stream,\n reply_type,\n metadata=md,\n content=status,\n parent=msg,\n ident=idents,\n )", "def testAbort(self):\n Compound(\"asd\",\"dsa\").post_goal()\n result,stream_id=resume()\n outStream=Stream(stream_id)\n self.assertEqual(outStream.readall(),b\"calling an undefined procedure asd(\\\"dsa\\\") in module eclipse\\n\")\n result,myAtom=resume()\n self.assertEqual(result,THROW)\n self.assertIsInstance(myAtom,Atom)\n self.assertEqual(myAtom.__str__(),\"abort\")", "def RespEnd(builder):\n return End(builder)", "def _abort(self, msg: str = \"Terminating\"):\n try:\n message = f\"Process {os.getpid()} aborting: {msg}\\n{traceback.format_exc()}\"\n except Exception:\n message = f\"Process {os.getpid()} aborting: {msg} (no traceback)\"\n self.__finish(False, message=message)", "def __my_exit__(self, arg=0):\n self.services.error('Called sys.exit() from component code')\n raise Exception('Called sys.exit() from component code')", "def exit(\n self,\n status_code: int = 0,\n message: Optional[str] = None,\n usage: Optional[str] = None,\n ) -> NoReturn:\n print(\"\\n\\n\".join(m for m in (usage, message) if m)) # noqa: T201\n sys.exit(status_code)", "def __init__(self, msg):\n super(QuitMessageException, self).__init__(msg)", "def fail(msg):\n\n # Not sure if simply raising the exception is clearer.\n raise CommandFailed(msg)", "def __exit(msg, code=0, std=sys.stdout):\n if std not in (sys.stderr, sys.stdout):\n raise IOError(\"Bad output file descriptor '%s'.\" % std)\n if not msg.endswith(os.linesep):\n msg += os.linesep\n std.write(msg)\n sys.exit(code)", "def __exit__(self, exc_type, exc_value, exc_traceback):\n self.end = self()\n print(str(self))", "def msg(_type, text, 
exit=0):\n sys.stderr.write(\"%s: %s\\n\" % (_type, text))\n sys.exit(exit)", "def testTerminateResponseWithServerClose(self):\n self.client_connect()\n self.client_send('set chopped 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv(\"set chopped 0 0 1\\r\\n1\\r\\n\")\n self.mock_close()\n self.client_recv('.*ERROR .*\\r\\n')", "def raise_timeout(*args, **kwargs):\n raise ZMQNotResponding('ZMQ server is not responding')", "def finish(self, message):\n self.stdout = message\n self.returncode = 0", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def abort(self):\n self.write(\":ABORT\")", "def die(self, msg=None):\r\n raise Exception(msg)", "def reply_with_code(self, code: int) -> None:", "def __exit__(self, *args, **kwargs):\n\n self.ctx.driver.socket_send(\n socket=self.socket,\n msg_id=self.job_id,\n control=self.job_state,\n command=self.command,\n data=self.data,\n info=self.info,\n stderr=self.stderr,\n stdout=self.stdout,\n )", "def exit(cls, msg):\n Console.error(msg)\n sys.exit()", "def exit_with_message(error_text: str) -> NoReturn:\n raise PealSpeedParseError(peal_speed, error_text)", "def append_message_footer(msg, messagebody):\n footer = \"\\n\\n_____________________\\n\\n^^I ^^am ^^a ^^Bot. ^^| [^^How ^^to ^^use](https://github.com/Rafficer/ProtonStatusBot/tree/master#how-to-use) ^^| ^^Made ^^with ^^🖤 ^^by ^^/u/Rafficer ^^| ^^[Source](https://github.com/rafficer/ProtonStatusBot) ^^| [^^Report ^^a ^^Bug](https://github.com/Rafficer/ProtonStatusBot/issues)\"\n full_message = messagebody + footer\n logger.debug(\"Replying...\")\n try:\n msg.reply(full_message)\n except prawcore.exceptions.RequestException:\n logger.debug(\"Replying failed. 
Checking for connectivity and trying again.\")\n connectivity_check()\n msg.reply(full_message)\n logger.debug(\"Replied!\")", "def exc_end(self):\n return self._exc_end", "def exit(status=None): # real signature unknown; restored from __doc__\n pass", "def exit(status=0, msg=\"end\", type_=LogType.ERROR, src=None, doexit=True, line=None):\n\n if src is not None:\n msg = '{} ({})'.format(src, msg)\n message(msg, type_=type_ if status!=ExitCode.OK else LogType.INFO, status=status, line=line)\n exitcode = status\n if doexit:\n sys.exit(exitcode)", "def end():\n return EndBlock()", "def send_rpc_error(req, rpcreq, e):", "def Message(msg, id=260, ok=None):\n sys.stderr.write(msg+'\\n')", "def process_quit(message):\n try:\n Resident.objects.get(phone_number=message.sender).delete()\n except Resident.DoesNotExist:\n pass\n \n # TODO - wording...\n message.respond('You have been removed from our system and will no longer get text messages.')\n \n return TropoOkResponse()", "def finish(self):\n self.logger.debug(\"%s -> finish()\" % self)\n self.lines = ''.join(self.response_data).split(CRLF)\n\n if len(self.lines) < 1:\n raise nntplib.NNTPDataError(\"No data received\")\n\n self.response_code, self.response_message = self.lines[0][:3], \\\n self.lines[0][3:].strip()\n\n self.logger.debug(\"code = %s\" % self.response_code)\n self.logger.debug(\"msg = %s\" % self.response_message)", "def __exit__(self, type, value, traceback):\n self.transport.close()", "def finish_error(self, msg: str = 'Unknown error'):\r\n\r\n self._is_error = True\r\n self._error_msg = msg", "def Error(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def answerFailure( self, reason ):\n\t\tlog.warn( \n\t\t\t\"\"\"Unable to answer channel %r: %s\"\"\", \n\t\t\tself.agi.variables['agi_channel'], reason.getTraceback(),\n\t\t)\n\t\tself.agi.finish()", "def P_SendCloseResult(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def exit_error(message: Optional[str] = None) -> NoReturn:\n\n\tif (message != None):\n\t\tprint(message)\n\tsys.exit(EXIT_FAILURE)", "def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))", "def _cmd_exit(self):\n raise EOFError()", "def end_message(self):\r\n if self.cur_message is not None:\r\n self.cur_message.end_time = datetime.now()\r\n self.wait_message()", "def error(message):\n print message\n sys.exit(2)", "def sendError():\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n\n self.sendData((\n RPC_ERROR,\n request_id,\n (exceptionType.__name__,\n exceptionValue.args[0] if len(exceptionValue.args) == 1 else \"\",\n \"\".join(traceback.format_tb(exceptionTraceback)))\n ))", "def end(self):\n ...", "def terminate(exitmsg: str):\n print(exitmsg)\n sys.exit(1)", "def InterfaceClientStop(self, exitCode=200): \n pass", "def VoiceCommonExcelEnd(builder):\n return End(builder)", "def print_msg_exit(msg=\"\", exit_code=0):\n if msg:\n print(msg)\n sys.exit(exit_code)", "def abort(self):\n raise NotImplementedError", "def exit(self):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"exit()\"\n\n # exit() functionality is implemented with a special dst.\n exit_msg = Msg(\n dst = DST_EXIT,\n x = randint(0, UINT32_MAX),\n y = randint(0, 
UINT32_MAX),\n op = randint(0, UINT8_MAX),\n result = randint(0, UINT64_MAX))\n\n # First, bury a REQUEST.\n self.read(length=SZ_MSG)\n\n # Then, write the exit packet to TAP.\n self.write(str(exit_msg))\n\n # Exit the poller.\n return -1", "def exit_message(self):\n return self._exit_message", "def test_endFunctionKEy(self):\n return self._endTest(ServerProtocol.END)", "def testTerminateResponseWithServerCloseIn2ndValue(self):\n self.client_connect()\n self.client_send('get someWholeVal someChoppedVal\\r\\n')\n self.mock_recv(\"get someWholeVal someChoppedVal\\r\\n\")\n self.mock_send('VALUE someWholeVal 0 10\\r\\n')\n self.mock_send('0123456789\\r\\n')\n self.mock_send('VALUE someChoppedVal 0')\n self.mock_close()\n self.client_recv('VALUE someWholeVal 0 10\\r\\n0123456789\\r\\nEND\\r\\n')", "def testTerminateRace(self):\n yield self.connect(self.get_body_node(connect=True))\n\n def log_observer(event):\n self.failIf(event['isError'], event)\n\n log.addObserver(log_observer)\n\n # Simultaneously cause a stream error (server->client closed) and send a terminate\n # from the client to the server. Both sides are closing the connection at once.\n # Make sure the connection closes cleanly without logging any errors (\"Unhandled\n # Error\"), and the client receives a terminate in response.\n try:\n self.server_protocol.triggerStreamError()\n yield self.proxy.send(self.get_body_node(type='terminate'))\n except httpb_client.HTTPBNetworkTerminated as e:\n self.assertEqual(e.body_tag.getAttribute('condition', None), 'remote-stream-error')\n finally:\n log.removeObserver(log_observer)", "def exit_with_message(error_text: str) -> NoReturn:\n raise StartRowParseError(start_row, error_text)", "def test_greeting_raising_exception(bot):\n bot.dataset['greetings'].upsert.side_effect = ValueError()\n mask = IrcString('[email protected]')\n channel = IrcString('#meleca')\n args = {\n '<nick>': 'nickname',\n '<message>': 'Hello there.'\n }\n plugin = Commands(bot)\n\n async def test():\n response = await plugin.greeting(mask, channel, args)\n assert response == 'Sorry, looks like something went wrong :('\n asyncio.get_event_loop().run_until_complete(test())", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def exit_failure():\n\tglobal state\n\tglobal c1, c2\n\tglobal a1, a2\n\n\t# Tell clients to close connections\n\tc1.send(\"CLOSE\")\n\tc2.send(\"CLOSE\")\n\n\tprint \"EXIT FAILURE\"\n\tstate += 1 # increment state to 10", "def aborted(self, exc_info):\n self.exc_info = exc_info\n self.did_end = True\n self.write(format_exception(*self.exc_info))", "def ConsoleExit(self, errorcode=200):\n pass", "async def __aexit__(\n self, exc_type: Exception, exc_value: str, traceback: TracebackType\n ) -> None:\n await self.disconnect()", "def __exit__(self, exception, value, trace):\n self.manual_exit()", "def ReceiveMsg(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return 
msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply", "def FinishSequence(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def quit(self, reason=\"\", *args, **kwargs):\n pass", "def error(update: Update, context: CallbackContext):\n update.message.reply_text('Sorry! I don\\'t understand you!')", "def end_call(self, request, identifier=None):\n call = self.get_object()\n timestamp = request.data.get('timestamp', None)\n\n try:\n if not call.has_ended:\n call.end_call(timestamp=timestamp)\n except ValueError as exc:\n raise serializers.ValidationError(str(exc))\n\n serializer = self.serializer_class(instance=call)\n return Response(serializer.data, status=status.HTTP_200_OK)" ]
[ "0.64972866", "0.60457224", "0.59910274", "0.5865661", "0.5790894", "0.5711769", "0.56799406", "0.5674615", "0.56623536", "0.56480086", "0.5638144", "0.5630722", "0.563067", "0.5607326", "0.5588698", "0.5577028", "0.5576119", "0.55424887", "0.5533531", "0.55177295", "0.5499352", "0.54871863", "0.5480284", "0.5469065", "0.5463766", "0.545993", "0.54524505", "0.54511917", "0.5451146", "0.54466647", "0.5441611", "0.5439551", "0.5431919", "0.5431205", "0.5414126", "0.5413557", "0.5404757", "0.5404154", "0.5399157", "0.5395115", "0.53912", "0.5387078", "0.5387029", "0.5379797", "0.53494775", "0.5326848", "0.5323234", "0.5323161", "0.5323161", "0.53069067", "0.5306808", "0.5296085", "0.5294505", "0.5292499", "0.52923197", "0.5290897", "0.5285461", "0.52715373", "0.5269758", "0.52666694", "0.5261902", "0.52591205", "0.52537686", "0.52537", "0.5252857", "0.52483135", "0.52380645", "0.5237556", "0.5235648", "0.52339274", "0.522566", "0.5220672", "0.5219989", "0.5218659", "0.52122676", "0.521111", "0.52044374", "0.52027196", "0.5198433", "0.5196278", "0.5190004", "0.51879877", "0.518741", "0.51859957", "0.51856005", "0.5183133", "0.5179904", "0.5178275", "0.51767206", "0.51715416", "0.5166127", "0.5166102", "0.51598525", "0.51574564", "0.5156082", "0.515518", "0.5152928", "0.5148555", "0.51482534", "0.5148083" ]
0.521757
74
Creates message with reply indicating some stage of method call has taken place
def notify(cls, state):
    return PlatformMessage(method="__reply__", kwargs={"state": state})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, answer):\n method_name = 'on_%s' % sanitize_answer(answer)\n method = getattr(self, method_name, None)\n if method:\n msg = method()\n else:\n msg = self.default(answer)\n return msg", "def Message(self, *args, **kwargs):\n pass", "def process(self):\n # hello_message = HelloMessage(*self.message.value)\n # TODO: assert realm is in allowed list\n welcome_message = WelcomeMessage()\n self.answer_message = welcome_message", "def message_dispatch():\n if not from_twilio(request):\n abort(403)\n resp = twilio.twiml.Response()\n if not session.get(\"pwd\"):\n session['pwd'] = '__META__ROOT__'\n body = request.values.get(\"Body\")\n number = request.values.get(\"From\")\n message = cmd_handler.handle(number,session,body)\n session.modified = True\n resp.message(message)\n # We are probably going to modify the session on every command.\n return str(resp)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply(cls, user, context, message, reply_message):\r\n pass", "def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass", "def handle_message(self, session, message):\n # Handle an RPC call\n # Reason should come from inform call.\n response = {}\n if message['method'] == 'done' and message['id'] is None:\n # Here we switch roles, becoming RPC Client\n next_state, response = RPCS.SendingRpc, None\n else:\n # We have a valid method.\n # (VALID_METHODS checked in rpcsd:parse_message)\n next_state = RPCS.ExpectRpc\n response['error'] = {'code': -31998, 'message': 'Wrong request'}\n response['id'] = message['id']\n\n return next_state, response", "def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)", "def _add_message(self, message):\r\n self.result = self.result + message", "def notify(self):\n return _MethodCall(self._proto)", "def cmd(self, message):\n pass", "def reply(cls, user, context, message, reply_message):\n pass", "def Message(title, msg):\r\n return _hiew.HiewGate_Message(title, msg)", "def acknowledgement(self, message: Message[ValueType]):", "def horde_message(self, message):", "def reply_object():\n reply_object = {\"code\": \"\"}\n return reply_object", "def do_something(incoming_msg):\n return \"i did what you said - {}\".format(incoming_msg.text)", "def test_make_reply(self):\n msg_helper = MessageHelper()\n msg = msg_helper.make_inbound('inbound')\n reply = msg_helper.make_reply(msg, 'reply content')\n self.assert_message_fields(reply, {\n 'content': 'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def finished(self, reply):\n pass", "def outgoing(self, msg):\n self.outgoing_serial += 1\n msg.header.serial = self.outgoing_serial\n\n if msg.header.message_type is MessageType.method_call:\n self.awaiting_reply[msg.header.serial] = handle = self.handle_factory()\n return handle", "def message(self, function_address, new_name):\n pass", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % 
(self.client.getaddress(), self.action_type))", "def handle(self, message):", "def reply(self, request, *args, **kwargs):\n context = {\n 'request': request,\n 'conversation': self.get_object()\n }\n serializer = MessageSerializer(data=request.data, partial=True, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n headers = self.get_success_message_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def generate_message(ctx, question, answer):\n return preamble.format(channel=rules_channel(ctx).id) + question + answer", "def success(self, msg):\n print \"comm succeded\"\n return msg", "def process(self, msg):\n raise NotImplemented", "def processMessage(self, *args, **kwargs):\r\n pass", "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n if msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n if msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def process_outgoing(self, msg):\n return msg, 0", "def SendMessage(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def response(self, context, message):\r\n return True", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def MessageAck(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def gen_msg(self, code, cont, func):\n\t\tself.msgcode = ' '\n\t\tif(code == 200 and func == 'PUT' or func == 'DELETE'):\n\t\t\tself.msgcode = 'HTTP/1.1 200 OK\\r\\nContent-type: text/xml\\r\\nContent-length: 0\\r\\nConnection: Close\\r\\n\\r\\n'\n\t\t\treturn self.msgcode\t\n\t\telif(code == 200):\n\t\t\tself.msgcode = 'HTTP/1.1 200 OK\\r\\nContent-type: text/xml\\r\\nContent-length: '+ str(len(cont)) + '\\r\\nConnection: Close\\r\\n\\r\\n'+cont\n\t\t\treturn self.msgcode\t\t\n\t\telif(code == 404):\n\t\t\tself.msgcode = 'HTTP/1.1 404 Not Found\\r\\nContent-type: text/xml\\r\\nConnection: Close\\r\\n\\r\\n'\n\t\t\treturn self.msgcode", "def gen_call(self, frame, label, args, rv): # 
pragma: no cover\n raise NotImplementedError(\"Implement this!\")", "def HelloMsg(self, request_iterator, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def reply(self, result):\n if self._reply_channel is None:\n assert False, \"can only reply to a synchronous message, e.g. somebody must be calling us with 'call'\"\n else:\n self._reply_channel.send(result)", "def process(self, message: Message, **kwargs: Any) -> None:", "def call(self, message: Message) -> None:\n self.fn(message)", "def _process_message(self, obj):\n pass", "def say(self, message):\r\n print message", "def respond(self,move):\n self.statusmessage=\"\"\n if move is None or self.lastmove is None:\n if self.startmove is not None:\n self.lastmove=self.startmove\n else:\n self.lastmove=2*random.random()-1\n self.statusmessage=\"Chose first move: \"+str(self.lastmove)\n else:\n mypayoff=move+self.lastpayoff[0]\n opppayoff=math.sqrt(1-move**2)+self.lastpayoff[1]\n if self.envy is not None and self.fairness is not None:\n payoff=getpayoff(mypayoff,opppayoff,self.envy,self.fairness)\n elif self.responsefunc is not None and self.oppresponsefunc is not None:\n raise NotImplementedError(\"use of responsefunc in player\")\n else:\n payoff=mypayoff\n self.learner.observe(self.lastmove,payoff)\n if len(self.override)>0:\n self.lastmove=self.override.pop(0)\n elif self.teachingstrat is not None:\n teachingresponse=self.teachingstrat.respond(move)\n self.lastmove=self.learner.pickmove(tmove=teachingresponse,twt=self.teachingweight)\n else:\n self.lastmove=self.learner.pickmove()\n self.statusmessage=\"Observed \"+str(move)+\"\\nEvaluated payoff of last move as \"+str(payoff)+\"\\nPicked response \"+str(self.lastmove)\n if self.radial:\n self.lastpayoff=(math.cos(math.pi*self.lastmove/2.0),math.sin(math.pi*self.lastmove/2.0))\n return math.sin(math.pi*self.lastmove/2.0)\n else:\n self.lastpayoff=(math.sqrt(1-self.lastmove**2),self.lastmove)\n return self.lastmove", "def message_with_reply(cmd, name, data, version = NATIVE_HEADER_VERSION, order=\"<\"):\n\n new_reply = SpecReply()\n reply_id = new_reply.id\n\n msg = message(reply_id, cmd, name, data, version = version, order=order)\n\n return (new_reply, msg)", "def __str__(self):\n return_str = \"\"\n args_str = \",\" + \",\".join(self.arguments) if self.arguments else \"\"\n if self.is_reply():\n args = (\n self.message_type,\n self.name,\n self.code,\n args_str\n )\n return_str = f\"{args[0]}{args[1]},{args[2]}{args[3]}\"\n else:\n args = (\n self.message_type,\n self.name,\n args_str\n )\n return_str = f\"{args[0]}{args[1]}{args[2]}\"\n return return_str + '\\r\\n'", "def success(cls, retval, retvalname='value'):\r\n if isinstance(retval, dict) and retvalname is None:\r\n retval[\"__result__\"] = \"success\" # TODO: right here just modified input dict. 
That's not good\r\n else:\r\n retval = {\"__result__\": \"success\", retvalname: retval}\r\n return PlatformMessage(method=\"__reply__\", kwargs=retval)", "def test_make_dispatch_reply(self):\n md_helper = MessageDispatchHelper(\n MessageHelper(), WorkerHelper('fooconn'))\n broker = self.setup_broker(md_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.outbound'), [])\n msg = md_helper.msg_helper.make_inbound('inbound')\n reply = yield md_helper.make_dispatch_reply(msg, 'reply content')\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.outbound'), [reply])\n self.assert_message_fields(reply, {\n 'content': 'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def create_next_message(self, **kwargs):\n message = self._builder.create_message(**kwargs)\n return message", "def _respond_message(self, msg):\n self.set_status(200)\n self.set_header(\"Content-Type\", \"application/x-mplane+json\")\n self.write(mplane.model.unparse_json(msg))\n self.finish()", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def _create_msg(self, tr_id, i_triples, i_type, r_triples, r_type, confirm):\n params = SSAP_UPDATE_PARAM_TEMPLATE % (str(i_type).upper(),\n str(i_triples),\n str(r_type).upper(),\n str(r_triples),\n str(confirm).upper())\n tmp = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(self.targetSS),\n self.tr_type, str(tr_id), params)\n return tmp", "def message(self, target, *text):\n raise NotImplementedError", "def reply(self, message):\n self.logger.info(\"message came as {}\".format(message))\n message = message.lower()\n if message in [\"start over\", \"get started\", \"hello\", \"hi\", \"say hello\"]:\n self.params = \"\"\n self.readyseteatparams = \"\"\n # self.api.send_text_facebook(\n # self.user_id,\n # 'What type of recipe would you like to make? You can type \"start over\" at any time'\n # )\n # return self.api.send_facebook(self.user_id, self.config.QUESTION_MAIN)\n self.send_welcome_messages()\n return self.api.send_facebook(self.user_id, self.config.QUICK_REPLY_MAIN)\n if message in [\"more\", \"show more\"] and self.data:\n self.index += 5\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n return self.api.send_facebook(self.user_id, m_data)\n if message == \"ask-tomorrow-payload\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"ask-week-payload\":\n self.usersModule.makeNotificationWeekly(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"activate notifications\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"Notification has been activated.\")\n if message in [\"do-nothing\", \"payload_unsubscribe\"]:\n if message == \"payload_unsubscribe\":\n self.usersModule.deactivateNotification(self.user_id)\n return self.api.send_text_facebook(\n self.user_id,\n 'Notification has been deactivated. 
You can type \"start over\" anytime.')\n else:\n return self.api.send_text_facebook(\n self.user_id,\n 'You can type \"start over\" when you are looking for new recipes.')\n\n try:\n title, choice = message.split(\"_\")\n except:\n title = None\n choice = message\n\n if title == \"category\":\n self.params = \"\"\n self._type = choice\n if choice == \"dinner\":\n self.params += \"&category=89\"\n self.readyseteatparams += \"&category=89\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient:\")\n # return self.api.send_facebook(self.user_id, self.config.DINNER_INGREDIENTS)\n return self.api.send_facebook(self.user_id, self.config.DINNER_GUICK_REPLY)\n elif choice == \"dessert\":\n self.params += \"&category=88\"\n self.readyseteatparams += \"&category=88\"\n # self.api.send_text_facebook(self.user_id, \"What kind of dessert would you like to make?\")\n # return self.api.send_facebook(self.user_id, self.config.DESSERTS)\n return self.api.send_facebook(self.user_id, self.config.DESSERTS_QUICK_REPLY)\n elif choice == \"breakfast\":\n self.params += \"&category=87\"\n self.readyseteatparams += \"&category=87\"\n # self.api.send_text_facebook(self.user_id, \"What kind of breakfast do you want?\")\n # return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUICK_REPLY)\n elif choice == \"appetizer\":\n self.params += \"&category=85\"\n self.readyseteatparams += \"&category=85\"\n # self.api.send_text_facebook(self.user_id, \"What kind of appetizer or snack sounds good?\")\n # return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUICK_REPLY)\n elif choice == \"side dish\":\n self.params += \"&category=95\"\n self.readyseteatparams += \"&category=95\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient\")\n # return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUICK_REPLY)\n else:\n return self.api.send_text_facebook(self.user_id,\n \"I don't know answer that belongs to {} yet\".format(message))\n\n if title == \"main-ingredient\":\n self.mainIngredient = choice\n if choice == \"chicken\":\n self.params += \"&mainingredient=76\"\n self.readyseteatparams += \"&mainingredient=76\"\n elif choice == \"beef\":\n self.params += \"&mainingredient=70\"\n self.readyseteatparams += \"&mainingredient=70\"\n elif choice == \"pork\":\n self.params += \"&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=249\"\n elif choice == \"seafood\":\n self.params += \"&mainingredient=73\"\n self.readyseteatparams += \"&mainingredient=73\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"vegetarian\":\n self.params += \"&lifestyle=299\"\n self.readyseteatparams += \"&lifestyle=299\"\n return self.api.send_facebook(self.user_id, self.config.TIME_QUICK_REPLY)\n if title == \"bre-time\":\n self.breakfastTime = choice\n if choice == \"15\":\n self.params += \"&totaltime=15\"\n self.readyseteatparams += \"&totaltime=15\"\n elif choice == \"30\":\n self.params += \"&totaltime=30\"\n self.readyseteatparams += \"&totaltime=30\"\n elif choice == \"45\":\n pass\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = 
self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n if title == \"time\":\n self.time = choice\n self.params += \"&totaltime={}\".format(choice)\n self.readyseteatparams += \"&totaltime={}\".format(choice)\n # self.api.send_text_facebook(self.user_id, \"What sounds Good?\")\n # return self.api.send_facebook(self.user_id, self.config.REGION_DINNER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.REGION_QUICK_REPLY)\n\n if title == \"region\":\n self.region = choice\n if choice == \"asian\":\n self.params += \"&cuisine=44\"\n self.readyseteatparams += \"&cuisine=44\"\n elif choice == \"italian\":\n self.params += \"&cuisine=46\"\n self.readyseteatparams += \"&cuisine=46\"\n elif choice == \"mediterranean\":\n self.params += \"&cuisine=367\"\n self.readyseteatparams += \"&cuisine=367\"\n elif choice == \"mexican\":\n self.params += \"&cuisine=45\"\n self.readyseteatparams += \"&cuisine=45\"\n elif choice == \"american\":\n self.params += \"&suppresstraits=44,35,355,46,367,45,356,261\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"dessert\":\n self.dessert = choice\n if choice == \"cookies\":\n self.params += \"&trait=48,10,20,110&suppresstraits=22,24&keywords=cookies\"\n self.readyseteatparams += \"&trait=48,10,20,110&keywords=cookies\"\n elif choice == \"cakes\":\n self.params += \"&suppresstraits=24&keywords=cake\"\n self.readyseteatparams += \"&keywords=cake\"\n elif choice == \"pies\":\n self.params = \"sortby=season,rating&order=desc,desc&negativeingredientkeyword=pieces&keywords=pie&suppresstraits=24&category=88\"\n self.readyseteatparams = \"&negativeingredientkeyword=pieces&keywords=pie&category=88\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n elif choice == \"seasonal\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=88&season=330\"\n self.readyseteatparams = \"&category=88&season=330\"\n elif choice == \"quick\":\n self.params = \"&totaltime=30\"\n self.readyseteatparams = \"&totaltime=30\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"breakfast\":\n self.breakfastIngredient = choice\n if choice == \"eggs\":\n self.params += \"&mainingredient=72\"\n self.readyseteatparams += 
\"&mainingredient=72\"\n self.params += \"&trait=9\"\n self.readyseteatparams += \"&trait=9\"\n elif choice == \"casserole\":\n self.params += \"&keywords=casserole\"\n self.readyseteatparams += \"&keywords=casserole\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260&goodforyou=258\"\n self.readyseteatparams += \"&goodforyou=260&goodforyou=258\"\n elif choice == \"sweet\":\n self.params += \"&trait=22\"\n self.readyseteatparams += \"&trait=22\"\n # will add something sweet\n pass\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_TIME_QUICK_REPLY)\n\n if title == \"appetizer\":\n self.appetizerIng = choice\n if choice == \"cheesy\" or choice == \"meaty\":\n if choice == \"cheesy\":\n self.params += \"&keywords=cheese\"\n self.readyseteatparams += \"&keywords=cheese\"\n elif choice == \"meaty\":\n self.params += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n elif choice == \"veggies\" or choice == \"healthier\":\n if choice == \"veggies\":\n self.params += \"&mainingredient=77&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=77&mainingredient=310\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=260\"\n return self.api.send_facebook(self.user_id, self.config.HOT_OR_COLD_QUICK_REPLY)\n\n if title == \"hot-cold\":\n self.appetizerType = choice\n if choice == \"hot\":\n self.params += \"&suppresstraits=252\"\n elif choice == \"cold\":\n self.params += \"&cookingmethod=252\"\n self.readyseteatparams += \"&cookingmethod=252\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"side-dish\":\n self.sideDish = choice\n if choice == \"potato\":\n self.params += \"&mainingredient=298\"\n self.readyseteatparams += \"&mainingredient=298\"\n elif choice == \"vegetable\":\n self.params += \"&mainingredient=77\"\n self.readyseteatparams += \"&mainingredient=77\"\n elif choice == \"rice\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=75\"\n self.readyseteatparams += \"&mainingredient=75\"\n elif choice == \"salad\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=95&mainingredient=77\"\n self.readyseteatparams = \"&category=95&mainingredient=77&trait=92\"\n elif choice == \"beans\":\n self.params += \"&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=310\"\n\n recipes = self.api.getRecipes(self.params)\n if not 
recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n isParamInMessage = self.fetch_parameters(message)\n if isParamInMessage:\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n return self.api.send_text_facebook(self.user_id, \"You can write ‘start over’ to go to the first step\")", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def build_method_call(code, line, method_object):\n full_signature = method_object[\"methodSignature\"]\n normalised_signature = normalise_signature(full_signature)\n param_values = get_method_parameter_values(code, line, full_signature)\n string_values, cmplx_types = get_string_values(param_values, full_signature)\n\n rpc_payload_length = str(\n 4 + len(normalised_signature) + len(string_values)\n )\n # Default to stub value if method-to-service correlation failed\n strong_name = (\n method_object[\"service\"][\"strongName\"]\n if method_object[\"service\"] is not None\n else \"X\"*32\n )\n rpc_blocks = []\n rpc_blocks.extend([\n RPC_VERSION,\n RPC_FLAGS,\n rpc_payload_length,\n BASE_URL,\n strong_name,\n method_object[\"rmtSvcIntName\"],\n method_object[\"methodName\"],\n ])\n rpc_blocks.extend(normalised_signature)\n rpc_blocks.extend(string_values)\n rpc_blocks.extend([\n \"1\", \"2\", \"3\", \"4\",\n method_object[\"paramCount\"]\n ])\n rpc_blocks.extend(\n generate_parameter_map(\n rpc_blocks,\n full_signature,\n param_values\n )\n )\n return rpc_blocks, cmplx_types", "def invoke(self, msg, req):\n\n node = Node.create()\n\n try:\n node.receive(msg, req)\n return\n except Exception, e:\n node.log('Error', e.message)", "def __call__(self, message):\n if not hasattr(message, 'body'):\n logger.info('Got an invalid message format, skip.')\n message.ack()\n return None\n\n body = message.body\n\n reply_exchange = body['reply_exchange']\n reply_key = body['reply_key']\n\n logger.info('Got a new task call, uid: %s', body['id'])\n\n # Task is not in registry.\n if not body['name'] in self.tasks.keys():\n logger.error('<Task \"%s\"> Does not exist in registry, skip.', body['id'])\n self.consumer.reply_state_failure(\n reply_exchange=reply_exchange,\n reply_key=reply_key,\n id=body['id'],\n reason=\"ConsumerError('cant find task in registry')\"\n )\n message.ack()\n return None\n\n # Pass task to started state\n logger.info('<Task \"%s\"> Change state to STARTED', body['id'])\n if body['reply_states']:\n self.consumer.reply_state_started(\n reply_exchange=reply_exchange,\n reply_key=reply_key,\n id=body['id']\n )\n\n try:\n # Call the task.\n 
result = self.tasks[body['name']](*body['args'], **body['kwargs'])\n\n except Exception as reason:\n logging.error('<Task \"%s\"> Change state to FAILURE: %r', body['id'], reason)\n self.consumer.reply_state_failure(\n reply_exchange=reply_exchange,\n reply_key=reply_key,\n id=body['id'],\n reason=repr(reason)\n )\n\n else:\n logger.info('<Task \"%s\"> Change state to SUCCESS: %r', body['id'], result)\n self.consumer.reply_state_success(\n reply_exchange=reply_exchange,\n reply_key=reply_key,\n id=body['id'],\n result=result\n )\n\n finally:\n logger.debug('<Task \"%s\"> Acknowledge the message.', body['id'])\n message.ack()", "def create_reply(self,\n source=None,\n to=None,\n previous_hop=None,\n next_hop=None,\n command=None,\n reply=True,\n handler=None,\n time_to_live=DEFAULT_TIME_TO_LIVE,\n **kwargs):\n result = Message(\n source=source if source is not None else self.to,\n to=to if to is not None else self.source,\n previous_hop=previous_hop if previous_hop is not None else self.next_hop,\n next_hop=next_hop if next_hop is not None else self.previous_hop,\n command=command if command is not None else self.command,\n reply=reply,\n handler=handler if handler is not None else self.handler,\n **kwargs\n )\n result.time_to_live = time() + time_to_live\n return result", "def action(self):\n\n success = None\n msg = self.incoming_message_text\n\n\n if msg == '/start':\n text = 'Welcome to SimpleBook_Bot!\\nI can help you book an venue.\\\n \\n\\nYou can control me using these commands:\\n\\\n /start-to start chatting with the bot\\n\\\n /book-to make a booking\\n\\\n /cancel-to stop chatting with the bot.\\n\\\n For more information please contact [email protected]'\n\n if self.last_name == None:\n self.outgoing_message_text = \"Hello {}! \".format(self.first_name) + text\n else :\n self.outgoing_message_text = \"Hello {} {}! 
\".format(self.first_name, self.last_name) + text\n elif msg == '/book':\n self.outgoing_message_text = \"Please enter a date in this format: YYYY-MM-DD\"\n \n elif msg == '/cancel':\n self.outgoing_message_text = \"See you again!\"\n\n else:\n try:\n datetime.strptime(msg, '%Y-%m-%d')\n self.outgoing_message_text = 'Please enter a start time in the format: HH-MM'\n except:\n try:\n datetime.strptime(msg, '%H:%M')\n self.outgoing_message_text = 'Start time: ' + msg + ' Please enter an end time in the format: HH-MM'\n except:\n self.outgoing_message_text = 'Invalid format, please try again'\n return False\n \n success = self.send_message()\n return success", "def emit(self, message):", "def Echo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_call(self, number, direction):\n raise NotImplementedError", "def _pre_hook(self, msg: Message) -> Message:\n msg.add_route(self.name, self._id)\n\n expected_parts = self._get_expected_parts(msg)\n\n req_id = msg.envelope.request_id\n if expected_parts > 1:\n self._pending_msgs[req_id].append(msg)\n\n num_partial_requests = len(self._pending_msgs[req_id])\n\n if self.logger.debug_enabled:\n self._log_info_msg(\n msg,\n f'({num_partial_requests}/{expected_parts} parts)'\n if expected_parts > 1\n else '',\n )\n\n if expected_parts > 1 and expected_parts > num_partial_requests:\n # NOTE: reduce priority is higher than chain exception\n # otherwise a reducer will lose its function when earlier pods raise exception\n raise NoExplicitMessage\n\n if (\n msg.envelope.status.code == jina_pb2.StatusProto.ERROR\n and self.args.on_error_strategy >= OnErrorStrategy.SKIP_HANDLE\n ):\n raise ChainedPodException\n\n return msg", "def MessageHandlerMethod(**kwargs):\n data: dict = kwargs['data']\n bus: AbstractPikaBus = kwargs['bus']\n payload: dict = kwargs['payload']\n print(payload)\n if payload['reply']:\n payload['reply'] = False\n bus.Reply(payload=payload)", "def comsume_msg(self, msg_type):", "def print_success_message(self, fromobj, moved):\n moved_names = list_to_string(moved)\n if fromobj:\n moved_names += \" from {}\".format(fromobj.name)\n caller_msg = \"You {} {}.\".format(self.cmdstring, moved_names)\n loc_msg = \"{} {}s {}.\".format(self.caller.name, self.cmdstring, moved_names)\n self.msg(caller_msg)\n self.caller.location.msg_contents(loc_msg, exclude=self.caller)", "def _proceed(self):\n raise NotImplementedError", "def answer_msg(self, context):\n msg = self._get_base_message(self.ANSWER_QUESTION)\n self._add_thread(msg)\n self._add_relationship(msg, self.for_relationship)\n msg.response = self.answer_str\n return msg", "def _post(self, which_port, msg):\n return _spacegrant_swig.hdlc_framer_sptr__post(self, which_port, msg)", "def status_msg(self, msg):\n\n\t\tprint(\"function not supported yet\")", "def send_message(self):\r\n return \"success\"", "def handle_message(self, message):", "def smoke_simplest_dto_create_reply_param(crud_client, result_acc):\n result_acc.add_report_line('BEGIN smoke_simplest_dto_create_reply_param()')\n resp = crud_client.response_of_delete_all_entries()\n if not resp.did_succeed():\n result_acc.add_report_line(\"delete all failed: \" + resp.get_error_message())\n\n dto1 = SimplestDTO(message='j')\n\n resp1_id = crud_client.response_of_create_entry(dto1, reply='id')\n if resp1_id.did_succeed():\n jdata = resp1_id.get_data_payload_as_jdata()\n if not 'created_id' in jdata:\n 
result_acc.add_report_line(\"didn't get back created_id in response\")\n result_acc.set_success(False)\n else:\n result_acc.add_report_line(\"create_entry with reply='id' failed\")\n result_acc.set_success(False)\n\n resp1_count = crud_client.response_of_create_entry(dto1, reply='count')\n if resp1_count.did_succeed():\n jdata = resp1_count.get_data_payload_as_jdata()\n if not 'num_created' in jdata and (jdata['num_created'] == 1):\n result_acc.add_report_line(\"didn't get back num_created = 1 in response\")\n result_acc.set_success(False)\n else:\n result_acc.add_report_line(\"create_entry with reply='count' failed\")\n result_acc.set_success(False)\n\n \n resp1_whole = crud_client.response_of_create_entry(dto1, reply='whole')\n if resp1_whole.did_succeed():\n jdata = resp1_whole.get_data_payload_as_jdata()\n if not (('message' in jdata) and (jdata['message'] == 'j')):\n result_acc.add_report_line(\"didn't get back whole entry in response: \" + str(jdata))\n result_acc.set_success(False)\n if not '_id' in jdata:\n result_acc.add_report_line(\"didn't get back _id in response\")\n result_acc.set_success(False)\n else:\n result_acc.add_report_line(\"create_entry with reply='whole' failed\")\n result_acc.set_success(False)\n\n dto2 = SimplestDTO(message='k')\n dto_lst = [dto1, dto2]\n\n resp12_id = crud_client.response_of_bulk_create_entries(dto_lst, reply='id')\n if resp12_id.did_succeed():\n jdata = resp12_id.get_data_payload_as_jdata()\n if not 'created_ids' in jdata:\n result_acc.add_report_line(\"didn't get back created_id in response\")\n result_acc.set_success(False)\n else:\n result_acc.add_report_line(\"create_entry with reply='id' failed\")\n result_acc.set_success(False)\n\n resp12_count = crud_client.response_of_bulk_create_entries(dto_lst, reply='count')\n if resp12_count.did_succeed():\n jdata = resp12_count.get_data_payload_as_jdata()\n if not (('num_created' in jdata) and (jdata['num_created'] == 2)):\n result_acc.add_report_line(\"didn't get back num_created=2 in response\")\n result_acc.set_success(False)\n else:\n result_acc.add_report_line(\"create_entry with reply='count' failed\")\n result_acc.set_success(False)\n\n \n resp12_whole = crud_client.response_of_bulk_create_entries(dto_lst, reply='whole')\n if resp12_whole.did_succeed():\n jdata = resp12_whole.get_data_payload_as_jdata()\n if not (('num_created' in jdata) and (jdata['num_created'] == 2)):\n result_acc.add_report_line(\"didn't get back num_created=2 in 'whole' response\")\n result_acc.set_success(False)\n\n if not 'created_entries' in jdata:\n result_acc.add_report_line(\"didn't get back 'created_entries' in 'whole' response\")\n result_acc.set_success(False)\n return\n\n createds = jdata['created_entries']\n\n if not (('message' in createds[0]) and (createds[0]['message'] == 'j')):\n result_acc.add_report_line(\"didn't get back whole entry in response\")\n result_acc.set_success(False)\n if not '_id' in createds[0]:\n result_acc.add_report_line(\"didn't get back _id in response\")\n result_acc.set_success(False)\n \n if not (('message' in createds[1]) and (createds[1]['message'] == 'k')):\n result_acc.add_report_line(\"didn't get back whole entry in response\")\n result_acc.set_success(False)\n if not '_id' in createds[1]:\n result_acc.add_report_line(\"didn't get back _id in response\")\n result_acc.set_success(False)\n\n else:\n result_acc.add_report_line(\"create_entry with reply='whole' failed\")\n result_acc.set_success(False)\n \n return", "def call(self) -> global___Snippet.ClientCall:", "def 
call(self) -> global___Snippet.ClientCall:", "def __call__(self, *args, **kwargs):\n return self.method(self.receiver, *args, **kwargs)", "def say_meow(self):\n\n pylog.info('My master calls me {} and meow!'.format(self.name))", "def success(msg):\n log('SUCCESS', msg)", "def _post(self, which_port, msg):\n return _spacegrant_swig.message_debug_sptr__post(self, which_port, msg)", "def fake_op_message(interface, reply, on_channel=None, on_message=None, after=None, execute=False,\r\n on_success=None, on_failure=None):\r\n assert isinstance(interface, str), \"fake_op_info: interface should be a string\"\r\n assert isinstance(reply, ProtocolReply), \"fake_op_info: reply should be a ProtocolReply instance\"\r\n options = {\"reply\": reply}\r\n if on_channel is not None:\r\n assert isinstance(on_channel, str), \"fake_op_info: on_channel should be a string\"\r\n options[\"on_channel\"] = on_channel\r\n if on_message is not None:\r\n assert isinstance(on_message, PlatformMessage), \"fake_op_info: on_message should be a PlatformMessage instance\"\r\n options[\"on_message\"] = on_message\r\n if after is not None:\r\n assert isinstance(after, int) and after >= 0, \"fake_op_info: after should be a natural integer\"\r\n options[\"after\"] = after\r\n if execute is not None:\r\n assert isinstance(execute, bool) or execute in (1, 0) >= 0, \"fake_op_info: execute should be a boolean \" \\\r\n \"or 0 or 1\"\r\n options[\"execute\"] = execute\r\n\r\n if on_success is None and on_failure is None:\r\n on_success = True\r\n on_failure = False\r\n if on_success is None and on_failure is False:\r\n on_success = True\r\n if on_failure is None and on_success is False:\r\n on_failure = True\r\n\r\n if on_success is True:\r\n assert isinstance(on_success, bool), \"fake_op_info: on_success should be a boolean\"\r\n options[\"on_success\"] = on_success\r\n\r\n if on_failure is True:\r\n assert isinstance(on_failure, bool), \"fake_op_info: on_failure should be a boolean\"\r\n options[\"on_failure\"] = on_failure\r\n else:\r\n options[\"on_failure\"] = False\r\n\r\n return new_message(interface, \"__testing__\", \"fake_next_op\", options)", "def resolve_message(self, rq):\n\n if rq.command == u\"initialize\":\n self.next_seq += 1\n DAPInitializeResponse.create(self.next_seq, rq.seq, True, rq.command, body=DAPCapabilities.create(**features)).send(self._current_client)\n self.next_seq += 1\n DAPInitializedEvent.create(self.next_seq).send(self._current_client)\n elif rq.command == u\"setBreakpoints\":\n self.next_seq += 1\n bkps = self.create_breakpoints(**rq.get_arguments().as_current_kwargs())\n body = DAPSetBreakpointsResponseBody.create([b.serialize() for b in bkps])\n DAPSetBreakpointsResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"configurationDone\":\n self.next_seq += 1\n DAPConfigurationDoneResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n elif rq.command == u\"launch\":\n # no special noDebug\n self.next_seq += 1\n DAPLaunchResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n self._ready_for_events = True\n elif rq.command == u\"disconnect\":\n self.next_seq += 1\n DAPDisconnectResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n self._current_client.close()\n self._current_client = None\n return\n elif rq.command == u\"continue\":\n self.next_seq += 1\n body = DAPContinueResponseBody.create(all_threads_continued=True)\n DAPContinueResponse.create(self.next_seq, rq.seq, True, 
body).send(self._current_client)\n debugger.stepping = SteppingMode.STEP_NO_STEP\n debugger.continue_next()\n elif rq.command == u\"threads\":\n self.next_seq += 1\n body = DAPThreadsResponseBody.create([DAPThread.create(0, \"renpy_main\")])\n DAPThreadsResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"stackTrace\":\n self.next_seq += 1\n body = DAPStackTraceResponseBody.create(debugger.get_stack_frames(**rq.get_arguments().as_current_kwargs()))\n DAPStackTraceResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"scopes\":\n self.next_seq += 1\n body = DAPScopesResponseBody.create(debugger.get_scopes(int(rq.get_arguments().get_frame_id())))\n DAPScopesResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"variables\":\n self.next_seq += 1\n body = DAPVariablesResponseBody.create(debugger.format_variable(**rq.get_arguments().as_current_kwargs()))\n DAPVariablesResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"pause\":\n self.next_seq += 1\n DAPPauseResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.break_pause = True\n elif rq.command == u\"next\":\n print(\"STEP\")\n self.next_seq += 1\n DAPNextResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_NEXT\n debugger.continue_next()\n elif rq.command == u\"stepIn\":\n self.next_seq += 1\n DAPStepInResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_INTO\n debugger.continue_next()\n elif rq.command == u\"stepOut\":\n self.next_seq += 1\n DAPStepOutResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_OUT\n debugger.continue_next()\n else:\n self.next_seq += 1\n DAPErrorResponse.create(self.next_seq, rq.seq, False, message=\"NotImplemented\").send(self._current_client)", "def create_reply(self):\n return MessageCreateReplyRequestBuilder(self.append_to_request_url(\"createReply\"), self._client)", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def MessageAck(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def Result(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def callback(ch, method, properties, body):\n print(f\"[X] Received %r\" % body)\n\n # wait for certain time until task is completed\n time.sleep(body.count(b'.'))\n print(\"[X] Done\")\n\n \"\"\"Acknowledge after completing task this prevents message\n message loss when the worker dies. 
And when worker\n dies message will be passes to another online worker.\n Caution: We are not talking about worker node of RabbitMQ.\n \"\"\"\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def agent_message(self, in_message):\n\n logging.debug(\"Received %s\" % in_message)\n\n if in_message.startswith(\"start_testing\"):\n self._start_testing()\n\n elif in_message.startswith(\"finish_testing\"):\n epoch = int(in_message.split(\" \")[1]) \n self._finish_testing(epoch)\n else:\n return \"I don't know how to respond to your message\"", "def create_output(self, messages):", "def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)", "def _call_method(self, method, req, resp_class):\n payload = req.SerializeToString()\n headers = {\n 'Content-Type': 'application/x-protobuf',\n 'Content-Length': str(len(payload))\n }\n response, content = self._http.request(\n self._url + method, method='POST', body=payload, headers=headers)\n if response.status != 200:\n raise RPCError(method, response, content)\n resp = resp_class()\n resp.ParseFromString(content)\n return resp", "def ReceiveMsg(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def func(self):\n\n caller = self.caller\n\n if not self.args:\n caller.msg(\"Say what?\")\n return\n\n speech = self.args\n\n # calling the speech hook on the location\n speech = caller.location.at_say(caller, speech)\n\n # Feedback for the object doing the talking.\n caller.msg('You say, \"%s|n\"' % speech)\n\n # Build the string to emit to neighbors.\n emit_string = '%s says, \"%s|n\"' % ( getNameAnsi( caller ), speech )\n caller.location.msg_contents(text=(emit_string, {\"type\": \"say\"}),\n exclude=caller, from_obj=caller)", "def handleMessage(msg):", "def create_action_msg(self, action):\n raise NotImplementedError(\"Don't know how to translate the action to a msg\")" ]
[ "0.5911461", "0.5875573", "0.5865693", "0.5813939", "0.58092356", "0.58092356", "0.58038336", "0.57274956", "0.5696109", "0.56097317", "0.56019354", "0.55925906", "0.5573429", "0.55502784", "0.55441946", "0.5507472", "0.54945314", "0.5474131", "0.547276", "0.5455448", "0.54343057", "0.54322386", "0.5427942", "0.54077053", "0.53943294", "0.5380617", "0.53710395", "0.53643584", "0.53633934", "0.53626037", "0.5349844", "0.5339986", "0.5339986", "0.5339914", "0.5321966", "0.53205043", "0.53202504", "0.53202504", "0.53182477", "0.53182477", "0.5314789", "0.5303688", "0.5292952", "0.52889985", "0.52873266", "0.5286241", "0.5285217", "0.5282668", "0.5282564", "0.5281613", "0.5281085", "0.5278764", "0.5275093", "0.52676153", "0.525641", "0.525378", "0.5249897", "0.5248284", "0.5246989", "0.5245537", "0.5241673", "0.52401197", "0.5237729", "0.5233448", "0.52306515", "0.522985", "0.5223894", "0.5216946", "0.5214382", "0.521391", "0.5213068", "0.5202205", "0.5198928", "0.5184969", "0.5182852", "0.51825583", "0.51819015", "0.51756936", "0.5175029", "0.5154", "0.5154", "0.51538104", "0.51529807", "0.5125271", "0.5125106", "0.5117785", "0.5116207", "0.51109225", "0.5110598", "0.5110099", "0.51094294", "0.509371", "0.5088877", "0.5086143", "0.5080377", "0.50773287", "0.50762177", "0.50702685", "0.50698906", "0.5069527" ]
0.57788986
7
Returns conversation for a thread
def conversation(self, thread):
    assert isinstance(thread, int) and 0 <= thread < len(self._threads), "Thread {} don't exists at channel {}!".\
        format(thread, self.name)
    return self._threads[thread]["conversation"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_thread_for_message(id):\n query = 'SELECT thread_id from messages WHERE id like %s'\n return __perform__(query, (id,), method='fetchone')", "def find_all_messages_in_thread_for_person(cnx: db_connector, thread_id: int, is_singlechat: bool):\n result = []\n cursor = cnx.cursor(buffered=True)\n command = \"\"\"\n SELECT id, content, sender, thread_id, is_singlechat, timestamp FROM message\n WHERE thread_id = %s AND is_singlechat = %s\n ORDER BY timestamp ASC\n \"\"\"\n cursor.execute(command, (thread_id, is_singlechat))\n tuples = cursor.fetchall()\n\n for(id, content, sender, thread_id, is_singlechat, timestamp) in tuples:\n message = MessageObject(\n id_=id,\n content=content,\n sender=sender,\n thread_id=thread_id,\n is_singlechat=is_singlechat,\n timestamp=timestamp\n )\n result.append(message)\n\n cnx.commit()\n cursor.close()\n\n return result", "def get_thread(self):\n url = (\"https://api.imgur.com/3/message/{0}/thread\".format(\n self.first_message.id))\n resp = self._imgur._send_request(url)\n return [Message(msg, self._imgur) for msg in resp]", "def get_thread(self):\n return self.threads[self.thread_id]", "def convthread(self, convthread_id):\n\n df = self.dfs[\"convthreads\"]\n tag_records = df[df.id == convthread_id]\n if 1 == len(tag_records): \n return tag_records.values[0]\n elif 1 < len(tag_records): \n raise Exception(\"More than one record exist by convthread_id\")\n else :\n import warnings\n warnings.warn(\"No record matched with convthread_id\", Warning)\n return None", "def message_thread(self):\r\n return resource.MessageThread(self)", "def conversation(self, line, teamchat):\n # print(type(line))\n if (line.split(\" \")[0] == \"[chat]:\" or line.split(\" \")[0] == \"[teamchat]:\") and line.split(\" \")[1] != \"***\":\n if teamchat:\n result = re.search(\"\\[teamchat\\]: (\\d+):(.+):(.+): (.+)\", line)\n else:\n result = re.search(\"\\[chat\\]: (\\d+):(.+):(.+): (.+)\", line)\n name = result.groups()[2]\n ide = result.groups()[0]\n message = result.groups()[-1]\n team_chat = result.groups()[1]\n info = [name, message, ide, team_chat]\n return info\n #[chat]: 0:-2:LeveL 5: mo\n else:\n info = [\"NONE\", \"NONE\", \"NONE\"]\n return info", "def get_conversation(self, sender, receiver):\n return Conversation.objects.get_or_new(sender, receiver)", "async def get_thread(self, board_id, thread_id):\n\n route = f'{board_id}/thread/{thread_id}'\n\n data = await self.interact(route)\n\n value = tuple(map(Asset, data['posts']))\n\n return value", "def get_message(self, sender=None):\n if sender == None:\n if self.public_key == None:\n return None\n participant = self.public_key\n else:\n participant = sender\n following = [tx.follow for block in self.__chain for tx in block.chipsactions if tx.sender == participant] \n tx_recipient2 = [tx.message for block in self.__chain for tx in block.messsactions if tx.follower in following]\n print(\"tx_recipient2\")\n print(tx_recipient2)\n return tx_recipient2", "def get_conversations(self):\n\t\treturn self.conversations", "def chat(self):\n return self._get(\"chat\")", "async def get_thread_info(self) -> Any:\n return await self.AD.threading.get_thread_info()", "def get_gift_conversation(self):\r\n return self.gift_conversation", "def get_thread(self):\n return Comment.objects.filter(path__startswith=self.get_root_path())", "def get_thread(request, slug_cat, slug_thread):\n context_dict = {}\n thread = Thread.objects.get(slug=slug_thread)\n if thread and thread.category.slug == slug_cat:\n page = request.GET.get('page')\n 
context_dict['thread'] = thread\n posts = Post.objects.filter(thread=thread).order_by('date')\n if posts:\n context_dict['first_post_pk'] = posts[0].pk\n paginator = Paginator(posts, posts_per_page) # Show 15 threads per page\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n posts = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n posts = paginator.page(paginator.num_pages)\n context_dict['posts'] = posts\n else:\n return page_not_found(request)\n if request.user.is_authenticated():\n member = Member.objects.get(user=request.user)\n if member:\n context_dict['member'] = member\n return render(request, 'thread.html', context_dict)", "def read_thread(thread_num):\n pass# TODO", "def convthreads(self,tag_id=None):\n if None == tag_id:\n return self.dfs[\"convthreads\"].values\n else :\n df = self.dfs[\"convthreads_with_tags\"]\n return df[df.tag_id == tag_id].values", "async def getContext(self, channel: discord.TextChannel):\n ctxGuild = channel.guild\n ctxChannelId = await self.config.guild(ctxGuild).get_attr(KEY_CTX_CHANNEL_ID)()\n ctxChannel = discord.utils.get(ctxGuild.channels, id=ctxChannelId)\n if not ctxChannel:\n self.logger.error(\"Cannot find channel to construct context!\")\n return None\n async for message in ctxChannel.history(limit=1):\n lastMessage = message\n return await self.bot.get_context(lastMessage)", "def get_rel_thread(self, org_id, rel_id):\n for thread in self.get_org_question(org_id).iter('Thread'):\n if thread.attrib['THREAD_SEQUENCE'] == org_id + \"_\" + rel_id:\n return thread\n return None", "def thread(self):\n return self._thread", "def thread(self):\n return self._thread", "def get_thread(self, id, format='metadata'):\n try:\n return self.service.users().threads().get(userId='me',\n id=id,\n format=format).\\\n execute()\n\n except googleapiclient.errors.HttpError as ex:\n if ex.resp.status == 403 or ex.resp.status == 500:\n return self.get_thread(id, format)\n else:\n raise ex", "def cmd_conversation_id(client, args):\n conversation = client.get_conversation(args.conversation_id,\n args.page, args.offset)\n data = conversation.__dict__\n try:\n data['messages'] = [item.__dict__ for item in data['messages']]\n except KeyError:\n pass\n generate_output({'conversation': data})", "def get_conversations(self, name: str) -> ConversationDict:\n if self.conversations:\n pass\n elif not self.single_file:\n filename = f\"{self.filename}_conversations\"\n data = self._load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self._load_singlefile()\n return self.conversations.get(name, {}).copy() # type: ignore[union-attr]", "def new_reply(cls, thread, user, content):\n msg = cls.objects.create(thread=thread, sender=user, content=content)\n thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)\n thread.userthread_set.filter(user=user).update(deleted=False, unread=False)\n message_sent.send(sender=cls, message=msg, thread=thread, reply=True)\n #for recip in thread.userthread_set.exclude(user=user):\n # send_newmessage_mail(msg, recip.user)\n return msg", "def get_thread_id(self):\n\t\tl = re.findall('(?<=\\[)[\\w/-]+', self.subject)\n\t\treturn l and l[0] or None", "def thread(self, board, t_id):\n OP = self.posts_coll.find_one( {'board' : board, 'thread':0, 'id': t_id } )\n if not OP:\n return None\n replies = self.posts_coll.find( {'board':board, 'thread':t_id} ).sort('id',1)\n 
OP=self.__process_data(OP)\n replies=map(self.__process_data, replies)\n return (OP, replies)", "def popular_messages(self, convthread_id):\n if self.dfs.has_key(convthread_id):\n return self.dfs[convthread_id]\n else:\n import warnings\n warnings.warn(\"No record matched with convthread_id\", Warning)\n return []", "def _check_conversation(self):\n _conversation_info = redis_hash_to_dict(self.application.redis, ConversationInfo, self._conversation_uuid)\n if _conversation_info == None:\n return (None, None, [])\n\n _key = ConversationUserData.__tablename__ + \".app_uuid.\" + self._app_uuid + \".user_uuid.\" + \\\n self._user_uuid + \".conversation_uuid.\" + self._conversation_uuid\n _conversation_user_data_uuid = self.application.redis.get(_key)\n if _conversation_user_data_uuid == None:\n return (_conversation_info, None, [])\n\n _conversation_user_data = redis_hash_to_dict(self.application.redis, ConversationUserData, _conversation_user_data_uuid)\n if _conversation_user_data == None:\n return (_conversation_info, None, [])\n\n _key = ConversationUserData.__tablename__ + \".conversation_uuid.\" + self._conversation_uuid\n _members = self.application.redis.smembers(_key)\n return (_conversation_info, _conversation_user_data, _members)", "def get_thread_name(self) -> Optional[str]:\n return self.thread_name", "def get_conversation(request):\n collected_values = {}\n\n # Only allow GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract and form params\n uid = request.GET['uid']\n oid = request.GET['oid']\n token = request.GET['token']\n ts_query = request.GET['ts']\n time_user_seen = request.GET.get('tus')\n limit = int(request.GET['limit'])\n\n if ts_query == \"\":\n ts_query = timezone.now()\n\n change_user_seen = False\n if time_user_seen == \"true\":\n change_user_seen = True\n\n # Check if token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Collect all messages sent by two users in question listed by created at time\n message_query_set = Messages.objects.filter(\n Q(user_id=uid, other_id=oid) |\n Q(other_id=uid, user_id=oid)).order_by('-created_at')[:limit]\n\n # Collect all messages from query\n test_list = []\n for message in message_query_set:\n if change_user_seen:\n message.time_user_seen = timezone.now()\n message.save()\n test_list.append(message.get_map())\n\n # Collect return values\n collected_values[\"messages\"] = test_list\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def get(self, thread, timeout=None):\n thread.join(timeout=timeout)\n if thread.is_alive():\n raise TimeoutError('timeout exceeded')\n thread_id = id(thread)\n output = self.outputs[thread_id]\n del self.outputs[thread_id]\n return output", "def extract_current_thread(maybe_thread_str: str) -> Optional[str]:\n match = CURRENT_THREAD_RE.search(maybe_thread_str)\n if match is not None:\n return match.group(1)\n return None", "def get_chat_thread_properties(\n self,\n chat_thread_id, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.ChatThreadProperties\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.ChatThreadProperties\"]\n error_map = {\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-09-07\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_chat_thread_properties.metadata['url'] # type: ignore\n path_format_arguments = {\n 'endpoint': self._serialize.url(\"self._config.endpoint\", self._config.endpoint, 'str', skip_quote=True),\n 'chatThreadId': self._serialize.url(\"chat_thread_id\", chat_thread_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n deserialized = self._deserialize('ChatThreadProperties', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_message(self, stream, timeout=None):\n if stream == 'iopub':\n return self.kc.get_iopub_msg(timeout=timeout)\n elif stream == 'shell':\n return self.kc.get_shell_msg(timeout=timeout)", "def get_chat(self) -> Optional[ChatNode]:\n triples = self.agent_memory.get_triples(pred_text=\"chat_effect_\", obj=self.memid)\n if triples:\n chat_id, _, _ = triples[0]\n return ChatNode(self.agent_memory, chat_id)\n else:\n return None", "def get_related_threads(element):\n tag2path = {\n 'OrgQuestion': './Thread',\n 'Thread': '.',\n }\n if element.tag in tag2path:\n return element.findall(tag2path[element.tag])\n return element.findall('./OrgQuestion/Thread')", "def read_message(stdscr, conversation):\n backlog = []\n tail = filesystem.tail_conversation(conversation)\n old_backlog = 0\n while True:\n # These settings are inside the loop because the reply mode disables them.\n stdscr.nodelay(1)\n curses.noecho()\n for line in tail:\n if line:\n backlog.append(line.replace(\"\\r\", \"\"))\n else:\n break\n if old_backlog != len(backlog):\n stdscr.erase()\n safe_put(stdscr, \"Viewing conversation with {user}. 
You can (r)eply or (q)uit.\".format(user=conversation), (2, 0))\n safe_put(stdscr, \"\\r\".join(backlog[-20:]), (4, 0))\n stdscr.refresh()\n old_backlog = len(backlog)\n\n selection = stdscr.getch()\n if selection == ord(\"q\"):\n break\n if selection == ord(\"r\"):\n stdscr.nodelay(0)\n send_message(stdscr, conversation)\n # Trigger a redraw after sending a message\n old_backlog = 0\n time.sleep(0.1)\n stdscr.nodelay(0)\n stdscr.clear()\n stdscr.refresh()", "def get_chat_message(\n self,\n chat_thread_id, # type: str\n chat_message_id, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> \"_models.ChatMessage\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.ChatMessage\"]\n error_map = {\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 401: lambda response: ClientAuthenticationError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 403: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 429: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n 503: lambda response: HttpResponseError(response=response, model=self._deserialize(_models.CommunicationErrorResponse, response)),\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-09-07\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_chat_message.metadata['url'] # type: ignore\n path_format_arguments = {\n 'endpoint': self._serialize.url(\"self._config.endpoint\", self._config.endpoint, 'str', skip_quote=True),\n 'chatThreadId': self._serialize.url(\"chat_thread_id\", chat_thread_id, 'str'),\n 'chatMessageId': self._serialize.url(\"chat_message_id\", chat_message_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n deserialized = self._deserialize('ChatMessage', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_thread_by_name( self, name):\n\n assert name in self._thread_index, \"No thread named {}\".format( name )\n\n return self._threads[ self._thread_index[ name ]]", "def reply(self, name, namespace, uid):\n with self.lock:\n for m in self.messages(namespace, name):\n if m.replying_to == uid:\n return m", "def conversation_path(project: str, location: str, conversation: str,) -> str:\n return \"projects/{project}/locations/{location}/conversations/{conversation}\".format(\n project=project, location=location, conversation=conversation,\n )", "def get_thread(dayname):\n d = datetime.date.today()\n\n try:\n title = config.get(dayname, \"title\") + ' (' + d.strftime(\"%B %d\") + ')'\n text = config.get(dayname, \"text\")\n except:\n sys.exit(2) # nothing found for today\n text = 
\"\\n\\n\".join(text.split(\"\\n\"))\n\n return title, text", "def get(self) -> List[Conversation]:\n return get_all_conversations(), 200", "def _extract_thread_stack_trace(\n self, thread: str, lines: List[str]\n ) -> Optional[List[str]]:\n thread_str = f\"Thread {thread} \"\n i: int = 0\n while i < len(lines) and thread_str not in lines[i]:\n i += 1\n if i != len(lines) and thread_str in lines[i]:\n j: int = i\n while j < len(lines) and lines[j] != \"\\n\":\n j += 1\n start = i - 1\n end = j\n return lines[start:end]\n return None", "def retrieve_message(channel, message_id):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_history(channel=channel, latest=message_id, inclusive=True, limit=1)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "def message_threads(self):\r\n return resource.MessageThreads(self)", "def _message(message):\n str_thread = \"Thread-%d\" % threading.current_thread().ident\n return \"%s\\t%s\" % (str_thread, message)", "def get(self,id):\n adm = Administration()\n cm = adm.get_chatmessage_by_id(id)\n return cm", "def get_message(self, mid):\n cursor = self.get_cursor()\n query = 'WITH like_count AS (SELECT mid, COUNT(*) AS likes ' \\\n 'FROM vote WHERE upvote = TRUE GROUP BY mid), ' \\\n 'dislike_count AS (SELECT mid, COUNT(*) AS dislikes ' \\\n 'FROM vote WHERE upvote = FALSE GROUP BY mid),' \\\n 'replies_query AS (SELECT replied_to, array_agg(mid) AS replies_list ' \\\n 'FROM replies INNER JOIN messages ON replies.reply = messages.mid ' \\\n 'GROUP BY replied_to) ' \\\n 'SELECT messages.mid, cid, message, image, COALESCE(likes, 0) AS likes, ' \\\n 'COALESCE(dislikes, 0) AS dislikes, username, ' \\\n \"COALESCE(replies_list, '{}') AS replies \" \\\n 'messages.created_on FROM messages ' \\\n 'LEFT OUTER JOIN like_count ON messages.mid = like_count.mid ' \\\n 'LEFT OUTER JOIN dislike_count ON messages.mid = dislike_count.mid ' \\\n 'LEFT OUTER JOIN photo ON messages.mid = photo.mid ' \\\n 'INNER JOIN users ON messages.uid = users.uid ' \\\n 'LEFT OUTER JOIN replies_query ON messages.mid = replies_query.replied_to ' \\\n 'WHERE messages.mid = %s ORDER BY messages.created_on DESC'\n cursor.execute(query, (mid,))\n messages = cursor.fetchall()\n return messages", "def thread_id(self):\n return self._thread_id", "def get_chat(self):\n name = self.message.find_element_by_xpath(\".//ancestor::div[@class='_1Flk2 _1sFTb']\").find_element_by_xpath(\".//span[@class='_35k-1 _1adfa _3-8er']\").text\n chat: ChatElement = ChatElement(self.message.find_element_by_xpath(\"//div[@class='_2aBzC'][.//span[@title='{}']]\".format(name)))\n return chat", "def concatenate_session(session_id):\n conversation = \"\"\n for msg in db.messages.find({'session_id': session_id}):\n conversation += (msg['message'] + \"\\n\") \n return conversation", "async def topic(self, ctx):\r\n website = requests.get('https://www.conversationstarters.com/generator.php').content\r\n soup = BeautifulSoup(website, 'html.parser')\r\n topic = soup.find(id=\"random\").text\r\n await ctx.send(topic)", "def getfirstmessage(s,refconvdf):\r\n return refconvdf[(refconvdf.convid==s) & (refconvdf.part_type=='initial')].body.iloc[0]", "def start_thread(self, topic_caster, channel, interface, reply_to_tc=None):\r\n if channel not in self._channels:\r\n raise ValueError(\"Channel {} not exists!\".format(channel))\r\n 
return TalkContext(channel, self._channels[channel].start_thread(topic_caster, reply_to_tc), interface)", "def primary_channel(guild: discord.Guild) -> discord.TextChannel | None:\n if guild.system_channel is not None:\n return guild.system_channel\n\n for channel_candidate in guild.channels:\n if (\n isinstance(channel_candidate, discord.TextChannel)\n and channel_candidate.permissions_for(guild.me).send_messages\n ):\n return channel_candidate\n\n return None", "def get_message(self):\n return self.msg", "def get(self, id):\n adm = Administration()\n cm = adm.get_chatmessage_by_id(id)\n return cm", "def get_best_thread(self, question, tag_name):\n thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)\n\n # HINT: you have already implemented a similar routine in the 3rd assignment.\n question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim).reshape(1, -1)\n best_thread = pairwise_distances_argmin(question_vec,thread_embeddings, metric=\"cosine\")\n return thread_ids[best_thread][0]", "def get_name(thread_id):\r\n for thread in threading.enumerate():\r\n if thread.ident == thread_id:\r\n return thread.name", "def list_messages(stdscr):\n # Show the cursor and echo output.\n curses.curs_set(1)\n curses.echo()\n conversations = filesystem.conversations()\n stdscr.clear()\n row = 1\n column = 1\n for name in conversations:\n safe_put(stdscr, name, (row, column))\n row += 1\n safe_put(stdscr, \"Start typing a name: \", (row+1, column))\n stdscr.refresh()\n selection = \"\"\n possibilities = conversations\n while len(possibilities) > 1:\n selection += chr(stdscr.getch())\n if selection.endswith(\"\\n\") and selection[:-1] in possibilities:\n # Hit enter to confirm the choice of a username when it's a\n # substring of another username.\n possibilities = [selection[:-1]]\n break\n possibilities = [p for p in possibilities if p.startswith(selection)]\n curses.curs_set(0)\n curses.noecho()\n stdscr.clear()\n stdscr.refresh()\n if possibilities:\n read_message(stdscr, possibilities[0])\n else:\n print(\"No user matched '{selection}'\".format(selection=selection))", "async def get_chat_id(conn, chat):\n query = db.chats.select().where(db.chats.c.name == chat)\n chat = await conn.fetch(query)\n return chat[0]['id']", "def get_message():\r\n conn = None\r\n try:\r\n params = config()\r\n conn = psycopg2.connect(**params)\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT user_id, user_message FROM messages ORDER BY user_message\")\r\n print(\"Number of messages: \", cur.rowcount)\r\n row = cur.fetchone()\r\n\r\n while row is not None:\r\n print(row)\r\n row = cur.fetchone()\r\n\r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()", "def get_discovery_message(self):\n return self.messages[\"discovery\"].get()", "def fetch_conversation_starter():\n\n url = 'https://www.conversationstarters.com/generator.php'\n\n try:\n response = requests.get(url)\n html_content = response.text\n soup = BeautifulSoup(html_content, 'html.parser')\n conv_starter = soup.find_all(text=True)[22].strip()\n return conv_starter\n except Exception as e:\n print(\"Error occurred fetching conversation starter:\\n\", e)", "def get_relevant(self, msg):\n nick = msg.GetNick()\n net = msg.GetNetwork()\n data = {\"body\": msg.GetText(),\n \"network\": net.GetName(),\n \"away\": net.IsIRCAway(),\n \"client_count\": len(net.GetClients()),\n \"nick\": nick.GetNick(),\n \"ident\": nick.GetIdent(),\n \"host\": 
nick.GetHost(),\n \"hostmask\": nick.GetHostMask()}\n chan = msg.GetChan()\n if chan:\n data[\"context\"] = data[\"channel\"] = chan.GetName()\n data[\"detached\"] = chan.IsDetached()\n else:\n data[\"context\"] = data[\"nick\"]\n return data", "def get_conversations(self):\n conversation_ids_index = -1\n conversations = []\n with open(self.movie_conversations, 'r', encoding='iso-8859-1') as f:\n for line in f:\n items = line.split(self.DELIM)\n conversation_ids_field = items[conversation_ids_index]\n conversation_ids = literal_eval(conversation_ids_field) # evaluate as a python list\n conversations.append(conversation_ids)\n return conversations", "def retrieve(self, request, *args, **kwargs):\n return super(ConversationViewSet, self).retrieve(request, *args, **kwargs)", "def test_get_message_reply(self):\n message1 = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n message1.created_at = now() - datetime.timedelta(days=1)\n message1.save()\n message2 = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n message2.created_at = now() - datetime.timedelta(hours=2)\n message2.save()\n\n # thread.last_read_at is normally set by the by_user query\n self.thread.last_read_at = now() - datetime.timedelta(hours=3)\n messages = self.thread.messages_for_user(self.user)\n\n # Messages are returned sorted from newest to oldest\n self.assertEqual(messages[0], message2)\n self.assertFalse(messages[0].read)\n self.assertEqual(messages[1], message1)\n self.assertTrue(messages[1].read)", "def getTelescope(self):\n return self.header['TELESCOP']", "def get_message(self, user):\n return None", "def competition(update, context):\n #update.message.reply_text(s)\n chat_id = update.message.chat_id\n bot.send_message(chat_id,text=message,\n parse_mode=telegram.ParseMode.HTML)\n #return s ", "def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):\n self.markAsDelivered(thread_id, message_object.uid)\n self.markAsRead(thread_id)\n \n log.info(\"{} from {} in {}\".format(message_object, thread_id, thread_type.name))\n \n # Don't reply if the bot has messaged itself or if the bot has been messaged from a group\n if author_id != self.uid and thread_type != ThreadType.GROUP:\n text = message_object.text.split()\n\n # If !help has been detected, show available options\n if text[0] == \"!help\":\n if self.currentPlayer.fbid == author_id:\n start = \"The action is currently on you. \"\n else:\n start = \"The action is currently on \" + self.currentPlayer.name + \". 
\"\n\n self.send(Message(text=start + \"\\nAvailable commands: \\n !help- show options \\n !stacks- show stack sizes \\n !pots- show pots \\n !bets- show current bets\"),\\\n thread_id=thread_id, thread_type=ThreadType.USER)\n\n # If prompted, show stack sizes\n elif text[0] == \"!stacks\":\n message = \"\"\n\n for player in self.application.table.Players:\n message += player.name + \": \" + str(player.chips) + \"\\n\"\n\n self.send(Message(text=message), thread_id=thread_id, thread_type=ThreadType.USER)\n\n # If prompted, show pots\n elif text[0] == \"!pots\":\n message = \"\"\n\n for pot in self.application.table.pots:\n message += str(pot) + \"\\n\"\n\n self.send(Message(text=message), thread_id=thread_id, thread_type=ThreadType.USER)\n\n # If prompted, show bets\n elif text[0] == \"!bets\":\n message = \"\"\n\n for player in self.application.table.Players:\n message += player.name + \": \" + str(player.bet) + \"\\n\"\n\n message += self.application.currentBet.get()\n\n self.send(Message(text=message), thread_id=thread_id, thread_type=ThreadType.USER)\n \n # If the current player has messaged the bot, parse their input to see how they have bet\n elif author_id == self.currentPlayer.fbid:\n \n # Handle calling, going all-in, or folding\n if text[0] == 'c' or text[0] == 'a' or text[0] == 'f':\n # Set bet in application\n self.application.setBet(self.currentPlayer, text[0])\n \n # Find the proper string for the action taken\n action = \"\"\n\n if text == 'c':\n action = \"called\"\n \n elif text == 'a':\n action = \"raised\"\n\n else:\n action = \"folded\"\n\n # Send a message to the gameThread and the current player, stop listening, and proceed the application\n self.send(Message(text=\"Player \" + self.currentPlayer.name + \" has \" + action + \"\\n\" + self.application.potString.get() + \"\\n\" + self.application.currentBet.get()),\\\n thread_id=self.gameThread, thread_type=ThreadType.GROUP)\n self.send(Message(text=\"You have \" + action), thread_id=thread_id, thread_type=ThreadType.USER)\n\n self.stopListening()\n self.application.proceed()\n \n # Handle raising\n elif text[0] == 'r':\n # Check to see if the raise amount is valid\n try:\n raiseAmount = int(text[1])\n\n # If the raiseAmount is too low, notify the player\n if raiseAmount != self.currentPlayer.chips - (self.application.table.currentBet - self.currentPlayer.bet)\\\n and raiseAmount <= self.application.table.currentBet:\n\n self.send(Message(text=\"Invalid raise amount. Please raise at least as much as the current bet (or go all-in).\"),\\\n thread_id=thread_id, thread_type=ThreadType.USER)\n\n else:\n # Set bet in application\n self.application.setBet(self.currentPlayer, text[0], raiseAmount)\n\n # Send a message to the gameThread and the current player, stop listening, and proceed the application\n self.send(Message(text=\"Player \" + self.currentPlayer.name + \" has \" + action), thread_id=self.gameThread, thread_type=ThreadType.GROUP)\n self.send(Message(text=\"You have \" + action), thread_id=thread_id, thread_type=ThreadType.USER)\n\n self.stopListening()\n self.application.proceed()\n\n except:\n self.send(Message(text=\"Invalid raise amount. Please provide a valid integer.\"), thread_id=thread_id, thread_type=ThreadType.USER)\n\n # If the wrong player has responded, notify them that it is not their turn\n else:\n self.send(Message(text=\"It is not your turn. 
The current player is \" + self.currentPlayer.name), thread_id=thread_id, thread_type=ThreadType.USER)", "def get_messages_from_cursor(self):\n\n def get_msg(r):\n msg = dict(\n id=r[0],\n datetime=r[1],\n text=r[2],\n sender=r[3],\n media=r[4],\n **json.loads(r[5]),\n )\n if len(r) > 6:\n msg['dialog'] = r[6]\n for field in DATETIME_FIELDS:\n if field not in msg:\n continue\n tz_field = msg[field]\n if isinstance(tz_field, str):\n msg[field] = parse_time(tz_field)\n return {k: v for k, v in msg.items() if v} # get rid of Falsey\n\n return {\n r[0]: get_msg(r)\n for r in self.cur.fetchall()\n }", "def channel_info(channel_id):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_info(channel=channel_id)\n assert response['ok'] is True\n return response['channel']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def get_messages(user, conv_id, msg_id):\n\n if msg_id:\n def get_messages(user, conv, response):\n msg = ConvMessages.get_by_id(msg_id)\n if msg:\n response['message'] = msg.get_full_data()\n else:\n return NOT_FOUND_RESPONSE\n return response\n\n else:\n # method to call if user is part of the conversation\n def get_messages(user, conv, response):\n response['messages'] = conv.get_messages_full_data()\n return response\n\n return process_apicall_checkconv_checkuser(user, conv_id, get_messages)", "def traducitelo(slack_rtm_output):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and output['user'] != BOT_ID:\n if AT_BOT in output['text']:\n output['text'] = output['text']\\\n .split(AT_BOT)[1].strip().lower()\n return output['text'], output['channel'], output['user']\n\n return None, None, None", "async def get_context(self, message, *, cls=NewCtx):\n return await super().get_context(message, cls=cls)", "def _get_send_community(self):\n return self.__send_community", "def get_message(self, i):\n pass", "def wait_for_messages(self):\n msg = self.inbox.get()\n return msg", "def load_inbox(request):\n threads = models.MessageThread.objects.filter(clients=request.user).annotate(\n unread_count=Count('receipts',filter=Q(receipts__recipient=request.user))\n )\n thread_data = serializers.MessageThreadListSerializer(threads).data\n #user = userauth_models.User.objects.filter(username=request.user.username)\n #print(user.username)\n #print(get_channel_layer())\n #print(request.session['channel_name'])\n return JsonResponse({'threads':thread_data})", "def get_word_context(word):\r\n\tfor content, profile in word_context_profile:\r\n\t\tif word == content:\r\n\t\t\treturn profile \r\n\treturn 0", "def get_reply_target(self, agent, collective):", "def _get_text_channel(\n self,\n guild: Guild,\n channel_id: int,\n ) -> Optional[TextChannel]:\n return guild.get_channel(channel_id)", "def __chat_id_response(self) -> int:\n try:\n fetch_updates = self.__get_updates()\n return fetch_updates[0]['message']['chat']['id']\n except TimeoutError as tm_err:\n print(tm_err)\n sys.exit(1)", "def getmessage(self, update, context):\r\n\r\n redirect_uri = \"https://thawing-ridge-47246.herokuapp.com\"\r\n\r\n # настройка соединения\r\n flow = Flow.from_client_secrets_file(\r\n 'credentials.json',\r\n scopes=SCOPES,\r\n redirect_uri=redirect_uri)\r\n\r\n code = self.get_code()\r\n\r\n flow.fetch_token(code=code, code_verifier=\"111\") # устанавливаем соединение с гуглом\r\n\r\n session = 
flow.authorized_session() # создаем сессию\r\n response = session.get('https://www.googleapis.com/gmail/v1/users/me/messages').json() # формируем запрос и получаем ответ сервера\r\n\r\n messages = response[\"messages\"]\r\n\r\n # у каждого из сообщений достаем id\r\n for message in messages[0:10]:\r\n mid = message['id']\r\n\r\n # получаем сообщение по id\r\n message_message = session.get(f'https://www.googleapis.com/gmail/v1/users/me/messages/{mid}').json()\r\n\r\n # информация об отправителе, получателе и теме сообщения хранится в ключе 'payload' --> 'headers'\r\n headers = message_message['payload']['headers']\r\n\r\n from_who = None\r\n to_whom = None\r\n subject = None\r\n\r\n for item in headers:\r\n if item['name'] == 'From':\r\n from_who = item['value']\r\n elif item['name'] == 'To':\r\n to_whom = item['value']\r\n elif item['name'] == 'Subject':\r\n subject = item['value']\r\n\r\n # ищем текст сообщения\r\n # достаем из сообщения его части\r\n message_payload_parts = message_message['payload']['parts']\r\n zero_part = message_payload_parts[0]\r\n\r\n if zero_part['mimeType'] == 'text/plain':\r\n self.message_without_attachments(context, message_payload_parts, from_who, to_whom, subject)\r\n elif zero_part['mimeType'] == 'multipart/alternative':\r\n self.message_with_attachments(session, mid, context, zero_part, message_payload_parts, from_who,\r\n to_whom, subject)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'Done.')", "def send(self, text, thread, mentions=None, reply=None):\n # TODO: more settings (in kwargs), like attachments\n if thread is None:\n raise Exception('Could not send message: `thread` is None')\n message = None\n if mentions is not None:\n message = models.Message.formatMentions(text, *mentions)\n if message is None:\n message = models.Message(text=text)\n if reply is not None:\n message.reply_to_id = reply\n log.info('Sending a message to thread %s', repr(thread))\n return self.fbchat_client.send(\n message,\n thread_id=thread.id_,\n thread_type=thread.type_\n )", "def get_messages_to_me(self, day):\n try:\n return self.messages_to_me[day]\n except KeyError:\n return []", "def update_thread(thread_id, credentials, is_final_job):\r\n # get number of replies for this thread\r\n service = build(\r\n 'gmail', 'v1', \r\n http=credentials.authorize(http = httplib2.Http())\r\n )\r\n try:\r\n thread = service.users().threads().get(userId=\"me\", id=thread_id).execute()\r\n messages = thread['messages']\r\n thread_id = thread['id']\r\n print(f'thread id: {thread_id} - number of messages in this thread: {len(messages)}')\r\n if is_replied_to(thread):\r\n set_replied_to(thread)\r\n if is_final_job:\r\n complete_thread_job(get_current_job())\r\n except (errors.HttpError, errors.Error) as e:\r\n print('ThreadHelper: An error occurred: %s' % e)", "def run_model(self, chat: Tuple[str, str]) -> Optional[DialogueObject]:\n\n if chat[1] == \"ipdb\":\n ipdb.set_trace()\n\n if len(self.dialogue_stack) > 0 and self.dialogue_stack[-1].awaiting_response:\n return None\n\n # chat is a single line command\n speaker, chatstr = chat\n preprocessed_chatstrs = preprocess.preprocess_chat(chatstr)\n\n # Push appropriate DialogueObjects to stack if incomign chat\n # is one of the scripted ones\n if any([chat in self.botCapabilityQuery for chat in preprocessed_chatstrs]):\n return BotCapabilities(**self.dialogue_object_parameters)\n if any([chat in self.botGreetings for chat in preprocessed_chatstrs]):\n return BotGreet(**self.dialogue_object_parameters)\n if 
any([\"debug_remove\" in chat for chat in preprocessed_chatstrs]):\n return BotVisionDebug(**self.dialogue_object_parameters)\n\n # don't use preprocess for ttad, done in the model code\n action_dict = self.ttad(s=chatstr, model=self.ttad_model)\n return self.handle_action_dict(speaker, action_dict, preprocessed_chatstrs[0])", "def get_focus_mode_message(self, focus_mode_name):\n for focus_mode in self.focus_modes:\n if focus_mode['modeName'] == focus_mode_name:\n return focus_mode['message']", "def get_messages(self, channel_name=None, channel_id=None, skip_non_user=False):\n if not channel_id:\n channel_id = self.find_channel_id(channel_name)\n response = self.client.api_call(\n f'conversations.history?channel={channel_id}'\n )\n assert response['ok']\n\n messages = []\n\n for message in response['messages']:\n if skip_non_user and 'subtype' in message and message['subtype'] in cfg.SUBTYPES:\n continue\n\n thread_response = self.client.api_call(\n f'conversations.replies?'\n f'channel={channel_id}&'\n f'ts={message[\"ts\"]}'\n )\n assert thread_response['ok']\n messages.extend(thread_response['messages'])\n return messages", "def get_reply_to():\n local,domain = get_base_submission_message_address().split('@')\n while True:\n rand = base64.urlsafe_b64encode(os.urandom(12))\n address = \"{}+{}@{}\".format(local,rand,domain)\n q = Message.objects.filter(reply_to=address)\n if not q:\n return address", "def get_threadname():\n cur_thread = threading.current_thread()\n return cur_thread.name", "def message_handler(self, msg):\n thread = threading.Thread(target=self.answer, args=(msg,))\n thread.start()\n return thread", "def get_message_replies(self, mid):\n pass", "def reply_to(self):\n return self.getattr('reply_to')" ]
[ "0.65898234", "0.6572362", "0.6492414", "0.64143395", "0.6396454", "0.60996646", "0.60811704", "0.6057928", "0.58519685", "0.57067835", "0.5651671", "0.5620204", "0.55983543", "0.5575817", "0.55702025", "0.55671215", "0.55500007", "0.5483425", "0.54719275", "0.5438706", "0.541879", "0.541879", "0.54110426", "0.5393482", "0.5367528", "0.5366646", "0.5338647", "0.53279895", "0.53240323", "0.5323961", "0.53214407", "0.5277326", "0.52737975", "0.5263668", "0.52538997", "0.5211475", "0.5199872", "0.51868457", "0.5179203", "0.51717544", "0.5167352", "0.5144155", "0.51374346", "0.5114051", "0.5108729", "0.5096874", "0.5094266", "0.508599", "0.5068389", "0.5044556", "0.50256044", "0.50061274", "0.500077", "0.49988037", "0.49961698", "0.49914157", "0.49647403", "0.49612337", "0.49533638", "0.49488094", "0.4946796", "0.49467283", "0.4931009", "0.4926167", "0.48979348", "0.48936722", "0.48930198", "0.48770133", "0.48629537", "0.48588935", "0.48578876", "0.48526913", "0.4840431", "0.48154628", "0.48088562", "0.47990015", "0.47988954", "0.478414", "0.4778523", "0.47778445", "0.47744656", "0.47730243", "0.47722048", "0.4767045", "0.47610134", "0.47497174", "0.47430438", "0.47421035", "0.474162", "0.47382665", "0.47307846", "0.47307208", "0.4724655", "0.47201884", "0.47189716", "0.47156423", "0.47149503", "0.47136006", "0.47120547", "0.47060314" ]
0.8358802
0
Subscribes specified instance onto channel
def subscribe(self, inst):
    if inst not in self._subscribers:
        self._subscribers.append(inst)
        vprint("{} is subscribed to {}".format(inst.name, self.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(self, channel, **kwargs):\n pass", "def subscribe(self, inst, channel):\r\n if channel not in self._channels:\r\n self._channels[channel] = TalkChannel(channel, print_messages=self.verbose, timeref=self._timeref)\r\n self._channels[channel].subscribe(inst)", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver, updateInterval=None):", "def subscribe(self, channel: str) -> None:\n print(f'{self._name} starts subscribing to channel-[{channel}]')\n self._server.register(self, channel)", "def subscribe(receiver, updateInterval=10):", "def subscribe(self, subject):\n pass", "def subscribe(receiver, catchup):", "def subscribe(self, channels: typing.Iterable, listener: types.MethodType):\n raise TypeError(\"{} - subscribe not implemented!\")", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def _subscribe(self, channel, callback):\n local_subs = self._sub_cbs.get(channel, None)\n if local_subs is None:\n local_subs = {callback}\n self._sub_cbs[channel]= local_subs\n self._rc.publish(self._sub_cmd_q, 'subscribe:' + channel)\n else:\n local_subs.add(callback)", "def subscribeConsumer(consumer):", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def subscribe(actor_id, worker_ch):\n actor_ch = ActorMsgChannel(actor_id)\n t = threading.Thread(target=process_worker_ch, args=(worker_ch, actor_id, actor_ch))\n t.start()\n print(\"Worker subscribing to actor channel...\")\n while keep_running:\n try:\n msg = actor_ch.get(timeout=2)\n except channelpy.ChannelTimeoutException:\n continue\n print(\"Received message {}. Starting actor container...\".format(str(msg)))\n try:\n stats, logs = execute_actor(image, msg['msg'])\n except DockerStartContainerError as e:\n print(\"Got DockerStartContainerError: {}\".format(str(e)))\n Actor.set_status(actor_id, ERROR)\n continue\n # add the execution to the actor store\n print(\"Actor container finished successfully. 
Got stats object:{}\".format(str(stats)))\n exc_id = Execution.add_execution(actor_id, stats)\n Execution.set_logs(exc_id, logs)", "async def _subscribe_to_channels(self, ws: WSAssistant):\n try:\n # BitMart WebSocket API currently offers only spot/user/order private channel.\n for trading_pair in self._trading_pairs:\n ws_message: WSRequest = WSRequest({\n \"op\": \"subscribe\",\n \"args\": [f\"spot/user/order:{bitmart_utils.convert_to_exchange_trading_pair(trading_pair)}\"]\n })\n await ws.send(ws_message)\n\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Error occured during subscribing to Bitmart private channels.\", exc_info=True)\n raise", "def subscribe(self, sub, chan, auth=\"\", cipher=\"\", use_ssl=False):\r\n self.sub = sub\r\n self.chan = chan\r\n self.auth = auth\r\n self.cipher = cipher\r\n self.use_ssl = use_ssl\r\n\r\n # force disconnect of currently active longpoll.\r\n self.hup()", "def subscribe(self, callback):\n self.channel.basic_consume(callback, queue=self.queue_name)\n self.channel.start_consuming()", "def subscribe(self, client, api_key, channel):\n if channel not in self.clients:\n return False\n pair = (client, api_key)\n if pair in self.clients[channel]:\n return False\n\n self.clients[channel].append(pair)\n return True", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def subscribe(self, client, channel_id):\n logger.access('-- SlimPatternSubscriberManager subscribe, channel_id: %s', channel_id)\n\n need_subscribe = False\n channel_id = str(channel_id)\n\n clients = self.clients.get(channel_id, None)\n if not clients:\n clients = set()\n self.clients[channel_id] = clients\n\n need_subscribe = True\n\n clients.add(client)\n\n if need_subscribe:\n # this function return None\n self.subscriber.psubscribe(channel_id)\n logger.debug('SlimSubscriberManger need subscribe')\n return False\n\n elif channel_id in self._subscribed_channels:\n # the channel has been subscribed\n logger.debug('SlimSubscriberManager have subscribed')\n return True\n else:\n logger.debug('SlimSubscriberManager return NONE!!!!!!!')", "def on_message(self, channel_id, message):\n logger.access('-- SlimPatternSubscriberManager subscribe, channel_id: %s, message: %s', channel_id, message)\n\n clients = None\n for key in self.clients.iterkeys():\n # redis 仅支持 glob-style 的正则\n if fnmatch.fnmatchcase(channel_id, key):\n clients = self.clients.get(key, None)\n break\n\n if clients is None:\n return\n\n bad_clients = []\n for client in clients:\n if client.is_alive():\n client.on_sub_notification(channel_id, message)\n else:\n bad_clients.append(client)\n\n for client in bad_clients:\n clients.remove(client)\n\n if not clients:\n del self.clients[channel_id]\n self.subscriber.punsubscribe(channel_id)", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def subscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/subscribe\", post_args=args)", "def subscribe(observer):", "def subscribe(observer):", "def channel(self):\n raise NotImplementedError", "def subscribe(self, chanel_name):\n name = 'subscribe'\n\n self._send_websocket_request(name, chanel_name)", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in 
device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "async def _sender(self, aredis):\n\n try:\n async for message in self.bus.listen(self.pattern):\n if message.source != \"redis\":\n logger.info(f\"bridge out {self.pattern}: {message}\")\n await aredis.publish(message.key, message.value)\n except asyncio.CancelledError:\n logger.info(f\"bridge out {self.pattern}: cancelled\")", "def __init__(self , channel_index):\n\n self.__channel = channel_index\n self.__observers = []", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def on_open_handler(self, instmt, ws):\n Logger.info(self.__class__.__name__, \"Instrument %s is subscribed in channel %s\" % \\\n (instmt.get_instmt_name(), instmt.get_exchange_name()))\n if not instmt.get_subscribed():\n Logger.info(self.__class__.__name__, 'order book string:{}'.format(self.api_socket.get_order_book_subscription_string(instmt)))\n Logger.info(self.__class__.__name__, 'trade string:{}'.format(self.api_socket.get_trades_subscription_string(instmt)))\n ws.send(self.api_socket.get_order_book_subscription_string(instmt))\n ws.send(self.api_socket.get_trades_subscription_string(instmt))\n instmt.set_subscribed(True)", "def subscribe(self, channel, event, consumer):\n\n if event not in self.task_methods[channel]:\n self.task_methods[channel][event] = []\n\n self.task_methods[channel][event].append(consumer)", "def subscribeToEvent(eventName,subscriber,msgInterface):", "def register(self, cli: Client, channel: str) -> None:\n subscribers = self._channels_to_subscribers.get(channel, [])\n subscribers.append(cli)\n self._channels_to_subscribers[channel] = subscribers", "def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()", "def event_subscribe(self, obj_ref):\n self.subscribers.append(obj_ref)", "def listen(self):\n self.channel.start_consuming()", "async def channel(self, ctx):\n pass", "def subscribe(self):\n with 
self._rabbit_connection.connection.channel() as channel:\n self._queue = rabbitpy.Queue(\n channel=channel,\n name=self._subscriber_name + \"_queue\",\n durable=True,\n message_ttl=5 * 24 * 60 * 60 * 1000 # 5 days\n )\n self._queue.declare()\n self._queue.bind(self._exchange, self._routing_key)\n\n self._consume()", "def on_connected(self):\n logger.info('connection to redis resumed')\n for chid in self.clients.iterkeys():\n self.subscriber.psubscribe(chid)", "def channel(self):\n\n self._channel = self._connection.channel()\n print(\"Channel opened...\")", "def subscribe(self):\n if hasattr(self.bus, \"signal_handler\"):\n self.bus.signal_handler.subscribe()\n if hasattr(self.bus, \"console_control_handler\"):\n self.bus.console_control_handler.subscribe()", "def start(self):\n if self._chan is not None:\n try:\n self._chan.start_consume()\n except ChannelError:\n log.info('Subscriber is already started')\n\n else:\n self.gl = spawn(self.listen)", "def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )", "def subscribe(self, subscriber):\n self.subscribers.append(subscriber)", "def subscribe_to_quorum_channel(self):\n from dallinger.experiment_server.sockets import chat_backend\n\n self.log(\"Bot subscribing to quorum channel.\")\n chat_backend.subscribe(self, \"quorum\")", "def subscriber(self, iTag, msgType, addr):\r\n return ROSSubscriber(self, iTag, msgType, addr)", "def join(self, channel, func=None):\n\n self._pubsub.subscribe(**{'cluster:%s' % channel: func\n if func is not None\n else self._handler})", "def subscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = user\n self.invited.pop(user)\n self.message_queue.append('_%s has joined the channel_' % user)\n self.log.info('%s subscribed to the broadcast.' 
% user)\n self.save_state()\n return 'You are now subscribed.'", "def channel(self, channel: int, /) -> \"TimerChannel\" | None:", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)", "def _create_subscriber(self, topic_name):\n if self._sub:\n self._sub.unregister()\n self._sub = rospy.Subscriber(topic_name, Image, self._image_callback)\n rospy.loginfo(\"Listening to %s -- spinning ..\" % self._sub.name)\n self._widget.setWindowTitle(\"Label plugin, listening to (%s)\" % self._sub.name)", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def channel(self):\n if not hasattr(self, '_channel'):\n self._channel = self.new_channel()\n return self._channel", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def subscribe(self):\n pubsub = self.redis_client.pubsub()\n pubsub.subscribe(self.message_channel)\n for item in pubsub.listen():\n if item.get(\"data\") not in (1, None):\n yield item", "async def subscribe(self, topic: str, callback: aiowamp.SubscriptionHandler, *,\n match_policy: aiowamp.MatchPolicy = None,\n node_key: str = None,\n options: aiowamp.WAMPDict = None) -> int:\n ...", "def subscribe_ticker(self, symbol, update_handler=None):\n pass", "def _subscribe(self, signal, reconnect=False):\n if reconnect:\n if signal not in self._downstream_reconnect:\n self._downstream_reconnect.append(signal)\n else:\n if signal not in self._downstream:\n self._downstream.append(signal)", "def __producer__(self):\n import time\n i = 0\n while True:\n self.publish( i )\n i += 1\n time.sleep(1)", "def compose(self, channel_name):\n return super().compose(subscription=channel_name)", "def compose(self, channel_name):\n return super().compose(subscription=channel_name)", "def __init__(self, r, handlers):\n\t\tthreading.Thread.__init__(self)\n\t\tself.redis = r\n\t\tself.pubSub = self.redis.pubsub()\n\t\tself.handlers = handlers\n\t\tchannels = []\n\t\tfor k, v in self.handlers.items():\n\t\t\tchannels.append(k)\n\t\tself.pubSub.subscribe(channels)\n\t\tlog.info(\"Subscribed to redis pubsub channels: {}\".format(channels))", "def publish(self, topic: Hashable, *args, **kwargs):\n for sub in self.subscribers[topic]:\n sub(*args, **kwargs)", "def run():\n listen_active_email_channel()", "def cbMqtt_on_subscribe(client, userdata, mid, granted_qos):\n # logger.debug('Subscribed to 
MQTT topic with message id %d', mid)\n pass", "def subscribe_to_ticks_publisher(topic):\n ConfigFile = \"../config/kuber.conf\"\n config = configparser.ConfigParser()\n config.read(ConfigFile)\n\n zmq_conf = config['ZMQ CONFIGURATION']\n publish_port = zmq_conf['publish_port']\n\n print(\"Subscribing to topic %s at %s\" % (topic, publish_port))\n sub = TopicSubscriber()\n\n try: \n sub.init(topic, publish_port)\n except Exception as e:\n print(\"\"\"\n Subscriber init failed: {}\n \"\"\".format(e))\n sys.exit(0)\n\n # Return the subscriber context.\n return sub", "def CreateSubscribeTransaction(self, dest, once=False):\n c = Subscribe(dest, self.node_id, once)\n self.connections.append((\"REACTIVE\", c))\n return c", "async def connected_callback(self):\n channels = []\n for ch in self._channels:\n if ch == \"orderbook\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"orderBook10\")\n channels.append(channel)\n if ch == \"trade\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"trade\")\n channels.append(channel)\n if ch == \"kline\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"tradeBin1m\")\n channels.append(channel)\n while channels:\n data = {\n \"op\": \"subscribe\",\n \"args\": channels[:10]\n }\n await self._ws.send(data)\n channels = channels[10:]\n logger.info(\"subscribe orderbook/trade/kline successfully.\", caller=self)", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. \"\"\"\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)\n\n # subscribe to personal channel and fire up the message handler\n self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})\n self._proc = Thread(target=pubsub_thread)\n self._proc.daemon = True\n self._proc.start()", "def subscribe(self) -> None:\n events = [\n HathorEvents.NETWORK_NEW_TX_ACCEPTED,\n HathorEvents.NETWORK_PEER_CONNECTING,\n HathorEvents.NETWORK_PEER_READY,\n HathorEvents.NETWORK_PEER_CONNECTED,\n HathorEvents.NETWORK_PEER_DISCONNECTED,\n HathorEvents.NETWORK_PEER_CONNECTION_FAILED\n ]\n\n for event in events:\n self.pubsub.subscribe(event, self.handle_publish)", "def subscribe(self, req: SubscribeRequest):\n tick = TickData(\n symbol=req.symbol,\n exchange=req.exchange,\n name=req.symbol,\n datetime=datetime.now(),\n gateway_name=self.gateway_name,\n )\n self.ticks[req.symbol] = tick", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n self.channel = self.connection.channel()", "def subscribe(self, subscription_type, callback):\n if subscription_type in self._subscriptions.keys():\n self._subscriptions[subscription_type].append(callback)", "async def subscribe(self, callback: Callable=None):\n LOGGER.info('Subscription added')\n await self._ros.send(self._subscribe_msg)", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def on_connect(client, interface, flags, rc):\n logger.info(\"Connected with result code \" + str(rc))\n for i in Const.sub_topics:\n client.subscribe(i)\n logger.info(\"Successfully subscribed to \" + i)", "def listen_channel_subscribe(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:\n return self._subscribe('channel.subscribe', '1', {'broadcaster_user_id': 
broadcaster_user_id}, callback)", "def subscribe(self, destination, id=None, *args, **kwargs):\n if not id:\n id = self.id\n\n self.connection.subscribe(destination, id=id)\n return self", "def new_match(sender, instance: Match, **kwargs):\n log.debug('in new_match signal')\n match = serializers.MatchFullSerializer(instance).data\n streamers = serializers.MatchStreamersSerializer(instance.streamers, many=True).data\n\n\n payload = {\n 'match': match,\n 'streamers': streamers\n }\n log.debug('sending new match notification')\n ws.send_notification('new_match', payload)", "def on_subscribe(self, client, userdata, mid, granted_qos):\n\t\tprint (\"[{}] Client subscribed to {}\".format(\n\t\t\tint(time.time()),\n\t\t\tself.topic\n\t\t))\n\t\t#the following lines are here and not in on_connect() only for printing purpose\n\t\tif not self.printed_sub:\n\t\t\tself.printed_sub = True\n\t\t\tself.subscribe(\"measure/people\")", "def subscribe(self, name, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'sub', 'id': cur_id, 'name': name, 'params': params})\n return cur_id", "def subscribe(self, name, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'sub', 'id': cur_id, 'name': name, 'params': params})\n return cur_id", "def is_subscribed(self, inst, channel):\r\n if channel not in self._channels:\r\n return False\r\n return inst in self._channels[channel].subscribers", "async def broadcast(self):\n with await self.redis as connection:\n await connection.execute_pubsub(\"subscribe\", self.channel)\n try:\n while True:\n room = await self.channel.get(encoding=\"utf-8\")\n await self.ws.send(message)\n except websockets.ConnectionClosed as e:\n print(f\"<ChatManager:broadcast>[error] {e}\")\n await self.connection_closed()", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def publish(self, channel, event, data):\n if not channel in self.task_methods or \\\n not event in self.task_methods[channel]:\n return\n\n data['event_name'] = event;\n\n for consumer in self.task_methods[channel][event]:\n consumer.consume(channel, event, data)", "def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % (ip, pub_port)\n self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)" ]
[ "0.73403704", "0.7077951", "0.6784527", "0.6784527", "0.6784527", "0.65946174", "0.65910834", "0.6586851", "0.63931453", "0.6311332", "0.62572837", "0.62503433", "0.62227", "0.61942494", "0.6113197", "0.6018034", "0.6016623", "0.5920134", "0.5876832", "0.587313", "0.58557457", "0.58343315", "0.5834188", "0.581642", "0.5762294", "0.57515115", "0.5746885", "0.5746885", "0.57207876", "0.57123524", "0.57108957", "0.57012373", "0.5698822", "0.56892216", "0.5675922", "0.5664578", "0.563443", "0.5627703", "0.5617762", "0.56155086", "0.5597741", "0.558044", "0.55764174", "0.5566162", "0.55625904", "0.5543507", "0.5510513", "0.5504804", "0.549548", "0.54946774", "0.54905933", "0.54835856", "0.548222", "0.54788923", "0.54720265", "0.5465096", "0.5457946", "0.5452836", "0.5449572", "0.5435437", "0.5397652", "0.5395463", "0.5391878", "0.53910613", "0.53902704", "0.53896093", "0.5386241", "0.53851837", "0.5384311", "0.5383783", "0.53828865", "0.53828865", "0.53814393", "0.5371482", "0.5370304", "0.5369596", "0.5361483", "0.53493005", "0.5342023", "0.53381026", "0.5331998", "0.5324587", "0.5308952", "0.53065395", "0.53007126", "0.5298689", "0.52936435", "0.52910614", "0.52809125", "0.5279545", "0.5268631", "0.52638394", "0.5263497", "0.525019", "0.525019", "0.5227927", "0.5207926", "0.5206741", "0.52066755", "0.5205117" ]
0.6719197
5
Unsubscribes specified instance from channel
def unsubscribe(self, inst):
        if inst in self._subscribers:
            self._subscribers.remove(inst)
            vprint("{} is unsubscribed from {}".format(inst.name, self.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unsubscribe(self, channel, update_handler=None):\n pass", "def unsubscribe(self, inst, channel):\r\n if channel not in self._channels:\r\n raise ValueError(\"Channel {} not exists!\".format(channel))\r\n self._channels[channel].unsubscribe(inst)\r\n return\r\n # TODO: ?delete channels if there is no subscribers\r\n # if len(self._channels[channel].subscribers) == 0:\r\n # del self._channels[channel]\r", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(self):\n pass # pragma: no cover", "def unsubscribe(self, destination, *args, **kwargs):", "def unsubscribe(self, subject):\n pass", "def part(self, channel):\n\n self._pubsub.unsubscribe('cluster:%s' % channel)", "def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def unsubscribe(self):\r\n self._unregister()", "def unsubscribe(observer):", "def unsubscribe(observer):", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def unsubscribe(cls,sender,receiver):\n cls._unsubscribe(id(sender),receiver)", "def on_close_handler(self, instmt, ws):\n Logger.info(self.__class__.__name__, \"Instrument %s is unsubscribed in channel %s\" % \\\n (instmt.get_instmt_name(), instmt.get_exchange_name()))\n instmt.set_subscribed(False)", "def unsubscribe(callback):\n if callback in _subscribers:\n del _subscribers[callback]", "def _unsubscribe(self, signal):\n while signal in self._downstream:\n self._downstream.remove(signal)\n while signal in self._downstream_reconnect:\n self._downstream_reconnect.remove(signal)", "def unsubscribeFromEvent(eventName,subscriber):", "def unsubscribe(self, client, channel):\n clients = self.clients.get(channel)\n if clients is None:\n return False\n index = None\n for i, pair in enumerate(clients):\n if pair[0] != client:\n continue\n index = i\n break\n if index is not None:\n del self.clients[channel][index]\n return True", "def __del__(self):\n self.unsubscribe()", "def unregister_publisher(self, hostname):", "async def unsubscribe(self):\n LOGGER.info('Subscription removed')\n await self._ros.send(self._unsubscribe_msg)", "def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)", "def unsubscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/unsubscribe\", post_args=args)", "def unregister(self):\n self._executor.unregister_publisher(self)", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. 
Hub proxy not connected.\"\n )", "def unsubscribe(self, chanel_name):\n name = 'unsubscribe'\n\n self._send_websocket_request(name, chanel_name)", "def unsubscribe(self, meta_type, callback):\n try:\n self.subscribers.get(meta_type, []).remove(callback)\n except ValueError:\n pass\n try:\n self.nackables.get(meta_type, []).remove(callback)\n except ValueError:\n pass", "def unsubscribe(self, event_handler):\n pass # pragma: no cover", "def unsubscribe(self):\n\n # Unsubscribe\n self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id) \n\n # Remove message queue\n self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))", "def cb_stop(self, update, context):\n\n print(f\"Unsubscribing chat_id '{update.message.chat_id}'\")\n try:\n self.clientChatIds.remove(update.message.chat_id)\n answer = \"You sucessfully unsubscribed.\"\n self.saveToFile(self.configFile)\n except KeyError:\n answer = \"You are not subscribed.\"\n\n update.message.reply_text(answer)", "def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' % user)\n self.save_state()\n return 'You are now unsubscribed.'", "def unsubscribe(self, callback: Callable) -> None:\n self.callbacks.discard(callback)", "def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()", "def stop_subscription(event):\n _LOGGER.info(\"Shutting down subscriptions\")\n hass.data[vera.VERA_CONTROLLER].stop()", "def disconnect_subscriber(reply_channel):\n try:\n send_disc_message_to_channel(reply_channel)\n except Exception as exc:\n print(str(exc))\n\n disconnect_in_subscribers(reply_channel)", "def unregisterEvent(eventName, publisher):", "def dropchan(channel):", "def unregister_factory(self, instance):\r\n to_remove = None\r\n for k, v in self._awaiting.items():\r\n if v[\"instance\"] == instance:\r\n to_remove = k\r\n break\r\n if to_remove is not None:\r\n del self._awaiting[to_remove]", "def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()", "async def unsubscribe(self, topic: str, subscription_id: int = None) -> None:\n ...", "def unsubscribe(self, item_name):\n self.subscribed = None", "def onUnsubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount -= 1\n\t\treturn True", "def unsubscribe(self):\n res = self._unsubscribe()\n if res is not None:\n self._subscribed = False\n return res", "def _unsubscribe(self):\n self.unsubscribe_date = now()\n self.unsubscribed = True\n self.subscribed = False", "def unsubscribe(self, client, channel_id):\n clients = self.clients.get(channel_id, None)\n if not clients:\n return\n\n if client in clients:\n clients.remove(client)\n\n if not clients:\n # no client subscribed on this channel...\n del self.clients[channel_id]\n\n if channel_id in self._subscribed_channels:\n # the channel maybe is under subscribing - ignore it\n self.subscriber.punsubscribe(channel_id)", "async def unlistened(self, value=None):\n pass", "def perform_destroy(self, instance):\n 
instance.subscription_set.filter(owner=self.request.user).delete()", "def unlisten(obj, name, func):\n _signals(obj, name).remove(func)", "def unregisterProducer():", "def drop_channel(self, channel):\n return self.clients.pop(channel, None)", "def _onUnsubscribe(self, client:mqtt.Client, userdata:Any, mid:int) -> None:\n\t\t# TODO doc, error check when not connected, not subscribed\n\t\tfor t in self.subscribedTopics.values():\n\t\t\tif t.mid == mid:\n\t\t\t\tdel self.subscribedTopics[t.topic]\n\t\t\t\tself.messageHandler and self.messageHandler.onUnsubscribed(self, t.topic)\n\t\t\t\tbreak", "def unsubscribe(self, event_type: typing.Type[typing.Any], callback: CallbackT[typing.Any]) -> None:", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def _async_untrack_subscription(self, subscription: Subscription) -> None:\n topic = subscription.topic\n try:\n if _is_simple_match(topic):\n simple_subscriptions = self._simple_subscriptions\n simple_subscriptions[topic].remove(subscription)\n if not simple_subscriptions[topic]:\n del simple_subscriptions[topic]\n else:\n self._wildcard_subscriptions.remove(subscription)\n except (KeyError, ValueError) as ex:\n raise HomeAssistantError(\"Can't remove subscription twice\") from ex", "def _remove_sub(sub):\n # counting publisher instance per topic name\n TopicBack.sub_instance_count[sub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return sub.unregister()", "def unsubscribe(self, namespace, unsub_strings=None):\n req = JSONRPCRequest('unsubscribe', [namespace, unsub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)", "def unsubscribe(self, topic):\n request = protos.RequestUnsubscribe(topic=topic)\n return self.stub.unsubscribe(request)", "def unsubscribe_symbol(self, symbol):\n try:\n self.symbol.pop(symbol, None)\n self.symbol_data.pop(symbol, None)\n except KeyError:\n print(\"Could not unsubscribe symbol {} as it was never subscribed.\".format(str(symbol)))", "def unsubscribe(self, id):\n self._signal_pool_uids.pop(id)\n self._signal_pool.unsubscribe(id)", "def unsubscribe_values(self, req):\n \n rospy.loginfo(\"Unsubscribing values for \" + str(req.component) +\" \" + str(req.field))\n \n resp = UnsubscribeValuesResponse()\n resp.success = False\n \n if (req.component, req.field, req.datatype) in self.publishers.keys():\n rospy.loginfo(\"Removing publisher thread for \" + str((req.component, req.field)) + \"...\")\n t = self.publishers[(req.component, req.field, req.datatype)]\n if t.running:\n t.stop()\n \n timeout = 0\n while t.running and timeout <=5:\n time.sleep(1) #waiting\n timeout += 1\n if not t.running:\n t.join()\n with self.lock: \n del self.publishers[req.component, req.field, req.datatype]\n resp.success = True\n self.set_max_rate()\n rospy.loginfo(\"..done!\")\n else:\n rospy.logerr(\"Something went wrong, publisher not removed\")\n else:\n rospy.loginfo(\"publisher does not exist, nothing to delete...\")\n return resp", "async def unregister_client(connection):\n if connection.uuid in connections:\n connections.pop(connection.uuid)\n messages_to_clients.pop(connection.uuid)\n\n await connection.notify_disconnected()", "def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()", "def 
unsubscribe(self, update, context):\n # remove or update to the sqlite table.\n chat = update.message.chat\n self.db_manager.remove_user(chat.id)\n self.logger.info(\n 'Username: %s and chat_id: %s unsubscribed to the list.' % (chat.username, chat.id)\n )\n update.message.reply_text('You have successfully unsubscribed the notifications forever.')", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def unsubscribe(self, subscription):\n request = Request(\n method='delete',\n endpoint='/streams/subcription/{}'.format(subscription)\n )\n\n def response_handler(resp):\n code = resp.status_code\n if resp.is_success:\n return 'OK'\n elif code == 403:\n raise ex.StreamPermissionError(resp, request)\n raise ex.StreamConnectionError(resp, request)\n\n return self._execute(request, response_handler)", "def unsubscribe(self, observer):\n self._observers.remove(observer)", "async def deregister(self, ctx:commands.Context):\r\n\r\n if await self.IsSpecialized(ctx.guild, ctx.channel.id):\r\n channels = await self.config.guild(ctx.guild).channels()\r\n t = channels.pop(str(ctx.channel.id))\r\n await self.config.guild(ctx.guild).channels.set(channels)\r\n await ctx.send(f'<#{ctx.channel.id}> is no longer a {t}')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> was never specialized!')", "def test_unsubscribe(self):\n dest = '/topic/dest'\n\n self.tm.subscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)\n subscription = self.conn.frames[0].headers.pop(\"subscription\", None)\n self.assertEqual(subscription, 0)\n self.assertEqual(self.conn.frames[0], f)\n\n self.tm.unsubscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)", "def unsubscribe(id, userId):\n db = core.connect()\n theUser = db[userId]\n if id in theUser[\"streams\"]:\n theUser[\"streams\"].remove(id)\n db[userId] = theUser", "def unsubscribe_callback(self, callback, sensor):\n if sensor in self._callbacks:\n self._callbacks[sensor].remove(callback)", "def listen_channel_unban(self, broadcaster_user_id: str, callback: CALLBACK_TYPE) -> str:\n return self._subscribe('channel.unban',\n '1',\n {'broadcaster_user_id': broadcaster_user_id},\n callback)", "def remove_channel(self, channel):\n self._channels.pop(channel.fileno, None)\n\n try:\n self._poller.remove(channel.fileno, channel._events)\n except (IOError, OSError):\n log.exception(\"Error while removing %r.\" % channel)", "def unsubscribe(self, timeframe):\n result = False\n self.lock()\n\n if timeframe is not None and isinstance(timeframe, (float, int)):\n timeframe = self.timeframes.get(timeframe)\n\n if timeframe in self._timeframe_streamers:\n self._timeframe_streamers[timeframe].unuse()\n if self._timeframe_streamers[timeframe].is_free():\n # delete if 0 subscribers\n del 
self._timeframe_streamers[timeframe]\n \n result = True\n\n self.unlock()\n return False", "def unsubscribe(self, instrument_ids, exchange_id=b''):\n pass", "def unplug(self):\n return signal_base_unplug(self.obj)", "def msg_unregister(channel, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(UNREGISTER, channel, \"\", version, order)", "def unsubscribe(self, observer, name=None):\n if name is None:\n name = 'default'\n if observer in self._observers:\n del self._observers[observer][name]", "async def unsubscribe(connection, message):\n from high_templar.hub import NotSubscribedException\n\n if 'requestId' not in message:\n return await connection.send({\n 'code': 'error',\n 'message': 'no-request-id'\n })\n\n for subscription in connection.app.hub.subscriptions[connection]:\n if subscription.request_id == message['requestId']:\n try:\n connection.app.hub.unsubscribe(subscription)\n await connection.send({ 'code': 'success' })\n except NotSubscribedException:\n await connection.send({\n 'code': 'error',\n 'message': 'not-subscribed'\n })\n break\n else:\n return await connection.send({\n 'code': 'error',\n 'message': 'not-subscribed'\n })", "def unregister(self, subject, observer):\r\n if subject in self.observers:\r\n self.observers[subject].remove(observer)", "def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]", "def unsubscribe(self, event_type, func):\n if func in self.event_subscribers[event_type]:\n kwargs = {event_type: func}\n self.unbind(**kwargs)\n self.event_subscribers[event_type].remove(func)", "def unsubscribe(cls, *, group=None):\n try:\n event_loop = asyncio.get_event_loop()\n except RuntimeError:\n pass\n else:\n if event_loop.is_running():\n return asyncio.create_task(cls.unsubscribe_async(group=group))\n\n return cls.unsubscribe_sync(group=group)", "async def test_removed_notifier(self):\n await self.cog._unsilence(self.text_channel)\n self.cog.notifier.remove_channel.assert_called_once_with(self.text_channel)", "def test_unsubscribe(self):\n self.service.clientConnected()\n\n unsubscribers = []\n self.service.subscribe(u'url', 1\n ).addCallback(lambda fn: unsubscribers.append(fn))\n self.service.subscribe(u'url', 2\n ).addCallback(lambda fn: unsubscribers.append(fn))\n\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)\n\n unsubscribers.pop()()\n self.service.unsubscribe(u'url')\n self.assertIn(u'url', pubsubClient.subscriptions)\n\n unsubscribers.pop()()\n self.service.unsubscribe(u'url')\n self.assertNotIn(u'url', pubsubClient.subscriptions)", "async def unwatch(self, ctx, channel: discord.TextChannel):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n if channel.id in channel_list:\r\n channel_list.remove(channel.id)\r\n else:\r\n return await ctx.send(\"Channel is not being watched.\")\r\n await self.config.guild(ctx.guild).watching.set(channel_list)\r\n await ctx.send(f\"{self.bot.get_channel(channel.id).mention} will not have bad gifs removed.\")", "def untag():\n form = TagSubscriptionForm(hidden_mode=True)\n if not form.validate_on_submit():\n abort(403)\n\n subscription = current_user.subscriptions.filter_by(\n channel_id=form.channel_id.data\n ).first_or_404()\n tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()\n\n results = subscription.untag(tag.id)\n response = {\"success\": results}\n return jsonify(response)", "def disconnect(self, cid):\n try:\n del self.observers[cid]\n except KeyError:\n pass", "def 
disconnect(self, cid):\n try:\n del self.observers[cid]\n except KeyError:\n pass", "def unsubscribe(self, request):\n email = self.cleaned_data.get('email')\n subscriber = Subscriber.objects.get(email=email, mailing_list=self.mailing_list)\n subscriber.unsubscribe(request)", "def _unregisterConnect(self, function):\n self._sig_connect.unsubscribe(function)", "def unassign_instance(InstanceId=None):\n pass", "def unsubscribe(self, destination, extra_headers=None):\n unsubscribe = frame.UnsubscribeFrame(destination, extra_headers=extra_headers)\n res = self.send_frame(unsubscribe)\n with self.subscription_lock:\n self.subscribed_destinations.pop(destination)\n return res", "def unsubscribe(self, event, callback, args = None):\n if {\"event\": event, \"callback\": callback, \"args\": args, }\\\n in self.events:\n self.events.remove({\"event\": event, \"callback\": callback,\\\n \"args\": args, })\n\n return True", "async def vote_unsetup(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is None:\n await ctx.send('This channel was never setup for votes.')\n return\n session.delete(old_channel)\n session.commit()\n await vote_clear(ctx)\n await ctx.send(f'{ctx.channel} no longer open for voting.')" ]
[ "0.77895993", "0.7697996", "0.7457277", "0.7457277", "0.7457277", "0.7457277", "0.7457277", "0.7024203", "0.6909685", "0.68771696", "0.6872388", "0.68624234", "0.6827826", "0.67210144", "0.67210144", "0.66898584", "0.66726786", "0.66592056", "0.6605039", "0.66029906", "0.65521884", "0.64625424", "0.637168", "0.6339691", "0.62770414", "0.6267145", "0.62387025", "0.62313795", "0.62076205", "0.62035555", "0.61874413", "0.61784023", "0.6141073", "0.6116446", "0.6074481", "0.60697013", "0.6067456", "0.60385185", "0.602845", "0.6025154", "0.6006976", "0.5993507", "0.59902257", "0.59783095", "0.59714323", "0.5931393", "0.5924771", "0.59168035", "0.5911004", "0.58945256", "0.588323", "0.5873121", "0.5869181", "0.5861232", "0.58531934", "0.58357555", "0.58224076", "0.58224076", "0.5809673", "0.57772315", "0.57729834", "0.57639617", "0.57616484", "0.57551277", "0.57547283", "0.57340884", "0.57259965", "0.57257193", "0.5710542", "0.5696679", "0.568866", "0.5681602", "0.56764793", "0.5674578", "0.5668622", "0.5666226", "0.5663832", "0.56625164", "0.5656963", "0.56556755", "0.56519747", "0.56490374", "0.56448793", "0.56437856", "0.5640768", "0.5617926", "0.5610011", "0.560962", "0.5601164", "0.5595963", "0.5588338", "0.5578563", "0.55693734", "0.55693734", "0.5555249", "0.55159956", "0.55142945", "0.5512938", "0.55112463", "0.5504461" ]
0.74092454
7
Sends message into thread
def send_message(self, context, message):
        if context.channel == "__void__":
            return
        if self._busy:
            self._queue.append((context, message))
            return
        thread = context.thread
        _msg = message
        message = message.serialize()
        self._busy = True
        if self._topics[thread] is None:
            assert not _msg.is_reply, "First message shouldn't be reply!\n" \
                " were told to send into {}:{} message {}".format(self.name, thread, message)
            self._topics[thread] = ' '.join(str(m) for m in message)
            first_message = True
        else:
            assert _msg.is_reply, "Messages besides first should be replies!\n" \
                " were told to send into {}:{} messaage {}".format(self.name, thread, message)
            first_message = False
        assert isinstance(thread, int) and 0 <= thread < len(self._threads), "Thread {} don't exists at channel {}!".\
            format(thread, self.name)
        if self.print_messages:
            if first_message:
                vprint("{}: Sending message {} to {}::{}".format(time.time() - self._timeref, message, self.name, thread))
            else:
                vprint("{}: Sending reply {} to {}::{}({})".format(time.time() - self._timeref, message, self.name, thread, self._topics[thread]))
        fail_idx = next(_mc)
        received_by = 0
        if self.gather_conversation:
            conv = [_msg.sender, "-->", None, message[2:], 0, 0]
        if not _msg.is_reply or self._threads[thread]["reply_to_tc"] is not True:
            for s in self._subscribers:
                if s.name == _msg.sender:
                    # Don't send message back to it's source
                    continue
                if s.name == self._threads[thread]["tc"].name \
                        and self._threads[thread]["reply_to_tc"] is not False:
                    # If s is topic caster and it would get reply - send it later (to avoid double sends)
                    continue
                if self.gather_conversation:
                    conv[-2] = time.time()
                idx = next(_mc)
                r = s.receive_message(context, message)
                if self.gather_conversation:
                    conv[-1] = time.time()
                if r not in (False, True):
                    self._busy = False
                assert r in (False, True), \
                    "{}: Reply from {} contains no result or value({}) not in (False, True)".format(
                        time.time() - self._timeref, s.name, r)
                if r:
                    received_by += 1
                if self.gather_conversation and (r or self.gather_all):
                    if r:
                        conv[1] = "-->"
                    else:
                        conv[1] = "-->x"
                    conv[2] = s.name
                    self._log_conv(thread, conv, idx)
        if self._threads[thread]["reply_to_tc"] is not False:
            idx = next(_mc)
            r = self._threads[thread]["tc"].receive_message(context, message)
            if self.gather_conversation:
                conv[-1] = time.time()
            if r not in (False, True):
                self._busy = False
            assert r in (False, True), \
                "{}: Reply from {} contains no result or value({}) not in (False, True)".format(
                    time.time() - self._timeref, self._threads[thread]["tc"].name, r)
            if r:
                received_by += 1
            if self.gather_conversation and (r or self.gather_all):
                if r:
                    conv[1] = "-->"
                else:
                    conv[1] = "-->x"
                conv[2] = self._threads[thread]["tc"].name
                self._log_conv(thread, conv, idx)
        if received_by < 1:
            if self.gather_conversation:
                conv[-1] = time.time()
            if self.print_messages:
                vprint("{}: Message {} to {}::{} had no effect".format(time.time() - self._timeref, message, self.name, thread))
            if self.gather_conversation:
                conv[1] = "-->x"
                conv[2] = None
                self._log_conv(thread, conv, fail_idx)
        self._busy = False
        if len(self._queue) > 0:
            queued = self._queue.pop(0)
            self.send_message(*queued)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send(self, message):", "def send_to_gui(self, message):\n message.on_thread_side()\n self.queue.put(message)\n self.sig.set()\n logger.debug(\"Message %r has been send to GUI\", message.message_id)", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def send_message(self, message):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send_msg(self, my_queue, my_msg):", "def send(self, message):\n pass", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "def send_message(self, message):\n self.send_message_queue.put(message)", "async def send(self):", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_loop(self, msg, executor=None):\n self.loop.run_in_executor(executor, msg.send)", "def __send_msg(self, msg):\n self.frame_nb += 1\n self.__send_frame(self.frame_nb, msg)", "def client(self,message):\n self.message = message\n self.run()", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def _send_msg(self, msg):\n self._kernel.comm.send(msg)", "def sendMessage(self, name, message):\n time.sleep(int(self.getOwnName()) * 0.05)\n self.getSocket(name).send_pyobj(message)", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send_message(self, message):\n self.client.queue.put(message)", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def __send(self):\r\n self.msgLock.acquire()\r\n if self.numMsg > 0:\r\n self.socket.send(self.msg.pop(0))\r\n self.numMsg -= 1\r\n self.msgLock.release()", "def _send_message(self, *args, **kwargs):\n with self.comm_lock:\n return super(FrontendComm, self)._send_message(*args, **kwargs)", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "def sendMessage(self, msg):\r\n binaries, msg = recursiveBinarySearch(msg)\r\n msg = json.dumps(msg)\r\n\r\n if isInIOThread():\r\n self._send(msg, binaries)\r\n else:\r\n self._connection.reactor.callFromThread(self._send, msg, binaries)", "def send(self, msg):\r\n self.msgLock.acquire()\r\n self.msg.append(msg)\r\n self.numMsg += 1\r\n self.msgLock.release()", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def send(self, msg):\n self.message('Me', msg)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def message_handler(self, msg):\n thread = 
threading.Thread(target=self.answer, args=(msg,))\n thread.start()\n return thread", "def send(self, event, message):\n pass", "def _start_send_to_queue(self):\n while True:\n message_to_send = str(self.send_message_queue.get())\n if self.verbose: print \"Sending\", message_to_send\n send_msg(self.TCPSock, message_to_send)\n # self.TCPSock.send(message_to_send)", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def pass_message_to_main_thread_fn():\n\n pass", "def sendMessage(self):\n print(\"sendMessage\")\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def sendMessage(self):\n print('sendMessage')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def send(self,msg):\n try:\n if self.mutexCmd.tryLock(100):\n self.cmd.append(msg)\n self.mutexCmd.unlock()\n #print(\"ADD TO QUEUE: {}\".format(msg))\n else:\n print(\"WARN: cmd not added to queue\")\n except Exception as e:\n print(\"ERROR:Serial:send:\",e)\n self.ConnexionError.emit(True)", "def msg(self, chan, msg):\n self._msg(chan, msg)", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, msg):\n self.__sock.send(msg)", "def send_msg(self):\n while True:\n msg = input()\n # Added to show logs clean at the first time\n # a conncetion send a message.\n if(self.flag):\n self.k = self.k + 1\n self.flag = False\n self.srvsock.send(bytes(msg, encoding='utf-8'))", "def send(self, message):\n self.sock.send(message)", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send(self, data):", "def send_message(self, message):\n self.outbox.put(message)\n if message.TYPE_STRING != \"ack\":\n self.awaiting_ack.put((message, time.time()))", "async def send_msg(self, message: str) -> None:\n await self.socket.sendall(message.encode())", "def send(self, message) -> None:\n raise NotImplementedError", "def sendMessage(self, message):\n message.counter = self.counter\n self.socket.send(message.tobytes())\n self.counter += 1", "def send_message(self, serial_message):\n #print(\"Sending message: %s\" % serial_message)\n self.sendString(serial_message)", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def msg(self, target, message):\n self.server.message_queue.put(('[email protected]', target, message))", "async def send_msg(self, msg):\n try:\n logging.info(\"Sending: %s\", msg)\n self.writer.write(msg.encode())\n await self.writer.drain()\n\n except Exception as e:\n logging.error(\"Command could not be encoded; %s\", e)", "def send_message(self, message):\n with self._current_full_update_lock:\n if message.message_type == ServerMessage.FullUpdate:\n self._current_full_update = message\n self.message_queue.put(message)", "def send(self, message, callback=None):\n assert isinstance(message, domintell.Message)\n self._write_queue.put_nowait((message, callback))", "def send(self, message, callback=None):\n assert isinstance(message, domintell.Message)\n 
self._write_queue.put_nowait((message, callback))", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def run(self):\n while True:\n msg = self.recv()", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "def send(self, ard: Arduino_functions.Arduino, write_msg_str):\n self.worker_send.queue.put((ard, write_msg_str))\n\n # Trigger processing the worker_send queue.\n self.worker_send.qwc.wakeAll()", "def send(self, response):\n self.mh.send_message(response)", "def _send_msg(self, message: bin, location: BaseWorker) -> bin:\n if self.message_pending_time > 0:\n if self.verbose:\n print(f\"pending time of {self.message_pending_time} seconds to send message...\")\n sleep(self.message_pending_time)\n\n return location._recv_msg(message)", "def send2(self, message):\n\n self.send(message)\n self.sync(message)", "def sendThread(clientsocket):\r\n global strdata\r\n while True:\r\n # print server_ready\r\n # msg=raw_input(\"\")\r\n # clientsocket.send(msg)\r\n\r\n if server_ready:\r\n print \"server ready\"\r\n root.destroy()\r\n gui1.call_guiboat(clientsocket)\r\n #clientsocket.send(json.dumps('grid'))\r\n #clientsocket.send(strdata)\r\n #print strdata\r\n break\r\n \r\n #while not(gui1.select_done):\r\n # pass\r\n #clientsocket.send(json.dumps('grid'))\r\n #clientsocket.send(json.dumps(gui1.boat_map))\r", "def send_message(self, cmd_id, message_type, status, message=None):\n pass", "def send_message(self, data):\n self.transport.write(data)", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def send_message_to_server(self, key, value):\n if self.from_kivy_queue is None:\n return\n self.from_kivy_queue.put((key, value))", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def send_message(self, message):\n if self.connected:\n self.send(\n json.dumps(message.request))", "def send(self, text):\n if text:\n self.text_queue.put(text)", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def send(self, data):\n pass", "def _send_message(self, path, arg_lst):\n self._client.send_message(path, arg_lst)", "def sendmessage(self):\n \n self.message.parentItem = self.rxtxcontroller.transmittable.rootItem\n self.message.can_id = self.idInput.toPlainText()\n self.message.dlc = self.lengthInput.value()\n self.message.cycle_time = self.cycleInput.toPlainText()\n self.message.time = int(round(time.time() * 1000))\n self.message.rxtx = \"TX\"\n self.message.count = 1\n self.message.data = self.dataInput.toPlainText()\n self.accept()", "def sendMessage(self,message):\n if message is not None: 
self.controlProtocol.sendMessage(message)", "def process_thread(self):", "def send (self, data):\n return self.sending.send(data)", "def send_message(self, message):\r\n if not self.is_connected():\r\n self.__root.after(self.WAIT_PERIOD, lambda: self.\r\n send_message(message))\r\n return\r\n self.__socket.send(str(message).encode())", "def send(self, sock, message):\r\n try:\r\n sock.send(message)\r\n except:\r\n ip = sock.getpeername()[0]\r\n thread_message = protocol.thread.disconnected(\r\n client=ip)\r\n self.logic_queue.put(thread_message)\r\n del self.sockets[ip]", "def send(self, msg, adb_info):\n with self._transport_lock:\n self._send(msg, adb_info)", "def send(self, text):\n log.msg('me %s' % (text))\n self.sendLine(text)", "def send_message(self, end_point):\n self.message_controller.send_message(end_point, self)", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "def send(event, msg=None):\n\n pyotherside.send(event, msg)", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send_message(data):\n if data is not None:\n logging.debug(data)\n queue.on_next(data)", "def _send_data(self):\n pass", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)" ]
[ "0.7251886", "0.7028535", "0.7008835", "0.70002615", "0.69917643", "0.69660854", "0.69660854", "0.69660854", "0.6932752", "0.69234616", "0.6904757", "0.68988985", "0.68747884", "0.68623", "0.68569106", "0.68312794", "0.6817026", "0.6795177", "0.6756574", "0.6755073", "0.67476225", "0.67410374", "0.6737127", "0.6710233", "0.6687435", "0.66812223", "0.66637164", "0.6646135", "0.66316897", "0.6616215", "0.66103256", "0.66077435", "0.6605404", "0.65977365", "0.6590007", "0.65881145", "0.65253586", "0.6525305", "0.65151906", "0.6514929", "0.650609", "0.6502778", "0.65010303", "0.64681065", "0.64625084", "0.64517134", "0.6437976", "0.64220893", "0.6415234", "0.6411415", "0.64047074", "0.63910526", "0.6384882", "0.63777864", "0.6373401", "0.63545585", "0.63524544", "0.63494945", "0.6346575", "0.6330155", "0.6323762", "0.63215977", "0.63197374", "0.63041365", "0.63041365", "0.630276", "0.629082", "0.6289225", "0.627949", "0.6256119", "0.62555236", "0.6246767", "0.62373465", "0.62212557", "0.621185", "0.6209329", "0.61988103", "0.6189369", "0.6187675", "0.61851436", "0.61844623", "0.6183823", "0.6173674", "0.6164812", "0.6164665", "0.61640334", "0.61640286", "0.61633915", "0.61618036", "0.6161399", "0.61519486", "0.6146034", "0.61330235", "0.6129019", "0.6128696", "0.61263263", "0.61207175", "0.611911", "0.61189413", "0.6113256" ]
0.6107267
100
Incoming message handler that can be overridden by derived classes
def _incoming_handler(self, context, message, fake_reply):
        return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle(self, message):", "def handle_message(self, message):", "def handle_message(self, msg):\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "def handleMessage(msg):", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def process_messages(self):\n pass", "def _process_msg(cls, msg):\n raise NotImplementedError", "def process(self, message: Message, **kwargs: Any) -> None:", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n # Handle a custom msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def message_handler(self, dest, source, message):\n pass", "def _process_message(self, obj):\n pass", "def handler(self, input_message: FSPluginMessageBase, context: FSContext) -> FSPluginOutput:\n raise NotImplementedError()", "def _handle_message(self, msg):\n self.event('message', msg)", "def handle_message(self, data, channel):\n pass", "def handle_message(self, validated_message: dict):\n self.logger.debug(f'Sensor received message {validated_message}')\n if (validated_message['messageType'] !=\n model.MessageTypes.Control.value):\n self.logger.debug(\n 'Sensor ignoring because messageType was not control'\n )\n return\n if validated_message['messageBody']['target'] != self.component_id:\n self.logger.debug(\n 'Sensor ignoring because not targeted at me'\n )\n return\n\n subtype = validated_message['messageSubtype']\n try:\n self.logger.debug(f'Dispatching message with subtype {subtype}')\n self.message_handler_table[subtype](validated_message)\n except KeyError:\n self.logger.warning(f'No handler for with subtype {subtype}')\n pass", "def received(self, message):\n raise NotImplementedError()", "def onMessage(self, message):\n raise NotImplementedError", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def handle(self, data):\n pass", "def _receive(self, what, address, **kwargs):\n\n print('_receive: please override me.')", "def on_message(data):\n pass", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def handle_msg(self, state_id, msg):\n pass", "def handle(self, message: Message) -> None:\n self.handled_message = message", "def receive(self, message):", "def on_receive(self, msg):\n raise NotImplementedError", "def process_message(self, msg, src):", "def 
receive_message(self, message):", "def __data_handler__(self, msg):\n print(msg)", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def handle(self, msg, options):\n raise NotImplementedError()", "def get_message_handler(self, taxii_message):\n raise NotImplementedError()", "def onMessage(self, payload, isBinary):", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def process(self, msg):\n raise NotImplemented", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(self._parse_message, string, protocol)\n d.addCallback(self._r_process_message, protocol)\n d.addCallbacks(callback=self._r_send_result, errback=self._r_send_error, 
callbackArgs=(protocol,), errbackArgs=(protocol,))", "def incoming(self, msg):\n hdr = msg.header\n\n # Signals:\n if hdr.message_type is MessageType.signal:\n key = (hdr.fields.get(HeaderFields.path, None),\n hdr.fields.get(HeaderFields.interface, None),\n hdr.fields.get(HeaderFields.member, None)\n )\n cb = self.signal_callbacks.get(key, None)\n if cb is not None:\n cb(msg.body)\n return\n\n # Method returns & errors\n reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)\n reply_handle = self.awaiting_reply.pop(reply_serial, None)\n if reply_handle is not None:\n if hdr.message_type is MessageType.method_return:\n reply_handle.set_result(msg.body)\n return\n elif hdr.message_type is MessageType.error:\n reply_handle.set_exception(DBusErrorResponse(msg))\n return\n\n if self.on_unhandled:\n self.on_unhandled(msg)", "def received_message(self, m):\n self.receiver.handle_message(m)", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def handle_read(self):\n pass", "def handle(self):\n self.raw_requestline = self.rfile.readline()\n if not self.parse_request(): # An error code has been sent, just exit\n return\n\n # next line is where we'd have expect a configuration key somehow\n handler = self.WebSocketWSGIHandler(\n self.rfile, self.wfile, self.get_stderr(), self.get_environ()\n )\n handler.request_handler = self # backpointer for logging\n handler.run(self.server.get_app())", "def apply_handler(self):\n tmp = self.event_type\n if hasattr(self, tmp):\n getattr(self, tmp)()\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)", "def handle_process(self, connection):\n client_address = connection.getpeername()\n self.HandlerClass(connection, client_address)", "def handle_recv(self,stream,msgs):\n pass", "def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "def messageReceived(self, message):\n raise NotImplementedError(self)", "def receive_message(self, context, message):\r\n pass", "def message_received_handler(pdu, **kwargs):\n\n logging.warning('Message received handler (Override me)')", "def receive(self):\n pass", "def handle(self, *args, **kwargs):\n raise NotImplementedError()", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in 
packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def receive(self, msg):\n pass", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if \"in_reply_to_status_id\" in data:\n status = Status.parse(None, data)\n return self.on_status(status)\n if \"delete\" in data:\n delete = data[\"delete\"][\"status\"]\n return self.on_delete(delete[\"id\"], delete[\"user_id\"])\n if \"disconnect\" in data:\n return self.on_disconnect_message(data[\"disconnect\"])\n if \"limit\" in data:\n return self.on_limit(data[\"limit\"][\"track\"])\n if \"scrub_geo\" in data:\n return self.on_scrub_geo(data[\"scrub_geo\"])\n if \"status_withheld\" in data:\n return self.on_status_withheld(data[\"status_withheld\"])\n if \"user_withheld\" in data:\n return self.on_user_withheld(data[\"user_withheld\"])\n if \"warning\" in data:\n return self.on_warning(data[\"warning\"])\n\n log.error(\"Received unknown message type: %s\", raw_data)", "def make_new_handler(self, *args, **kwargs):", "def handle_message(self, validated_message: dict):\n pass", "def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)", "def receive_message(self, message):\r\n return", "def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. 
Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply", "def handle(self, message: InternalMessage) -> None:\n if isinstance(message, TransactionMessage):\n self._handle_tx_message(message)\n elif isinstance(message, StateUpdateMessage):\n self._handle_state_update_message(message)", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def handle(self, msg, peer_protocol):\n msg_id = msg[0]\n if msg_id == 0:\n self._handle_handshake(msg, peer_protocol)\n elif msg_id == 1: #update\n print(msg, len(msg))\n self._handle_update(msg)", "def handle_message(self, msg: mqtt.MQTTMessage) -> None:\n payload = json.loads(msg.payload.decode(\"utf-8\"))\n logging.info(f\"Received a new message: {payload}\")\n if \"volume\" in payload:\n validate(payload, schema=self.volume_schema)\n self.volume = payload[\"volume\"]\n elif \"volumeCtrl\" in payload:\n validate(payload, schema=self.volume_ctrl_schema)\n self.volume_up() if payload[\"volumeCtrl\"] == \"+\" else self.volume_down()\n elif \"mute\" in payload:\n validate(payload, schema=self.mute_schema)\n self.mute = payload[\"mute\"]\n elif \"toggle\" in payload:\n validate(payload, schema=self.toggle_schema)\n self.toggle_mute() if payload[\"toggle\"] == \"mute\" else self.toggle_pause()\n elif \"ctrl\" in payload:\n validate(payload, schema=self.ctrl_schema)\n self.skip_forward() if payload[\"ctrl\"] == \">>\" else self.skip_backward()\n else:\n raise ValueError(f\"Cannot handle message: {payload}, not a valid command\")", "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def __processMsg(self, sock, msgData):\n\n pass", "def _handle_read(self):\n pass", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def obj_received(self, obj):\n\n # TODO do something like handler registry\n\n if isinstance(obj, pb.Ping):\n self.handle_ping(obj)\n\n elif isinstance(obj, pb.Pong):\n self.handle_pong(obj)\n\n elif isinstance(obj, pb.ACS):\n if self.factory.config.failure != 'omission':\n res = self.factory.acs.handle(obj, self.remote_vk)\n self.process_acs_res(res, obj)\n\n elif isinstance(obj, pb.TxReq):\n self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.TxResp):\n 
self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationReq):\n self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationResp):\n self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.SigWithRound):\n self.factory.tc_runner.handle_sig(obj, self.remote_vk)\n\n elif isinstance(obj, pb.CpBlock):\n self.factory.tc_runner.handle_cp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Cons):\n self.factory.tc_runner.handle_cons(obj, self.remote_vk)\n\n elif isinstance(obj, pb.AskCons):\n self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)\n\n # NOTE messages below are for testing, bracha/mo14 is normally handled by acs\n\n elif isinstance(obj, pb.Bracha):\n if self.factory.config.failure != 'omission':\n self.factory.bracha.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Mo14):\n if self.factory.config.failure != 'omission':\n self.factory.mo14.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Dummy):\n logging.info(\"NODE: got dummy message from {}\".format(b64encode(self.remote_vk)))\n\n else:\n raise AssertionError(\"invalid message type {}\".format(obj))\n\n self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()", "def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)", "async def handle(self, message: discord.Message):\n raise NotImplementedError()", "def handle(self, m):\n\n\t\tline = m.split(\" \")\n\n\t\tif line[0] == \"PING\":\n\t\t\tself(\"PONG\", line[1])\n\t\telif len(line) > 1 and line[1] == \"001\":\n\t\t\tself.callbacks[\"loggedin\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"JOIN\":\n\t\t\tself.callbacks[\"joined\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"PRIVMSG\":\n\t\t\tself.callbacks[\"messaged\"](self, *line)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def emit(self, message):", "def data_received(self, data):\n data = data.decode()\n signal = int(data[:3])\n msg = data[3:]\n\n\n handle_signal = { # dictionary matching signals the user can send to functions that handle them.\n SIG.MESSAGE : self.handle_message,\n SIG.LOGIN : self.log_in\n }\n handle_signal[signal](msg)", "def handle_incoming_message(obj, reply_channel):\n if int(obj[message_type_key]) == 0:\n try:\n sub_obj = create_subscriber_object(reply_channel, obj)\n subscribers[reply_channel.name] = sub_obj\n except ApiException as exc:\n send_save_to_channel(reply_channel, str(exc))\n\n elif int(obj[message_type_key]) == 1:\n disconnect_subscriber(reply_channel)\n\n print(\"incoming_msg_handled\")", "def _handle(self, *args, **options):\n return super()._handle(*args, **options)", "def connection_handler(self):\n\t\tline = yield self.read_line()\n\t\tyield 
self.sendall(line + \"\\r\\n\")", "def handleMessage(self, channels, sender, code, datagram):\n self.stateServer.handle(channels, sender, code, datagram)\n self.clientAgent.handle(channels, sender, code, datagram)\n self.databaseServer.handle(channels, sender, code, datagram)", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def message_callback(self, message):\n pass", "def handle_message(self, msg):\n Logger.debug(\"Slave: Trying to parse\")\n if MessageKeys.command_key in msg.fields:\n Logger.info(\"Slave: Message command: %s\", str(msg.get_command()))\n return self.messagehandler[msg.get_command()](self, msg)\n return self.handle_invalid_command(msg)", "def incoming(self, context, message, fake_reply=None):\r\n if message.interface != self._id:\r\n return False\r\n if message.is_reply:\r\n return False\r\n if message.method not in self._methods:\r\n eprint(\"{}:{} Unsupported method {}\".format(self._host.name, self._name, message.method))\r\n return False\r\n if self._map[message.method] is None:\r\n eprint(\"{}:{} Method {} is not implemented\".format(self._host.name, self._name, message.method))\r\n return False\r\n self._incoming_handler(context, message, fake_reply)", "def handle(self):", "def dataReceived(self, data):", "def responder():\n pass", "def justhandle(self, rawdata):\r\n\r\n return self.__handler(rawdata)", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def onMessageBegin(self, isBinary):" ]
[ "0.76674205", "0.7387368", "0.72166294", "0.7163365", "0.70100635", "0.6967958", "0.69566435", "0.67521715", "0.6731579", "0.6724816", "0.66986245", "0.6680195", "0.66316897", "0.6626075", "0.6598926", "0.65867203", "0.65795434", "0.65649223", "0.656297", "0.655957", "0.65346295", "0.6519268", "0.65009815", "0.64945406", "0.6487373", "0.64779574", "0.6471269", "0.6467606", "0.64604354", "0.6452455", "0.644372", "0.64310676", "0.64307016", "0.6409587", "0.6391526", "0.638903", "0.6388217", "0.63870555", "0.638447", "0.63798857", "0.63717407", "0.6371301", "0.6370273", "0.6365255", "0.63648474", "0.63598496", "0.63497794", "0.6346012", "0.6342151", "0.6318491", "0.6303954", "0.6298757", "0.6293669", "0.6290697", "0.62903976", "0.6288502", "0.6277934", "0.62648815", "0.6242389", "0.6212179", "0.6205848", "0.61893266", "0.61742383", "0.61681014", "0.61638767", "0.6161375", "0.61598736", "0.615349", "0.6152258", "0.61488146", "0.6147876", "0.6142953", "0.6142328", "0.6139435", "0.61354846", "0.61325973", "0.6110005", "0.6107281", "0.6107281", "0.6100638", "0.6064988", "0.60646284", "0.6064158", "0.60635465", "0.6047867", "0.6047406", "0.6030377", "0.60278904", "0.6026217", "0.60201603", "0.6014225", "0.6012716", "0.6011387", "0.60066247", "0.5998799", "0.59977686", "0.5996929", "0.59956867", "0.59954816", "0.5994946" ]
0.6481269
25
Common method for handling incoming messages from the talk channel. For customization, please redefine _incoming_handler.
def incoming(self, context, message, fake_reply=None):
    if message.interface != self._id:
        return False
    if message.is_reply:
        return False
    if message.method not in self._methods:
        eprint("{}:{} Unsupported method {}".format(self._host.name, self._name, message.method))
        return False
    if self._map[message.method] is None:
        eprint("{}:{} Method {} is not implemented".format(self._host.name, self._name, message.method))
        return False
    self._incoming_handler(context, message, fake_reply)
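A minimal sketch (illustrative only, not part of this dataset record) of the customization the query asks for: redefining _incoming_handler in a subclass while keeping the incoming() guard logic above. BaseInterface, LoggingInterface and the constructor signature are assumed names for illustration; only the attribute names (_id, _name, _methods, _map) are taken from the snippet.

class BaseInterface:
    # Assumed host class: mirrors the attributes used by incoming() above.
    def __init__(self, interface_id, name, methods):
        self._id = interface_id
        self._name = name
        self._methods = set(methods)
        self._map = {m: None for m in methods}

    def _incoming_handler(self, context, message, fake_reply):
        # Default dispatch: forward the message payload to the mapped handler.
        return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)


class LoggingInterface(BaseInterface):
    def _incoming_handler(self, context, message, fake_reply):
        # Customization point: add logging (or validation, metrics, ...) before the default dispatch.
        print("dispatching {!r} on interface {!r}".format(message.method, self._name))
        return super()._incoming_handler(context, message, fake_reply)

With such an override in place, incoming() from the record above would route every accepted message through the subclass handler instead of the default dispatch.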
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_incoming_message(obj, reply_channel):\n if int(obj[message_type_key]) == 0:\n try:\n sub_obj = create_subscriber_object(reply_channel, obj)\n subscribers[reply_channel.name] = sub_obj\n except ApiException as exc:\n send_save_to_channel(reply_channel, str(exc))\n\n elif int(obj[message_type_key]) == 1:\n disconnect_subscriber(reply_channel)\n\n print(\"incoming_msg_handled\")", "def handle_message(self, data, channel):\n pass", "def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)", "def handle_message(self, msg):\n pass", "def handleMessage(msg):", "def handle_message(self, from_channel, to_channel, message_type, *args):\n logger.debug(\"{} received message: {} -> {} ({})\".format(\n self, from_channel, to_channel, message_type,\n ))\n # TODO: Validation\n if message_type == msgtypes.AI_CHANNEL_ASSIGNED:\n channel = args[0]\n self.handle_channel_assigned(channel)\n elif message_type == msgtypes.AI_CONNECTED:\n channel = args[0]\n self.handle_ai_connected(channel)\n elif message_type == msgtypes.CLIENT_CONNECTED:\n client_id = args[0]\n self.handle_client_connected(client_id)\n elif message_type == msgtypes.CLIENT_DISCONNECTED:\n client_id = args[0]\n self.handle_client_disconnected(client_id)\n elif message_type == msgtypes.DOBJECT_CREATED:\n dobject_id = args[0]\n token = args[1]\n self.handle_dobject_created(dobject_id, token)\n elif message_type == msgtypes.CREATE_DOBJECT_VIEW:\n dobject_id = args[0]\n dclass = args[1]\n fields = args[2]\n self.handle_create_dobject_view(dobject_id, dclass, fields)\n elif message_type == msgtypes.CREATE_AI_VIEW:\n dobject_id = args[0]\n dclass = args[1]\n fields = args[2]\n self.handle_create_ai_view(dobject_id, dclass, fields)\n elif message_type == msgtypes.FIELD_UPDATE:\n source = from_channel\n dobject_id = args[0]\n field_id = args[1]\n values = args[2]\n self.handle_field_update(source, dobject_id, field_id, values)\n else:\n # FIXME: Better to log it and drop it on the floor?\n raise NotImplementedError", "def handle(self, message):", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def handle_incoming(self, incoming: \"incoming_NT\"):\n # TODO find out what CClient::UserCommand does\n #\n # XXX should freeze this func till it has tests; already too unruly\n msg = {}\n if (self.config and incoming.source not in\n self.config.settings[\"authorized\"]):\n msg[\"warning\"] = (\"{} not listed in /settings/authorized\"\n .format(incoming.source))\n try:\n raise UserWarning(msg[\"warning\"])\n except Exception:\n self.print_traceback()\n if not self.debug:\n return\n if self.debug:\n assert self.config\n from datetime import datetime\n dto = datetime.fromtimestamp(incoming.timestamp/1000, self.tz)\n msg[\"incoming\"] = dict(\n incoming._asdict(),\n timestamp=dto.isoformat(timespec=\"milliseconds\")\n )\n from .ootil import OrderedPrettyPrinter as OrdPP\n self.logger.debug(\"\\n{}\".format(OrdPP(width=60).pformat(msg)))\n if \"warning\" in msg:\n return\n #\n retort = []\n if not hasattr(self, \"_session\"):\n self._session = {\"network\": None,\n \"focus\": None}\n session = self._session\n request = incoming.message\n target = None\n body = None\n #\n connected = self.get_networks(as_dict=True)\n net_advise = False\n if not connected:\n retort.append(\"Not connected to any networks\")\n elif request.startswith(\"/net\"):\n cand = request.replace(\"/net\", \"\", 1).strip()\n 
if cand in connected:\n session[\"network\"] = connected[cand]\n retort.append(f\"Network set to: {cand!r}\")\n else:\n net_advise = True\n elif session[\"network\"] is None:\n net_advise = True\n if net_advise:\n if len(connected) == 1:\n __, session[\"network\"] = connected.popitem()\n else:\n joined_nets = \", \".join(connected)\n retort.extend([\"Multiple IRC networks available:\",\n f\" {joined_nets}\",\n \"Select one with: /net <network>\"])\n if session[\"network\"]:\n netname = session['network'].GetName()\n retort.append(f\"Current network is {netname!r}\")\n #\n if request.startswith(\"/focus\"):\n focus = request.replace(\"/focus\", \"\", 1).strip()\n if not focus:\n if session[\"focus\"] is not None:\n retort.append(f\"Current focus is {session['focus']!r}\")\n else:\n retort.append(\"Focus not set\")\n else:\n # FIXME use FindChan here instead\n if (focus.startswith(\"#\") and session[\"network\"]\n and focus not in [c.GetName() for c in\n session[\"network\"].GetChans()]):\n chwarn = \"Warning: channel {!r} not joined in network {!r}\"\n retort.append(chwarn.format(focus,\n session[\"network\"].GetName()))\n session[\"focus\"] = focus\n retort.append(f\"Focus set to {session['focus']!r}\")\n elif request.startswith(\"/msg\"):\n tarbod = request.replace(\"/msg\", \"\", 1).strip()\n try:\n target, body = tarbod.split(None, 1)\n except ValueError:\n retort.append(\"Unable to determine /msg <target>\")\n target = body = None\n elif request.startswith(\"/help\"):\n retort += [\"Available commands:\",\n \" /net, /focus, /msg\"]\n elif not request.startswith(\"/\") and session[\"focus\"] is not None:\n target = session[\"focus\"]\n body = request\n elif not retort:\n if request.split()[0] in (\"/tail\", \"/snooze\"):\n retort.append(\"Sorry, coming soon\")\n else:\n retort.append(f\"Unable to interpret {request!r}\")\n #\n if retort:\n cb = self.make_generic_callback(lambda r: None)\n payload = (\"\\n\".join(retort), [], incoming.source)\n self.do_send(\"Signal\", \"sendMessage\", cb, payload)\n return\n if target and body is not None:\n session[\"network\"].PutIRC(f\"PRIVMSG {target} :{body}\")\n source = self.GetUser().GetNick()\n if session[\"network\"].GetClients():\n fmt = f\":{source}[email protected] PRIVMSG {target} :{{}}\"\n self.put_pretty(body, where=\"PutClient\", fmt=fmt,\n putters=session[\"network\"].GetClients())\n return\n #\n # Could join chan here, but better reserved for explicit command\n if target.startswith(\"#\"):\n found = session[\"network\"].FindChan(target)\n else:\n # CClient::HasSelfMessage\n # <http://defs.ircdocs.horse/info/selfmessages.html>\n found = session[\"network\"].FindQuery(target)\n if not found:\n found = session[\"network\"].AddQuery(target)\n if found:\n fmt = f\":{source}[email protected] PRIVMSG {target} :{{text}}\"\n # Could use AddBuffer but would have to make a CMessage object\n # beforehand (string form is deprecated)\n found.GetBuffer().AddLine(fmt, body)\n elif self.debug:\n self.logger.debug(\"Fell through; request: \"\n f\"{request}, session: {session}\")", "def handleMessage(self, channels, sender, code, datagram):\n self.stateServer.handle(channels, sender, code, datagram)\n self.clientAgent.handle(channels, sender, code, datagram)\n self.databaseServer.handle(channels, sender, code, datagram)", "def handle_message(self, message):", "def _handle_message(self, msg):\n self.event('message', msg)", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def handle_inbound_message():\n data = 
json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sms_call_me(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n elif \"media\" in data[0][\"message\"]:\n handle_inbound_media_mms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"], data[0][\"message\"][\"media\"])\n else:\n handle_inbound_sms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n else:\n print(data)\n return \"\"", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def processMessage(self, *args, **kwargs):\r\n pass", "def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. 
Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. \"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. 
Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"", "def incoming(self, msg):\n hdr = msg.header\n\n # Signals:\n if hdr.message_type is MessageType.signal:\n key = (hdr.fields.get(HeaderFields.path, None),\n hdr.fields.get(HeaderFields.interface, None),\n hdr.fields.get(HeaderFields.member, None)\n )\n cb = self.signal_callbacks.get(key, None)\n if cb is not None:\n cb(msg.body)\n return\n\n # Method returns & errors\n reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)\n reply_handle = self.awaiting_reply.pop(reply_serial, None)\n if reply_handle is not None:\n if hdr.message_type is MessageType.method_return:\n reply_handle.set_result(msg.body)\n return\n elif hdr.message_type is MessageType.error:\n reply_handle.set_exception(DBusErrorResponse(msg))\n return\n\n 
if self.on_unhandled:\n self.on_unhandled(msg)", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def handle_msg(self, state_id, msg):\n pass", "def handle_message(self, sender, message):\n self.logger.debug('handle_message(%r, %r)', sender, message.handler)\n\n message_handler = self.message_handlers.get(message.handler)\n if message_handler is None:\n self.logger.warning(\"sender=%r, No handler found: '%s'\",\n sender, message.handler)\n return\n\n message_handler(sender, message)", "def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n # Handle a custom msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)", "def _on_inbound_message(self, message):\n if message.channel.startswith(\"actuators/commands/\"):\n actuation = self.inbound_message_deserializer.deserialize_actuator_command(message)\n if actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_SET:\n self.actuation_handler.handle_actuation(actuation.reference, actuation.value)\n\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_STATUS:\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_UNKNOWN:\n print(\"Received unsupported actuation command\")\n\n else:\n print(\"Received unsupported message: \\n\" +\n message.channel + \"\\n\" + message.payload)", "def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n 
threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def handle_message(self, validated_message: dict):\n self.logger.debug(f'Sensor received message {validated_message}')\n if (validated_message['messageType'] !=\n model.MessageTypes.Control.value):\n self.logger.debug(\n 'Sensor ignoring because messageType was not control'\n )\n return\n if validated_message['messageBody']['target'] != self.component_id:\n self.logger.debug(\n 'Sensor ignoring because not targeted at me'\n )\n return\n\n subtype = validated_message['messageSubtype']\n try:\n self.logger.debug(f'Dispatching message with subtype {subtype}')\n self.message_handler_table[subtype](validated_message)\n except KeyError:\n self.logger.warning(f'No handler for with subtype {subtype}')\n pass", "def message_handler(self, dest, source, message):\n pass", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def handle(message):\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def process_incoming_message(self):\n\n # Get the webhook data\n post_data = request.json\n\n # 
Determine the Spark Room to send reply to\n room_id = post_data[\"data\"][\"roomId\"]\n\n # Get the details about the message that was sent.\n message_id = post_data[\"data\"][\"id\"]\n message = self.spark.messages.get(message_id)\n if self.DEBUG:\n sys.stderr.write(\"Message content:\" + \"\\n\")\n sys.stderr.write(str(message) + \"\\n\")\n\n # First make sure not processing a message from the bots\n # Needed to avoid the bot talking to itself\n # We check using IDs instead of emails since the email\n # of the bot could change while the bot is running\n # for example from [email protected] to [email protected]\n if message.personId in self.spark.people.me().id:\n if self.DEBUG:\n sys.stderr.write(\"Ignoring message from our self\" + \"\\n\")\n return \"\"\n\n # Log details on message\n sys.stderr.write(\"Message from: \" + message.personEmail + \"\\n\")\n\n # Find the command that was sent, if any\n command = \"\"\n for c in self.commands.items():\n if message.text.find(c[0]) != -1:\n command = c[0]\n sys.stderr.write(\"Found command: \" + command + \"\\n\")\n # If a command was found, stop looking for others\n break\n\n # Build the reply to the user\n reply = \"\"\n\n # Take action based on command\n # If no command found, send the default_action\n if command in [\"\"] and self.default_action:\n # noinspection PyCallingNonCallable\n reply = self.commands[self.default_action][\"callback\"](message)\n elif command in self.commands.keys():\n # noinspection PyCallingNonCallable\n reply = self.commands[command][\"callback\"](message)\n else:\n pass\n\n # allow command handlers to craft their own Spark message\n if reply and isinstance(reply, Response):\n reply.roomId = room_id\n reply = reply.as_dict()\n self.spark.messages.create(**reply)\n reply = \"ok\"\n elif reply:\n self.spark.messages.create(roomId=room_id, markdown=reply)\n return reply", "def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)", "def callback_message( self, conn, mess):\n\n jid = mess.getFrom()\n props = mess.getProperties()\n text = mess.getBody()\n username = self.get_sender_username(mess)\n\n if username not in self.users.keys() + self.invited.keys():\n self.log.info(\"Ignored message from %s.\" % username)\n return\n\n self.log.debug(\"*** props = %s\" % props)\n self.log.debug(\"*** jid = %s\" % jid)\n self.log.debug(\"*** username = %s\" % username)\n self.log.debug(\"*** type = %s\" % type)\n self.log.debug(\"*** text = %s\" % text)\n\n # Ignore messages from before we joined\n if xmpp.NS_DELAY in props: return\n\n # If a message format is not supported (eg. 
encrypted), txt will be None\n if not text: return\n\n # Remember the last-talked-in thread for replies\n self._JabberBot__threads[jid] = mess.getThread()\n\n if ' ' in text:\n command, args = text.split(' ', 1)\n else:\n command, args = text, ''\n cmd = command\n self.log.debug(\"*** cmd = %s\" % cmd)\n\n # parse operators, commands, etc and if not, dump the message to the chat\n if self.apply_operator(mess, args):\n return\n\n if self.replace_text(username, mess):\n return\n\n if self.commands.has_key(cmd) and cmd != 'help':\n try:\n reply = self.commands[cmd](mess, args)\n except Exception, e:\n reply = traceback.format_exc(e)\n self.log.exception('An error happened while processing a message (\"%s\") from %s: %s\"' % (text, jid, reply))\n else:\n # In private chat, it's okay for the bot to always respond.\n # In group chat, the bot should silently ignore commands it\n # doesn't understand or aren't handled by unknown_command().\n default_reply = 'Unknown command: \"%s\". Type \"help\" for available commands.<b>blubb!</b>' % cmd\n if type == \"groupchat\": default_reply = None\n reply = self.unknown_command( mess, cmd, args)\n if reply is None:\n reply = default_reply\n\n if reply:\n self.send_simple_reply(mess,reply)\n\n self.log_to_mini_log(username, text)", "def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):\n self.on_message_handler = handler", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def handle(self, msg):\n\n if msg.command == \"PING\":\n self._sendmsg(\"PONG :{}\".format(msg.args[0]))\n\n elif msg.command == \"JOIN\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has joined {}\".format(name, channel))\n\n elif msg.command == \"PART\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has left {}\".format(name, channel))\n\n elif msg.command == \"KICK\":\n name = msg.sendername\n channel = msg.args[0]\n victim = msg.args[1]\n print(\"{} has kicked {} from {}\".format(name, victim, channel))\n\n elif msg.command == \"QUIT\":\n name = msg.sendername\n print(\"{} has quit IRC\".format(name))\n\n elif msg.command == \"KILL\":\n name = msg.sendername\n victim = msg.args[0]\n print(\"{} has killed {}\".format(name, victim))\n\n elif msg.command == \"NICK\":\n name = msg.sendername\n newname = msg.args[0]\n print(\"{} is now known as {}\".format(name, newname))\n\n elif msg.command == \"MODE\":\n name = msg.sendername\n target = msg.args[0]\n mode = msg.args[1]\n print(\"{} has set the mode of {} to {}\".format(name, target, mode))\n\n elif msg.command == \"NOTICE\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}]! 
{}\".format(name, target, message))\n\n elif msg.command == \"PRIVMSG\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}] {}\".format(name, target, message))\n\n elif msg.command.isdigit():\n print(msg.args[-1])\n\n else:\n print(str(msg))\n\n hooks.handle(self, msg)", "def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply", "def on_message(data):\n pass", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def handle_message(self, msg: mqtt.MQTTMessage) -> None:\n payload = json.loads(msg.payload.decode(\"utf-8\"))\n logging.info(f\"Received a new message: {payload}\")\n if \"volume\" in payload:\n validate(payload, schema=self.volume_schema)\n self.volume = payload[\"volume\"]\n elif \"volumeCtrl\" in payload:\n validate(payload, schema=self.volume_ctrl_schema)\n self.volume_up() if payload[\"volumeCtrl\"] == \"+\" else self.volume_down()\n elif \"mute\" in payload:\n validate(payload, schema=self.mute_schema)\n self.mute = payload[\"mute\"]\n elif \"toggle\" in payload:\n validate(payload, schema=self.toggle_schema)\n self.toggle_mute() if payload[\"toggle\"] == \"mute\" else self.toggle_pause()\n elif \"ctrl\" in payload:\n validate(payload, schema=self.ctrl_schema)\n self.skip_forward() if payload[\"ctrl\"] == \">>\" else self.skip_backward()\n else:\n raise ValueError(f\"Cannot handle message: {payload}, not a valid command\")", "def handle_recv(self,stream,msgs):\n pass", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if \"in_reply_to_status_id\" in data:\n status = Status.parse(None, data)\n return self.on_status(status)\n if \"delete\" in data:\n delete = data[\"delete\"][\"status\"]\n return self.on_delete(delete[\"id\"], delete[\"user_id\"])\n if \"disconnect\" in data:\n return self.on_disconnect_message(data[\"disconnect\"])\n if \"limit\" in data:\n return self.on_limit(data[\"limit\"][\"track\"])\n if \"scrub_geo\" in data:\n return self.on_scrub_geo(data[\"scrub_geo\"])\n if \"status_withheld\" in data:\n return self.on_status_withheld(data[\"status_withheld\"])\n if \"user_withheld\" in data:\n return self.on_user_withheld(data[\"user_withheld\"])\n if \"warning\" in data:\n return 
self.on_warning(data[\"warning\"])\n\n log.error(\"Received unknown message type: %s\", raw_data)", "def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: 
'+response['status'])", "def message_received_handler(pdu, **kwargs):\n\n logging.warning('Message received handler (Override me)')", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def process_message(self, context, message):\r\n r = self._process_message_general(context, message)\r\n if r is True:\r\n return\r\n elif r is not False:\r\n self._interface.incoming(context, message, r)\r\n else:\r\n self._interface.incoming(context, message, None)", "def handle_send_messages():\n items = {k: v for k, v in subscribers.items() if v}\n for key in items:\n subscriber_obj = items[key]\n sim_id = get_sim_id(subscriber_obj)\n if sim_id and type(sim_id) is int:\n frame_messenger(subscriber_obj)\n elif sim_id and sim_id == \"live\":\n live_messenger(subscriber_obj)", "def pm_handler(self, msg):\n\t\tif str(msg['from']).split('/')[0] == self.boundjid.bare:\n\t\t\tself.recipient = str(msg['to']).split('/')[0]\n\t\telse:\n\t\t\tself.recipient = str(msg['from']).split('/')[0]\n\t\t# For some reason carbons sent by you come twice (from gajim at least)\n\t\tif self.user().last_msg == msg:\n\t\t\treturn\n\t\tif msg['body'][0] == '!':\n\t\t\tself.parse(msg)\n\t\telif msg['body'].split()[0].lower() in self.qwords \\\n\t\t\t\tor msg['body'][-1] == '?' \\\n\t\t\t\tor self.user().force[str(msg['from']).split('/')[0]]:\n\t\t\tself.assist(msg)\n\t\tself.user().last_msg = msg", "def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def process_messages(self):\n pass", "def handle_incoming_data(self, connection, data):\n if data == 'quit':\n self.close_connection(connection)\n elif isinstance(data, tuple) and data[0] in ('login', 'register'):\n if data[1] in self.connections.values():\n self.send(connection, self.auth.already_logged())\n else:\n auth_response = self.auth.identify_user(*data)\n if auth_response['flag']:\n self.connections[connection] = data[1]\n self.send(connection, auth_response)\n elif isinstance(data, str) and data.startswith('@'):\n self.route(connection, data)\n else:\n self.broadcast(connection, data)", "def MessageHandlerMethod(**kwargs):\n data: dict = kwargs['data']\n bus: AbstractPikaBus = kwargs['bus']\n payload: dict = kwargs['payload']\n print(payload)\n if payload['reply']:\n payload['reply'] = False\n bus.Reply(payload=payload)", "def handle_process(self, connection):\n client_address = connection.getpeername()\n self.HandlerClass(connection, client_address)", "def process_message(self, message):\n processors = {\n \"^org.chicago.cta.stations.\": self._handle_station,\n \"^org.chicago.cta.arrivals.\": self._handle_arrival,\n \"org.chicago.cta.turnstiles\": self._handle_turnstiles\n }\n processor = processors.get(message.topic, False)\n if processor:\n processor(message)\n else:\n logger.debug(\n \"unable to find handler for message from topic %s\", message.topic\n )", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n 
print(f\"Handled message {message}\")", "def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")", "def sample_handler(controller, msg, pkt):\n pass", "def handle(self, msg, peer_protocol):\n msg_id = msg[0]\n if msg_id == 0:\n self._handle_handshake(msg, peer_protocol)\n elif msg_id == 1: #update\n print(msg, len(msg))\n self._handle_update(msg)", "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def handle(self, talk_action):\n target = talk_action.target\n action = talk_action.action\n logging.debug(\n \"Target: %s. Action: %s. Connections: %s\" % (target, action, len(self.tcp_clients))\n )\n if action == \"INITIATE\":\n logging.debug(\n \">>> Starting VoIP conversation for '%s'\" % self.session_key\n )\n self.voip()\n elif action == \"DENY\":\n for client in self.tcp_clients:\n if client is not talk_action.instigator:\n client.server.queue_message(\n \"TALK %s: DENIED\" % self.session_key,\n client.sock\n )\n elif action == \"ACCEPT\":\n for client in self.tcp_clients:\n if client is not talk_action.instigator:\n client.server.queue_message(\n \"TALK %s: ACCEPTED\" % self.session_key,\n client.sock\n )", "def receive_message(self, context, message):\r\n pass", "def _on_message_handler(client, callback_dict, message):\n # If the message topic is in the subscribed list, handle it\n if message.topic in callback_dict:\n callback_dict[message.topic](message)", "def handleIncomingMessage(self, resourceEnum: ResourceNameEnum, msg: str) -> bool:\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The handleIncomingMessage method is being called\")\n\t\t# Use the DataUtil class to convert the msg content (which should be JSON) to an ActuatorData instance\n\t\tad = DataUtil.jsonToActuatorData(self, msg)\n\t\tself._handleIncomingDataAnalysis(msg)", "def onMessage(self, payload, isBinary):", "def process_message(self, msg, src):", "def process_chat_event(\n self, chat: Chat, event: VkBotMessageEventModel\n ) -> ResponceHandler:\n\n if self.__is_command_event(event):\n return self.command_handler.handling(event.message.text, chat)\n elif self.__is_module_event(event) and chat.chat_settings.enabled:\n return self.module_handler.handling(event.message.text, chat)\n elif chat.chat_settings.enabled:\n return self.message_handler.handling(event.message.text, chat)\n\n return ResponceHandler(send_to_chat_id=chat.chat_id, error=\"Not found matches\", is_matches_found=False)", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(self._parse_message, string, protocol)\n d.addCallback(self._r_process_message, protocol)\n d.addCallbacks(callback=self._r_send_result, errback=self._r_send_error, callbackArgs=(protocol,), 
errbackArgs=(protocol,))", "def handle_message(self, msg):\n Logger.debug(\"Slave: Trying to parse\")\n if MessageKeys.command_key in msg.fields:\n Logger.info(\"Slave: Message command: %s\", str(msg.get_command()))\n return self.messagehandler[msg.get_command()](self, msg)\n return self.handle_invalid_command(msg)", "def incoming(self,message):\n #Convert to Dictionary, Whatever the input is\n if isinstance(message, str):\n message = json.loads(message)\n elif isinstance(message, bytes):\n message = self.deserialize(message)\n\n op = message.get(\"op\")\n if op == \"publish\":\n message[\"msg\"] = self.decompress(message[\"topic\"],message.get(\"msg\"))\n message[\"topic\"] = self.remap_topic(message[\"topic\"]) \n elif op == \"advertise\":\n message[\"topic\"] = self.remap_topic(message[\"topic\"])\n elif op == \"advertise_service\" or op == \"service_response\":\n message[\"service\"] = self.remap_service(message[\"service\"])\n\n\n message = json.dumps(message)\n #--------\n #replace JSON Null values in float32 types with infinity datatype (changed according to the error for LaserScan values)\n message = message.replace(\"null\", \"Infinity\")\n #--------\n self._protocol.incoming(message)", "def apply_handler(self):\n tmp = self.event_type\n if hasattr(self, tmp):\n getattr(self, tmp)()\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)", "def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)", "def on_message(self, unused_channel, basic_deliver, properties, body):\n LOGGER.debug('Received message # %s from %s: %s',\n basic_deliver.delivery_tag, properties.app_id, body)\n\n if basic_deliver.routing_key in self._handlers:\n # future that will handle this request,\n # and a callback invoked to handle it\n future = self._wrap_handler(\n self._handlers[basic_deliver.routing_key],\n body)\n # functools.partial is used to invoke future by name\n # without creating a nested func\n callback = functools.partial(\n self._on_reply,\n reply_tag=basic_deliver.delivery_tag,\n answer_tag=properties.reply_to,\n correlation_id=properties.correlation_id)\n\n # get ioloop and exec it\n self._ioloop.add_future(future, callback)\n else:\n LOGGER.debug('Skipping non-handed message with request to %s' %\n basic_deliver.routing_key)", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "def handle(self, m):\n\n\t\tline = m.split(\" \")\n\n\t\tif line[0] == \"PING\":\n\t\t\tself(\"PONG\", line[1])\n\t\telif len(line) > 1 and line[1] == \"001\":\n\t\t\tself.callbacks[\"loggedin\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"JOIN\":\n\t\t\tself.callbacks[\"joined\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"PRIVMSG\":\n\t\t\tself.callbacks[\"messaged\"](self, *line)", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = 
str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def receive(channel):\n\n def callback(ch, method, properties, body):\n\n event = json.loads(body)\n event_info = event['event_info']\n event_type = event['type']\n success = True\n logger.info(f\"Received event {event}\")\n\n try:\n # Events coming from account microservice\n\n if event_type == USER_CREATED_EVENT:\n\n add_and_publish_event(\n GlobalPreferencesCreatedEvent(event['uuid'], event_info['id'], dict(\n vehicles=['bus', 'subway', 'train', 'tram', 'car', 'walking', 'bike', 'taxi',\n 'enjoy', 'mobike'],\n personal_vehicles=[])),\n PREFERENCES_CREATED)\n\n elif event_type == USER_DELETED_EVENT:\n\n add_and_publish_event(GlobalPreferencesDeletedEvent(event['uuid'], event_info['id']), PREFERENCES_DELETED)\n\n # Events generated in this microservice\n\n elif event_type == PREFERENCES_CREATED_EVENT:\n add_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_MODIFIED_EVENT:\n modify_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == PREFERENCES_DELETED_EVENT:\n delete_global_preferences(GlobalPreferences(**event_info))\n\n elif event_type == CALENDAR_CREATED_EVENT:\n add_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_MODIFIED_EVENT:\n modify_calendar(Calendar(**event_info))\n\n elif event_type == CALENDAR_DELETED_EVENT:\n delete_calendar(Calendar(**event_info))\n\n except SQLAlchemyError as e:\n\n # to deal with at least once delivery of rabbitmq and the create methods which are not idempotent\n if (event_type == USER_CREATED_EVENT or event_type == PREFERENCES_CREATED_EVENT or event_type == CALENDAR_CREATED_EVENT) \\\n and method.redelivered and isinstance(e, IntegrityError):\n logger.info(f'Not processed redelivered event {event}')\n\n else:\n logger.info(f\"Couldn't process event {event}\")\n success = False\n\n finally:\n if success: # ack only if the event has been processed\n ch.basic_ack(delivery_tag=method.delivery_tag)\n logger.info(f\"Processed and acked event {event}\")\n\n # channel.basic_qos(prefetch_count=1)\n channel.basic_consume(callback,\n queue=CALENDAR_QUEUE)\n\n logger.info(\"Started listening to events\")\n channel.start_consuming()", "def listen():\n if request.method == 'GET':\n print request\n return verify_webhook(request)\n\n if request.method == 'POST':\n payload = request.json\n event = payload['entry'][0]['messaging']\n for x in event:\n if is_user_message(x):\n text = x['message']['text']\n sender_id = x['sender']['id']\n respond(sender_id, text)\n\n return \"ok\"", "def messageHandler(self):\n\n while len(self.ReceiveMessageBuffer) > 0: # if message handler is called all received messages will be processed\n #print 'entered message handler of ID {0}'.format(self.CommID)\n msg = self.ReceiveMessageBuffer.popleft()\n self.MsgReceiveCount += 1\n self.MsgReceiveCount_interval += 1\n type = msg.getType()\n # for communication test:\n if type == 0: #System message\n print 'ID {0} has received msg {1} from ID {2}'.format(self.CommID, msg.getData(), msg.getIDSender())\n # send reply\n data = msg.getData()\n if data == 'ping':\n retval = self.sendMessage(msg.getIDSender(), 0, 'pong')\n return retval\n elif data == 'pong':\n retval = 
self.sendMessage(msg.getIDSender(), 0, 'ping')\n return retval\n # elif data[0] == 'system':\n # if(data[1] == 'startRONOPT'):\n # #save fluctuation curve of cluster\n # self.EFluctuationCurve = data[4]\n # #begin with local optimization (data[2] = fromTime, data[3]=toTime)\n # self.stateRONOPT = 0\n # for n in range(len(self.Neighbors)):\n # self.NeighborMessageRec[n] = 0\n # self.RemainderOfNeighborsOpt(data[2],data[3],1)\n #########################################################################################################\n\n elif type == 20: # pseudo tree generation message\n ret = self.messageHandler_PseudoTree(msg)\n if ret == -1:\n break\n\n elif type == 40: # load propagation message\n self.messageHandler_LoadProp(msg)\n\n elif type == 70:\n self.messageHandler_RemainderMulticast(msg) #remainder multicast optimization\n\n return 0", "def dispatch_incoming_message(self, event):\n\n device_id = get_device_id_from_event(event)\n\n body = event.body_as_json()\n\n if get_message_source_from_event(event) == \"twinChangeEvents\":\n body = body.get(Fields.PROPERTIES, {}).get(Fields.REPORTED, {})\n\n self.update_pairing(device_id, body)\n device_data = self.device_list.try_get(device_id)\n\n if not device_data:\n return\n\n if get_message_source_from_event(event) == \"twinChangeEvents\":\n self.incoming_twin_changes.put(event)\n else:\n cmd = body.get(Fields.CMD, None)\n received_operation_id = body.get(Fields.OPERATION_ID, None)\n received_run_id = body.get(Fields.RUN_ID, None)\n\n if cmd == Commands.PAIR_WITH_SERVICE_APP:\n # handled in the update_pairing() function above\n pass\n elif cmd == Commands.SEND_OPERATION_RESPONSE:\n logger.info(\n \"Received telemetry sendOperationResponse from {} with operationId {}\".format(\n device_id, received_operation_id,\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n if Flags.RETURN_EVENTHUB_MESSAGE_CONTENTS in body.get(Fields.FLAGS, []):\n payload = {\n Fields.CMD: Commands.OPERATION_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: received_run_id,\n Fields.OPERATION_ID: received_operation_id,\n Fields.EVENTHUB_MESSAGE_CONTENTS: {\n Fields.EVENTHUB_MESSAGE_BODY: body,\n Fields.EVENTHUB_CONTENT_TYPE: event.content_type,\n Fields.EVENTHUB_CORRELATION_ID: event.correlation_id,\n Fields.EVENTHUB_MESSAGE_ID: event.message_id,\n Fields.EVENTHUB_SYSTEM_PROPERTIES: convert_binary_dict_to_string_dict(\n event.system_properties\n ),\n Fields.EVENTHUB_PROPERTIES: convert_binary_dict_to_string_dict(\n event.properties\n ),\n },\n }\n message = json.dumps(payload)\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id,\n message=message,\n props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n else:\n self.outgoing_operation_response_queue.put(\n OperationResponse(device_id=device_id, operation_id=received_operation_id,)\n )\n\n if Flags.RESPOND_IMMEDIATELY in body.get(Fields.FLAGS, []):\n self.force_send_operation_response.set()\n\n elif cmd == Commands.SET_DESIRED_PROPS:\n desired = body.get(Fields.DESIRED_PROPERTIES, {})\n if desired:\n logger.info(\"Updating desired props: {}\".format(desired))\n self.registry_manager.update_twin(\n device_id, Twin(properties=TwinProperties(desired=desired)), \"*\"\n )\n\n elif cmd == Commands.INVOKE_METHOD:\n self.executor.submit(self.handle_method_invoke, device_data, event)\n # TODO: add_done_callback -- code to handle this is in the device app, needs to be done here too, so we can count exceptions in non-critical threads\n\n elif cmd == 
Commands.INVOKE_PNP_COMMAND:\n self.executor.submit(self.handle_pnp_command_invoke, device_data, event)\n # TODO: add_done_callback -- code to handle this is in the device app, needs to be done here too, so we can count exceptions in non-critical threads\n\n elif cmd == Commands.GET_PNP_PROPERTIES:\n logger.info(\n \"Getting digital twin for {} with operationid {}\".format(\n device_id, received_operation_id\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n\n twin = self.digital_twin_client.get_digital_twin(device_id)\n\n message = json.dumps(\n {\n Fields.CMD: Commands.OPERATION_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: received_run_id,\n Fields.OPERATION_ID: received_operation_id,\n Fields.PNP_PROPERTIES_CONTENTS: twin,\n }\n )\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id, message=message, props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n elif cmd == Commands.UPDATE_PNP_PROPERTIES:\n logger.info(\n \"Updating digital twin for {} with operationid {}\".format(\n device_id, received_operation_id\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n\n self.digital_twin_client.update_digital_twin(\n device_id, body[Fields.PNP_PROPERTIES_UPDATE_PATCH]\n )\n\n # TODO: send ack for all of these ops, include error if failure\n\n elif cmd == Commands.SEND_C2D:\n logger.info(\n \"Sending C2D to {} with operationId {}\".format(\n device_id, received_operation_id,\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n message = json.dumps(\n {\n Fields.CMD: Commands.C2D_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: received_run_id,\n Fields.OPERATION_ID: received_operation_id,\n Fields.TEST_C2D_PAYLOAD: body[Fields.TEST_C2D_PAYLOAD],\n }\n )\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id, message=message, props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n else:\n logger.info(\n \"Unknown command received from {}: {}\".format(device_id, body),\n extra=custom_props(device_id, device_data.run_id),\n )", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def consume(self, handler) -> None:\n pass # pragma: no cover", "def incoming():\n create_table()\n viber_request = viber.parse_request(request.get_data())\n # Defining type of the request and replying to it\n if isinstance(viber_request, ViberMessageRequest):\n # Passing any message from user to message handler in handlers.py\n user_message_handler(viber, viber_request)\n elif isinstance(viber_request, ViberSubscribedRequest):\n viber.send_messages(viber_request.user.id, [\n TextMessage(text=\"Спасибо за подписку!\")\n ])\n elif isinstance(viber_request, ViberFailedRequest):\n logger.warn(\"client failed receiving message. 
failure: {0}\"\n .format(viber_request))\n elif isinstance(viber_request, ViberConversationStartedRequest):\n # First touch, sending to user keyboard with phone sharing button\n keyboard = kb.SHARE_PHONE_KEYBOARD\n viber.send_messages(viber_request.user.id, [\n TextMessage(\n text=txt.GREETING,\n keyboard=keyboard,\n min_api_version=3)\n ]\n )\n return Response(status=200)", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def processItem(self, item):\n\t\tif item[\"type\"] == \"message\":\n\t\t\t# Process the message only if the channel has received a message\n\t\t\t# Decode the message\n\t\t\titem[\"channel\"] = item[\"channel\"].decode(\"utf-8\")\n\n\t\t\t# Make sure the handler exists\n\t\t\tif item[\"channel\"] in self.handlers:\n\t\t\t\tlog.info(\"Redis pubsub: {} <- {} \".format(item[\"channel\"], item[\"data\"]))\n\t\t\t\tif isinstance(self.handlers[item[\"channel\"]], generalPubSubHandler.generalPubSubHandler):\n\t\t\t\t\t# Handler class\n\t\t\t\t\tself.handlers[item[\"channel\"]].handle(item[\"data\"])\n\t\t\t\telse:\n\t\t\t\t\t# Function\n\t\t\t\t\tself.handlers[item[\"channel\"]](item[\"data\"])", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def _incoming_read(self, client, data, error):\n\n if error is not None:\n client.close()\n del self._incoming[client]\n return\n\n incoming = self._incoming[client]\n incoming.unpacker.feed(data)\n for req_id, message in incoming.unpacker:\n self._call_handler(\n partial(self._queue_response,\n client, req_id),\n self._call_interface.queue_call,\n message,\n )", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def on_receive(self, msg):\n raise NotImplementedError", "def handle_message(self, msg, status):\n\n body = ensure_unicode(msg.Body)\n chat_id = get_chat_id(msg.Chat)\n\n if len(body) == 0:\n return False\n\n for name, cmd in self.commands.items():\n if body == name:\n cmd(msg, chat_id)\n return True\n\n\n if self.troller_is_running.get(chat_id):\n response = self.alice.respond(body)\n if response:\n msg.Chat.SendMessage(response)\n return True\n else:\n return False\n else:\n return False", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, 
identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))", "def receive_message(self, message):", "def handle(self, msg, options):\n raise NotImplementedError()", "def register_handler(config):\n\n @respond_to(\".*\")\n def handle(message):\n \"\"\"Respond to every Slack message and dispatch to another handler based\n on the contents of the message.\n\n This duplicates a little bit of the work that slackbot does, but allows\n us to define handlers dynamically based on the job config.\n \"\"\"\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def onMessage(self, message):\n raise NotImplementedError", "def handle_message(self, data):\n message = Message.from_text(data)\n if message is not None:\n print(message.username, message.action, message.channel, message.content)\n self._callback(\"message\", message) # TODO: add additional callbacks", "def process(self, message: Message, **kwargs: Any) -> None:", "def handle(event, context):\n dynamodb = boto3.client('dynamodb')\n connection_id = event['requestContext']['connectionId']\n connection_ids = []\n paginator = dynamodb.get_paginator('scan')\n\n # Retrieve all connection_ids from the database\n for page in paginator.paginate(TableName=os.environ.get('CONNECTION_TABLE_NAME')):\n connection_ids.extend(page['Items'])\n\n endpoint_url = f\"https://{event['requestContext']['domainName']}/{event['requestContext']['stage']}\"\n apigatewaymanagementapi = boto3.client('apigatewaymanagementapi', endpoint_url=endpoint_url)\n\n msg_counter = dynamodb.get_item(TableName=os.environ.get('MSG_COUNTER_TABLE_NAME'), Key={'myid': {'S': 'counter'}})\n if not msg_counter:\n msg_counter = 0\n else:\n msg_counter = msg_counter['Item']['msgCount']['N']\n\n data = f\"Welcome to Simple Chat\\n\" \\\n f\"There are {len(connection_ids)} users connected.\\n\" \\\n f\"Total of {msg_counter} messages recorded as of today.\\n\\n\"\n\n messages = get_messages()\n data = data + '\\n'.join(messages)\n send_to_self(apigatewaymanagementapi, connection_id, data)\n\n response = dynamodb.get_item(\n TableName=os.environ.get('CONNECTION_TABLE_NAME'),\n Key={'connectionId': {'S': connection_id}}\n )\n data = f\"{response['Item']['username']['S']} has joined the chat room\"\n send_to_all(apigatewaymanagementapi, connection_ids, data)\n\n return {}" ]
[ "0.7198676", "0.70006275", "0.68786156", "0.6589068", "0.6574055", "0.6566164", "0.654098", "0.65259284", "0.6513632", "0.64729154", "0.64671236", "0.644941", "0.6425101", "0.64106405", "0.6365977", "0.63636965", "0.6342746", "0.6326662", "0.6321269", "0.6301516", "0.6211517", "0.61958945", "0.6183817", "0.61751306", "0.6158434", "0.615791", "0.6146266", "0.61234415", "0.6105492", "0.6103922", "0.6097392", "0.60886997", "0.60832053", "0.60825014", "0.60776305", "0.60725147", "0.6046746", "0.60239375", "0.60188615", "0.600968", "0.6000119", "0.5998437", "0.5996429", "0.5979537", "0.596835", "0.59546614", "0.5948462", "0.59463954", "0.59451485", "0.59439266", "0.59384084", "0.59332246", "0.59327286", "0.59263957", "0.59227026", "0.58975464", "0.58972675", "0.58967274", "0.589617", "0.5893914", "0.5874496", "0.5862108", "0.5854507", "0.5853563", "0.5851652", "0.5844436", "0.5836315", "0.5835235", "0.5832349", "0.58278006", "0.58250636", "0.5814339", "0.581131", "0.5806737", "0.5803786", "0.5795985", "0.5771478", "0.5758291", "0.57543826", "0.5753967", "0.57512236", "0.57424223", "0.5740197", "0.5737655", "0.573698", "0.57176805", "0.57121587", "0.5711473", "0.5704665", "0.57036287", "0.5702534", "0.57000345", "0.5695255", "0.56948614", "0.56888735", "0.56882167", "0.5685211", "0.56814814", "0.56811404", "0.5672526" ]
0.6243993
20
Prepares message for fake_next_op method
def fake_op_message(interface, reply, on_channel=None, on_message=None, after=None, execute=False, on_success=None, on_failure=None): assert isinstance(interface, str), "fake_op_info: interface should be a string" assert isinstance(reply, ProtocolReply), "fake_op_info: reply should be a ProtocolReply instance" options = {"reply": reply} if on_channel is not None: assert isinstance(on_channel, str), "fake_op_info: on_channel should be a string" options["on_channel"] = on_channel if on_message is not None: assert isinstance(on_message, PlatformMessage), "fake_op_info: on_message should be a PlatformMessage instance" options["on_message"] = on_message if after is not None: assert isinstance(after, int) and after >= 0, "fake_op_info: after should be a natural integer" options["after"] = after if execute is not None: assert isinstance(execute, bool) or execute in (1, 0) >= 0, "fake_op_info: execute should be a boolean " \ "or 0 or 1" options["execute"] = execute if on_success is None and on_failure is None: on_success = True on_failure = False if on_success is None and on_failure is False: on_success = True if on_failure is None and on_success is False: on_failure = True if on_success is True: assert isinstance(on_success, bool), "fake_op_info: on_success should be a boolean" options["on_success"] = on_success if on_failure is True: assert isinstance(on_failure, bool), "fake_op_info: on_failure should be a boolean" options["on_failure"] = on_failure else: options["on_failure"] = False return new_message(interface, "__testing__", "fake_next_op", options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fake_next_op(self, context, message, dry_run=False):\r\n if context.channel in self._fake_ops:\r\n channel = context.channel\r\n if len(self._fake_ops[channel]) > 0:\r\n if \"on_message\" not in self._fake_ops[channel][0] \\\r\n or self._fake_message_compare(self._fake_ops[channel][0][\"on_message\"], message):\r\n if \"after\" in self._fake_ops[channel][0] and self._fake_ops[channel][0][\"after\"] > 0:\r\n if dry_run:\r\n return False\r\n self._fake_ops[channel][0][\"after\"] -= 1\r\n return False\r\n if dry_run:\r\n return True\r\n instruction = self._fake_ops[channel].pop(0)\r\n if len(self._fake_ops[channel]) == 0:\r\n del self._fake_ops[channel]\r\n vprint(\"{}: faking reply\".format(self.name))\r\n reply = instruction[\"reply\"]\r\n if \"execute\" in instruction and instruction[\"execute\"] == True:\r\n result = {}\r\n if instruction[\"on_success\"]:\r\n result[\"on_success\"] = reply\r\n if instruction[\"on_failure\"]:\r\n result[\"on_failure\"] = reply\r\n return result\r\n if reply.success:\r\n self._worker.reply(context, PlatformMessage.success(reply.retval, reply.retval_name))\r\n else:\r\n self._worker.reply(context, PlatformMessage.failure(reply.state, reply.errcode))\r\n return True\r\n else:\r\n # TODO: Shouln't be here actually. Raise error!\r\n del self._fake_ops[channel]\r\n return False", "def _process_message_general(self, context, message):\r\n f = self._fake_next_op(context, message)\r\n\r\n if f is True:\r\n return True\r\n elif f is not False:\r\n return f\r\n elif message.method == \"__testing__\":\r\n self._general_testing(context, *message.args, **message.kwargs)\r\n return True\r\n else:\r\n return False", "def _pre_hook(self, msg: Message) -> Message:\n msg.add_route(self.name, self._id)\n\n expected_parts = self._get_expected_parts(msg)\n\n req_id = msg.envelope.request_id\n if expected_parts > 1:\n self._pending_msgs[req_id].append(msg)\n\n num_partial_requests = len(self._pending_msgs[req_id])\n\n if self.logger.debug_enabled:\n self._log_info_msg(\n msg,\n f'({num_partial_requests}/{expected_parts} parts)'\n if expected_parts > 1\n else '',\n )\n\n if expected_parts > 1 and expected_parts > num_partial_requests:\n # NOTE: reduce priority is higher than chain exception\n # otherwise a reducer will lose its function when earlier pods raise exception\n raise NoExplicitMessage\n\n if (\n msg.envelope.status.code == jina_pb2.StatusProto.ERROR\n and self.args.on_error_strategy >= OnErrorStrategy.SKIP_HANDLE\n ):\n raise ChainedPodException\n\n return msg", "def process_message(self, msg, src):", "def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])", "def nextinline(self, irc, msg, args):\n channel = self.registryValue('checkOpsInChannel')\n if channel == '':\n self.log.error('checkOpsInChannel not set!')\n return\n if channel not in irc.state.channels:\n self.log.warn('not in %s' % channel)\n return\n if msg.nick not in irc.state.channels[channel].ops:\n self.log.warn('denying access to non-chanop user %r' % msg.nick)\n return\n if len(self._queue) > 0:\n nick, notice = self._queue.pop(0)\n response = \"Next in line is %s\" % nick\n if notice is not None:\n response += \" with notice: %s\" % notice\n self._dump_queue()\n 
irc.reply(response)\n else:\n irc.reply(\"There's nobody queued up right now.\")", "def _add_message(self, message):\r\n self.result = self.result + message", "def __next__(self):\n return self.read_message()", "def next_operation(self):\n raise NotImplementedError", "def cmd(self, message):\n pass", "def _on_op_remark(self, msg):\r\n\r\n if \"success\" in msg and not msg[\"success\"]:\r\n if msg[\"message\"] == \"Invalid call\":\r\n self._on_invalid_call(msg)\r\n elif msg[\"message\"] == \"Order not found\":\r\n self._on_order_not_found(msg)\r\n elif msg[\"message\"] == \"Order amount is too low\":\r\n self._on_order_amount_too_low(msg)\r\n elif \"Too many orders placed\" in msg[\"message\"]:\r\n self._on_too_many_orders(msg)\r\n else:\r\n # we should log this, helps with debugging\r\n self.debug(msg)", "def skip(self):\n\tglobal message\n\tmessage=5", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def print_next(msg):\n print\n print \"-\" * 80\n print \"\\n\".join(textwrap.wrap(msg, width=80))\n print \"=\" * 80", "def process(self, msg):\n raise NotImplemented", "def testSendNextMessage(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(3)\n self.mgr.queueMsg(2)\n self.mgr.queueMsg(1)\n self.mgr.processMsgQueue()\n self.v.send_mavlink.assert_called_with(3)\n self.assertEqual( self.mgr.msgQueue.qsize(), 2)", "def _process_msg(cls, msg):\n raise NotImplementedError", "def _process_message(self, obj):\n pass", "def _next_message(self):\n msg = yield self._read_message()\n message = self._slack_to_chat(msg)\n\n raise gen.Return(message)", "def process_outgoing(self, msg):\n return msg, 0", "def processMessage(self, *args, **kwargs):\r\n pass", "async def intermediate(self, ctx):\n await ctx.send(f'Testing intermediate')", "def generate_message(self, mtu):\r\n raise GeneratorExit(\"No more message to send\")", "def test_make_dispatch_inbound_defaults(self):\n md_helper = MessageDispatchHelper(\n MessageHelper(), WorkerHelper('fooconn'))\n broker = self.setup_broker(md_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = yield md_helper.make_dispatch_inbound('inbound message')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])\n self.assert_message_fields(msg, {\n 'content': 'inbound message',\n 'from_addr': md_helper.msg_helper.mobile_addr,\n 'to_addr': md_helper.msg_helper.transport_addr,\n 'transport_type': md_helper.msg_helper.transport_type,\n 'transport_name': md_helper.msg_helper.transport_name,\n 'helper_metadata': {},\n 'transport_metadata': {},\n })", "def next(action, value, error_handle, skip_invoked=True):\n error_handle['action'] = 'NEXT'\n if skip_invoked:\n print_info(\"failure action= next\")\n return error_handle", "def exercise2():\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'", "def create_next_message(self, **kwargs):\n message = self._builder.create_message(**kwargs)\n return message", "def prepare_operation(self, command: str, opcode: str, *args, **kwargs) -> bool:\n _opcode_hex, _args_hex = self._opcode_fetch(opcode, *args)\n self.super_memory.PC.write(_opcode_hex)\n _assembler = [_opcode_hex]\n for x in _args_hex:\n for y in x[::-1]:\n 
self.super_memory.PC.write(y)\n _assembler.append(y)\n self._assembler[command] = \" \".join(_assembler).lower()\n return True", "def send(self, op):\n\t\ttokens = op.split()\n\t\tmsg = message()\n\t\tfor token in tokens:\t\n\t\t\tmsg.add(token)\n\t\tself.pb.send(msg)", "def _split_message(self, message, request_id):\n\n if message.code.is_request():\n outer_host = message.opt.uri_host\n proxy_uri = message.opt.proxy_uri\n\n inner_message = message.copy(\n uri_host=None,\n uri_port=None,\n proxy_uri=None,\n proxy_scheme=None,\n )\n inner_message.remote = None\n\n if proxy_uri is not None:\n # Use set_request_uri to split up the proxy URI into its\n # components; extract, preserve and clear them.\n inner_message.set_request_uri(proxy_uri, set_uri_host=False)\n if inner_message.opt.proxy_uri is not None:\n raise ValueError(\"Can not split Proxy-URI into options\")\n outer_uri = inner_message.remote.uri_base\n inner_message.remote = None\n inner_message.opt.proxy_scheme = None\n\n if message.opt.observe is None:\n outer_code = POST\n else:\n outer_code = FETCH\n else:\n outer_host = None\n proxy_uri = None\n\n inner_message = message.copy()\n\n outer_code = request_id.code_style.response\n\n # no max-age because these are always successsful responses\n outer_message = Message(code=outer_code,\n uri_host=outer_host,\n observe=None if message.code.is_response() else message.opt.observe,\n )\n if proxy_uri is not None:\n outer_message.set_request_uri(outer_uri)\n\n plaintext = bytes([inner_message.code]) + inner_message.opt.encode()\n if inner_message.payload:\n plaintext += bytes([0xFF])\n plaintext += inner_message.payload\n\n return outer_message, plaintext", "def handle(self, message):", "def process_msg(msg):\n time.sleep(random.randint(2, 10))\n print \"processor|%s::Processed message: %s\" % (UID, msg.body)", "def test_dispatch_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_inbound(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])", "def _register_fake_next_op(self, channel, fake_info):\r\n assert isinstance(fake_info, (list, tuple, dict)), \"fake_info should be a dict or list of dict or tuple of dict\"\r\n if isinstance(fake_info, (tuple, list)):\r\n for f in fake_info:\r\n assert isinstance(f, dict), \"fake_info should be a dict or list of dict or tuple of dict\"\r\n\r\n if isinstance(fake_info, dict):\r\n fake_info = [copy.deepcopy(fake_info)]\r\n else:\r\n fake_info = [copy.deepcopy(f) for f in fake_info]\r\n for f in fake_info:\r\n assert \"reply\" in f, \"fake_info should contain 'reply'\"\r\n assert isinstance(f[\"reply\"], ProtocolReply), \"fake_info's reply should be a ProtocolReply instance\"\r\n for o in f:\r\n assert o in (\"reply\", \"execute\", \"on_message\", \"on_channel\", \"after\", \"on_success\", \"on_failure\"), \\\r\n \"Unsupported fake_info options: {}\".format(o)\r\n if \"execute\" in f:\r\n assert isinstance(f[\"execute\"], bool) or f[\"execute\"] in (0, 1), \\\r\n \"fake_info option 'execute' should be a bool or 0 or 1\"\r\n\r\n if \"on_success\" in f:\r\n assert isinstance(f[\"on_success\"], bool), \\\r\n \"fake_info option 'on_success' should be a boolean\"\r\n\r\n if \"on_failure\" in f:\r\n assert isinstance(f[\"on_failure\"], bool), \\\r\n \"fake_info option 'on_failure' should be a boolean\"\r\n\r\n 
on_success = f.get(\"on_success\", None)\r\n on_failure = f.get(\"on_failure\", None)\r\n\r\n if on_success is None and on_failure is None:\r\n on_success = True\r\n on_failure = False\r\n\r\n if on_success is True or on_success is None and on_failure is False:\r\n on_success = True\r\n\r\n if on_failure is True or on_success is None and on_success is False:\r\n on_failure = True\r\n\r\n if on_success is True:\r\n f[\"on_success\"] = True\r\n else:\r\n f[\"on_success\"] = False\r\n if on_failure is True:\r\n f[\"on_failure\"] = True\r\n else:\r\n f[\"on_failure\"] = False\r\n\r\n if \"on_message\" in f:\r\n assert isinstance(f[\"on_message\"], PlatformMessage), \\\r\n \"fake_info option 'on_message' should be PlatformMessage\"\r\n if \"on_channel\" in f:\r\n assert isinstance(f[\"on_channel\"], (str, list, tuple)), \\\r\n \"fake_info option 'on_channel' should be a string or list/tuple of strings\"\r\n if isinstance(f[\"on_channel\"], (list, tuple)):\r\n for c in f[\"on_channel\"]:\r\n assert isinstance(c, str), \\\r\n \"fake_info option 'on_channel' should be a string or list/tuple of strings\"\r\n if \"after\" in f:\r\n assert isinstance(f[\"after\"], int), \"fake_info option 'after' should be an integer\"\r\n if \"on_channel\" not in f:\r\n on_channel = channel,\r\n elif isinstance(f[\"on_channel\"], (list, tuple)):\r\n on_channel = f[\"on_channel\"]\r\n else:\r\n on_channel = f[\"on_channel\"],\r\n\r\n for c in on_channel:\r\n if c not in self._fake_ops:\r\n self._fake_ops[c] = [f]\r\n else:\r\n self._fake_ops[c].append(f)", "def handle_message(self, message):", "def next(self, in_op):\n raise NotImplementedError", "def execute(self, message: ACLMessage):\n super().execute(message)\n\n # Filter for protocol\n if not message.protocol == ACLMessage.FIPA_REQUEST_PROTOCOL:\n return\n\n # Filter for session_id (conversation_id)\n session_id = message.conversation_id\n if session_id not in self.open_sessions:\n return\n\n # Resume generator\n generator = self.open_sessions[session_id]\n handlers = {\n ACLMessage.INFORM: lambda: generator.send(message),\n ACLMessage.AGREE: lambda: generator.throw(FipaAgreeHandler, message),\n ACLMessage.REFUSE: lambda: generator.throw(FipaRefuseHandler, message),\n ACLMessage.FAILURE: lambda: generator.throw(\n FipaFailureHandler, message)\n }\n try:\n handlers[message.performative]()\n except StopIteration:\n pass\n except KeyError:\n return\n\n # Clear session if final message was received\n if message.performative in (ACLMessage.REFUSE, ACLMessage.INFORM, ACLMessage.FAILURE):\n self.delete_session(session_id)", "def parse(self, data=''):\n self.scratch += data\n for i in self.scratch:\n if self.state == AWAITING_CONTROL_LINE:\n\n # MSG\n if self.scratch.startswith(MSG_OP):\n self.state = AWAITING_MSG_ARG\n\n # OK\n elif self.scratch.startswith(OK):\n # No op. 
But still consume OK from buffer and set next state.\n if len(self.scratch) > OK_SIZE:\n self.scratch = self.scratch[OK_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR\n elif self.scratch.startswith(ERR_OP):\n self.state = AWAITING_MINUS_ERR_ARG\n\n # PONG\n elif self.scratch.startswith(PONG):\n self.nc._process_pong()\n\n if len(self.scratch) > PONG_SIZE:\n self.scratch = self.scratch[PONG_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # PING\n elif self.scratch.startswith(PING):\n self.nc.send_command(PONG)\n if len(self.scratch) > PING_SIZE:\n self.scratch = self.scratch[PING_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n elif self.state == AWAITING_MSG_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n args = line.split(_SPC_)\n\n # Check in case of using a queue\n args_size = len(args)\n if args_size == 5:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = args[3]\n self.needed = int(args[4])\n elif args_size == 4:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = \"\"\n self.needed = int(args[3])\n else:\n raise ErrProtocol(\"Wrong number of arguments in MSG\")\n self.scratch = self.scratch[i+CRLF_SIZE:]\n self.state = AWAITING_MSG_PAYLOAD\n\n elif self.state == AWAITING_MSG_PAYLOAD:\n if len(self.scratch) >= self.needed:\n payload = self.scratch[:self.needed]\n subject = self.msg_arg[\"subject\"]\n sid = self.msg_arg[\"sid\"]\n reply = self.msg_arg[\"reply\"]\n\n # Set next stage already before dispatching to callback\n self.scratch = self.scratch[self.needed:]\n self.state = AWAITING_MSG_END\n\n msg = Msg(subject=subject, sid=sid, reply=reply, data=payload)\n self.nc._process_msg(msg)\n\n elif self.state == AWAITING_MSG_END:\n i = self.scratch.find(MSG_END)\n if i > 0:\n self.scratch = self.scratch[i+1:]\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR 'error'\n elif self.state == AWAITING_MINUS_ERR_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n _, err = line.split(_SPC_, 1)\n self.nc._process_err(err)\n if len(self.scratch) > i+CRLF_SIZE:\n self.scratch = self.scratch[i+CRLF_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE", "def handleMessage(msg):", "def dispatchMessageIteratorCall(self, tree):\n # simple case not a member function just an iterator with arguments\n if isinstance(tree.func, ast.Name):\n self.write(f\"FLAMEGPU->{tree.func.id}\")\n if isinstance(tree.func, ast.Attribute) :\n if isinstance(tree.func.value, ast.Name):\n # check that the iterator is supported\n if not tree.func.attr in self.fgpu_input_msg_iter_funcs:\n self.RaiseError(tree, f\"Message input loop iterator '{tree.func.attr}' is not supported.\")\n self.write(f\"FLAMEGPU->{tree.func.value.id}.{tree.func.attr}\")\n else:\n self.RaiseError(tree, \"Message input loop iterator format incorrect.\")\n\n # handle function arguments \n self.write(\"(\")\n self._CallArguments(tree)\n self.write(\")\")", "def doContinue(self, message, razzy):\n return", "def handle_instruction(self, msg):\n assert isinstance(msg, pb.Instruction)\n logging.info(\"NODE: handling instruction - {}\".format(msg).replace('\\n', ','))\n self.config.from_instruction = True\n\n call_later(msg.delay, self.tc_runner.bootstrap_promoters)\n\n if msg.instruction == 'bootstrap-only':\n pass\n\n elif msg.instruction == 'tx':\n rate = float(msg.param)\n interval = 1.0 
/ rate\n call_later(msg.delay, self.tc_runner.make_tx, interval, False)\n\n elif msg.instruction == 'tx-validate':\n rate = float(msg.param)\n interval = 1.0 / rate\n call_later(msg.delay, self.tc_runner.make_tx, interval, False)\n call_later(msg.delay + 10, self.tc_runner.make_validation, interval)\n\n elif msg.instruction == 'tx-random':\n rate = float(msg.param)\n interval = 1.0 / rate\n call_later(msg.delay, self.tc_runner.make_tx, interval, True)\n\n elif msg.instruction == 'tx-random-validate':\n rate = float(msg.param)\n interval = 1.0 / rate\n call_later(msg.delay, self.tc_runner.make_tx, interval, True)\n call_later(msg.delay + 10, self.tc_runner.make_validation, interval)\n\n else:\n raise AssertionError(\"Invalid instruction msg {}\".format(msg))", "def test_bogus():\n\n # Find a bogus op_type by trial and error\n #\n bogus_op_type = -1\n for bogus_op_type in range(0, 1000):\n if not bogus_op_type in SentmanRequest.MSG_TYPES:\n break\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n req1.op_type = bogus_op_type\n req1_buf = req1.pack()\n try:\n (_msgs, _buf) = SentmanRequest.recv(req1_buf)\n except SentmanRequestUnpackError, _exc:\n print \"Bogus op_type detected\"\n else:\n print \"Didn't catch bogus op_type\"\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n req1.version = SentmanRequest.PROTOCOL_VERSION + 20\n req1_buf = req1.pack()\n try:\n (_msgs, _buf) = SentmanRequest.recv(req1_buf)\n except SentmanRequestUnpackError, _exc:\n print \"Bogus version detected\"\n else:\n print \"Didn't catch bogus version\"\n\n req1 = SentmanRequest(SentmanRequest.ALLOCATE_SENTINEL)\n req1.msg_len = SentmanRequest.MESSAGE_LEN + 33\n req1_buf = req1.pack()\n try:\n (_msgs, _buf) = SentmanRequest.recv(req1_buf)\n except SentmanRequestUnpackError, _exc:\n print \"Bogus msg_len detected\"\n else:\n print \"Didn't catch bogus msg_len\"", "def _prepare_site(self, msg):\n name = msg[\"name\"]\n param_name = params.user_param_name(name)\n if isinstance(self.prior, dict) and param_name in self.prior.keys() \\\n or callable(self.prior):\n if msg[\"type\"] == \"param\":\n msg[\"done\"] = True\n return msg", "def _handle(self, msg: Message) -> Message:\n\n # skip executor for non-DataRequest\n if msg.envelope.request_type != 'DataRequest':\n if msg.request.command == 'TERMINATE':\n raise RuntimeTerminated()\n self.logger.debug(f'skip executor: not data request')\n return msg\n\n req_id = msg.envelope.request_id\n num_expected_parts = self._get_expected_parts(msg)\n self._data_request_handler.handle(\n msg=msg,\n partial_requests=[m.request for m in self._pending_msgs[req_id]]\n if num_expected_parts > 1\n else None,\n peapod_name=self.name,\n )\n\n return msg", "def process(self, message: Message, **kwargs: Any) -> None:", "def comsume_msg(self, msg_type):", "def advance(self):\n\n try: # kind of hacky ... 
use for loop instead!\n self.cmd = next(self.__iter__())\n except StopIteration:\n pass", "def unknown(label: str, conn: str, command: str):\n yield\n print(f\"{label} (fake:{conn}): {command} done\")", "def prepare_message(self, cmd: Dict, value: Any) -> Any:\n\n message = {}\n message[\"endpoint\"] = cmd[\"endpoint\"]\n message[\"method\"] = cmd[\"method\"]\n\n # Prepare payload\n payload = None\n # Check that value is empty for GET requests\n if cmd[\"method\"] == \"GET\":\n if value is not None:\n self.logger.warning(\"Trying to send GET request with non-empty payload <%s>\", value)\n else:\n path_to_payload = cmd[\"path\"].copy()\n parameter = path_to_payload.pop()\n payload = {parameter: value}\n # The easiest way to build the rest of the nested dict we need\n # is to start bottom up\n path_to_payload.reverse()\n # Wrap the rest of stuff around\n for item in path_to_payload:\n payload = {item: payload}\n payload = json.dumps(payload)\n message[\"data\"] = payload\n self.logger.debug(\"prepare_message()::constructed payload <%s>\", payload)\n return message", "def prepare_message(self, cmd: Dict, value: Any) -> Any:\n\n message = {}\n message[\"endpoint\"] = cmd[\"endpoint\"]\n message[\"method\"] = cmd[\"method\"]\n\n # Prepare payload\n payload = None\n # Check that value is empty for GET requests\n if cmd[\"method\"] == \"GET\":\n if value is not None:\n self.logger.warning(\"Trying to send GET request with non-empty payload <%s>\", value)\n else:\n path_to_payload = cmd[\"path\"].copy()\n parameter = path_to_payload.pop()\n payload = {parameter: value}\n # The easiest way to build the rest of the nested dict we need\n # is to start bottom up\n path_to_payload.reverse()\n # Wrap the rest of stuff around\n for item in path_to_payload:\n payload = {item: payload}\n payload = json.dumps(payload)\n message[\"data\"] = payload\n self.logger.debug(\"prepare_message()::constructed payload <%s>\", payload)\n return message", "def process_request(self, message):\n return NotImplemented()", "def test_chopped_message(self):\n queue = Queue()\n # Receive the message in a separate thread, because it blocks\n thread = Thread(target=lambda q: q.put(self.inverter.receive()), args=(queue,))\n thread.start()\n self.sock.send(message[0:1]) # Send some message parts\n sleep(0.01)\n self.sock.send(message[1:3])\n sleep(0.01)\n self.sock.send(message[3:7])\n sleep(0.01)\n self.sock.send(message[7:])\n thread.join()\n # Check result\n ident, payload = queue.get(timeout=1.0)\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)", "def step_impl(context, message):\n assert message in context.driver.title", "def next_token(self, context, token):", "def horde_message(self, message):", "def _handler_command_test(self, *args, **kwargs):\n next_state = None\n result = None\n\n next_state = SBE37ProtocolState.TEST\n \n return (next_state, result)", "def advance(self):\n if self.instr is not None:\n self.instr.opcode = self.instr.binary[25:]\n if opcode_decode[self.instr.opcode] == 'R-type':\n self.decode_rtype()\n elif opcode_decode[self.instr.opcode] == 'I-type' or opcode_decode[self.instr.opcode] == 'Load':\n self.decode_itype()\n else:\n raise SyntaxError(\"Invalid opcode\")", "def step_impl_the_msg_to_is_set_to_empty(context):\n context.bdd_helper.message_data[\"msg_to\"][0] = \"\"", "def move(msg):\n qpt.write(msg)\n #feedback = qpt.readline()\n #return feedback", "def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in 
self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )", "def simulate_reply(self, data):\n self._data = data[:]", "def _do_some_logic(self, packet):\n\n\n pass", "def process(opcode):\n opcode.process()", "def preprocess(self, message):\n self._call_extension_method('preprocess', message)", "def FromRpcMessage(self, message):\n self.content = message.content\n self.completed = message.completed", "def process_messages(self):\n pass", "def _post_hook(self, msg: Message) -> Message:\n # do NOT access `msg.request.*` in the _pre_hook, as it will trigger the deserialization\n # all meta information should be stored and accessed via `msg.envelope`\n\n self._last_active_time = time.perf_counter()\n\n if self._get_expected_parts(msg) > 1:\n msgs = self._pending_msgs.pop(msg.envelope.request_id)\n msg.merge_envelope_from(msgs)\n\n msg.update_timestamp()\n return msg", "def handle_msg(self, state_id, msg):\n pass", "def test_dispatch_inbound_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(connector_name='fooconn')\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_inbound(msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])", "def test_process_packet_message(self):\n\n pkt = {'type': 'message',\n 'endpoint': '',\n 'data': 'woot'}\n data = self.ns.process_packet(pkt)\n self.assertEqual(data, pkt['data'])\n assert not self.environ['socketio'].error.called\n\n # processing a message packet with id and endpoint\n pkt = {'type': 'message',\n 'id': 5,\n 'ack': True,\n 'endpoint': '/tobi',\n 'data': ''}\n data = self.ns.process_packet(pkt)\n self.assertEqual(data, pkt['data'])\n assert not self.environ['socketio'].error.called", "def _f(message):\n return message", "def next():", "def next():", "def state_DATA(self, data):\n\t\tif data == '.':\n\t\t\tself.mode = 'COMMAND'\n\t\t\tself.sendCode(250, \" OK Queued as some message I'll neer deliver :)\")\n\t\t\tself.queue.add(self.message)\n\t\t\treturn\n\t\t\"\"\"remove buffed 
periods\"\"\"\t\t\n\t\tif( len(data) > 0 and data[0] == '.' ): \n\t\t\tdata = data[1:]\n\t\t\n\t\tself.message.addLine(data)", "def step(self):\r\n cmd = struct.pack('>B', 54)\r\n self.send(cmd)", "def nextAction(self, beliefstate, hyps):\n user_act = hyps[0][0]\n if \"inform(query=\" in user_act or 'i(=' in user_act: # last one is due to lazy typing\n query = user_act.split(\"=\")[1][0:-1] # assume passthroughsemi for now and inform(query=elliot smith) \n result = self.wiki.summary(query)\n if self.wm.msgs[self.wiki.status]:\n return unicode('inform(query=\"'+str(result)+'\")') \n elif self.wiki.status == \"DISAMBIGUATE\":\n a = result[0].replace('(','').replace(')','').rstrip()\n b = result[1].replace('(','').replace(')','').rstrip()\n return unicode('select(\"name='+str(a)+',name='+str(b)+'\")') #can only choose between top 2\n else:\n return unicode('inform(failed)')\n elif \"bye(\" in user_act:\n return 'bye()'\n else:\n return 'hello()'", "def processingInstruction(self, target, data):\n pass", "def HelloMsg(self, request_iterator, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def repeat(ctx, *, arg):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('repeat: ' + arg, extra={'invoker': ctx.message.author.name})\r\n await ctx.send(arg)", "def dispatch(self, msg):\n if self.session.addressed:\n self.session.reply(self.target, \"%s: %s\" % (self.source, msg))\n self.session.log.info(\"-#%s- <%s> %s: %s\" % (\n self.target,\n self.session.nick,\n self.source, msg))\n else:\n self.session.reply(self.target, msg)\n self.session.log.info(\"-#%s- <%s> %s\" % (\n self.target,\n self.session.nick,\n msg))", "def foo(self, message = None):\n if message:\n return 'foo ' + message\n else:\n return 'foo'", "def client_first_message(self):\n self.nonce = str(uuid4()).replace(\"-\", \"\")\n client_first_bare = \"n={},r={}\".format(self.user, self.nonce)\n self.auth_message = client_first_bare\n return \"n,,\" + client_first_bare", "def handle_message(self, msg):\n pass", "def new_message(self, message):\n self.message_counter += 1\n self.message_buffer.append(str(message))\n self.event_loop()", "def goto_first():\n\tglobal c1\n\tglobal a1\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c1.recv(BUF_SIZE) # wait for taken off message\n\tprint a1, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init1\n\t\tc1.send(json.dumps(new_msg))\n\t\tstate += 1", "async def extra_make_response(self, pkt, source):\n if False:\n yield None", "def test_dispatch_raw_with_exchange(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = worker_helper.broker\n broker.exchange_declare('blah', 'direct', durable=True)\n self.assertEqual(broker.get_messages('blah', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg, exchange='blah')\n self.assertEqual(broker.get_messages('blah', 'fooconn.foo'), [msg])", "def __call__(self,message):\n if self.header != self.prevHeader:\n if self.prevHeader:\n self.writeFooter()\n if self.header:\n self.writeHeader(self.header)\n self.prevHeader = self.header\n self.writeMessage(message)", "def nop(act_line: int, value: str):\n return act_line + 1", "def test_get_messaging_actions_for_order(self):\n pass", "def output_op(self, op):\n 
self.output['text'] += ' ' + op + ' ' \n self.seen_op = True", "def test_send_and_assert_no_op(fprime_test_api):\n length = 100\n failed = 0\n evr_seq = [\n \"cmdDisp.OpCodeDispatched\",\n \"cmdDisp.NoOpReceived\",\n \"cmdDisp.OpCodeCompleted\",\n ]\n any_reordered = False\n dropped = False\n for i in range(0, length):\n results = fprime_test_api.send_and_await_event(\n \"cmdDisp.CMD_NO_OP\", events=evr_seq, timeout=25\n )\n msg = \"Send and assert NO_OP Trial #{}\".format(i)\n if not fprime_test_api.test_assert(len(results) == 3, msg, True):\n items = fprime_test_api.get_event_test_history().retrieve()\n last = None\n reordered = False\n for item in items:\n if last is not None:\n if item.get_time() < last.get_time():\n fprime_test_api.log(\n \"during iteration #{}, a reordered event was detected: {}\".format(\n i, item\n )\n )\n any_reordered = True\n reordered = True\n break\n last = item\n if not reordered:\n fprime_test_api.log(\n \"during iteration #{}, a dropped event was detected\".format(i)\n )\n dropped = True\n failed += 1\n fprime_test_api.clear_histories()\n\n case = True\n case &= fprime_test_api.test_assert(\n not any_reordered, \"Expected no events to be reordered.\", True\n )\n case &= fprime_test_api.test_assert(\n not dropped, \"Expected no events to be dropped.\", True\n )\n msg = \"{} sequences failed out of {}\".format(failed, length)\n case &= fprime_test_api.test_assert(failed == 0, msg, True)\n\n assert (\n case\n ), \"Expected all checks to pass (reordering, dropped events, all passed). See log.\"", "def next_btn(self: object, controller: Iterator[str]) -> str:\n try:\n to_print = next(controller)\n if isinstance(to_print, list):\n return '\\n'.join(to_print)\n else: \n return to_print\n\n except StopIteration:\n return \"The protocole is finished\"", "def _msg_cb(self, main_loop, msg):\n self._msg_hdr(msg)", "def test_decision_maker_execute_w_wrong_input(self):\n default_message = DefaultMessage(\n type=DefaultMessage.Type.BYTES, content=b\"hello\"\n )\n\n self.decision_maker.message_in_queue.put_nowait(default_message)\n time.sleep(0.5)\n self.mocked_logger_warning.assert_called_with(\n \"[{}]: Message received by the decision maker is not of protocol_id=internal.\".format(\n self.agent_name\n )\n )", "def handle_message(self, session, message):\n # Handle an RPC call\n # Reason should come from inform call.\n response = {}\n if message['method'] == 'done' and message['id'] is None:\n # Here we switch roles, becoming RPC Client\n next_state, response = RPCS.SendingRpc, None\n else:\n # We have a valid method.\n # (VALID_METHODS checked in rpcsd:parse_message)\n next_state = RPCS.ExpectRpc\n response['error'] = {'code': -31998, 'message': 'Wrong request'}\n response['id'] = message['id']\n\n return next_state, response", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n 
self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def get_next_request_message(self):\n if self.completed:\n raise PieceError(\"Piece Is Already Completed\")\n\n request_length = min(constants.REQUEST_LENGTH, self.length - self.bytes_downloaded)\n index_bytes = int.to_bytes(self.index, length=4, byteorder='big')\n offset_bytes = int.to_bytes(self.bytes_downloaded, length=4, byteorder='big')\n length_bytes = int.to_bytes(request_length, length=4, byteorder='big')\n request_payload = index_bytes + offset_bytes + length_bytes\n\n return Message(MessageType.REQUEST, request_payload)", "def test_add_skip(self):\n self.protocol.addSkip(\n self.test, \"Has it really?\")\n self.assertEqual(\n self.io.getvalue(),\n compat._b('skip: %s [\\nHas it really?\\n]\\n' % self.test.id()))" ]
[ "0.6204553", "0.57941633", "0.57386607", "0.5676188", "0.55851084", "0.5510728", "0.54746866", "0.5442715", "0.54413027", "0.5374735", "0.5304368", "0.5282957", "0.52620536", "0.5245205", "0.52339035", "0.52218646", "0.52083987", "0.52082664", "0.5190077", "0.5156617", "0.51207614", "0.5113483", "0.51063675", "0.51057166", "0.5103873", "0.50992644", "0.5082832", "0.5079936", "0.50639445", "0.506141", "0.5047189", "0.50436616", "0.50334585", "0.50313294", "0.5021949", "0.5002301", "0.49922717", "0.49922466", "0.4972157", "0.49658456", "0.49504933", "0.49364877", "0.49272853", "0.49241796", "0.4924121", "0.4913115", "0.49129236", "0.49108422", "0.49023247", "0.4895974", "0.4895974", "0.48831922", "0.48782918", "0.48757672", "0.4872751", "0.48645276", "0.4863993", "0.4857583", "0.4855545", "0.48551795", "0.48530695", "0.4849245", "0.48473367", "0.4839211", "0.48378158", "0.48356622", "0.48303387", "0.48246464", "0.48173064", "0.47972172", "0.47958988", "0.47957617", "0.47913134", "0.47913134", "0.4778316", "0.4771022", "0.47699994", "0.47672442", "0.47672042", "0.47656715", "0.4763707", "0.47607523", "0.47604057", "0.47490716", "0.47489887", "0.47481984", "0.47480646", "0.47477514", "0.47422382", "0.47408602", "0.473967", "0.47296378", "0.4729126", "0.4722695", "0.47214338", "0.47190088", "0.47156614", "0.47023606", "0.4701053", "0.46988082" ]
0.5297616
11
Checks whether incoming message could be processed
def supports(self, message):
    if message.method == '__testing__':
        return True
    return self._interface.supports(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for_incoming_info(self):\n\n if self.test_message_response:\n self.parse_incoming_message(self.test_message_response)\n return True\n\n POLL_ONLY_TIMEOUT_VALUE = 0\n got_at_least_one = False\n while (True):\n readables, writables, errors = select.select([self.socket_datastream], [], [], POLL_ONLY_TIMEOUT_VALUE)\n if not self.socket_datastream in readables:\n return got_at_least_one\n got_at_least_one = True\n data, remote_ip_port = self.socket_datastream.recvfrom(MAX_EXPECTED_MSG_SIZE)\n if remote_ip_port != self.ip_port_arduino_datastream:\n errorhandler.loginfo(\"Msg from unexpected source {}\".format(remote_ip_port))\n else:\n errorhandler.logdebug(\"msg received:{}\".format(data.hex()))\n self.parse_incoming_message(data)", "def process_message(self, context, message):\r\n r = self._process_message_general(context, message)\r\n if r is True:\r\n return\r\n elif r is not False:\r\n self._interface.incoming(context, message, r)\r\n else:\r\n self._interface.incoming(context, message, None)", "def has_an_incomming_message(self):\n return self.pipe_start.poll(1)", "def process_messages(self):\n for each_message in self.unprocessed_messages:\n if not ( 'message_type' in each_message):\n logging.error(\"(%s:%d) invalid message found...ignoring the message\",\\\n self.ip, self.port)\n else:\n if ( each_message['message_type'] is 'unchoke'):\n self.is_choking = 0\n elif ( each_message['message_type'] is 'choke'):\n self.is_choking = 1\n elif ( each_message['message_type'] is 'interested'):\n self.is_interested = 1\n elif ( each_message['message_type'] is 'not interested'):\n self.is_interested = 0\n elif ( each_message['message_type'] is 'have'):\n self.pieces.append(each_message['piece_index'])\n elif ( each_message['message_type'] is 'bitfield'):\n bitfield = each_message['bitfield']\n for index, each_bit in enumerate(bitfield):\n if ( each_bit is '1'):\n self.pieces.append(index)", "def process_messages(self):\n pass", "def check_message(self, msg):\n pass", "def _is_running(self, _):\n if self._shutdown_event.is_set():\n raise RequestProcessingError(\n \"Unable to process message - currently shutting down\"\n )", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def CanHandleMessage(self, status_message):\n if isinstance(status_message, (\n SeekAheadMessage,\n ProducerThreadMessage,\n FileMessage,\n ProgressMessage,\n FinalMessage,\n RetryableErrorMessage,\n PerformanceSummaryMessage,\n )):\n return True\n return False", "def CanHandleMessage(self, status_message):\n if isinstance(\n status_message,\n (SeekAheadMessage, ProducerThreadMessage, MetadataMessage, FinalMessage,\n RetryableErrorMessage, PerformanceSummaryMessage)):\n return True\n return False", "def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def receive_message(self, _message, data: dict):\n if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:\n self.logger.debug(\"(PlexController) MESSAGE RECEIVED: %r.\", data)\n return True\n\n return False", "def new_messages(self):\n ready, _, _ 
= select([self.socket], [], [], 0.0)\n return self.socket in ready", "def process_message(self, tag, value):\n return False", "def _handle_one_message(self):\n\n type, data = self.cxn.recv_message()\n\n if type.startswith(\"call\"):\n if len(data) != 3:\n message = (type, data)\n raise MessageError.invalid(message, \"incorrect number of args\")\n flags = {\n \"want_response\": type == \"call\",\n }\n call = Call(data[0], data[1], data[2], flags, self.client)\n self._handle_call(call)\n return False\n\n raise MessageError.bad_type(type)", "def handle_message(self, msg, status):\n\n body = ensure_unicode(msg.Body)\n chat_id = get_chat_id(msg.Chat)\n\n if len(body) == 0:\n return False\n\n for name, cmd in self.commands.items():\n if body == name:\n cmd(msg, chat_id)\n return True\n\n\n if self.troller_is_running.get(chat_id):\n response = self.alice.respond(body)\n if response:\n msg.Chat.SendMessage(response)\n return True\n else:\n return False\n else:\n return False", "def process_message(self):\n while True:\n if not self.next_message:\n return False\n\n # check if next message is in the past, and drop it\n if (self.next_message.round, self.next_message.phase) < (self.round, self.phase):\n (self.logger.debug if self.is_leader else self.logger.warning)(\n \"dropping past message from round %d / phase %s\",\n self.next_message.round, self.next_message.phase.name\n )\n self.drop_message()\n else:\n break\n\n # check if next message is in the future, and process it at a later point in time\n if (self.next_message.round, self.next_message.phase) > (self.round, self.phase):\n return False\n\n msg_item = self.dequeue_message()\n msg_type = get_message_type(msg_item.content)\n msg_sender = get_message_sender(msg_item.content)\n\n if msg_sender == self.leader and msg_type != MessageType.Propose:\n self.logger.warning(f\"FLAGGING NODE {msg_sender} AS ADVERSARY, LEADER SENT DIFFERENT MESSAGE\")\n self.flag_adversary(msg_sender)\n self.recover()\n return True\n\n signed_msg: SignedMessage = SignedMessage.deserialize(msg_item.content)\n msg = signed_msg.message # signature was already verified prior to insertion into the message buffer\n assert msg.round_idx == self.round\n assert msg.type.to_phase() == self.phase\n\n # TODO: add try/except for deserialization, and flag leader as adversial upon failure\n\n self.logger.debug(\"processing %s message\", msg_type.name)\n if msg_type == MessageType.Propose:\n self.process_propose(msg)\n elif msg_type == MessageType.Acknowledge:\n self.process_acknowledge(msg)\n elif msg_type == MessageType.Confirm:\n self.process_confirm(signed_msg)\n elif msg_type == MessageType.Recover:\n self.process_recover(msg)\n else:\n assert False, \"message type not considered\"\n\n return True", "def __check_for_messages(self):\n\n # Wait for at least poll_interval sec\n polling_interval = self.conf.messaging_server.polling_interval\n time.sleep(polling_interval)\n if self.conf.messaging_server.debug:\n LOG.debug(\"Topic {}: Checking for new messages\".format(\n self.target.topic))\n self._do()\n return True", "def handleMsgs(self):\n\n force_sheep_check = self.changed_last_step\n self.changed_last_step = False\n if not self.queue:\n return\n\n need_to_check = False\n for msg in self.popMsg(): # Receive message(s) from queue.\n if msg.type == Type.BLOCK:\n new_tx = msg.content\n if new_tx.hash in self.seen_tx:\n continue\n need_to_check = True\n self.changed_last_step = True\n self.handleNewTx(new_tx, msg.sender)\n elif msg.type == Type.REQUEST: # Requests are issued by other 
miners.\n target_hash = msg.content\n assert target_hash in self.seen_tx # I should never get a request for a tx I haven't seen.\n requestedTx = self.seen_tx[target_hash]\n self.sendMsg(msg.sender, Message(self.id, Type.BLOCK, requestedTx))\n if need_to_check or (self.hasSheep() and force_sheep_check): # Have to check every time if has sheep.\n self.checkAllTx()", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def check_messages(self):\n threadprop.get_app().processEvents(QtCore.QEventLoop.AllEvents)\n if self._stop_requested:\n raise threadprop.InterruptExceptionStop()", "def process(self):\n return False", "def has_message_available(self):\n return not self.feedback_log.empty()", "def processMessage(self, *args, **kwargs):\r\n pass", "def process_message(self, msg, src):", "def _check_message(self, _message_contents):\r\n if not type(_message_contents) is dict:\r\n self.logger.error(\"Message should be a dict.\")\r\n return False\r\n if not \"event\" in _message_contents:\r\n self.logger.error(\"Message dict has no event key.\")\r\n return False\r\n if not \"data\" in _message_contents:\r\n self.logger.error(\"Message dict has no data key.\")\r\n return False\r\n if not type(_message_contents[\"event\"]) == str:\r\n self.logger.error(\"Message event is not a string.\")\r\n return False\r\n if len(_message_contents[\"event\"]) == 0:\r\n self.logger.error(\"Message event cannot be empty.\")\r\n return False\r\n if not type(_message_contents[\"data\"]) == list:\r\n self.logger.error(\"Message data is not a list.\")\r\n return False\r\n if len(_message_contents[\"data\"]) == 0:\r\n self.logger.error(\"Message data cannot be empty list.\")\r\n return False\r\n return True", "def is_polling_done(self):\n if self.message_request_more:\n return False\n \n if self.message_cache:\n return False\n \n return True", "def process_outgoing(self, msg):\n return msg, 0", "def receive_message(self, message, data):\n\n self.logger.debug('Plex media receive function called.')\n if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:\n self.logger.debug('(PlexController) MESSAGE RECEIVED: ' + data)\n return True\n\n return False", "def incoming_message(self,msg):\n # check if we should drop the message entirely\n if np.random.binomial(1,self.drop_incoming_prob):\n return None\n else:\n if msg._type == 'offset_etddf/AgentMeasurement':\n self.agent_meas_pub.publish(msg)\n elif msg._type == 'offset_etddf/AgentState':\n self.agent_state_pub.publish(msg)", "def _process_message(self, obj):\n pass", "def checkConnection(self,msg):\n if (len(msg) == 0):\n sleep(self.m_to/2)\n print >>sys.stderr, 'Closing due to possible server fault'\n self.close()", "def handle_message(self, msg):\n pass", "def handleMessage(msg):", "def iteration(self):\n\n should_stop = self.stop.wait(EVENT_TIMEOUT)\n\n if should_stop:\n return True\n\n try:\n data = self.get_data()\n\n if not data:\n return True\n\n message = Message.deserialize(data)\n Logger.log_activity(message)\n\n self.handle_message(message)\n except socket.error:\n pass\n except:\n self.send_bad_request()\n\n return False", "def verify_raw_message(self, msg: bytes):\n if not (MIN_MESSAGE_SIZE < len(msg) < MAX_MESSAGE_SIZE):\n raise ValueError(\"Invalid message size!\")\n\n msg_type = get_message_type(msg) # yields a ValueError on invalid type\n msg_sender = get_message_sender(msg) # yields a ValueError if sender is invalid\n msg_round = get_message_round(msg)\n\n if msg_round < self.round:\n 
raise ValueError(f\"Message to late\")\n\n if msg_round == self.round:\n if msg_type == MessageType.Propose and self.phase > Phase.Propose:\n raise ValueError(f\"Message to late!\")\n if msg_type == MessageType.Acknowledge and self.phase > Phase.Acknowledge:\n if not self.is_leader:\n raise ValueError(f\"Message to late!\")\n elif self.is_leader and msg_type != MessageType.Confirm:\n raise ValueError(\"Leaders only process Confirm messages for current round!\")\n\n if self.node_status[msg_sender] == NodeStatus.ADVERSARIAL:\n return ValueError(\"Message sender is an adversary!\")\n\n # TODO: Drop message if some message of the same (type, round, sender)-combination\n # was previously added to the queue.\n\n # Drop messages with invalid signatures\n if not ed25519.verify_attached(msg, NODE_INFOS[msg_sender].public_key):\n return ValueError(\"Signature check failed!\")\n\n return True", "def process_incoming(self, msg, status):\n return msg[0]", "def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )", "def check_finish(self):\r\n return not self.proc.is_alive()", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def messages_pending(self):\r\n return bool(self._log_buffer)", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def _check_reply(self):\n self._more_packets_available = False\n try:\n if self._reply is None:\n self._status = (3, '{} without reply'.format(\n REPLAY_INFO[unpack_dint(self._message[:2])]))\n return False\n # Get the type of command\n typ = unpack_uint(self._reply[:2])\n\n # Encapsulation status check\n if unpack_dint(self._reply[8:12]) != SUCCESS:\n self._status = (3, \"{0} reply status:{1}\".format(\n REPLAY_INFO[typ],\n SERVICE_STATUS[unpack_dint(self._reply[8:12])]))\n return False\n\n # Command Specific Status check\n if typ == unpack_uint(ENCAPSULATION_COMMAND[\"send_rr_data\"]):\n status = unpack_usint(self._reply[42:43])\n if status != SUCCESS:\n status_msg = \"send_rr_data reply:{0} - Extend status:{1}\"\n self._status = (3, status_msg.format(\n SERVICE_STATUS[status],\n get_extended_status(self._reply, 42)))\n return False\n else:\n return True\n return True\n except Exception as e:\n raise DataError(e)", "def _process_message(self, json_object):\n\n message = json.loads(json_object)\n if message['type'] == \"relay\":\n self._process_relay(message)\n elif message['type'] == \"control\":\n self._process_control(message)\n else:\n print(\"ERROR Received message has invalid type\\n\")\n return", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif 
received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def _check_queue(self):\n self._process_incoming_queue_messages()\n self._root.after(200, self._check_queue)", "def handle_message(self, message):\n if message['data_type'] == types.DT_SHARED_OBJECT and message['obj_name'] == self.name:\n events = message['events']\n\n if not self.use_success:\n assert events[0]['type'] == types.SO_USE_SUCCESS, events[0]\n assert events[1]['type'] == types.SO_CLEAR, events[1]\n events = events[2:]\n self.use_success = True\n\n self.handle_events(events)\n return True\n else:\n return False", "def __receive_request(self):\n # get the request's length\n request_size = self.__socket.recv(Commands.SIZE_LENGTH)\n # if the master sent an empty msg, then he has closed himself\n if not request_size:\n print \"Master Has Been Closed\"\n # TODO: close the peasant and start the run function all over again\n return False\n # fix the request's length\n request_size = int(request_size) - Commands.COMMAND_LENGTH\n # get the request's command's number\n command = int(Commands.decrypt(self.__socket.recv(Commands.COMMAND_LENGTH)))\n # if the request size's is 0, then there are not args\n args = []\n # else, there are args, read them (decrypted)\n if request_size != 0:\n args = Commands.decrypt(self.__socket.recv(request_size)).split(Commands.SEPARATE_CHAR)\n if self.__DEBUG:\n print args\n # handle the command and add the command number and return value to the responses list\n self.__responses.append(str(command) + Commands.handle_command_request(command, args))\n return True", "def validate_message(self, state_id, msg):\n pass", "def is_msg_inited(self):\n pass", "def process(self, msg):\n raise NotImplemented", "def ignore_if_busy(self):\r\n if self.is_waiting_for_message():\r\n self.beep()\r\n return True\r\n return False", "def process(self):\n return self.check()", "def valid_message_length(self):\n if self.message_len() > 0:\n if self.message_len() <= self.max_msg_length:\n return True\n return False", "def handle_message(self, message):", "def should_handle(self, stream, msg, idents):\n if not super().should_handle(stream, msg, idents):\n return False\n msg_id = msg['header']['msg_id']\n msg_type = msg['header']['msg_type']\n if msg_id in self.aborted:\n # is it safe to assume a msg_id will not be resubmitted?\n self.aborted.remove(msg_id)\n self._send_abort_reply(stream, msg, idents)\n return False\n self.log.info(f\"Handling {msg_type}: {msg_id}\")\n return True", "def handleIncomingMessage(self, resourceEnum: ResourceNameEnum, msg: str) -> bool:\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The handleIncomingMessage method is being called\")\n\t\t# Use the DataUtil class to convert the msg content (which should be JSON) to an ActuatorData instance\n\t\tad = DataUtil.jsonToActuatorData(self, msg)\n\t\tself._handleIncomingDataAnalysis(msg)", "def callback(parsed_msg, msg_object):\n assert msg_object.stream_id == stream_id\n assert parsed_msg in msg", "def has_messages(self) -> bool:\n return self._has_messages", "def check_channel_exec_request(self, channel, command):\n return False", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] 
Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def handleReceived():\r\n global receivedAck\r\n receivedAck = True", "def process_inc(self):\n while True:\n # if connected\n if self.connected:\n # if socket is closed, reset\n if self.sock._closed:\n self.connack_rec = 0\n self.disconnect()\n # try to get a message from queue\n try:\n msg = self.recv_q.get_nowait()\n # convert from bytes to string\n msg = msg.decode(\"utf-8\")\n # analyze frame\n frame = Message.Frame(msg)\n\n # check frame type\n hd = frame.header.lower()\n\n # check if frame is good, otherwise disregard it\n if hd == \"error\" or hd == \"base\":\n pass\n # if ack frame\n elif hd == \"ack\":\n # process it\n self.process_ack(Message.AckFrame(msg))\n # if pub frame\n elif hd == \"pub\":\n # process it\n self.process_data(Message.PublishFrame(msg))\n\n except queue.Empty:\n # if no messages, do nothing\n pass", "def on_message(self, message):\n message = json.loads(message)\n self.log.info(\"on_message for %s, type %s\", self.cid, message['type'])\n self.log.debug(\"message: %s\", pprint.pformat(message))\n if message['type'] == custom_message_type:\n return False\n\n to_cid = message['dst']\n\n forward_message = {\n 'type': message['type'],\n 'src': self.cid,\n 'dst': to_cid,\n 'payload': message['payload'],\n }\n\n # I'm pretty sure you can get out of order messages somehow?\n assert to_cid in self.node.cids_in_use\n assert to_cid in self.cid_handlers\n\n self.cid_handlers[to_cid].write_message(json.dumps(forward_message))\n return True", "def _is_acknowledged(self):\n response = self._port_handle.read(1)\n if len(response) == 0:\n raise DfuException('DFU did not send the answer.')\n else:\n if response != self.__RESPONSE['ack']:\n print('dfu answered nack (0x{})'.format(response.hex()))\n return response == self.__RESPONSE['ack']", "def _is_message_valid(message):\n return isinstance(message, ev_envelope.Envelope)", "def receive_message(self, message):\r\n return", "def handle_request(self):\n try:\n data = self.sock.recv(1024)\n except socket.error as e: # ...,e:\n if e == 10040:\n print('Message too long, ignoring.')\n return\n raise\n self.append_to_seq(parse_packet(data))", "def event_check(self):\r\n if len(self.event_queue) > 0:\r\n event = self.event_queue.pop(0) # oldest\r\n self.event_queue_proc(event)\r\n return True\r\n return False", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None", "def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)", "def handle(self, message):", 
"def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def scan_for_message(self):\n\n while True:\n msg = self.consumer.poll(0.1)\n if msg is None:\n continue\n elif not msg.error():\n message = json.loads(msg.value().decode('utf8'))\n print('Received message: {0}'.format(message))\n if message['risk_level'] >= 4:\n user = User(message['user_id'].replace(' ', '.'))\n user.handle()\n elif msg.error().code() == KafkaError._PARTITION_EOF:\n print('End of partition reached {0}/{1}'\n .format(msg.topic(), msg.partition()))\n else:\n print('Error occured: {0}'.format(msg.error().str()))", "def _pubnub_receive(self, msg):\r\n self.signal_recv(self, msg)\r\n self._time_last_received = time.time()\r\n return not self._terminating", "def reply_received():\n return call_id in self._reply_inbox", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def _check_packet_corruption(self, header):\n data_corrupt = False\n if header.msg_type == 0 or header.msg_size == 0 or header.msg_size > 10000:\n if not self._file_corrupt and self._debug:\n print('File corruption detected')\n data_corrupt = True\n self._file_corrupt = True\n\n return data_corrupt", "def ready(self):\n if self.proc.stdout.readline() != \"OK\\n\":\n raise ValueError(\"Le bot {bot} n'arrive pas à se préparer\".format(bot=self.name))", "def on_message(self, data):\n logger.debug('on_message: %r', data)\n if not self.in_transfer_state:\n self.discard_chunked_messages()\n return NAK\n else:\n try:\n self.handle_message(data)\n return ACK\n except Exception as exc:\n logger.error('Error occurred on message handling. {!r}'\n .format(exc))\n return NAK", "def received_message(self, msg: Data, source: tuple, destination: tuple) -> bool:\n raise NotImplemented", "def Check_Communications(self):\n self.serial_status = False\n try:\n self.serial_status = self.ser.isOpen()\n except Exception as e:\n print \"No communication to stage serial bus. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.serial_status = False\n self.encoder_status = False\n try:\n self.encoder_status = True\n for i in range(3):\n value = self.fd_channel[i].read(3)+b'\\x00' \n # read the 24 bit register (3 bytes) and add a fourth byte \n # to make it an integer.\n signed_value = struct.unpack(\"=I\", value)[0] \n if signed_value < 0 or signed_value > 2**24:\n self.encoder_status = False\n break\n except Exception as e:\n print \"No communication to optical encoders. 
Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.encoder_status = False\n self.comm_status = self.serial_status and self.encoder_status\n return", "def is_waiting_for_message(self):\r\n return self.waiting_for_message", "def can_handle(self, rsm_ctx):\n return False", "def process_incoming_packet(self, data, address):\n try:\n new_message = MessageFactory.create_message(\n packet_data=data,\n origin_address=address,\n destination_node=self.node)\n self._put_new_message_in_queue(new_message)\n self.report()\n if new_message.TYPE_STRING != \"ack\":\n ack_message = MessageFactory.generate_ack_message(new_message)\n self.send_message(ack_message)\n except Exception as e:\n print(e)", "def _handle_message(self, message):\n if not isinstance(message, tuple):\n raise TypeError(\"Expected tuple, got \" + str(type(message)))\n command, arg = message\n with lock:\n if exiting.is_set():\n raise RuntimeError(\"service is exiting, cannot connect\")\n if command == \"register\":\n process = psutil.Process(int(arg))\n with self.cond:\n if process not in self.clients:\n self.clients.add(process)\n start_daemon_thread(\n target=lambda: self._background_wait(process)\n )\n return True\n elif command == \"unregister\":\n process = psutil.Process(int(arg))\n self._notify_exit(process)\n return True\n else:\n raise ValueError(\"Unrecognized command: \" + repr(command))", "def is_ctrl_message(self):\n return self._id < 0", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def isComplete(self):\n return self.bytesToRead == 0", "def handleIncoming(self):\r\n\t\trawQueue = list()\r\n\r\n\t\twhile True:\r\n\t\t\tif not self.activeConnection:\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\trawQueue.append(self.serialPort.read(1).decode('ascii'))\r\n\t\t\texcept serial.serialutil.SerialException as e:\r\n\t\t\t\tcontinue\r\n\t\t\t# print(rawQueue[-1], int.from_bytes(rawQueue[-1], byteorder='big'))\r\n\t\t\t# if len(rawQueue) >= 1000:\r\n\t\t\t# \trawQueue.pop(0)\r\n\t\t\t# print(rawQueue)\r\n\t\t\tif rawQueue[0] != '$': # we pop items until the first one is a $ sign\r\n\t\t\t\t# print('popping the first character')\r\n\t\t\t\trawQueue.pop(0)\r\n\t\t\tif '\\n' in rawQueue: # we assume with the \\n we have a valid message\r\n\t\t\t\t# print('valid message')\r\n\t\t\t\trawQueue.pop(0) # remove the $\r\n\t\t\t\trawPayload = rawQueue[0:rawQueue.index(\"*\")]\r\n\t\t\t\tstringPayload = \"\".join(rawPayload)\r\n\t\t\t\tvalueList = stringPayload.split(\",\")\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\tfor i in range(1, len(valueList)):\r\n\t\t\t\t\tvalueList[i] = int(valueList[i])\r\n\t\t\t\tvalueList[0] = messageTypes[valueList[0]]\r\n\r\n\t\t\t\tself.eventQueue.put(valueList)\r\n\t\t\t\trawQueue.clear()\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\t# we are going to ignore checksums for now\r", "def getincomingmail(self):\n self.socket.send(\"fuglu scanner ready - please pipe your message\\r\\n\")\n try:\n (handle, tempfilename) = tempfile.mkstemp(\n prefix='fuglu', dir=self.config.get('main', 'tempdir'))\n self.tempfilename = tempfilename\n self.tempfile = os.fdopen(handle, 'w+b')\n except Exception as e:\n self.endsession('could not write to tempfile')\n\n while True:\n data = self.socket.recv(1024)\n if len(data) < 1:\n break\n self.tempfile.write(data)\n self.tempfile.close()\n self.logger.debug('Incoming message received')\n return True", "def __processMsg(self, sock, msgData):\n\n pass", "def handle_msg(self, state_id, msg):\n pass", "def 
handle_message(self, message):\n payload = json.loads(message.payload)\n action_type = payload[\"action_type\"]\n parameters = json.loads(payload[\"parameters\"])\n\n command = payload[\"command\"]\n if command == \"SSH-Start\":\n return self.start_ssh_task(parameters)\n elif command == \"SSH-Stop\":\n return self.stop_ssh_task(parameters)\n else:\n print(\"Message Error SSH Action Handler \" + str(message))\n return False", "def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")", "def has_message(self, character):\n messages = get_messages(character)\n messages = [ message[MESSAGE].id for message in messages ]\n if self.message.id in messages:\n return True\n else:\n return False", "def message_already_processed(msg):\n\n is_already_member = redis.sismember(redis_sqs_message_set, msg.message_id)\n if not is_already_member:\n redis.sadd(redis_sqs_message_set, msg.message_id)\n\n return is_already_member", "def process_message(self, message: Message[TPayload]) -> Optional[TResult]:\n pass" ]
[ "0.7115128", "0.6838178", "0.6745119", "0.6687847", "0.6675145", "0.66316074", "0.65846056", "0.65725505", "0.6522481", "0.6514949", "0.65149397", "0.629913", "0.629913", "0.6284352", "0.62244946", "0.61825305", "0.61751574", "0.613391", "0.61156756", "0.6089335", "0.60848534", "0.6079222", "0.6071875", "0.6045911", "0.6040965", "0.60313916", "0.5989665", "0.596668", "0.5963122", "0.59077805", "0.58950955", "0.5888553", "0.58859366", "0.5885281", "0.5875658", "0.5861189", "0.5853556", "0.58512986", "0.5850053", "0.5841774", "0.58386946", "0.58314687", "0.5818165", "0.5814951", "0.58026564", "0.5798704", "0.5787269", "0.57696396", "0.5753937", "0.5745918", "0.5744645", "0.5742489", "0.5741808", "0.5727338", "0.57263166", "0.57249206", "0.570117", "0.56989694", "0.5693051", "0.569062", "0.56884724", "0.568151", "0.5680245", "0.5679167", "0.566512", "0.56564903", "0.56484437", "0.5645185", "0.56384313", "0.56256294", "0.56198937", "0.56175244", "0.5616557", "0.56114626", "0.56113815", "0.56102407", "0.56100416", "0.5590476", "0.55714667", "0.55657274", "0.55637574", "0.55610126", "0.55593187", "0.55537945", "0.55330366", "0.55317724", "0.55312556", "0.55310005", "0.55284715", "0.55134887", "0.5512396", "0.5509197", "0.5507711", "0.5506292", "0.5504573", "0.55038655", "0.5499853", "0.54977345", "0.5497362", "0.54966104", "0.5491263" ]
0.0
-1
Checks whether or not reply to message m2 should be faked. If m1's field is None then m2's field value is not compared at all
def _fake_message_compare(m1, m2):
    m1 = m1.serialize()
    m2 = m2.serialize()
    diff = False
    for i in range(len(m1)):
        if m1[i] is None:
            continue
        if m1[i] != m2[i]:
            diff = True
            break
    return not diff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testNoneAssignment(self):\n class MyMessage(messages.Message):\n\n my_field = messages.StringField(1)\n\n m1 = MyMessage()\n m2 = MyMessage()\n m2.my_field = None\n self.assertEquals(m1, m2)", "def __eq__(self, other):\n if not isinstance(other, SendMmsRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, UpdateInboxOptions):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_04_validate_relay_presence(self):\n try:\n self.record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"'None' value must be either True or False.\" in e.message_dict['relay'])", "def __eq__(self, other):\r\n if not isinstance(other, MessageConsumeResp):\r\n return False\r\n\r\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, UpdateVehicleRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, SMS):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def _matcher(r1: vcr.request.Request, r2: vcr.request.Request) -> None:\n assert r1.uri == r2.uri and r1.body == r2.body and r1.headers == r2.headers", "def __ne__(self, other):\n if not isinstance(other, PutPlaceOfDeliveryRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(a,b): #Todo: this needs major testing of reading, writing\n if not a.assigner.title() == b.assigner.title():\n return False\n if not a.description == b.description:\n return False\n if not a.starttime == b.starttime:\n return False\n if not a.followups == b.followups:\n return False\n if not a.id == b.id:\n return False\n if not a._ticket_id == b._ticket_id:\n return False\n if not a.iscompleted == b.iscompleted:\n return False\n if not a.name == b.name:\n return False\n if not a.priority == b.priority:\n return False\n if not a.whose == b.whose:\n return False\n if not a.submitter_email == b.submitter_email:\n return False\n return True", "def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply", "def compare_fields(field1, field2):\r\n if field1 is None and field2 is None:\r\n return True\r\n\r\n if (field1 is None and field2 is not None) or\\\r\n (field2 is None and field1 is not None):\r\n return False\r\n\r\n if field1 == field2:\r\n return True\r\n\r\n return False", "def __flt_eq_mth(self, other):\n if self.mouth is None:\n return True\n\n return self.mouth == other.mouth", "def __eq__(self, other):\n if not isinstance(other, CreateCloudPhoneServerRequestBody):\n return False\n\n return self.__dict__ == other.__dict__", "def _assert_fields_equal(self, field1, field2):\n self._assert_field_descriptors_equal(\n field1.descriptor, field2.descriptor)\n self.assertEqual(field1.method_name, field2.method_name)\n self.assertEqual(field1.partial_args, field2.partial_args)\n self.assertEqual(field1.partial_kwargs, field2.partial_kwargs)\n if field1.context_args is None:\n self.assertIsNone(field2.context_args)\n else:\n self.assertEqual(\n set(field1.context_args), set(field2.context_args))\n self.assertEqual(field1.attr, field2.attr)", "def __ne__(self, other):\n if not isinstance(other, Prompt):\n return True\n\n return self.to_dict() != 
other.to_dict()", "def compare_fields(field1, field2):\n if field1 is None and field2 is None:\n return True\n\n if (field1 is None and field2 is not None) or\\\n (field2 is None and field1 is not None):\n return False\n\n if field1 == field2:\n return True\n\n return False", "def test_get_message_reply(self):\n message1 = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n message1.created_at = now() - datetime.timedelta(days=1)\n message1.save()\n message2 = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n message2.created_at = now() - datetime.timedelta(hours=2)\n message2.save()\n\n # thread.last_read_at is normally set by the by_user query\n self.thread.last_read_at = now() - datetime.timedelta(hours=3)\n messages = self.thread.messages_for_user(self.user)\n\n # Messages are returned sorted from newest to oldest\n self.assertEqual(messages[0], message2)\n self.assertFalse(messages[0].read)\n self.assertEqual(messages[1], message1)\n self.assertTrue(messages[1].read)", "def __ne__(self, other: 'GatewayChangeRequestGatewayClientGatewayUpdateAttributes') -> bool:\n return not self == other", "def __ne__(self, other):\n if not isinstance(other, OneOfFluidResultControlsFieldCalculations):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, InlineResponse200MessageFondo):\n return False\n\n return self.__dict__ == other.__dict__", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def _check_consistency(message: Message, to: str, sender: str) -> Message:\n if message.has_to:\n enforce(\n message.to == to, \"To specified on message does not match envelope.\"\n )\n else:\n message.to = to\n if message.has_sender:\n enforce(\n message.sender == sender,\n \"Sender specified on message does not match envelope.\",\n )\n else:\n message.sender = sender\n return message", "def __ne__(self, other):\n if not isinstance(other, AggregatedReturnsRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, FinancialPictureRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\r\n if not isinstance(other, ReqMargin):\r\n return True\r\n\r\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, CreateApplicantRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, InlineResponse200):\n return True\n\n return self.to_dict() != other.to_dict()", "def match(self, other_order):\n # Error checking\n # if other_order.type == self.type:\n # return False\n # if other_order.is_bid and other_order.price < self.price:\n # return False\n # if not other_order.is_bid and self.price < other_order.price:\n # return False\n # full size trade (peak_size <= other_order.peak_size)\n if self.peak_size <= other_order.peak_size:\n new_trade_size = self.peak_size\n # update both parties\n self.make_trade(new_trade_size)\n other_order.make_trade(new_trade_size)\n return True\n # partial trade (peak_size > other_order.peak_size)\n else:\n new_trade_size = other_order.peak_size\n self.make_trade(new_trade_size)\n other_order.make_trade(new_trade_size)\n return False", "def diff_log(self, other):\n if 
self._req_set != other._req_set:\n print(\"GC request lists do not match.\")\n return False\n else:\n return True", "def _rec_only_updated(cls, rec):\n return rec.get('uplinked', None) \\\n and not rec.get('queued', None) \\\n and not rec.get('announced', None) \\\n and not rec.get('blocked', None) \\\n and not rec.get('finished', None) \\\n and not rec.get('aborted', None)", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def __ne__(self, other):\n if not isinstance(other, MdHistoryRequestCO):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, TokenizeRequestSchema):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, OneOfReportProperties):\n return True\n\n return self.to_dict() != other.to_dict()", "def allow_relation(self, obj1, obj2, **hints):\n if (\n obj1._meta.label_lower in self.route_encuestas or\n obj2._meta.label_lower in self.route_encuestas\n ):\n return True\n return None", "def __ne__(self, other):\n if not isinstance(other, TokenizeResponseSchema):\n return True\n\n return self.to_dict() != other.to_dict()", "def checkSelfReply(body):\n return 'WHAT IS MY PURPOSE' in body", "def __eq__(self, other):\n result = NotImplemented\n if isinstance(other, Message):\n result = False\n if self is other:\n result = True\n else:\n # does not compare _id\n if (self._message_length == other._message_length and\n self._payload_length == other._payload_length and\n self._headers == other._headers and\n self._payload == other._payload):\n result = True\n\n return result", "def _valid(self):\n return all(map(lambda v: v is not None,\n (self.connection, self.sender, self.receiver, self.subject, self.body)))", "def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value", "def _is_consistent(self) -> bool:\n try:\n enforce(\n isinstance(self.dialogue_reference, tuple),\n \"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.\".format(\n type(self.dialogue_reference)\n ),\n )\n enforce(\n isinstance(self.dialogue_reference[0], str),\n \"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.\".format(\n type(self.dialogue_reference[0])\n ),\n )\n enforce(\n isinstance(self.dialogue_reference[1], str),\n \"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.\".format(\n type(self.dialogue_reference[1])\n ),\n )\n enforce(\n type(self.message_id) is int,\n \"Invalid type for 'message_id'. Expected 'int'. Found '{}'.\".format(\n type(self.message_id)\n ),\n )\n enforce(\n type(self.target) is int,\n \"Invalid type for 'target'. Expected 'int'. 
Found '{}'.\".format(\n type(self.target)\n ),\n )\n\n # Light Protocol Rule 2\n # Check correct performative\n enforce(\n isinstance(self.performative, SigningMessage.Performative),\n \"Invalid 'performative'. Expected either of '{}'. Found '{}'.\".format(\n self.valid_performatives, self.performative\n ),\n )\n\n # Check correct contents\n actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE\n expected_nb_of_contents = 0\n if self.performative == SigningMessage.Performative.SIGN_TRANSACTION:\n expected_nb_of_contents = 2\n enforce(\n isinstance(self.terms, CustomTerms),\n \"Invalid type for content 'terms'. Expected 'Terms'. Found '{}'.\".format(\n type(self.terms)\n ),\n )\n enforce(\n isinstance(self.raw_transaction, CustomRawTransaction),\n \"Invalid type for content 'raw_transaction'. Expected 'RawTransaction'. Found '{}'.\".format(\n type(self.raw_transaction)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGN_MESSAGE:\n expected_nb_of_contents = 2\n enforce(\n isinstance(self.terms, CustomTerms),\n \"Invalid type for content 'terms'. Expected 'Terms'. Found '{}'.\".format(\n type(self.terms)\n ),\n )\n enforce(\n isinstance(self.raw_message, CustomRawMessage),\n \"Invalid type for content 'raw_message'. Expected 'RawMessage'. Found '{}'.\".format(\n type(self.raw_message)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGNED_TRANSACTION:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.signed_transaction, CustomSignedTransaction),\n \"Invalid type for content 'signed_transaction'. Expected 'SignedTransaction'. Found '{}'.\".format(\n type(self.signed_transaction)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGNED_MESSAGE:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.signed_message, CustomSignedMessage),\n \"Invalid type for content 'signed_message'. Expected 'SignedMessage'. Found '{}'.\".format(\n type(self.signed_message)\n ),\n )\n elif self.performative == SigningMessage.Performative.ERROR:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.error_code, CustomErrorCode),\n \"Invalid type for content 'error_code'. Expected 'ErrorCode'. Found '{}'.\".format(\n type(self.error_code)\n ),\n )\n\n # Check correct content count\n enforce(\n expected_nb_of_contents == actual_nb_of_contents,\n \"Incorrect number of contents. Expected {}. Found {}\".format(\n expected_nb_of_contents, actual_nb_of_contents\n ),\n )\n\n # Light Protocol Rule 3\n if self.message_id == 1:\n enforce(\n self.target == 0,\n \"Invalid 'target'. Expected 0 (because 'message_id' is 1). 
Found {}.\".format(\n self.target\n ),\n )\n except (AEAEnforceError, ValueError, KeyError) as e:\n _default_logger.error(str(e))\n return False\n\n return True", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'emissions' or \\\n obj2._meta.app_label == 'emissions':\n return True\n return None", "def testReplyWhenRepliesFromAllNodesAreSame(looper, client1, wallet1):\n request = sendRandomRequest(wallet1, client1)\n looper.run(\n eventually(checkResponseRecvdFromNodes, client1,\n nodeCount, request.reqId,\n retryWait=1, timeout=20))\n checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)", "def has_both(self) -> bool:\n return self.has_required() and self.has_optional()", "def __ne__(self, other):\n if not isinstance(other, SnmpTransportWhereInput):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, ComplianceSummaryRuleResultRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, BusinessInvoiceAnalysisRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def empty(self):\n return (not self.q1.qlist) and (not self.q2.qlist)", "def __ne__(self, other):\n if not isinstance(other, BlogAuthorCloneRequestVNext):\n return True\n\n return self.to_dict() != other.to_dict()", "def try_combine(self, other):\n if self.saddr <= other.saddr and other.eaddr <= self.eaddr:\n self.sync_vars |= other.sync_vars\n return True\n return False", "def __ne__(self, other):\n if not isinstance(other, QuickSearchResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __flt_eq_emo(self, other):\n if self.emote is None:\n return True\n\n return self.emote == other.emote", "def __eq__(self, other):\n if not isinstance(other, TripRequestResponseJourneyFareTicketProperties):\n return False\n\n return self.__dict__ == other.__dict__", "def test_notification_ne(self) -> None:\n self.assertTrue(self.notification1 != self.notification2)\n\n # pylint: disable=unnecessary-dunder-call\n self.assertTrue(self.notification1.__ne__(self.notification2))", "def __eq__(self, other):\n if not isinstance(other, PostDirectMail):\n return False\n\n return self.__dict__ == other.__dict__", "def is_consistent(self, other):\n return self.name != other.name or self.type is other.type", "def __ne__(self, other):\n return not self._field1 == other._field1", "def allow_relation(self, obj1, obj2, **hints):\n return self._route_by_model_type(obj1) == self._route_by_model_type(obj2)", "def interferes(self, other):\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def test_outgoing_message_link(self):\n connection = self.create_connection()\n fields = {'extra-field': 'extra-value'}\n message = IncomingMessage(connection, 'test incoming message',\n fields=fields)\n response = message.respond('response')\n self.assertEqual(message, response.in_reply_to)\n self.assertTrue('extra-field' in response.in_reply_to.fields)", "def __eq__(self, other):\n if not isinstance(other, ListBareMetalServersRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other: 'GatewayChangeRequestGatewayClientGatewayCreate') -> bool:\n return not self == other", "def __ne__(self, other):\n if not isinstance(other, TransactionFeesDTO):\n return True\n\n return self.to_dict() != 
other.to_dict()", "def test_conversation_with_zero_messages(self):\n response = self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_b.id,\n \"text\": \"test message\"\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Message.objects.count(), 1)\n self.assertEqual(Message.objects.get().text, 'test message')", "def __ne__(self, other):\n if not isinstance(other, Campaign):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n try:\n return super().__eq__(other) and self.body == other.body\n except AttributeError:\n return False", "def satisfied(self):\n\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return False\n\n return self.var1.get_value() != self.var2.get_value()", "def __ne__(self, other):\n if not isinstance(other, CtmagentBasicInfoType):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_different(self):\n\n x = self.start_request_tests(service.get_request('POST', {u'tree1_nwk': '((a,b)c);',\n u'tree2_nwk': '(a,(b,c));'}))\n # Insert: whether result is what it should be according to docs\n self.assert_success(x)\n json.dump(x.json(), sys.stdout, indent=2)\n mess = x.json().get(u'message')\n self.assertFalse(x.json()[u'are_same_tree'], mess)", "def __ne__(self, other):\n if not isinstance(other, ServerRateLimiting):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, ComputeProjectVmPasswordReset):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_user_not_in_conversation(self):\n self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_b.id,\n \"text\": \"test message\"\n }, format='json')\n response = self.client.post('/conversation/{}/message'.format(self.conversation.id), {\n \"sender_id\": self.user_a.id,\n \"receiver_id\": self.user_c.id,\n \"text\": \"test message\"\n }, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(Message.objects.count(), 1)", "def __eq__(self, other):\n if not isinstance(other, PresentationRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def is_correct_reply(self, reply: 'NbfCommand') -> bool:\n if not self.expects_reply:\n return False\n\n if self.opcode == OPCODE_WRITE_8:\n return reply.matches(OPCODE_WRITE_8, self.address_int, 0)\n elif self.opcode == OPCODE_READ_8:\n return reply.matches(OPCODE_READ_8, self.address_int, None)\n elif self.opcode == OPCODE_FENCE:\n return reply.matches(OPCODE_FENCE, 0, 0)\n elif self.opcode == OPCODE_FINISH:\n return reply.matches(OPCODE_FINISH, 0, 0)\n else:\n return False", "def __ne__(self, other):\n if not isinstance(other, BatchFuturesOrder):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, CrfItemRpc):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, 
other):\n if not isinstance(other, UpdateArtefactPlacementDTO):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, DeliveryReportDeliveredSecondsByResolution):\n return True\n\n return self.to_dict() != other.to_dict()", "def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):\n # Since view no is always zero in the current setup\n looper.run(eventually(checkSufficientRepliesRecvd,\n client1.inBox,\n sent1.reqId,\n 2,\n retryWait=.5,\n timeout=5))\n originalRequestResponsesLen = nodeCount * 2\n duplicateRequestRepliesLen = nodeCount # for a duplicate request we need to\n client1.nodestack._enqueueIntoAllRemotes(sent1, None)\n\n def chk():\n assertLength([response for response in client1.inBox\n if (response[0].get(f.RESULT.nm) and\n response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or\n (response[0].get(OP_FIELD_NAME) == REQACK and\n response[0].get(f.REQ_ID.nm) == sent1.reqId)],\n originalRequestResponsesLen + duplicateRequestRepliesLen)\n\n looper.run(eventually(\n chk,\n retryWait=1,\n timeout=20))", "def __neq__(self, other): \n return not self == other", "def __eq__(self, other):\n if not isinstance(other, DeviceRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, WhatsApp):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, SubjectMatrixValuesRpc):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, TagArithmeticsRequest):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, Printer):\n return True\n\n return self.to_dict() != other.to_dict()", "def empty(self):\r\n if len(self.s1)==0 and len(self.s2)==0:\r\n return True", "def __eq__(self, other):\n if not isinstance(other, UpdateInboxOptions):\n return False\n\n return self.to_dict() == other.to_dict()", "def assert_is_not(self, first, second, msg=None):\r\n assert first is not second", "def assert_request_equal(obj1, obj2, do_raise=False):\n\n def assert_all_none(*args):\n for arg in args:\n _assert(arg is None, \"\")\n\n # Parent requests will not serialize their children since that's a loop\n def compare_parent(req1, req2):\n _assert_wrapper(\n req1,\n req2,\n expected_type=Request,\n deep_fields={\"children\": assert_all_none},\n do_raise=True,\n )\n\n # Child requests will not serialize their parent since that's also a loop\n # They also don't serialize their children for performance reasons\n def compare_child(req1, req2):\n _assert_wrapper(\n req1,\n req2,\n expected_type=Request,\n deep_fields={\"children\": assert_all_none, \"parent\": assert_all_none},\n do_raise=True,\n )\n\n return _assert_wrapper(\n obj1,\n obj2,\n expected_type=Request,\n deep_fields={\"children\": compare_child, \"parent\": compare_parent},\n do_raise=do_raise,\n )", "def allow_relation(self, obj1, obj2, **hints):\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == self.app_label or \\\n obj2._meta.app_label == self.app_label:\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'test' or \\\n obj2._meta.app_label == 'test':\n return True\n return None", "def __ne__(self, other):\n if not isinstance(other, SharedConditions):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not 
isinstance(other, AttachmentResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def test_match_user_by_from_field_address_without_pref_set(self):\n email_message = _create_email(\n from_field=self.margaret.email\n )\n\n # explicitely set email_routing__by_user to False\n _set_email_routing_pref(self.margaret, \"by_user\", False)\n\n user = get_matching_user(\n email_message,\n by_user=True\n )\n\n # won't match because of the pref\n self.assertIsNone(user)" ]
[ "0.61221015", "0.60199493", "0.5698596", "0.56491196", "0.56256944", "0.55935615", "0.5585704", "0.55634177", "0.55492973", "0.55470234", "0.55111665", "0.5495307", "0.546077", "0.5453699", "0.5440305", "0.5392359", "0.5371638", "0.5361735", "0.5357879", "0.53478", "0.53415495", "0.53387886", "0.5313279", "0.53092784", "0.5294485", "0.5275662", "0.5272938", "0.5271955", "0.52666485", "0.524124", "0.5231793", "0.5219945", "0.52173924", "0.52063054", "0.5190704", "0.5182789", "0.51804435", "0.51801145", "0.5177848", "0.5163635", "0.51580334", "0.51570666", "0.51322126", "0.5126621", "0.51240027", "0.5123868", "0.51235443", "0.51047665", "0.51002985", "0.5099125", "0.50971943", "0.50959206", "0.5092223", "0.50906014", "0.50795054", "0.5074568", "0.5072827", "0.50713074", "0.50694305", "0.5063701", "0.50605875", "0.5060276", "0.5060276", "0.50564486", "0.50535125", "0.50487804", "0.50459564", "0.50428104", "0.5039955", "0.5035683", "0.5021841", "0.50183123", "0.5015493", "0.50121695", "0.5011752", "0.5010083", "0.5009444", "0.5005646", "0.5005107", "0.5004501", "0.4992602", "0.49911723", "0.49777022", "0.497587", "0.49735725", "0.49707264", "0.4968536", "0.4967656", "0.4965615", "0.49655548", "0.4965127", "0.4957326", "0.4956017", "0.49552184", "0.49547443", "0.49519864", "0.49490812", "0.49488276", "0.49472418", "0.4944055" ]
0.7252573
0
Registers information for faking replies
def _register_fake_next_op(self, channel, fake_info): assert isinstance(fake_info, (list, tuple, dict)), "fake_info should be a dict or list of dict or tuple of dict" if isinstance(fake_info, (tuple, list)): for f in fake_info: assert isinstance(f, dict), "fake_info should be a dict or list of dict or tuple of dict" if isinstance(fake_info, dict): fake_info = [copy.deepcopy(fake_info)] else: fake_info = [copy.deepcopy(f) for f in fake_info] for f in fake_info: assert "reply" in f, "fake_info should contain 'reply'" assert isinstance(f["reply"], ProtocolReply), "fake_info's reply should be a ProtocolReply instance" for o in f: assert o in ("reply", "execute", "on_message", "on_channel", "after", "on_success", "on_failure"), \ "Unsupported fake_info options: {}".format(o) if "execute" in f: assert isinstance(f["execute"], bool) or f["execute"] in (0, 1), \ "fake_info option 'execute' should be a bool or 0 or 1" if "on_success" in f: assert isinstance(f["on_success"], bool), \ "fake_info option 'on_success' should be a boolean" if "on_failure" in f: assert isinstance(f["on_failure"], bool), \ "fake_info option 'on_failure' should be a boolean" on_success = f.get("on_success", None) on_failure = f.get("on_failure", None) if on_success is None and on_failure is None: on_success = True on_failure = False if on_success is True or on_success is None and on_failure is False: on_success = True if on_failure is True or on_success is None and on_success is False: on_failure = True if on_success is True: f["on_success"] = True else: f["on_success"] = False if on_failure is True: f["on_failure"] = True else: f["on_failure"] = False if "on_message" in f: assert isinstance(f["on_message"], PlatformMessage), \ "fake_info option 'on_message' should be PlatformMessage" if "on_channel" in f: assert isinstance(f["on_channel"], (str, list, tuple)), \ "fake_info option 'on_channel' should be a string or list/tuple of strings" if isinstance(f["on_channel"], (list, tuple)): for c in f["on_channel"]: assert isinstance(c, str), \ "fake_info option 'on_channel' should be a string or list/tuple of strings" if "after" in f: assert isinstance(f["after"], int), "fake_info option 'after' should be an integer" if "on_channel" not in f: on_channel = channel, elif isinstance(f["on_channel"], (list, tuple)): on_channel = f["on_channel"] else: on_channel = f["on_channel"], for c in on_channel: if c not in self._fake_ops: self._fake_ops[c] = [f] else: self._fake_ops[c].append(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def register_message():\n global mss_cnt\n\n gmess = Graph()\n\n # Construimos el mensaje de registro\n gmess.bind('foaf', FOAF)\n gmess.bind('dso', DSO)\n reg_obj = agn[InfoAgent.name+'-Register']\n gmess.add((reg_obj, RDF.type, DSO.Register))\n gmess.add((reg_obj, DSO.Uri, InfoAgent.uri))\n gmess.add((reg_obj, FOAF.Name, Literal(InfoAgent.name)))\n gmess.add((reg_obj, DSO.Address, Literal(InfoAgent.address)))\n gmess.add((reg_obj, DSO.AgentType, DSO.TransportAgent))\n\n # Lo metemos en un envoltorio FIPA-ACL y lo enviamos\n gr = send_message(\n build_message(gmess, perf= ACL.request,\n sender= InfoAgent.uri,\n receiver= AgentDirectori.uri,\n content= reg_obj,\n msgcnt= mss_cnt),\n AgentDirectori.address)\n mss_cnt += 1\n\n return gr", "def register_message():\n global mss_cnt\n\n gmess = Graph()\n\n # Construimos el mensaje de registro\n gmess.bind('foaf', FOAF)\n gmess.bind('dso', DSO)\n reg_obj = agn[InfoAgent.name+'-Register']\n gmess.add((reg_obj, RDF.type, DSO.Register))\n gmess.add((reg_obj, DSO.Uri, InfoAgent.uri))\n gmess.add((reg_obj, FOAF.Name, Literal(InfoAgent.name)))\n gmess.add((reg_obj, DSO.Address, Literal(InfoAgent.address)))\n gmess.add((reg_obj, DSO.AgentType, DSO.HotelsAgent))\n\n # Lo metemos en un envoltorio FIPA-ACL y lo enviamos\n gr = send_message(\n build_message(gmess, perf= ACL.request,\n sender= InfoAgent.uri,\n receiver= AgentDirectori.uri,\n content= reg_obj,\n msgcnt= mss_cnt),\n AgentDirectori.address)\n mss_cnt += 1\n\n return gr", "def register_msg(self, path, msgtype, msg):", "def massage_addinfo(self) -> str:\n self.message_str= \"{}, {}\\n\".format(self.sent_by, self.time)", "def post_reply(self, comment):\n\t\tpass", "def registr(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"Bio of %s: %s\", user.first_name, update.message.text)\n update.message.reply_text('REGISTRAZIONE DA IMPLEMENTARE, alla prossima.')\n\n return ConversationHandler.END", "def add_info(self, info_message: dict):\n self.info.update(info_message)", "def set_info_message(msg):\n set_message(msg, TYPE_INFO)", "def _register(self):\n self._log(self.botlog, 'Registering as %s' % self.nickname)\n self._send('USER %s B C :%s' % (self.ident, self.realname))\n self._send('NICK %s' % self.nickname)", "def register_reply_code(self, string, integer):\n if not string in self.__reply_codes:\n self.log(2, \"Registered reply code: {reply_code_str} => {reply_code_int}\", {\"reply_code_str\": string, \"reply_code_int\": integer})\n self.__reply_codes[string] = integer", "def massage_addinfo(self) -> str:\n self.message_str = f'{self.time}\\n{self.sent_by}\\n'\n return self.message_str", "def _register(self, comm, handler):", "def reply(f, *args, **kwargs):\n r = f(*args, **kwargs)\n\n if r:\n if isinstance(r, dict):\n r['needs_reply'] = True\n elif isinstance(r, basestring):\n r = dict(answer=r, needs_reply=True)\n\n return r", "def massage_addinfo(self) -> str:\n super().massage_addinfo()\n self.message_str += f'{self.location}\\n'", "def _add_info(self, msg, **kwargs):\n\n args, extensions = self._filter_args(msg, **kwargs)\n for key, val in args.items():\n setattr(msg, key, val)\n\n if extensions:\n if msg.extension_elements:\n msg.extension_elements.extend(extensions)\n else:\n msg.extension_elements = extensions", "def on_namreply(self, raw_msg, nicknames, 
**kwargs):\n pass", "def inform(self, information):\n self.know = merge(self.know, information)", "def reply(cls, user, context, message, reply_message):\r\n pass", "def say_to_user(self, user, reply):\n self.line_queue.put(user + \": \" + reply)", "def reply(self, message):\n self.logger.info(\"message came as {}\".format(message))\n message = message.lower()\n if message in [\"start over\", \"get started\", \"hello\", \"hi\", \"say hello\"]:\n self.params = \"\"\n self.readyseteatparams = \"\"\n # self.api.send_text_facebook(\n # self.user_id,\n # 'What type of recipe would you like to make? You can type \"start over\" at any time'\n # )\n # return self.api.send_facebook(self.user_id, self.config.QUESTION_MAIN)\n self.send_welcome_messages()\n return self.api.send_facebook(self.user_id, self.config.QUICK_REPLY_MAIN)\n if message in [\"more\", \"show more\"] and self.data:\n self.index += 5\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n return self.api.send_facebook(self.user_id, m_data)\n if message == \"ask-tomorrow-payload\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"ask-week-payload\":\n self.usersModule.makeNotificationWeekly(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"activate notifications\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"Notification has been activated.\")\n if message in [\"do-nothing\", \"payload_unsubscribe\"]:\n if message == \"payload_unsubscribe\":\n self.usersModule.deactivateNotification(self.user_id)\n return self.api.send_text_facebook(\n self.user_id,\n 'Notification has been deactivated. 
You can type \"start over\" anytime.')\n else:\n return self.api.send_text_facebook(\n self.user_id,\n 'You can type \"start over\" when you are looking for new recipes.')\n\n try:\n title, choice = message.split(\"_\")\n except:\n title = None\n choice = message\n\n if title == \"category\":\n self.params = \"\"\n self._type = choice\n if choice == \"dinner\":\n self.params += \"&category=89\"\n self.readyseteatparams += \"&category=89\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient:\")\n # return self.api.send_facebook(self.user_id, self.config.DINNER_INGREDIENTS)\n return self.api.send_facebook(self.user_id, self.config.DINNER_GUICK_REPLY)\n elif choice == \"dessert\":\n self.params += \"&category=88\"\n self.readyseteatparams += \"&category=88\"\n # self.api.send_text_facebook(self.user_id, \"What kind of dessert would you like to make?\")\n # return self.api.send_facebook(self.user_id, self.config.DESSERTS)\n return self.api.send_facebook(self.user_id, self.config.DESSERTS_QUICK_REPLY)\n elif choice == \"breakfast\":\n self.params += \"&category=87\"\n self.readyseteatparams += \"&category=87\"\n # self.api.send_text_facebook(self.user_id, \"What kind of breakfast do you want?\")\n # return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUICK_REPLY)\n elif choice == \"appetizer\":\n self.params += \"&category=85\"\n self.readyseteatparams += \"&category=85\"\n # self.api.send_text_facebook(self.user_id, \"What kind of appetizer or snack sounds good?\")\n # return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUICK_REPLY)\n elif choice == \"side dish\":\n self.params += \"&category=95\"\n self.readyseteatparams += \"&category=95\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient\")\n # return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUICK_REPLY)\n else:\n return self.api.send_text_facebook(self.user_id,\n \"I don't know answer that belongs to {} yet\".format(message))\n\n if title == \"main-ingredient\":\n self.mainIngredient = choice\n if choice == \"chicken\":\n self.params += \"&mainingredient=76\"\n self.readyseteatparams += \"&mainingredient=76\"\n elif choice == \"beef\":\n self.params += \"&mainingredient=70\"\n self.readyseteatparams += \"&mainingredient=70\"\n elif choice == \"pork\":\n self.params += \"&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=249\"\n elif choice == \"seafood\":\n self.params += \"&mainingredient=73\"\n self.readyseteatparams += \"&mainingredient=73\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"vegetarian\":\n self.params += \"&lifestyle=299\"\n self.readyseteatparams += \"&lifestyle=299\"\n return self.api.send_facebook(self.user_id, self.config.TIME_QUICK_REPLY)\n if title == \"bre-time\":\n self.breakfastTime = choice\n if choice == \"15\":\n self.params += \"&totaltime=15\"\n self.readyseteatparams += \"&totaltime=15\"\n elif choice == \"30\":\n self.params += \"&totaltime=30\"\n self.readyseteatparams += \"&totaltime=30\"\n elif choice == \"45\":\n pass\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = 
self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n if title == \"time\":\n self.time = choice\n self.params += \"&totaltime={}\".format(choice)\n self.readyseteatparams += \"&totaltime={}\".format(choice)\n # self.api.send_text_facebook(self.user_id, \"What sounds Good?\")\n # return self.api.send_facebook(self.user_id, self.config.REGION_DINNER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.REGION_QUICK_REPLY)\n\n if title == \"region\":\n self.region = choice\n if choice == \"asian\":\n self.params += \"&cuisine=44\"\n self.readyseteatparams += \"&cuisine=44\"\n elif choice == \"italian\":\n self.params += \"&cuisine=46\"\n self.readyseteatparams += \"&cuisine=46\"\n elif choice == \"mediterranean\":\n self.params += \"&cuisine=367\"\n self.readyseteatparams += \"&cuisine=367\"\n elif choice == \"mexican\":\n self.params += \"&cuisine=45\"\n self.readyseteatparams += \"&cuisine=45\"\n elif choice == \"american\":\n self.params += \"&suppresstraits=44,35,355,46,367,45,356,261\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"dessert\":\n self.dessert = choice\n if choice == \"cookies\":\n self.params += \"&trait=48,10,20,110&suppresstraits=22,24&keywords=cookies\"\n self.readyseteatparams += \"&trait=48,10,20,110&keywords=cookies\"\n elif choice == \"cakes\":\n self.params += \"&suppresstraits=24&keywords=cake\"\n self.readyseteatparams += \"&keywords=cake\"\n elif choice == \"pies\":\n self.params = \"sortby=season,rating&order=desc,desc&negativeingredientkeyword=pieces&keywords=pie&suppresstraits=24&category=88\"\n self.readyseteatparams = \"&negativeingredientkeyword=pieces&keywords=pie&category=88\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n elif choice == \"seasonal\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=88&season=330\"\n self.readyseteatparams = \"&category=88&season=330\"\n elif choice == \"quick\":\n self.params = \"&totaltime=30\"\n self.readyseteatparams = \"&totaltime=30\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"breakfast\":\n self.breakfastIngredient = choice\n if choice == \"eggs\":\n self.params += \"&mainingredient=72\"\n self.readyseteatparams += 
\"&mainingredient=72\"\n self.params += \"&trait=9\"\n self.readyseteatparams += \"&trait=9\"\n elif choice == \"casserole\":\n self.params += \"&keywords=casserole\"\n self.readyseteatparams += \"&keywords=casserole\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260&goodforyou=258\"\n self.readyseteatparams += \"&goodforyou=260&goodforyou=258\"\n elif choice == \"sweet\":\n self.params += \"&trait=22\"\n self.readyseteatparams += \"&trait=22\"\n # will add something sweet\n pass\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_TIME_QUICK_REPLY)\n\n if title == \"appetizer\":\n self.appetizerIng = choice\n if choice == \"cheesy\" or choice == \"meaty\":\n if choice == \"cheesy\":\n self.params += \"&keywords=cheese\"\n self.readyseteatparams += \"&keywords=cheese\"\n elif choice == \"meaty\":\n self.params += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n elif choice == \"veggies\" or choice == \"healthier\":\n if choice == \"veggies\":\n self.params += \"&mainingredient=77&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=77&mainingredient=310\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=260\"\n return self.api.send_facebook(self.user_id, self.config.HOT_OR_COLD_QUICK_REPLY)\n\n if title == \"hot-cold\":\n self.appetizerType = choice\n if choice == \"hot\":\n self.params += \"&suppresstraits=252\"\n elif choice == \"cold\":\n self.params += \"&cookingmethod=252\"\n self.readyseteatparams += \"&cookingmethod=252\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"side-dish\":\n self.sideDish = choice\n if choice == \"potato\":\n self.params += \"&mainingredient=298\"\n self.readyseteatparams += \"&mainingredient=298\"\n elif choice == \"vegetable\":\n self.params += \"&mainingredient=77\"\n self.readyseteatparams += \"&mainingredient=77\"\n elif choice == \"rice\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=75\"\n self.readyseteatparams += \"&mainingredient=75\"\n elif choice == \"salad\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=95&mainingredient=77\"\n self.readyseteatparams = \"&category=95&mainingredient=77&trait=92\"\n elif choice == \"beans\":\n self.params += \"&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=310\"\n\n recipes = self.api.getRecipes(self.params)\n if not 
recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n isParamInMessage = self.fetch_parameters(message)\n if isParamInMessage:\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n return self.api.send_text_facebook(self.user_id, \"You can write ‘start over’ to go to the first step\")", "def mechanism(self):", "def received_information(update: Update, context: CallbackContext) -> int:\r\n user_data = context.user_data\r\n text = update.message.text\r\n category = user_data['choice']\r\n user_data[category] = text\r\n del user_data['choice']\r\n\r\n update.message.reply_text(\r\n \"Genial, tu pedido está avanzando de esta manera:\"\r\n f\"{facts_to_str(user_data)}Puedes agregar algún comentario o cambio en tu orden en Comentarios...\",\r\n reply_markup=markup,\r\n )\r\n\r\n return CHOOSING", "def _register(self, noun):\n self.noun = noun", "def on_accept(self, update, _context):\n self.updater.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Alege timpul\",\n reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard_first_responses()),\n )", "def received_information(update: Update, context: CallbackContext) -> int:\n text = update.message.text\n for a in user_d:\n category = user_d[a]\n if category == 'Public_Key' and len(text) == 58:\n assert len(text) == 58, update.message.reply_text(\"The address is invalid address\")\n user_d[category] = text\n elif category == 'Quantity' and type(int(text) == int):\n user_d[category] = int(text)\n elif category == 'Secret_Key' and len(text) > 58:\n user_d[category] = text\n else:\n user_d[category] = text\n user_data = context.user_data\n user_data[category] = user_d[category]\n\n update.message.reply_text(\n \"I got this from you:\\n\"\n f\"{facts_to_str(user_d)}\",\n reply_markup=markup_r,\n )\n user_d.clear()\n\n return CHOOSING", "def sendMessage(self, Message_, Content_):\r\n self.messagesToSend[Message_] = Content_", "def contact(update: Update) -> None:\n update.message.text(\"@New GEN\")", "def _post_question(self, question):\n self.messages_sent.append(question)\n return input(question)", "async def append(self, reply: Reply) \\\n -> None:\n result = reply.result\n identifier = result.get(f.IDENTIFIER.nm)\n txnId = result.get(TXN_ID)\n logger.debug(\"Reply being sent {}\".format(reply))\n if self._isNewTxn(identifier, reply, txnId):\n self.addToProcessedTxns(identifier, txnId, reply)\n if identifier not in self.responses:\n self.responses[identifier] = asyncio.Queue()\n await self.responses[identifier].put(reply)", "def agentbehavior1():\n gr = register_message()\n\n pass", "def message(self, function_address, new_name):\n pass", "def on_data(self, data):\r\n if 'in_reply_to_status_id' in data:\r\n self.keep_or_update_tgid()\r\n 
self.insert_data(data)", "def info(self, msg, *args, **kwargs):\n pass", "def reply(cls, user, context, message, reply_message):\n pass", "def addInfo(self, name, information):\r\n gamethread.delayed(0, gamethread.delayed, (0, self.setSkillInfo, (name, information))) # delay by 2 ticks to allow skills to register\r\n header = \"\\n%s\\n%s\\n\\n\" % ('*' * 50, name.center(50))\r\n footer = \"\\n%s\" % (\"*\" * 50)\r\n information = information.strip() # strip whitespace at begggining and end of lines\r\n information = (header + information + footer).replace('\\n', '\\n// ')\r\n self.text(information, False)", "def notify(plaintext_message, signature):", "def receiverMapping():", "def note():", "def ask_msg(self, context):\n msg = self._get_base_message(self.ASK_QUESTION)\n self._add_thread(msg)\n self._add_relationship(msg, self.for_relationship)\n msg['text'] = self.question\n msg['detail'] = self.descr\n msg['valid_responses'] = self.valid_responses or []\n msg['signature_required'] = self.signature_required\n return msg", "def step_impl_the_msg_to_is_set_to_respondent(context):\n step_impl_the_msg_to_is_set_to(context, context.bdd_helper.respondent_id)", "def sms_reply():\n # Start our TwiML response\n # if body.lower()==\"good\":\n message=\"Hi I'm IRIS, an Immediately Responsive Intelligent System\\nHow are you feeling today?\"\n user=request.form['Body']\n\n # message=\"Hi \"+ name+ \"\"\n # user=request.form['Body']\n\n if user==\"good\":\n message=\"Glad to hear it! I hope you continue to feel this way! Celebrate this feeling and hold onto what happened ot make you feel this way so that you can repeat it in the future!\"\n\n if user==\"sad\":\n message=\"I’m sorry to hear that. Here are some things I do to make me feel better: take a walk outside, listen to uplifting music, call or message a loved one, or watch or read something positive to take my mind off of what I’m feeling.\"\n\n if user==\"nervous\":\n message=\"It’s going to be ok! This feeling will not last forever.\"\n if user==\"lonely\":\n message=\"I’m here for you, and know that you are loved, supported, and important. The world would not be the same without you! For a loving quote respond\"\n\n if user==\"angry\":\n message=\"“Let me help you turn your anger into something positive. Here are some ways to burn off energy productively: take a long walk, remove yourself from the situation, paint of draw, listen to loud music, or take a break from what you are doing.\"\n\n if user==\"tired\":\n message=\"I understand what you are feeling well. 
I recommend taking a break to do an activity you enjoy, taking a nap, getting a coffee, doing 20 jumping jacks, listening to a pump-up playlist, or standing up to stretch for a bit.\"\n\n if user==\"average\":\n message=\"There are many things to look forward to!\"\n resp = MessagingResponse()\n\t # Add a message\n \n resp.message(message)\n\t # Add a picture message\n\t #msg.media(\"https://farm8.staticflickr.com/7090/6941316406_80b4d6d50e_z_d.jpg\")\n\n return str(resp)", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def __init__(self, fritz_box, call_forwarding_dict):\n self.fritz_box = fritz_box\n self._name = \"callforwarding_\" + call_forwarding_dict['uid']\n self.uid = call_forwarding_dict['uid']\n self.from_number = call_forwarding_dict['from_number']\n self.to_number = call_forwarding_dict['to_number']\n self.connection_type = call_forwarding_dict['connection_type']\n self.enabled = call_forwarding_dict['enabled']", "def process(self):\n # hello_message = HelloMessage(*self.message.value)\n # TODO: assert realm is in allowed list\n welcome_message = WelcomeMessage()\n self.answer_message = welcome_message", "async def register(ctx, *args):\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. :blue_heart:\")", "def register_manager(self, update, context):\r\n new_manager_chat_id = update['message']['chat']['id']\r\n new_manager_name = update['message']['chat']['first_name']\r\n\r\n with open('managers.json') as obj:\r\n managers = json.load(obj)\r\n\r\n managers[new_manager_name] = new_manager_chat_id\r\n\r\n with open('managers.json', 'w') as obj:\r\n json.dump(managers, obj)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'{new_manager_name} - {new_manager_chat_id}')", "def __setattr__(self, name, value):\n super(Message, self).__setattr__(name, value)\n if name not in ('bcc', '_dirty', '_processed'): \n self.__dict__['_dirty'] = True", "def _add_receipt(self, msg, identity):\n self._receipt_identities[msg.get_token()] = identity\n self._receipts[msg.get_token()] = msg\n if msg.get_label():\n self._receipt_labels[msg.get_label()] = msg", "def register():\n\n print(\"Request: \", request)\n print(\"foo: \", request.app.ep_mapping)\n print(json.load(request.body))\n endpoint_details = json.load(request.body)\n print(endpoint_details)\n\n # Here we want to start an executor client.\n # Make sure to not put anything into the client, until after an interchange has\n # connected to avoid clogging up the pipe. 
Submits will block if the client has\n # no endpoint connected.\n endpoint_id = str(uuid.uuid4())\n fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)\n connection_info = fw.connection_info\n ret_package = {'endpoint_id': endpoint_id}\n ret_package.update(connection_info)\n print(\"Ret_package : \", ret_package)\n\n print(\"Ep_id: \", endpoint_id)\n request.app.ep_mapping[endpoint_id] = ret_package\n return ret_package", "def define(update, context):\n word = update.message.text\n output = make_output(word)\n if output:\n response_message = output\n else:\n response_message = 'Sorry, I was unable to complete that request.'\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=response_message)", "def responder():\n pass", "def on_bot_start(update, context):\n user = update.effective_user\n chat_id = update.effective_chat.id\n log.info(\n \"ADD %s, %s, %s, %s\", user.username, user.full_name, chat_id, user.language_code,\n )\n\n context.bot.send_message(\n chat_id=chat_id,\n text=c.MSG_PHONE_QUERY,\n reply_markup=ReplyKeyboardMarkup([[k.contact_keyboard]], one_time_keyboard=True),\n )\n\n # set some context data about this user, so we can rely on this later\n context.user_data[\"state\"] = c.State.EXPECTING_PHONE_NUMBER", "def after_send(self):", "def information(self, bot, update):\n update.message.reply_markdown(\"Nun werden einige Informationen zu Alfred angezeigt.\",\n reply_markup=self.option_markup)", "def reply_to(self, reply_to):\n\n self._reply_to = reply_to", "def process_med_info_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def msg_register(channel, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(REGISTER, channel, \"\", version, order)", "def cmd_here(command: IncomingCommand, replies: Replies) -> None:\n if command.payload:\n address = command.payload\n else:\n address = \"Berlin, Germany\"\n # print(address)\n replies.add(**get_maptile(address))", "def _receive(self, what, address, **kwargs):\n\n print('_receive: please override me.')", "def process_refill_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tdef process_response(return_message_type):\n\t\t\tfor feedback in message.feedbacks.all():\n\t\t\t\tfeedback.note = Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()]\n\t\t\t\tfeedback.save()\n\t\t\ttemplate = 'messages/refill_questionnaire_responses/' + \\\n\t\t\t Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()] + \\\n\t\t\t '.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=return_message_type, content=content, previous_message=message)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\n\t\t# Switch on type of response\n\t\t# a - Haven't gotten the chance\n\t\tif response.lower() == 'a':\n\t\t\t# Schedule a medication reminder for later\n\t\t\tone_hour = datetime.datetime.now() + datetime.timedelta(hours=1)\n\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# b - Too expensive\n\t\telif response.lower() == 'b':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone needs to refill\n\t\t\t# Send response\n\t\t\treturn 
process_response(Message.STATIC_ONE_OFF)\n\n\t\t# c - Concerned about side effects\n\t\telif response.lower() == 'c':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone has side effects\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# d - Other\n\t\telif response.lower() == 'd':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.OPEN_ENDED_QUESTION)\n\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def __init__(self):\n self._msg_dict = {}", "def add_note():\n pass", "def send_reply(self, username, msg_type, content, target, server):\n self.replyer.queue.put(\n message_parsing.Message(pseudo=username, msg_type=msg_type, content=content, target=target, server=server))", "def myself(self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n self.message_queue.append('_%s %s_' % (self.users[user], args))\n self.log.info( '%s says %s in third person.' % (user, args))", "def responder(update, context):\n minuscula=update.message.text\n minuscula=(minuscula.lower())\n minuscula=''.join(minuscula.split())\n ciudad=datos_ciudad(minuscula)\n usuario = update.message.from_user\n if (ciudad==\"mapa\"):\n\t obtener_datos(ciudad)\n\t context.bot.send_photo(chat_id=update.effective_chat.id, photo=open('mapa_temporal.png','rb'))\n else:\n\t update.message.reply_text(obtener_datos(ciudad), parse_mode=ParseMode.HTML)\n ahora = datetime.datetime.now()\n print(str(ahora.strftime(\"%Y-%m-%d %H:%M:%S\"))+' Se respondio con Exito, Comando: '+ciudad+\" Usuario:\"+usuario.first_name+\" \"+usuario.last_name)", "def register_message(self, object, timestamp, data):\n self.perf_metrics[\"events_out\"] += 1\n self.messages += [{\n \"ts\": timestamp,\n \"object\": object,\n \"data\": data\n }]", "def receive_notification(self, *args, **kwargs):\n\t\tprint(f\"{self.__location} is now hearing \\\"{args[0]}\\\" on {args[1]}\")", "def hook_request_assistance(self, data):\n request_id = data[\"request_id\"]\n log.info(\"NEW request for assistance %s\", request_id)\n volunteers_to_contact = data[\"volunteers\"]\n\n needs = \"\"\n for item in data[\"needs\"]:\n needs += f\"- {item}\\n\"\n\n assistance_request = c.MSG_REQUEST_ANNOUNCEMENT % (data[\"address\"], needs)\n\n for chat_id in volunteers_to_contact:\n if chat_id not in self.updater.persistence.user_data:\n log.debug(\"User %s hasn't added the updater to their contacts, skipping.\", chat_id)\n continue\n\n current_state = self.updater.persistence.user_data[chat_id].get(\"state\", None)\n\n if current_state in [c.State.REQUEST_IN_PROGRESS, c.State.REQUEST_ASSIGNED]:\n log.debug(\"Vol%s is already working on a request, skippint\")\n continue\n\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=assistance_request,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=ReplyKeyboardMarkup(k.initial_responses, one_time_keyboard=True),\n )\n\n # update this user's state and keep the request_id as well, so we can use it later\n updated_state = {\"state\": c.State.REQUEST_SENT, \"reviewed_request\": request_id}\n 
self.updater.dispatcher.user_data[chat_id].update(updated_state)\n\n self.updater.dispatcher.bot_data.update({request_id: data})\n self.updater.dispatcher.update_persistence()", "def set_info(self, msg):\n logger.debug(\"Setting info: %s\", msg)\n self.vars[\"info\"].set(msg)", "def add_header(self, name, value):\r\n self.__reply_header_list.append((name, value))", "def sms_ahoy_reply():\n # Start our response\n resp = MessagingResponse()\n if request.method == 'POST':\n msg = request.form['Body']\n joke = re.search(r'(.*)joke(.*)', msg, re.I)\n greet = re.search(r'(.*)[hi|hey|hello](.*)', msg, re.I)\n quote = re.search(r'(.*)quote(.*)', msg, re.I)\n # joke = re.search(r'(.*)joke(.*)', msg, re.I)\n\n if joke: resp.message(\"I wanted to look for my watch but I couldn't find the time!\")\n elif quote: resp.message(\"A great player is the one who makes the game look easy!\")\n elif greet: resp.message(\"Greetings! I am your assistant!\")\n\n # Add a message\n else: resp.message(\"Ahoy! You said, '\" + msg + \"'\")\n print(request.form)\n\n else: resp.message(\"Greetings! I am your assistant!\") \n\n return str(resp)", "def on_welcome(self, raw_msg, server, port, nickname, **kwargs):", "def arp_announce(self):\n pass", "def setup(self):\r\n _ = self.capa_system.i18n.ugettext\r\n submitted_msg = _(\"Your answer has been submitted. As soon as your submission is\"\r\n \" graded, this message will be replaced with the grader's feedback.\")\r\n self.submitted_msg = submitted_msg\r\n\r\n self.setup_code_response_rendering()", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def ndemeye(self, message):\n\n try: activate(message.contact.language)\n except: activate('rw')\n\n try:\n message.reporter = Reporter.objects.filter(national_id = message.connection.contact.name )[0]\n except Exception, e:\n try: message.supervisor = Supervisor.objects.filter(email = message.connection.contact.name )[0]\n except Exception,e:\n message.respond(_(\"You need to be registered first\"))\n return True\n\n try:\n cnf = RegistrationConfirmation.objects.get(reporter = message.reporter)\n cnf.received = datetime.now()\n cnf.responded = True\n cnf.answer = True\n cnf.save()\n except Exception, e:\n print e\n if message.supervisor:\n message.respond(\"Muraho murakomeye! Ohereza ijambo 'WHO' urebeko wanditse neza, kandi wibutse abajyanamako bagomba kohereza ubutumwa kuri %s. Murakoze\" % settings.SHORTCODE) \n else: message.respond(_(\"You need to be registered first\"))\n return True \t\t\t \n\n message.respond(\"Muraho murakomeye! Mwatangira kohereza ubutumwa ku buzima bw'umubyeyi n'umwana kuri Rapidsms numero %s.\\\n Ohereza ijambo 'WHO' urebeko wanditse neza. 
Murakoze\" % settings.SHORTCODE)\n\n return True", "def add_information_about_person(self, session_info):\n\n session_info = dict(session_info)\n name_id = session_info[\"name_id\"]\n issuer = session_info.pop(\"issuer\")\n self.cache.set(name_id, issuer, session_info, session_info[\"not_on_or_after\"])\n return name_id", "def message_recording(client):\n client.register_and_login('foo', 'default')\n client.add_message('test message 1')\n client.add_message('<test message 2>')\n rv = client.get('/')\n assert 'test message 1' in rv.data\n assert '&lt;test message 2&gt;' in rv.data", "def write(self, notification):", "async def on_speaking(self, speaking, uid):\n pass", "def __bot_info(self):\n log.debug(\"Displaying __bot_info\")\n self.bot.send_message(self.chat.id, self.loc.get(\"bot_info\"))", "def tag(self, sent):\n # WORK HERE!!", "def __whatsmyid(self, update, context):\n user = self.User(update)\n output = f\"your ID is: {user.id}\"\n user.send_message(output)\n self.data_base.log(user, update.message.text, \"*\" * len(str(user.id)))", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def start(_bot, update):\n start_message = 'Hi\\nuse /add for add retro reminder'\n update.message.reply_text(start_message)", "def replypriv(self, m):\n self.reply(m)", "def func(self):\n if not self.raw:\n self.msg(\"Say what?\")\n return\n options = {\"is_pose\": True}\n speech = self.raw.lstrip(\" \")\n # calling the speech hook on the location\n speech = self.caller.location.at_say(speech)\n # Feedback for the object doing the talking.\n langstring = \"\"\n current = self.caller.languages.current_language\n if current and current.lower() != \"arvani\":\n langstring = \" in %s\" % current.capitalize()\n options.update({\"language\": current, \"msg_content\": speech})\n self.msg(\n 'You say%s, \"%s{n\"' % (langstring, speech),\n from_obj=self.caller,\n options=options,\n )\n # Build the string to emit to neighbors.\n pre_name_emit_string = ' says%s, \"%s{n\"' % (langstring, speech)\n self.caller.location.msg_action(\n self.caller, pre_name_emit_string, exclude=[self.caller], options=options\n )\n self.caller.posecount += 1", "def __init__(self):\n self.subscribers = {}\n self.followers = {}\n self.nackables = {}\n self.threads = []", "def update_servicech(self, conf, phone_num, body):\n\t\tpass", "def addToProcessedTxns(self,\n identifier: str,\n txnId: str,\n reply: Reply) -> None:\n self.transactions[txnId] = reply\n if identifier not in self.processedRequests:\n self.processedRequests[identifier] = {}\n self.processedRequests[identifier][reply.reqId] = txnId", "def register_message():\n\n logger.info('Nos registramos')\n\n gr = registerAgent(VendedorAgent, DirectoryAgent, VendedorAgent.uri, getMessageCount())\n return gr", "def update(self, msg):\n pass", "def notify(guid, message):", "def forwardPMAnswer(r, answer_msg):\n message_id = getIdFromSubject(answer_msg.subject)\n\n if message_id:\n old_message = r.inbox.message(message_id)\n\n if old_message:\n log.debug(\"forwarded answer to message id: %s\", old_message.id)\n old_message.reply(answer_msg.body)\n answer_msg.reply(\"answer forwarded\")", "def get_expected_replies(self):\n # () -> ([{\"intent-name\": str}])\n return [cfg.INFORM_INTENT_PREFIX+self.slot_description[\"name\"]]" ]
[ "0.576329", "0.576329", "0.5649313", "0.56348217", "0.55767995", "0.5420812", "0.5407427", "0.5336492", "0.5320446", "0.53109264", "0.5305628", "0.5286324", "0.5285838", "0.5274294", "0.52733195", "0.52711815", "0.5264393", "0.5212218", "0.518291", "0.51799375", "0.50915813", "0.50861406", "0.5043947", "0.5043204", "0.50305367", "0.49853575", "0.49792004", "0.4979036", "0.4970835", "0.4969722", "0.49611574", "0.49530035", "0.495031", "0.49454606", "0.49404967", "0.4936821", "0.49352175", "0.49269155", "0.4920653", "0.4915848", "0.49055085", "0.49042246", "0.4901695", "0.48991895", "0.48870227", "0.4882124", "0.48809662", "0.48799875", "0.48795584", "0.4865743", "0.48576906", "0.48548943", "0.48467696", "0.48370555", "0.48357883", "0.4832913", "0.48319635", "0.48194933", "0.48179838", "0.48169193", "0.48133573", "0.48096058", "0.48087698", "0.48075303", "0.47993577", "0.47941342", "0.4791676", "0.47828242", "0.47815803", "0.47812235", "0.47731552", "0.47558555", "0.47422433", "0.47369367", "0.47282863", "0.47280297", "0.47255054", "0.4724323", "0.4724323", "0.47201115", "0.47190914", "0.47173154", "0.47083107", "0.4705623", "0.47037226", "0.4700958", "0.470083", "0.469729", "0.469729", "0.46912965", "0.46906868", "0.46906707", "0.4680535", "0.46782368", "0.46763447", "0.46748713", "0.467476", "0.46733552", "0.4671409", "0.46711954" ]
0.47367492
74
Implements method "__testing__" that would be supported by all protocols
def _general_testing(self, context, kind, *args, **kwargs): if kind == "fake_next_op": self._register_fake_next_op(context.channel, *args, **kwargs) self._reply(context, proto_success({}, None), None) return True self._reply(context, proto_failure({"Unsupported testing function '{}'".format(kind)}), None) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_should_implement(self):\n pass", "def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None", "def test_protocols(container, protocol):\n assert isinstance(container, protocol)", "def test(self):\n raise NotImplementedError", "def test_differentProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n protocols = []\n\n class FakeProtocol(object):\n def __init__(self):\n self.transport = StubPort()\n\n def query(self, address, query, timeout=10, id=None):\n protocols.append(self)\n return defer.succeed(dns.Message())\n\n resolver._connectedProtocol = FakeProtocol\n resolver.query(dns.Query('foo.example.com'))\n resolver.query(dns.Query('bar.example.com'))\n self.assertEqual(len(set(protocols)), 2)", "def test_stub(self):\n pass", "def setup_protocol(self):\n self.protocol = pysubunit.TestProtocolServer(self.client)\n self.protocol.lineReceived(compat._b(\"test mcdonalds farm\\n\"))\n self.test = self.client._events[-1][-1]", "def setup_protocol(self):\n self.protocol = pysubunit.TestProtocolServer(self.client)\n self.protocol.lineReceived(compat._b(\"test mcdonalds farm\\n\"))\n self.test = self.client._events[-1][-1]", "def test_required_methods(self):", "def test(self):\n\t\treturn describeInterface(self)", "def test_for_client():", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test(self) -> Any:\n pass", "def test_websocket_mechanics():\n transport = StringTransportWithDisconnection()\n service = hey_joe.WebSocketService(\"127.0.0.1\", 9000)\n protocol = service.buildProtocol(service._hey_joe_addr)\n protocol.transport = transport\n transport.protocol = protocol\n protocol.connectionMade()\n data_to_send = b'GET / HTTP/1.1\\r\\nHost: somewhere_in_the_world:9000\\r\\nConnection: keep-alive, Upgrade\\r\\nUpgrade: websocket\\r\\nSec-WebSocket-Version: 13\\r\\nSec-WebSocket-Key: F76ObkF/aCKX8WkmAgx2OQ==\\r\\n\\r\\n'\n protocol.dataReceived(data_to_send)\n assert transport.value().startswith(b'HTTP/1.1 101 Switching Protocols\\r\\nServer: hendrix')", "def test(self):\n pass", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def testBinaryProtocolEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolFactory())", "def test_interface(self):\n reactor = self.buildReactor()\n connector = self.connect(reactor, ClientFactory())\n self.assertTrue(verifyObject(IConnector, connector))", "def test_subsystems(self):\n pass", "def testApi(self):", "def setUp(self):\n super(TestTestProtocolServerAddSkip, self).setUp()\n self.client = doubles.ExtendedTestResult()\n self.protocol = pysubunit.TestProtocolServer(self.client)\n self.protocol.lineReceived(compat._b(\"test mcdonalds farm\\n\"))\n self.test = self.client._events[-1][-1]", "def test1(self):\n\t\treturn describeInterface(self)", "def protocol(self):\n ...", "def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}", "def test_verify_connection_to_a_device():", "def forward_test(self, *args, **kwargs):\n pass", "def tests():", "def testable(self):\n\t\treturn True", "def test_let(self):", "def test_protocols_updated(self):\n assert self.agent_config.protocols == {self.new_protocol_id}", "def 
test_protocols_updated(self):\n assert self.skill_config.protocols == {self.new_protocol_id}", "def __init__(self, test_proto):\n super().__init__(\"check_error_protocol_exists\")\n self.test_proto = test_proto", "def unitary_test():", "def testinternfunc(self):\n\t\treturn describeInterface(self)", "def setUp(self):\n self.transport = StringTransport()\n self.client = self.clientProtocol()\n self.client.makeConnection(self.transport)\n self.client.dataReceived(b'* PREAUTH Hello unittest\\r\\n')", "def test_emirp_check():\r\n pass", "def test_port(self):\n self.assertEqual(self.gmail_case.port, None)\n self.assertEqual(self.telnet_case.port, 80)\n self.assertEqual(self.foo_case.port, 8042)", "def test_buildProtocol(self):\n f = AvatarFactory('world')\n p = f.buildProtocol(None)\n self.assertEqual(p.factory, f)\n self.assertEqual(p.world, 'world')\n self.assertTrue(isinstance(p, AvatarProtocol))", "def allow(self, test):\n raise NotImplementedError()", "def test_im_chat_messages(self):\n pass", "def test_messages(self):\n pass", "def test_customChallengers(self):\n\n @implementer(IChallengeResponse, IUsernamePassword)\n class SPECIALAuth(object):\n\n def getChallenge(self):\n return b'SPECIAL'\n\n\n def setResponse(self, response):\n self.username, self.password = response.split(None, 1)\n\n\n def moreChallenges(self):\n return False\n\n\n def checkPassword(self, password):\n self.password = self.password\n\n special = SPECIALAuth()\n verifyObject(IChallengeResponse, special)\n\n server = imap4.IMAP4Server({b'SPECIAL': SPECIALAuth})\n server.portal = self.portal\n\n transport = StringTransport()\n server.makeConnection(transport)\n self.addCleanup(server.connectionLost,\n error.ConnectionDone(\"Connection done.\"))\n\n self.assertIn(b\"AUTH=SPECIAL\", transport.value())\n\n transport.clear()\n server.dataReceived(b'001 AUTHENTICATE SPECIAL\\r\\n')\n\n self.assertIn(base64.b64encode(special.getChallenge()),\n transport.value())\n\n transport.clear()\n server.dataReceived(base64.b64encode(b'username password') + b'\\r\\n')\n\n self.assertEqual(transport.value(),\n b\"001 OK Authentication successful\\r\\n\")", "def test_orchestrator_http_simple(self):\n pass", "def spec_tests():\n pass", "def setup_python26(self):\n self.client = doubles.Python26TestResult()\n self.setup_protocol()", "def setup_python26(self):\n self.client = doubles.Python26TestResult()\n self.setup_protocol()", "def supports(self, message):\r\n if message.method == '__testing__':\r\n return True\r\n return self._interface.supports(message)", "def isProtocolDefined(self) -> bool:\n ...", "def test_4_4_1_1(self):\n pass", "def test_recv(self):\n # Required to get useful test names\n super(TestCisPlyInput_local, self).test_recv()", "def test_buildProtocol(self):\n queryData = (\"fromUser\", None, None)\n factory = irc.DccChatFactory(None, queryData)\n protocol = factory.buildProtocol(\"127.0.0.1\")\n self.assertIsInstance(protocol, irc.DccChat)\n self.assertEqual(protocol.factory, factory)", "def testBinaryProtocolAcceleratedEof(self):\n self.eofTestHelper(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())\n self.eofTestHelperStress(TBinaryProtocol.TBinaryProtocolAcceleratedFactory())", "def test_recv(self):\n # Required to get useful test names\n super(TestCisObjInput_local, self).test_recv()", "def test_if(self):", "async def test_websocket_communicator():\n communicator = WebsocketCommunicator(SimpleWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert 
connected\n assert subprotocol is None\n # Test sending text\n await communicator.send_to(text_data=\"hello\")\n response = await communicator.receive_from()\n assert response == \"hello\"\n # Test sending bytes\n await communicator.send_to(bytes_data=b\"w\\0\\0\\0\")\n response = await communicator.receive_from()\n assert response == b\"w\\0\\0\\0\"\n # Test sending JSON\n await communicator.send_json_to({\"hello\": \"world\"})\n response = await communicator.receive_json_from()\n assert response == {\"hello\": \"world\"}\n # Close out\n await communicator.disconnect()", "def __test__():\n#-------------------------------------------------------------------------------\n import pylib.tester as tester\n return 0", "async def test_plaintext_connection(conn: APIConnection, resolve_host, socket_socket):\n loop = asyncio.get_event_loop()\n protocol = _get_mock_protocol(conn)\n messages = []\n protocol: Optional[APIPlaintextFrameHelper] = None\n transport = MagicMock()\n connected = asyncio.Event()\n\n def _create_mock_transport_protocol(create_func, **kwargs):\n nonlocal protocol\n protocol = create_func()\n protocol.connection_made(transport)\n connected.set()\n return transport, protocol\n\n def on_msg(msg):\n messages.append(msg)\n\n remove = conn.add_message_callback(on_msg, {HelloResponse, DeviceInfoResponse})\n transport = MagicMock()\n\n with patch.object(\n loop, \"create_connection\", side_effect=_create_mock_transport_protocol\n ):\n connect_task = asyncio.create_task(conn.connect(login=False))\n await connected.wait()\n\n protocol.data_received(\n b'\\x00@\\x02\\x08\\x01\\x10\\x07\\x1a(m5stackatomproxy (esphome v2023.1.0-dev)\"\\x10m'\n )\n protocol.data_received(b\"5stackatomproxy\")\n protocol.data_received(b\"\\x00\\x00$\")\n protocol.data_received(b\"\\x00\\x00\\x04\")\n protocol.data_received(\n b'\\x00e\\n\\x12\\x10m5stackatomproxy\\x1a\\x11E8:9F:6D:0A:68:E0\"\\x0c2023.1.0-d'\n )\n protocol.data_received(\n b\"ev*\\x15Jan 7 2023, 13:19:532\\x0cm5stack-atomX\\x03b\\tEspressif\"\n )\n await asyncio.sleep(0)\n await connect_task\n assert conn.is_connected\n assert len(messages) == 2\n assert isinstance(messages[0], HelloResponse)\n assert isinstance(messages[1], DeviceInfoResponse)\n assert messages[1].name == \"m5stackatomproxy\"\n remove()\n await conn.force_disconnect()\n await asyncio.sleep(0)", "def test_unknown_service(self):\n raise NotImplementedError # FIXME", "def test_message_user():", "def test_subscribe_offer(self):\n pass", "def test_register_network(self):\n pass", "def test(self):", "def test(self):", "def test_service_support(self):\n self.assertTrue(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "def setUp(self):\n self.transport = StringTransport()\n self.protocol = IRCClient()\n self.protocol.performLogin = False\n self.protocol.makeConnection(self.transport)\n\n # Sanity check - we don't want anything to have happened at this\n # point, since we're not in a test yet.\n self.assertEqualBufferValue(self.transport.value(), \"\")\n\n self.addCleanup(self.transport.loseConnection)\n self.addCleanup(self.protocol.connectionLost, None)", "def test_service_support(self):\n self.assertFalse(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "def gotProtocol(self,p): \n p.send_hello()", "def test_module(self):\n pass", "def testable(self):\n return False", "def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield load_protocol, 
(path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))", "def testTheType(self, theTestType):\n \n pass", "def run_test(self):\n raise NotImplementedError", "def test_rpcCall(self):\n pass", "def setUp(self):\n self.transport = StringTransport()\n self.protocol = imap4.IMAP4Client()\n self.protocol.makeConnection(self.transport)\n self.protocol.dataReceived(b'* OK [IMAP4rev1]\\r\\n')", "def test_connect(server):\n assert server", "def test_differentProtocolAfterTimeout(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n protocols = []\n results = [defer.fail(failure.Failure(DNSQueryTimeoutError(None))),\n defer.succeed(dns.Message())]\n\n class FakeProtocol(object):\n def __init__(self):\n self.transport = StubPort()\n\n def query(self, address, query, timeout=10, id=None):\n protocols.append(self)\n return results.pop(0)\n\n resolver._connectedProtocol = FakeProtocol\n resolver.query(dns.Query('foo.example.com'))\n self.assertEqual(len(set(protocols)), 2)", "def run_tests(self):\n raise NotImplementedError", "def test_handle(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.act()", "def test():\n pass", "def test_get_network(self):\n pass", "def test_get_connection(self):\n\n http_client = bce_http_client.BceHttpClient(None)\n\n # test unknown protocol\n test_protocol = Expando({'name': 'unknown', 'default_port': 65535})\n host = \"1.2.3.4\"\n port = 8080\n connection_timeout = 1000\n self.assertRaises(ValueError,\n http_client._get_connection,\n test_protocol, host, port, connection_timeout)\n\n # test http protocol\n test_protocol = protocol.HTTP\n host = \"1.2.3.4\"\n port = 8080\n connection_timeout = 1000\n conn = http_client._get_connection(test_protocol, host, port, connection_timeout)\n self.assertEqual(conn.host, \"1.2.3.4\")\n self.assertEqual(conn.port, 8080)\n self.assertEqual(conn.timeout, 1)\n\n # test https protocol\n test_protocol = protocol.HTTPS\n host = \"1.2.3.4\"\n port = 8080\n connection_timeout = 1000\n conn = http_client._get_connection(test_protocol, host, port, connection_timeout)", "def test_parse_host_port(self):\n # test default port for http\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test default port for https\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTPS)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test specific port\n endpoint = \"1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)\n\n # test value error\n endpoint = \"1.2.3.4:abcd\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # protocol unsupported\n endpoint = \"ftp://1.2.3.4\"\n default_protocol = 
baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # test of endpoint dominates the protocol\n endpoint = \"http://1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)", "def test_send(self):\n # Required to get useful test names\n super(TestCisPlyOutput_local, self).test_send()", "def test_something():", "def test_method(self):", "def test_method(self, test, another_test, _): # noqa: D213, D407", "def test_switch_serivce(self):\n network = NetworkAPI(\"main\")\n network.list_of_apis = collections.deque([MockErrorApi, MockApi])\n assert network.list_of_apis[0] == MockErrorApi\n assert \"\" == network.get_balance(TEST_ADDRESS_USED2)\n # API rotated, confirm default api has changed.\n assert network.list_of_apis[0] == MockApi", "def test_portforward(self):\n realServerFactory = protocol.ServerFactory()\n realServerFactory.protocol = lambda: self.serverProtocol\n realServerPort = reactor.listenTCP(0, realServerFactory, interface=\"127.0.0.1\")\n self.openPorts.append(realServerPort)\n self.proxyServerFactory = TestableProxyFactory(\n \"127.0.0.1\", realServerPort.getHost().port\n )\n proxyServerPort = reactor.listenTCP(\n 0, self.proxyServerFactory, interface=\"127.0.0.1\"\n )\n self.openPorts.append(proxyServerPort)\n\n nBytes = 1000\n received = []\n d = defer.Deferred()\n\n def testDataReceived(data):\n received.extend(iterbytes(data))\n if len(received) >= nBytes:\n self.assertEqual(b\"\".join(received), b\"x\" * nBytes)\n d.callback(None)\n\n self.clientProtocol.dataReceived = testDataReceived\n\n def testConnectionMade():\n self.clientProtocol.transport.write(b\"x\" * nBytes)\n\n self.clientProtocol.connectionMade = testConnectionMade\n\n clientFactory = protocol.ClientFactory()\n clientFactory.protocol = lambda: self.clientProtocol\n\n reactor.connectTCP(\"127.0.0.1\", proxyServerPort.getHost().port, clientFactory)\n\n return d", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def test_resolverProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n self.addCleanup(setWarningMethod, getWarningMethod())\n warnings = []\n setWarningMethod(\n lambda message, category, stacklevel:\n warnings.append((message, category, stacklevel)))\n protocol = resolver.protocol\n self.assertIsInstance(protocol, dns.DNSDatagramProtocol)\n self.assertEqual(\n warnings, [(\"Resolver.protocol is deprecated; use \"\n \"Resolver.queryUDP instead.\",\n PendingDeprecationWarning, 0)])\n self.assertIdentical(protocol, resolver.protocol)", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()", "def test_interfaceAttribute(self):\n port = self.port(store=self.store, interface=self.someInterface)\n self.assertEqual(port.interface, self.someInterface)", "def test_with_links_cases_and_issues():\n pass" ]
[ "0.71973604", "0.71074444", "0.7052137", "0.70288616", "0.67791927", "0.6558383", "0.6532375", "0.6532375", "0.6508606", "0.6500069", "0.6478221", "0.6394548", "0.6394548", "0.6394548", "0.63722515", "0.63722515", "0.63722515", "0.63722515", "0.63722515", "0.62949234", "0.62888896", "0.62881756", "0.6286108", "0.6251965", "0.62407947", "0.6231375", "0.622285", "0.6221106", "0.6219977", "0.61989874", "0.6197552", "0.61832166", "0.6161605", "0.6148136", "0.60988355", "0.6090571", "0.60786796", "0.6073405", "0.6064922", "0.60565555", "0.5995147", "0.59940916", "0.5979701", "0.5972385", "0.59721756", "0.5966502", "0.59590054", "0.5951279", "0.5940332", "0.5938639", "0.59334713", "0.5932607", "0.5932607", "0.5930669", "0.592745", "0.5912009", "0.59082323", "0.5906865", "0.5903982", "0.58998215", "0.5890184", "0.5882086", "0.5877732", "0.5876745", "0.58704585", "0.5867322", "0.5860505", "0.5855678", "0.5849871", "0.5849871", "0.58332926", "0.5822895", "0.5819009", "0.58164936", "0.5816005", "0.5814999", "0.58143723", "0.5813072", "0.5808249", "0.5797338", "0.5796567", "0.5794123", "0.57931733", "0.57903874", "0.5789924", "0.5782734", "0.5774871", "0.5756142", "0.57553035", "0.5749793", "0.57402974", "0.5732027", "0.57269824", "0.57215977", "0.57190937", "0.57189304", "0.5717928", "0.5715066", "0.5713011", "0.57074803" ]
0.6247752
24
Checks whether reply to this message should be faked and fakes it if required
def _fake_next_op(self, context, message, dry_run=False):
    if context.channel in self._fake_ops:
        channel = context.channel
        if len(self._fake_ops[channel]) > 0:
            if "on_message" not in self._fake_ops[channel][0] \
                    or self._fake_message_compare(self._fake_ops[channel][0]["on_message"], message):
                if "after" in self._fake_ops[channel][0] and self._fake_ops[channel][0]["after"] > 0:
                    if dry_run:
                        return False
                    self._fake_ops[channel][0]["after"] -= 1
                    return False
                if dry_run:
                    return True
                instruction = self._fake_ops[channel].pop(0)
                if len(self._fake_ops[channel]) == 0:
                    del self._fake_ops[channel]
                vprint("{}: faking reply".format(self.name))
                reply = instruction["reply"]
                if "execute" in instruction and instruction["execute"] == True:
                    result = {}
                    if instruction["on_success"]:
                        result["on_success"] = reply
                    if instruction["on_failure"]:
                        result["on_failure"] = reply
                    return result
                if reply.success:
                    self._worker.reply(context, PlatformMessage.success(reply.retval, reply.retval_name))
                else:
                    self._worker.reply(context, PlatformMessage.failure(reply.state, reply.errcode))
                return True
        else:
            # TODO: Shouldn't be here actually. Raise error!
            del self._fake_ops[channel]
    return False
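For orientation, the method above consumes per-channel instruction dicts queued in self._fake_ops: each holds a reply object (exposing success/retval/retval_name or state/errcode) plus optional "on_message", "after" and "execute" keys. The helper below is a minimal sketch, under those assumptions, of how such an instruction could be queued; the name fake_reply_once is purely illustrative and not part of the original code.

def fake_reply_once(platform, channel, reply, on_message=None, after=0, execute=False):
    # Hypothetical helper: build an instruction in the shape _fake_next_op expects.
    instruction = {"reply": reply}  # reply: PlatformMessage-like result to fake
    if on_message is not None:
        # Only fire when the incoming message matches this template.
        instruction["on_message"] = on_message
    if after > 0:
        # Let this many matching messages pass before faking the reply.
        instruction["after"] = after
    if execute:
        # Ask _fake_next_op to hand back handlers instead of replying directly.
        instruction["execute"] = True
        instruction["on_success"] = True
        instruction["on_failure"] = False
    platform._fake_ops.setdefault(channel, []).append(instruction)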
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_message_missing_body(self):\n receipt_handle = 'blah'\n msg = [{\"ReceiptHandle\": receipt_handle}]\n with patch.object(self.dead_letter, 'remove_message_from_queue') as dequeue_fake:\n self.dead_letter.handle_messages(msg)\n\n # Ensure message dequeued.\n dequeue_fake.assert_called_with(receipt_handle)", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def process_refill_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tdef process_response(return_message_type):\n\t\t\tfor feedback in message.feedbacks.all():\n\t\t\t\tfeedback.note = Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()]\n\t\t\t\tfeedback.save()\n\t\t\ttemplate = 'messages/refill_questionnaire_responses/' + \\\n\t\t\t Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()] + \\\n\t\t\t '.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=return_message_type, content=content, previous_message=message)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\n\t\t# Switch on type of response\n\t\t# a - Haven't gotten the chance\n\t\tif response.lower() == 'a':\n\t\t\t# Schedule a medication reminder for later\n\t\t\tone_hour = datetime.datetime.now() + datetime.timedelta(hours=1)\n\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# b - Too expensive\n\t\telif response.lower() == 'b':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone needs to refill\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# c - Concerned about side effects\n\t\telif response.lower() == 'c':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone has side effects\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# d - Other\n\t\telif response.lower() == 'd':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.OPEN_ENDED_QUESTION)\n\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def reply(f, *args, **kwargs):\n r = f(*args, **kwargs)\n\n if r:\n if isinstance(r, dict):\n r['needs_reply'] = True\n elif isinstance(r, basestring):\n r = dict(answer=r, needs_reply=True)\n\n return r", "def checkSelfReply(body):\n return 'WHAT IS MY PURPOSE' in body", "def test_make_reply(self):\n msg_helper = MessageHelper()\n msg = msg_helper.make_inbound('inbound')\n 
reply = msg_helper.make_reply(msg, 'reply content')\n self.assert_message_fields(reply, {\n 'content': 'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def reply(self, message):\n self.logger.info(\"message came as {}\".format(message))\n message = message.lower()\n if message in [\"start over\", \"get started\", \"hello\", \"hi\", \"say hello\"]:\n self.params = \"\"\n self.readyseteatparams = \"\"\n # self.api.send_text_facebook(\n # self.user_id,\n # 'What type of recipe would you like to make? You can type \"start over\" at any time'\n # )\n # return self.api.send_facebook(self.user_id, self.config.QUESTION_MAIN)\n self.send_welcome_messages()\n return self.api.send_facebook(self.user_id, self.config.QUICK_REPLY_MAIN)\n if message in [\"more\", \"show more\"] and self.data:\n self.index += 5\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n return self.api.send_facebook(self.user_id, m_data)\n if message == \"ask-tomorrow-payload\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"ask-week-payload\":\n self.usersModule.makeNotificationWeekly(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"This notification has been set up.\")\n if message == \"activate notifications\":\n self.usersModule.makeNotificationDaily(self.user_id)\n return self.api.send_text_facebook(self.user_id, \"Notification has been activated.\")\n if message in [\"do-nothing\", \"payload_unsubscribe\"]:\n if message == \"payload_unsubscribe\":\n self.usersModule.deactivateNotification(self.user_id)\n return self.api.send_text_facebook(\n self.user_id,\n 'Notification has been deactivated. 
You can type \"start over\" anytime.')\n else:\n return self.api.send_text_facebook(\n self.user_id,\n 'You can type \"start over\" when you are looking for new recipes.')\n\n try:\n title, choice = message.split(\"_\")\n except:\n title = None\n choice = message\n\n if title == \"category\":\n self.params = \"\"\n self._type = choice\n if choice == \"dinner\":\n self.params += \"&category=89\"\n self.readyseteatparams += \"&category=89\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient:\")\n # return self.api.send_facebook(self.user_id, self.config.DINNER_INGREDIENTS)\n return self.api.send_facebook(self.user_id, self.config.DINNER_GUICK_REPLY)\n elif choice == \"dessert\":\n self.params += \"&category=88\"\n self.readyseteatparams += \"&category=88\"\n # self.api.send_text_facebook(self.user_id, \"What kind of dessert would you like to make?\")\n # return self.api.send_facebook(self.user_id, self.config.DESSERTS)\n return self.api.send_facebook(self.user_id, self.config.DESSERTS_QUICK_REPLY)\n elif choice == \"breakfast\":\n self.params += \"&category=87\"\n self.readyseteatparams += \"&category=87\"\n # self.api.send_text_facebook(self.user_id, \"What kind of breakfast do you want?\")\n # return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_QUICK_REPLY)\n elif choice == \"appetizer\":\n self.params += \"&category=85\"\n self.readyseteatparams += \"&category=85\"\n # self.api.send_text_facebook(self.user_id, \"What kind of appetizer or snack sounds good?\")\n # return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.APPETIZER_QUICK_REPLY)\n elif choice == \"side dish\":\n self.params += \"&category=95\"\n self.readyseteatparams += \"&category=95\"\n # self.api.send_text_facebook(self.user_id, \"Select a main ingredient\")\n # return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.SIDE_DISH_QUICK_REPLY)\n else:\n return self.api.send_text_facebook(self.user_id,\n \"I don't know answer that belongs to {} yet\".format(message))\n\n if title == \"main-ingredient\":\n self.mainIngredient = choice\n if choice == \"chicken\":\n self.params += \"&mainingredient=76\"\n self.readyseteatparams += \"&mainingredient=76\"\n elif choice == \"beef\":\n self.params += \"&mainingredient=70\"\n self.readyseteatparams += \"&mainingredient=70\"\n elif choice == \"pork\":\n self.params += \"&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=249\"\n elif choice == \"seafood\":\n self.params += \"&mainingredient=73\"\n self.readyseteatparams += \"&mainingredient=73\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"vegetarian\":\n self.params += \"&lifestyle=299\"\n self.readyseteatparams += \"&lifestyle=299\"\n return self.api.send_facebook(self.user_id, self.config.TIME_QUICK_REPLY)\n if title == \"bre-time\":\n self.breakfastTime = choice\n if choice == \"15\":\n self.params += \"&totaltime=15\"\n self.readyseteatparams += \"&totaltime=15\"\n elif choice == \"30\":\n self.params += \"&totaltime=30\"\n self.readyseteatparams += \"&totaltime=30\"\n elif choice == \"45\":\n pass\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = 
self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n if title == \"time\":\n self.time = choice\n self.params += \"&totaltime={}\".format(choice)\n self.readyseteatparams += \"&totaltime={}\".format(choice)\n # self.api.send_text_facebook(self.user_id, \"What sounds Good?\")\n # return self.api.send_facebook(self.user_id, self.config.REGION_DINNER_QUESTION)\n return self.api.send_facebook(self.user_id, self.config.REGION_QUICK_REPLY)\n\n if title == \"region\":\n self.region = choice\n if choice == \"asian\":\n self.params += \"&cuisine=44\"\n self.readyseteatparams += \"&cuisine=44\"\n elif choice == \"italian\":\n self.params += \"&cuisine=46\"\n self.readyseteatparams += \"&cuisine=46\"\n elif choice == \"mediterranean\":\n self.params += \"&cuisine=367\"\n self.readyseteatparams += \"&cuisine=367\"\n elif choice == \"mexican\":\n self.params += \"&cuisine=45\"\n self.readyseteatparams += \"&cuisine=45\"\n elif choice == \"american\":\n self.params += \"&suppresstraits=44,35,355,46,367,45,356,261\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"dessert\":\n self.dessert = choice\n if choice == \"cookies\":\n self.params += \"&trait=48,10,20,110&suppresstraits=22,24&keywords=cookies\"\n self.readyseteatparams += \"&trait=48,10,20,110&keywords=cookies\"\n elif choice == \"cakes\":\n self.params += \"&suppresstraits=24&keywords=cake\"\n self.readyseteatparams += \"&keywords=cake\"\n elif choice == \"pies\":\n self.params = \"sortby=season,rating&order=desc,desc&negativeingredientkeyword=pieces&keywords=pie&suppresstraits=24&category=88\"\n self.readyseteatparams = \"&negativeingredientkeyword=pieces&keywords=pie&category=88\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=257&goodforyou=258&goodforyou=260\"\n elif choice == \"seasonal\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=88&season=330\"\n self.readyseteatparams = \"&category=88&season=330\"\n elif choice == \"quick\":\n self.params = \"&totaltime=30\"\n self.readyseteatparams = \"&totaltime=30\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"breakfast\":\n self.breakfastIngredient = choice\n if choice == \"eggs\":\n self.params += \"&mainingredient=72\"\n self.readyseteatparams += 
\"&mainingredient=72\"\n self.params += \"&trait=9\"\n self.readyseteatparams += \"&trait=9\"\n elif choice == \"casserole\":\n self.params += \"&keywords=casserole\"\n self.readyseteatparams += \"&keywords=casserole\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260&goodforyou=258\"\n self.readyseteatparams += \"&goodforyou=260&goodforyou=258\"\n elif choice == \"sweet\":\n self.params += \"&trait=22\"\n self.readyseteatparams += \"&trait=22\"\n # will add something sweet\n pass\n return self.api.send_facebook(self.user_id, self.config.BREAKFAST_TIME_QUICK_REPLY)\n\n if title == \"appetizer\":\n self.appetizerIng = choice\n if choice == \"cheesy\" or choice == \"meaty\":\n if choice == \"cheesy\":\n self.params += \"&keywords=cheese\"\n self.readyseteatparams += \"&keywords=cheese\"\n elif choice == \"meaty\":\n self.params += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n self.readyseteatparams += \"&mainingredient=70&mainingredient=76&mainingredient=249\"\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n elif choice == \"veggies\" or choice == \"healthier\":\n if choice == \"veggies\":\n self.params += \"&mainingredient=77&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=77&mainingredient=310\"\n elif choice == \"healthier\":\n self.params += \"&goodforyou=260\"\n self.readyseteatparams += \"&goodforyou=260\"\n return self.api.send_facebook(self.user_id, self.config.HOT_OR_COLD_QUICK_REPLY)\n\n if title == \"hot-cold\":\n self.appetizerType = choice\n if choice == \"hot\":\n self.params += \"&suppresstraits=252\"\n elif choice == \"cold\":\n self.params += \"&cookingmethod=252\"\n self.readyseteatparams += \"&cookingmethod=252\"\n\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n\n if title == \"side-dish\":\n self.sideDish = choice\n if choice == \"potato\":\n self.params += \"&mainingredient=298\"\n self.readyseteatparams += \"&mainingredient=298\"\n elif choice == \"vegetable\":\n self.params += \"&mainingredient=77\"\n self.readyseteatparams += \"&mainingredient=77\"\n elif choice == \"rice\":\n self.params += \"&mainingredient=272\"\n self.readyseteatparams += \"&mainingredient=272\"\n elif choice == \"pasta\":\n self.params += \"&mainingredient=75\"\n self.readyseteatparams += \"&mainingredient=75\"\n elif choice == \"salad\":\n self.params = \"sortby=season,newest,rating,publisheddate&order=desc,desc,desc,desc&category=95&mainingredient=77\"\n self.readyseteatparams = \"&category=95&mainingredient=77&trait=92\"\n elif choice == \"beans\":\n self.params += \"&mainingredient=310\"\n self.readyseteatparams += \"&mainingredient=310\"\n\n recipes = self.api.getRecipes(self.params)\n if not 
recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n isParamInMessage = self.fetch_parameters(message)\n if isParamInMessage:\n recipes = self.api.getRecipes(self.params)\n if not recipes:\n return self.send_no_results()\n viewMoreUrl = self.api.prepareViewMoreUrl(self.readyseteatparams)\n elems = self.api.prepareRecipes(recipes, viewMoreUrl)\n self.data = elems\n m_data = self.config.DEFAULT_TEMPLATE.copy()\n m_data[\"message\"][\"attachment\"][\"payload\"][\"elements\"] = self.data[self.index:self.index + 3]\n r = self.api.send_facebook(self.user_id, m_data)\n self.logger.warning(r)\n return\n return self.api.send_text_facebook(self.user_id, \"You can write ‘start over’ to go to the first step\")", "def check_if_should_respond(self, has_been_mentioned) -> bool:\n should_respond = random.random() < self.response_chance\n\n return should_respond", "def test_get_response_no_dialog(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response()\n self.assertEqual(response, expected_response)\n self.assertFalse(skill.speak_dialog.called)\n self.assertTrue(skill.bus.emit.called)\n sent_message = skill.bus.emit.call_args[0][0]\n self.assertEqual(sent_message.msg_type, 'mycroft.mic.listen')", "def test_decision_maker_handle_tx_message_not_ready(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -2},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n info=self.info,\n ledger_id=self.ledger_id,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker.ledger_apis, \"token_balance\", return_value=1000000\n ):\n with mock.patch.object(\n self.decision_maker.ledger_apis,\n \"transfer\",\n return_value=\"This is a test digest\",\n ):\n with mock.patch(\n \"aea.decision_maker.base.GoalPursuitReadiness.Status\"\n ) as mocked_status:\n mocked_status.READY.value = False\n self.decision_maker.handle(tx_message)\n assert not self.decision_maker.goal_pursuit_readiness.is_ready\n\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -2},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n info=self.info,\n ledger_id=self.ledger_id,\n tx_nonce=\"transaction nonce\",\n )\n self.decision_maker.handle(tx_message)\n assert not self.decision_maker.message_out_queue.empty()", "def expects_reply(self) -> bool:\n return self.opcode in [\n OPCODE_WRITE_8,\n OPCODE_READ_8,\n OPCODE_FENCE,\n OPCODE_FINISH,\n ]", "def 
test_solicitation_no_reply_resend(self):\n waittime = self.autoconflayer._solicitation_timeout * 4.0\n self.autoconflayer.start_process()\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure the forwarder solicitation was sent more than once\n solictiation = Interest(Name('/autoconfig/forwarders'))\n solictiation_count = len([1 for data in tolower if data == [bcfid, solictiation]])\n self.assertGreater(solictiation_count, 1)", "def AlwaysNeededExpectation(self, expectation: BaseExpectation) -> bool:\n return self.did_never_pass", "def process_medication_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\tdef process_response(return_message_type):\n\t\t\tfor feedback in message.feedbacks.all():\n\t\t\t\tfeedback.note = Message.MEDICATION_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()]\n\t\t\t\tfeedback.save()\n\t\t\ttemplate = 'messages/medication_questionnaire_responses/' + \\\n\t\t\t Message.MEDICATION_QUESTIONNAIRE_RESPONSE_DICTIONARY[response.upper()] + \\\n\t\t\t '.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=return_message_type, content=content, previous_message=message)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\t# Switch on type of response\n\t\t# a - Haven't gotten the chance\n\t\tif response.lower() == 'a':\n\t\t\t# Schedule a medication reminder for later\n\t\t\tone_hour = datetime.datetime.now() + datetime.timedelta(hours=1)\n\t\t\tn = Notification.objects.create(to=sender, _type=Notification.REPEAT_MESSAGE, repeat=Notification.NO_REPEAT,\n\t\t\t message=message.previous_message, send_datetime=one_hour)\n\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# b - Need to refill\n\t\telif response.lower() == 'b':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone needs to refill\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# c - Side effects\n\t\telif response.lower() == 'c':\n\t\t\t#TODO(mgaba): Figure out what else should happen if someone has side effects\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\t# Send response\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# d - Meds don't work\n\t\telif response.lower() == 'd':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# e - Prescription changed\n\t\telif response.lower() == 'e':\n\t\t\t#TODO(mgaba): Add doctors name to personalize messages\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# f - I feel sad :(\n\t\telif response.lower() == 'f':\n\t\t\treturn process_response(Message.STATIC_ONE_OFF)\n\n\t\t# g - Other\n\t\telif response.lower() == 'g':\n\t\t\t#TODO(mgaba): Add doctors name to personalize message\n\t\t\treturn process_response(Message.OPEN_ENDED_QUESTION)\n\t\t# Unknown 
response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def handle_mycroft_ready(self, message):\n with self.pair_dialog_lock:\n if is_paired() and self.pairing_performed:\n self.speak_dialog(self.paired_dialog)\n else:\n self.mycroft_ready = True", "def fainted(self):\n self.pkmn.faint()\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [], \"Should receive no messages since nothing was performed\"", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=6)\n\n self.request_buffer.process_reply(reply)\n self.assertEqual(len(self.request_buffer.requests), 5)", "def NeverNeededExpectation(self, expectation: BaseExpectation) -> bool:\n return self.did_fully_pass", "def verify_as_target(self, message_handler):", "def _handle_reply(self, fullname, body):\n handled = False\n thing = self._reddit.get_thing_from_fullname(fullname)\n if thing:\n logger.id(logger.info, self,\n 'Processing {color_thing} ...',\n color_thing=reddit.display_id(thing),\n )\n # only handle specific types of things\n if isinstance(thing, RateLimitHandler.VALID_THINGS):\n logger.id(logger.info, self,\n 'Replying to {color_thing} ...',\n color_thing=reddit.display_id(thing),\n )\n\n # Note: we may be rate-limited again\n success = self._reddit.do_reply(\n thing, body, self._killed,\n )\n\n if success or success is None:\n logger.id(logger.debug, self,\n 'Removing \\'{color_thing}\\' from'\n ' reddit ratelimit queue ...',\n color_thing=thing,\n )\n # reply either succeeded or a reply is not possible\n # (eg. 
403 Forbidden)\n # remove the element from the queue database\n with self.rate_limit_queue:\n self.rate_limit_queue.delete(thing, body=body)\n\n if success:\n # try to add the thing to the reply history\n # (but only if we can find instagram users\n # in the body)\n ig_users = replies.Formatter.ig_users_in(\n body\n )\n if ig_users:\n try:\n with self.reply_history:\n self.reply_history.insert(\n thing, ig_users,\n )\n\n except database.UniqueConstraintFailed:\n display = reddit.display_id(\n thing\n )\n logger.id(logger.warn, self,\n 'Duplicate instagram user'\n ' posted in'\n ' {color_submission}!'\n ' (users={color_users})',\n color_submission=display,\n color_users=ig_users,\n exc_info=True,\n )\n handled = True\n\n return handled", "async def maybe_reply(self, content=None, mention_author=False, **kwargs):\r\n await asyncio.sleep(0.05)\r\n with contextlib.suppress(discord.HTTPException):\r\n if getattr(self.channel, \"last_message\", False) != self.message:\r\n return await super().reply(\r\n content, mention_author=mention_author, **kwargs\r\n )\r\n return await super().send(content, **kwargs)", "def process_medication_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\t# Switch on type of response\n\t\tif self.is_yes(response):\n\t\t\t# Send out a medication ack message\n\t\t\t# Update state\n\t\t\tfeedbacks = message.feedbacks.all()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.completed = True\n\t\t\t\tfeedback.datetime_responded = now\n\t\t\t\tfeedback.save()\n\n\t\t\t# Create new message\n\t\t\tcontent = self._return_best_ack_response_content(sender, message)\n\t\t\tMessage.objects.create(to=sender, _type=Message.MEDICATION_ACK, previous_message=message, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\telif self.is_no(response):\n\t\t\t# Send out a medication questionnaire message\n\t\t\t# Update state\n\t\t\tfeedbacks = message.feedbacks.all()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.completed = False\n\t\t\t\tfeedback.datetime_responded = now\n\t\t\t\tfeedback.save()\n\n\t\t\t# Create a questionnaire message\n\t\t\ttemplate = 'messages/medication_questionnaire_message.txt'\n\t\t\tcontext = {'response_dict': iter(sorted(Message.MEDICATION_QUESTIONNAIRE_RESPONSE_DICTIONARY.items()))}\n\t\t\tcontent = render_to_string(template, context)\n\n\t\t\t# Create new message\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.MEDICATION_QUESTIONNAIRE, previous_message=message,\n\t\t\t content=content)\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tnew_m.feedbacks.add(feedback)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\telif self.is_med_info(response):\n\t\t\t# Send out a med info message\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\tcontent = \"Medication information is a work in progress.\\n\\n\"+ \\\n\t\t\t \"Did you take your meds?\\n\"+ \\\n\t\t\t \"y - yes\\n\"+ \\\n\t\t\t \"n - no\"\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\t\t\tpass\n\n\t\telif self.is_time_change(response):\n\t\t\t# Update reminder time and send out a time change ack\n\t\t\tpass\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, 
content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')", "def ndemeye(self, message):\n\n try: activate(message.contact.language)\n except: activate('rw')\n\n try:\n message.reporter = Reporter.objects.filter(national_id = message.connection.contact.name )[0]\n except Exception, e:\n try: message.supervisor = Supervisor.objects.filter(email = message.connection.contact.name )[0]\n except Exception,e:\n message.respond(_(\"You need to be registered first\"))\n return True\n\n try:\n cnf = RegistrationConfirmation.objects.get(reporter = message.reporter)\n cnf.received = datetime.now()\n cnf.responded = True\n cnf.answer = True\n cnf.save()\n except Exception, e:\n print e\n if message.supervisor:\n message.respond(\"Muraho murakomeye! Ohereza ijambo 'WHO' urebeko wanditse neza, kandi wibutse abajyanamako bagomba kohereza ubutumwa kuri %s. Murakoze\" % settings.SHORTCODE) \n else: message.respond(_(\"You need to be registered first\"))\n return True \t\t\t \n\n message.respond(\"Muraho murakomeye! Mwatangira kohereza ubutumwa ku buzima bw'umubyeyi n'umwana kuri Rapidsms numero %s.\\\n Ohereza ijambo 'WHO' urebeko wanditse neza. Murakoze\" % settings.SHORTCODE)\n\n return True", "def careful_reply(api,reply):\r\n\r\n debug_print('Preparing to reply to #%d' % (reply.id,))\r\n normalized_tweet = reply.text.lower().strip()\r\n\r\n # Don't reply to a retweet\r\n if hasattr(reply, 'retweeted_status'):\r\n return\r\n\r\n debug_print('Replying to #%d' % (reply.id,))\r\n update = \"@%s We'd estimate about a %d percent chance, actually.\" % (reply.user.screen_name, random.randint(0,100),)\r\n return api.update_status(update, reply.id)", "def test_decision_maker_execute_w_wrong_input(self):\n default_message = DefaultMessage(\n type=DefaultMessage.Type.BYTES, content=b\"hello\"\n )\n\n self.decision_maker.message_in_queue.put_nowait(default_message)\n time.sleep(0.5)\n self.mocked_logger_warning.assert_called_with(\n \"[{}]: Message received by the decision maker is not of protocol_id=internal.\".format(\n self.agent_name\n )\n )", "def is_correct_reply(self, reply: 'NbfCommand') -> bool:\n if not self.expects_reply:\n return False\n\n if self.opcode == OPCODE_WRITE_8:\n return reply.matches(OPCODE_WRITE_8, self.address_int, 0)\n elif self.opcode == OPCODE_READ_8:\n return reply.matches(OPCODE_READ_8, self.address_int, None)\n elif self.opcode == OPCODE_FENCE:\n return reply.matches(OPCODE_FENCE, 0, 0)\n elif self.opcode == OPCODE_FINISH:\n return reply.matches(OPCODE_FINISH, 0, 0)\n else:\n return False", "async def prepare_message(message: types.Message) -> types.Message:\n reply = message.reply_to_message\n if reply is not None:\n return reply\n else:\n await message.reply(\"Sorry, there doesn't seem to be any reply in your message. 
\"\n \"Please, reply to the message of the user you want to vote on.\")", "def notFainted(self):\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [AfterTurnEffect.message], \"Should receive messages from afterTurn function\"", "def _process_message_general(self, context, message):\r\n f = self._fake_next_op(context, message)\r\n\r\n if f is True:\r\n return True\r\n elif f is not False:\r\n return f\r\n elif message.method == \"__testing__\":\r\n self._general_testing(context, *message.args, **message.kwargs)\r\n return True\r\n else:\r\n return False", "def is_reply(self):\n return (not self.is_forwarded and (\n bool(self.header('In-Reply-To'))\n or bool(re.match(RE_PATTERNS, self.header('Subject', '')))\n ))", "def reply(cls, user, context, message, reply_message):\r\n pass", "def process_refill_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\t# Switch on type of response\n\t\tif self.is_yes(response):\n\t\t\t# TODO(mgaba): Implement questions about weekly, monthly prescriptions. What's the right day?\n\t\t\t# Send out a medication ack message\n\t\t\t# Update state\n\t\t\tfeedbacks = message.feedbacks.all()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.completed = True\n\t\t\t\tfeedback.datetime_responded = now\n\t\t\t\tfeedback.save()\n\n\t\t\tnotifications = message.notifications.all()\n\t\t\tfor notification in notifications:\n\t\t\t\tnotification.active = False\n\t\t\t\tnotification.save()\n\n\t\t\t# Calculate the time of the next earliest notification to put in the message that gets sent back\n\t\t\tearliest_notification = None\n\t\t\tnow = datetime.datetime.now()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.prescription.filled = True\n\t\t\t\tfeedback.prescription.save()\n\t\t\t\tmed_notifications = Notification.objects.filter(prescription=feedback.prescription, _type=Notification.MEDICATION)\n\t\t\t\tfor med_notification in med_notifications:\n\t\t\t\t\tif med_notification.send_datetime < now:\n\t\t\t\t\t\tmed_notification.update_to_next_send_time()\n\t\t\t\t\tif earliest_notification == None or earliest_notification.send_datetime > med_notification.send_datetime:\n\t\t\t\t\t\tearliest_notification = med_notification\n\n\t\t\t# Convert the time of the next earliest notification to a string for the template\n\t\t\thour = earliest_notification.send_datetime.hour\n\t\t\tminute = earliest_notification.send_datetime.minute\n\t\t\tif hour == 0:\n\t\t\t\thour = 12\n\t\t\t\tampm = 'am'\n\t\t\telif hour == 12:\n\t\t\t\thour = 12\n\t\t\t\tampm = 'pm'\n\t\t\telif hour > 12:\n\t\t\t\thour = hour - 12\n\t\t\t\tampm = 'pm'\n\t\t\telse:\n\t\t\t\tampm = 'am'\n\t\t\tif earliest_notification.send_datetime.date() == now.date():\n\t\t\t\tday = \"today\"\n\t\t\telif earliest_notification.send_datetime.date() == now.date() + datetime.timedelta(days=1):\n\t\t\t\tday = \"tomorrow\"\n\t\t\telif earliest_notification.send_datetime.date() < now.date() + datetime.timedelta(days=7):\n\t\t\t\tweekdays = {'0':'Monday',\n\t\t\t\t '1':'Tuesday',\n\t\t\t\t '2':'Wednesday',\n\t\t\t\t '3':'Thursday',\n\t\t\t\t '4':'Friday',\n\t\t\t\t '5':'Saturday',\n\t\t\t\t '6':'Sunday'}\n\t\t\t\tday = \"on \" + weekdays[str(earliest_notification.send_datetime.weekday())]\n\n\t\t\t# Create new message\n\t\t\tcontext = {'hour':hour,\n\t\t\t 'minute':minute,\n\t\t\t 'ampm':ampm,\n\t\t\t 'day':day}\n\t\t\ttemplate = 'messages/refill_ack_message.txt'\n\t\t\tcontent = render_to_string(template, 
context)\n\t\t\tMessage.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, previous_message=message, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\telif self.is_no(response):\n\t\t\t# Send out a medication questionnaire message\n\t\t\t# Update state\n\t\t\tfeedbacks = message.feedbacks.all()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.completed = False\n\t\t\t\tfeedback.datetime_responded = now\n\t\t\t\tfeedback.save()\n\n\t\t\t# Create a questionnaire message\n\t\t\ttemplate = 'messages/refill_questionnaire_message.txt'\n\t\t\tcontext = {'response_dict': iter(sorted(Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY.items()))}\n\t\t\tcontent = render_to_string(template, context)\n\n\t\t\t# Create new message\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.REFILL_QUESTIONNAIRE, previous_message=message,\n\t\t\t content=content)\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tnew_m.feedbacks.add(feedback)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\telif self.is_med_info(response):\n\t\t\t# Send out a med info message\n\t\t\t# TODO:Implement med info for real\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\tcontent = \"Medication information is a work in progress.\\n\\n\"+\\\n\t\t\t\t\t \"Did you pick up your meds?\\n\"+\\\n\t\t\t\t\t \"y - yes\\n\"+\\\n\t\t\t\t\t \"n - no\"\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\t\t\tpass\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\t\traise Exception(\"Not yet implemented\")", "async def test_sent_correct_message(self):\n # The following test tuples are made up of:\n # duration, expected message, and the success of the _set_silence_overwrites function\n test_cases = (\n (0.0001, silence.MSG_SILENCE_SUCCESS.format(duration=0.0001), True,),\n (None, silence.MSG_SILENCE_PERMANENT, True,),\n (5, silence.MSG_SILENCE_FAIL, False,),\n )\n\n targets = (MockTextChannel(), MockVoiceChannel(), None)\n\n for (duration, message, was_silenced), target in itertools.product(test_cases, targets):\n with (\n mock.patch.object(self.cog, \"_set_silence_overwrites\", return_value=was_silenced),\n self.subTest(was_silenced=was_silenced, target=target, message=message),\n mock.patch.object(self.cog, \"send_message\") as send_message\n ):\n ctx = MockContext()\n await self.cog.silence.callback(self.cog, ctx, target, duration)\n send_message.assert_called_once_with(\n message,\n ctx.channel,\n target or ctx.channel,\n alert_target=was_silenced\n )", "async def setdmreply(self, ctx: commands.Context):\n if not ctx.invoked_subcommand:\n pass", "def _platformix_start_reply_handler(self, context, message, dry_run, timeouted):\r\n if not timeouted:\r\n # Ignore messages other than success or failure\r\n if not message.is_failure and not message.is_success:\r\n return False\r\n if not self._validate_context({\"action\": \"start\"}):\r\n return False\r\n if timeouted or message.sender in self._context[\"waiting_for\"]:\r\n if dry_run:\r\n if timeouted:\r\n return False\r\n else:\r\n return True\r\n if timeouted or message.is_failure:\r\n self._worker.starting = False\r\n self._worker.start_in_progress = False\r\n 
self._context[\"action\"] = \"start_failed\"\r\n for c in self._context[\"reply_to\"]:\r\n try:\r\n status = {}\r\n if timeouted:\r\n status[\"__timeouted__\"] = True\r\n if message.is_failure:\r\n status[\"dependency failed to start\"] = message.sender\r\n self._worker.unregister_reply_handler(c, False, status, dont_check=True)\r\n except AssertionError:\r\n pass\r\n if not timeouted:\r\n self._reply_all(self._context[\"reply_to\"], proto_failure(\r\n \"Aborting start due to platform {} failed to start\".format(message.sender)), None)\r\n else:\r\n self._reply_all(self._context[\"reply_to\"], proto_failure(\r\n \"Aborting start due to wait timeout. Platforms {} have not started within \"\r\n \"timeout {}\".format(self._context[\"waiting_for\"], self._worker.start_max_wait)), None)\r\n self._context = None\r\n if timeouted:\r\n return False\r\n else:\r\n return True\r\n else: # Success\r\n self._platformix_start(context, None)\r\n return True\r\n return False", "def test_is_affordable_ledger_state_proxy(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=self.ledger_id,\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker, \"_is_acceptable_for_settlement\", return_value=True\n ):\n with mock.patch.object(\n self.decision_maker, \"_settle_tx\", return_value=\"tx_digest\"\n ):\n self.decision_maker._is_affordable(tx_message)\n assert not self.decision_maker.message_out_queue.empty()", "async def test_sent_correct_message(self):\n unsilenced_overwrite = PermissionOverwrite(send_messages=True, add_reactions=True)\n test_cases = (\n (True, silence.MSG_UNSILENCE_SUCCESS, unsilenced_overwrite),\n (False, silence.MSG_UNSILENCE_FAIL, unsilenced_overwrite),\n (False, silence.MSG_UNSILENCE_MANUAL, self.text_overwrite),\n (False, silence.MSG_UNSILENCE_MANUAL, PermissionOverwrite(send_messages=False)),\n (False, silence.MSG_UNSILENCE_MANUAL, PermissionOverwrite(add_reactions=False)),\n )\n\n targets = (None, MockTextChannel())\n\n for (was_unsilenced, message, overwrite), target in itertools.product(test_cases, targets):\n ctx = MockContext()\n ctx.channel.overwrites_for.return_value = overwrite\n if target:\n target.overwrites_for.return_value = overwrite\n\n with (\n mock.patch.object(self.cog, \"_unsilence\", return_value=was_unsilenced),\n mock.patch.object(self.cog, \"send_message\") as send_message,\n self.subTest(was_unsilenced=was_unsilenced, overwrite=overwrite, target=target),\n ):\n await self.cog.unsilence.callback(self.cog, ctx, channel=target)\n\n call_args = (message, ctx.channel, target or ctx.channel)\n send_message.assert_awaited_once_with(*call_args, alert_target=was_unsilenced)", "def test_make_dispatch_reply(self):\n md_helper = MessageDispatchHelper(\n MessageHelper(), WorkerHelper('fooconn'))\n broker = self.setup_broker(md_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.outbound'), [])\n msg = md_helper.msg_helper.make_inbound('inbound')\n reply = yield md_helper.make_dispatch_reply(msg, 'reply content')\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.outbound'), [reply])\n self.assert_message_fields(reply, {\n 'content': 
'reply content',\n 'to_addr': msg['from_addr'],\n 'from_addr': msg['to_addr'],\n 'in_reply_to': msg['message_id'],\n })", "def process(self):\n # hello_message = HelloMessage(*self.message.value)\n # TODO: assert realm is in allowed list\n welcome_message = WelcomeMessage()\n self.answer_message = welcome_message", "def test_delay_by_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n proof_msg = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n\n # NODE creates message\n tmessage = node.create_protected_full_sync_text(\"Protected message\", 42)\n other.give_message(tmessage, node)\n\n # must NOT have been stored in the database\n other.assert_not_stored(tmessage)\n\n # OTHER sends dispersy-missing-proof to NODE\n _, message = node.receive_message(names=[u\"dispersy-missing-proof\"]).next()\n self.assertEqual(message.payload.member.public_key, node.my_member.public_key)\n self.assertEqual(message.payload.global_time, 42)\n\n # NODE provides proof\n other.give_message(proof_msg, node)\n\n # must have been stored in the database\n other.assert_is_stored(tmessage)", "def process_non_adherent_questionnaire_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")", "def response(self, context, message):\r\n return True", "def skip_wait_for_response(self):\n # If the response to wait for was CORRECT or INCORRECT,\n # randomly select a robot response to an incorrect user\n # action.\n if \"CORRECT\" in self._last_response_to_get:\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),\n properties=self._incorrect_responses[random.randint(0, \\\n len(self._incorrect_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play an incorrect \"\n + \"response. Maybe none were loaded?\")\n\n # If response to wait for was YES or NO, randomly select a\n # robot response for a NO user action.\n elif \"NO\" in self._last_response_to_get:\n try:\n self._ros_node.send_robot_command(\"DO\",\n response=\"ROBOT_NOT_SPEAKING\",\n timeout=datetime.timedelta(seconds=int(self.WAIT_TIME)),\n properties=self._no_responses[random.randint(0,\n len(self._no_responses)-1)])\n except AttributeError:\n self._logger.exception(\"Could not play a response to \"\n + \"user's NO. 
Maybe none were loaded?\")", "def forwardPMAnswer(r, answer_msg):\n message_id = getIdFromSubject(answer_msg.subject)\n\n if message_id:\n old_message = r.inbox.message(message_id)\n\n if old_message:\n log.debug(\"forwarded answer to message id: %s\", old_message.id)\n old_message.reply(answer_msg.body)\n answer_msg.reply(\"answer forwarded\")", "def process_non_adherent_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")", "def test__resend(self, mock_send):\n track = self._clean_track()\n fromcall = \"KFART\"\n tocall = \"KHELP\"\n message = \"somthing\"\n msg = messaging.TextMessage(fromcall, tocall, message)\n msg.last_send_attempt = 3\n track.add(msg)\n\n track._resend(msg)\n msg.send.assert_called_with()\n self.assertEqual(0, msg.last_send_attempt)", "def test_accepted_with_no_message(self):\n response = {\"status_code\": 202, \"content\": \"\"}\n self.mock_response.configure_mock(**response)\n\n post_to_ext_app(\"fake_url\", \"fake_data\", \"fake_headers\")\n\n self.mock_post.assert_called_once_with(\"fake_url\", data=\"fake_data\", headers=\"fake_headers\")\n self.assertFalse(self.mock_send_mail.called)", "def reply(cls, user, context, message, reply_message):\n pass", "def _on_reply(self, cb, reply_tag='', answer_tag='', correlation_id=''):\n result = yield cb\n encoded_result = json.dumps(result)\n\n # We want to have some discard ability for a really fatal situations\n # however, I still have no idea how to select Exceptions\n # (handled by _wrap_handler, for example) that are really fatal\n # and corresponding request should not be marked\n # as acked. It is practically impossible to handle every exception in\n # python. 
That is one thing I like implemented good in go.\n # So, I am not really sure, but it appears this `discard` is pretty\n # useless in general\n if \"discard\" in result and result[\"discard\"]:\n LOGGER.debug('Discarding result to %s' % reply_tag)\n return\n\n # I really would like to see a way to make these two actions atomic\n self._channel.basic_publish(\n exchange='',\n routing_key=answer_tag,\n properties=pika.BasicProperties(correlation_id=correlation_id),\n body=encoded_result)\n self._channel.basic_ack(reply_tag)", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def test_is_affordable_off_chain(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=\"off_chain\",\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n assert self.decision_maker._is_affordable(tx_message)", "def _assert_message_is_ask_response(\n self,\n message: W24TechreadMessage,\n ask_type: W24AskType,\n ) -> None:\n self._check_request_id(message)\n self._check_message_type(\n message,\n W24TechreadMessageType.ASK,\n ask_type)", "def test_is_not_affordable_ledger_state_proxy(self):\n with mock.patch(\n \"aea.decision_maker.messages.transaction.TransactionMessage._is_consistent\",\n return_value=True,\n ):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=\"bitcoin\",\n info=self.info,\n )\n var = self.decision_maker._is_affordable(tx_message)\n assert not var", "def _wait_for_reply(self, message, expectedreply, timeout):\r\n \r\n # Parse the first part (the message) from the expected reply. We need\r\n # to do this, because parts of some replies will contain parameters\r\n # that differ between replies.\r\n if '_' in expectedreply:\r\n expected = copy.copy(expectedreply[:expectedreply.find('_')])\r\n else:\r\n expected = copy.copy(expectedreply)\r\n \r\n # Send the message to the server.\r\n self._msg_server(message)\r\n\r\n # Wait for the expected reply.\r\n success, reply = self._wait_for_message(expected, timeout)\r\n\r\n # Return a success Boolean and the reply/fault.\r\n return (success, reply)", "def handle_message(self, msg, status):\n\n body = ensure_unicode(msg.Body)\n chat_id = get_chat_id(msg.Chat)\n\n if len(body) == 0:\n return False\n\n for name, cmd in self.commands.items():\n if body == name:\n cmd(msg, chat_id)\n return True\n\n\n if self.troller_is_running.get(chat_id):\n response = self.alice.respond(body)\n if response:\n msg.Chat.SendMessage(response)\n return True\n else:\n return False\n else:\n return False", "def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! 
fix it!", "def test_handle_response_wrong_message_type(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = OK(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._blacklist = mock.MagicMock()\n lookup._handle_error = mock.MagicMock()\n lookup._handle_response(uuid, contact, response)\n lookup._blacklist.assert_called_once_with(contact)\n self.assertEqual(lookup._handle_error.call_count, 1)\n args = lookup._handle_error.call_args[0]\n self.assertEqual(args[0], uuid)\n self.assertEqual(args[1], contact)\n self.assertIsInstance(args[2], TypeError)\n self.assertEqual(args[2].args[0],\n \"Unexpected response type from {}\".format(contact))", "def incoming(self, context, message, fake_reply=None):\r\n if message.interface != self._id:\r\n return False\r\n if message.is_reply:\r\n return False\r\n if message.method not in self._methods:\r\n eprint(\"{}:{} Unsupported method {}\".format(self._host.name, self._name, message.method))\r\n return False\r\n if self._map[message.method] is None:\r\n eprint(\"{}:{} Method {} is not implemented\".format(self._host.name, self._name, message.method))\r\n return False\r\n self._incoming_handler(context, message, fake_reply)", "def takeoff_first():\n\tglobal c1\n\tglobal a1\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c1.recv(BUF_SIZE) # wait for the armed message\n\tprint a1, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init1[2]\n\t\tc1.send(json.dumps(new_msg))\n\t\tstate += 1", "def test_not_accept(mocker, client, application, decision, should_send_email):\n order = create_test_order(application, 123, fulfilled=False)\n\n data = {\"req_reference_number\": make_reference_id(order), \"decision\": decision}\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=True\n )\n send_email = mocker.patch(\"ecommerce.api.MailgunClient.send_individual_email\")\n resp = client.post(reverse(\"order-fulfillment\"), data=data)\n assert resp.status_code == statuses.HTTP_200_OK\n assert len(resp.content) == 0\n order.refresh_from_db()\n assert Order.objects.count() == 1\n assert order.status == Order.FAILED\n\n if should_send_email:\n assert send_email.call_count == 1\n assert send_email.call_args[0] == (\n \"Order fulfillment failed, decision={decision}\".format(\n decision=\"something else\"\n ),\n \"Order fulfillment failed for order {order}\".format(order=order),\n \"[email protected]\",\n )\n else:\n assert send_email.call_count == 0", "def test_xfail_with_run_false_and_with_reason():\n pass", "def test_receive__user_has_prefs(self, mock_emailmessage_constructor):\n starrer_3_pref = models.UserPref(\n email='[email protected]',\n notify_as_starrer=False)\n starrer_3_pref.put()\n\n bounce_message = testing_config.Blank(\n original={'to': '[email protected]',\n 'from': 'sender',\n 'subject': 'subject',\n 'text': 'body'})\n\n self.handler.receive(bounce_message)\n\n updated_pref = models.UserPref.get_by_id(starrer_3_pref.key.integer_id())\n self.assertEqual('[email protected]', updated_pref.email)\n self.assertTrue(updated_pref.bounced)\n self.assertFalse(updated_pref.notify_as_starrer)\n expected_subject = \"Mail to '[email protected]' bounced\"\n 
mock_emailmessage_constructor.assert_called_once_with(\n sender=self.sender, to=self.expected_to, subject=expected_subject,\n body=mock.ANY)\n mock_message = mock_emailmessage_constructor.return_value\n mock_message.check_initialized.assert_called_once_with()\n mock_message.send.assert_called()", "def test_accepted_with_message(self):\n response = {\"status_code\": 202, \"content\": \"something's wrong\"}\n self.mock_response.configure_mock(**response)\n\n post_to_ext_app(\"fake_url\", \"fake_data\", \"fake_headers\")\n\n self.mock_post.assert_called_once_with(\"fake_url\", data=\"fake_data\", headers=\"fake_headers\")\n self.assertEqual(self.mock_send_mail.call_count, 1)", "def test_matcher_called(self):\n\n skill = _TestSkill(None, None)\n message = Mock()\n skill.hello_skill(message)\n\n self.assertTrue(message.respond.called_once)", "def finished(self, reply):\n pass", "def test_forwarder_solicitation_sent(self):\n waittime = 3.0\n self.autoconflayer.start_process()\n # Pass an interest to the autoconfig layer to trigger forwarder solicitation\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure a forwarder solicitation was sent downwards\n solictiation = Interest(Name('/autoconfig/forwarders'))\n self.assertIn([bcfid, solictiation], tolower)", "def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply", "def test_brands_reply(self):\n # 1. Setup service channel / dispatch channel\n # 2. send a post to brand\n # 3. Reply with custom response\n # 4. Route a reply\n # 5. check there is no extra responses created\n # 6. 
create a matchable and repeat 1-5\n brand = 'brand'\n channel, dispatch_channel = self.setup_channels(brand)\n user = self._create_db_user(email='[email protected]', password='test', is_superuser=True)\n user.account = self.account\n user.save()\n profiles = set()\n\n def do_test(matchable):\n profile = gen_profile()\n user_name = profile['user_name']\n profiles.add(user_name)\n post = self._create_db_post(\n '@%s I need some carrot' % brand,\n channel=channel,\n user_profile=profile)\n\n response = Response.objects.get(id=id_from_post_id(post.id))\n self.assertIsInstance(response.matchable, matchable.__class__)\n assert response.matchable == matchable\n\n # post custom response\n creative = \"U could find some carrot there\"\n self.login(user.email, 'test')\n data = dict(creative=creative,\n response=str(response.id),\n latest_post=str(response.post.id))\n resp = self.client.post('/commands/custom_response', data=json.dumps(data))\n resp = json.loads(resp.data)\n\n # check responses and conversations\n self.assertEqual(Response.objects(conversation_id=None).count(), 0)\n self.assertEqual(\n Response.objects(channel__in=[channel, channel.inbound_channel, channel.outbound_channel]).count(),\n 0)\n self.assertEqual(Response.objects(conversation_id=response.conversation.id).count(), 1)\n self.assertEqual(Response.objects(channel__in=[dispatch_channel]).count(), len(profiles))\n\n matchable = EmptyMatchable.get()\n do_test(matchable)\n\n matchable = self._create_db_matchable('Here is your carrot',\n intention_topics=['carrot'],\n channels=[channel.inbound_channel])\n do_test(matchable)", "def _check_reply(self):\n self._more_packets_available = False\n try:\n if self._reply is None:\n self._status = (3, '{} without reply'.format(\n REPLAY_INFO[unpack_dint(self._message[:2])]))\n return False\n # Get the type of command\n typ = unpack_uint(self._reply[:2])\n\n # Encapsulation status check\n if unpack_dint(self._reply[8:12]) != SUCCESS:\n self._status = (3, \"{0} reply status:{1}\".format(\n REPLAY_INFO[typ],\n SERVICE_STATUS[unpack_dint(self._reply[8:12])]))\n return False\n\n # Command Specific Status check\n if typ == unpack_uint(ENCAPSULATION_COMMAND[\"send_rr_data\"]):\n status = unpack_usint(self._reply[42:43])\n if status != SUCCESS:\n status_msg = \"send_rr_data reply:{0} - Extend status:{1}\"\n self._status = (3, status_msg.format(\n SERVICE_STATUS[status],\n get_extended_status(self._reply, 42)))\n return False\n else:\n return True\n return True\n except Exception as e:\n raise DataError(e)", "def hey(self, msg):\n if issilence(msg):\n return \"Fine. 
Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"", "def set_reply(msg):\n \n result = Message(msg.content, correlation_id=msg.correlation_id ) \n return result", "def resolve_message(self, rq):\n\n if rq.command == u\"initialize\":\n self.next_seq += 1\n DAPInitializeResponse.create(self.next_seq, rq.seq, True, rq.command, body=DAPCapabilities.create(**features)).send(self._current_client)\n self.next_seq += 1\n DAPInitializedEvent.create(self.next_seq).send(self._current_client)\n elif rq.command == u\"setBreakpoints\":\n self.next_seq += 1\n bkps = self.create_breakpoints(**rq.get_arguments().as_current_kwargs())\n body = DAPSetBreakpointsResponseBody.create([b.serialize() for b in bkps])\n DAPSetBreakpointsResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"configurationDone\":\n self.next_seq += 1\n DAPConfigurationDoneResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n elif rq.command == u\"launch\":\n # no special noDebug\n self.next_seq += 1\n DAPLaunchResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n self._ready_for_events = True\n elif rq.command == u\"disconnect\":\n self.next_seq += 1\n DAPDisconnectResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n self._current_client.close()\n self._current_client = None\n return\n elif rq.command == u\"continue\":\n self.next_seq += 1\n body = DAPContinueResponseBody.create(all_threads_continued=True)\n DAPContinueResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n debugger.stepping = SteppingMode.STEP_NO_STEP\n debugger.continue_next()\n elif rq.command == u\"threads\":\n self.next_seq += 1\n body = DAPThreadsResponseBody.create([DAPThread.create(0, \"renpy_main\")])\n DAPThreadsResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"stackTrace\":\n self.next_seq += 1\n body = DAPStackTraceResponseBody.create(debugger.get_stack_frames(**rq.get_arguments().as_current_kwargs()))\n DAPStackTraceResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"scopes\":\n self.next_seq += 1\n body = DAPScopesResponseBody.create(debugger.get_scopes(int(rq.get_arguments().get_frame_id())))\n DAPScopesResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"variables\":\n self.next_seq += 1\n body = DAPVariablesResponseBody.create(debugger.format_variable(**rq.get_arguments().as_current_kwargs()))\n DAPVariablesResponse.create(self.next_seq, rq.seq, True, body).send(self._current_client)\n elif rq.command == u\"pause\":\n self.next_seq += 1\n DAPPauseResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.break_pause = True\n elif rq.command == u\"next\":\n print(\"STEP\")\n self.next_seq += 1\n DAPNextResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_NEXT\n debugger.continue_next()\n elif rq.command == u\"stepIn\":\n self.next_seq += 1\n DAPStepInResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_INTO\n debugger.continue_next()\n elif rq.command == u\"stepOut\":\n self.next_seq += 1\n DAPStepOutResponse.create(self.next_seq, rq.seq, True).send(self._current_client)\n 
debugger.store_frames()\n debugger.stepping = SteppingMode.STEP_OUT\n debugger.continue_next()\n else:\n self.next_seq += 1\n DAPErrorResponse.create(self.next_seq, rq.seq, False, message=\"NotImplemented\").send(self._current_client)", "def default_reply():\n return random.choice(DEFAULT_RESPONSES)", "def trysay(self, msg):\n if self.channel:\n try:\n self.say(self.channel, msg)\n return True\n except: pass", "def answerDHCP(self, shouldAnswer):\n assert False, \"Deriving class must implement\"", "def _receive(self, what, address, **kwargs):\n\n print('_receive: please override me.')", "def _process_unknown_message(self, msg: Message) -> NoReturn:\n dev = self.reddit.redditor('barrycarey')\n try:\n dev.message(f'FWD: {msg}', f'From {msg.author.name}\\n\\n{msg.body}')\n msg.reply(\n 'Thank you for your message. This inbox is not monitored. I have forwarded your message to the developer')\n except Exception as e:\n log.exception('Failed to send message to dev', exc_info=True)", "def processFaxbotMessage(self, txt):\r\n with self.__lock:\r\n if \"I do not understand your request\" in txt:\r\n replyTxt = (\"FaxBot does not have the requested monster '{}'. \"\r\n \"(Check the list at {} )\"\r\n .format(self._lastRequest, self.fax_list_url)) \r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return replyTxt\r\n if \"just delivered a fax\" in txt:\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return (\"FaxBot received the request too early. \"\r\n \"Please try again.\")\r\n if \"try again tomorrow\" in txt:\r\n self._noMoreFaxesTime = utcTime()\r\n txt = (\"I'm not allowed to request any more faxes today. \"\r\n \"Request manually with /w FaxBot {}\"\r\n .format(self._lastRequest))\r\n self._lastRequest = None\r\n self._lastRequestTime = utcTime()\r\n return txt\r\n m = re.search(r'has copied', txt)\r\n if m is not None:\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n self._lastFaxBotTime = utcTime()\r\n # suppress output from checkForNewFax since we are returning\r\n # the text, to be output later\r\n return self.checkForNewFax(False)\r\n self._lastRequest = None\r\n self._lastRequestTime = None\r\n return \"Received message from FaxBot: {}\".format(txt)", "async def _ask(self, ctx, question: _QuestionData):\n check = BaseLock(ctx, lock=self.lock)\n msg_text = question.question if not question.failed else question.parse_fail_response\n msg_embed = question.embed if not question.failed else question.parse_fail_embed\n self._messages.append(await ctx.send(content=msg_text, embed=msg_embed))\n answer_msg = await ctx.bot.wait_for('message', check=check, timeout=self.timeout)\n self._messages.append(answer_msg)\n if answer_msg.content.lower().strip() == self.stop:\n return False\n if question.parser:\n try:\n if isinstance(question.parser, Converter):\n answer = await question.parser.convert(ctx, answer_msg.content)\n elif isinstance(question.parser, type) and issubclass(question.parser, Converter):\n answer = await question.parser().convert(ctx, answer_msg.content)\n else:\n answer = question.parser(answer_msg.content)\n if isawaitable(question.parser):\n answer = await answer\n except Exception as e:\n question.failed = True\n question.parse_fail_response = (question.parse_fail_response.format(answer_msg.content)\n if question.parse_fail_response else None)\n raise UserAnswerParsingError(f\"Failed to parse {question}\") from e\n else:\n answer = answer_msg.content\n return answer", "def _message_check(self, fuzz_case_iterator):\n 
self.server_init()\n\n try:\n for fuzz_args in fuzz_case_iterator:\n self._check_message(*fuzz_args)\n except KeyboardInterrupt:\n # TODO: should wait for the end of the ongoing test case, and stop gracefully netmon and procmon\n self.export_file()\n self._fuzz_data_logger.log_error(\"SIGINT received ... exiting\")\n raise\n except sex.BoofuzzRestartFailedError:\n self._fuzz_data_logger.log_error(\"Restarting the target failed, exiting.\")\n self.export_file()\n raise\n except sex.BoofuzzTargetConnectionFailedError:\n self._fuzz_data_logger.log_error(\n \"Cannot connect to target; target presumed down.\"\n \" Note: Normally a failure should be detected, and the target reset.\"\n \" This error may mean you have no restart method configured, or your error\"\n \" detection is not working.\")\n self.export_file()", "def test_ctcpQuery_FINGER(self):\n self.client.fingerReply = \"reply\"\n self.client.ctcpQuery_FINGER(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods, [(\"ctcpMakeReply\", (\"Wolf\", [(\"FINGER\", \"reply\")]))]\n )", "def test_if_the_reason_is_logged_correctly(self):\n self.data['reason'] = 'Device did not now answer'\n with LogCapture() as log:\n self.client.post(self.hangup_reason_url, self.data)\n log.check(\n (\n 'django',\n 'INFO',\n ('No remote logging ID - middleware - sduiqayduiryqwuioeryqwer76789 | APNS Device '\n 'not available because: Device did not now answer on 12:00:00.133700'),\n ),\n )", "def should_respond(self, text, author, server, channel=None):\n if ((server in self.config[\"servers\"] and\n channel not in self.config[\"servers\"][server][\"ignore\"]) and\n (text.split(' ', 1)[0].lower().startswith(self.config[\"name\"].lower()) or\n random.random() < self.config[\"servers\"][server][\"responsiveness\"]) and\n author != self.name):\n return True\n return False", "def ready(self, all_ok, message):\r\n assert self.status == WAIT_PROCESS\r\n if not all_ok:\r\n self.close()\r\n self.wake_up()\r\n return\r\n self.len = ''\r\n if len(message) == 0:\r\n # it was a oneway request, do not write answer\r\n self.message = ''\r\n self.status = WAIT_LEN\r\n else:\r\n self.message = struct.pack('!i', len(message)) + message\r\n self.status = SEND_ANSWER\r\n self.wake_up()", "def support_message(bot, update):\n if update.message.reply_to_message and \\\n update.message.reply_to_message.forward_from:\n # If it is a reply to the user, the bot replies the user\n bot.send_message(chat_id=update.message.reply_to_message\n .forward_from.id,\n text=update.message.text)\n else:\n # If it is a request from the user, the bot forwards the message\n # to the group\n bot.forward_message(chat_id=int(config['DEFAULT']['support_chat_id']),\n from_chat_id=update.message.chat_id,\n message_id=update.message.message_id)\n bot.send_message(chat_id=update.message.chat_id,\n text=_(\"Give me some time to think. 
Soon I will return to you with an answer.\"))", "def test_declined_card(self, order_placed):\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'), fetch_redirect_response=False)\n\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')", "async def sdmdefaultreply(self, ctx, *, message):\n await self.config.defaultreply.set(message)\n await ctx.message.add_reaction(\"✅\")", "def _check_comm_reply(self):\n if len(self._pending_comms) == 0:\n return\n for comm in self._pending_comms.values():\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(1, self._check_comm_reply)", "def receipt(self, jid, messageId, wantsReceipt):\n\t\tif wantsReceipt and self.sendReceipts:\n\t\t\tself.methodsInterface.call(\"message_ack\", (jid, messageId))", "def send(self):\n if self.postage >= self.postage_needed():\n self.was_sent = True\n else:\n raise InsufficientPostage", "def reply(self, result):\n if self._reply_channel is None:\n assert False, \"can only reply to a synchronous message, e.g. somebody must be calling us with 'call'\"\n else:\n self._reply_channel.send(result)", "def test_reply_from_banned_user(self):\n # Create 2 users. By default neither is banned, but one will be soon\n viewing_user = self.create_user()\n banned_user = self.create_user()\n\n # Create a new group and add both our users to it\n group = self.create_group()\n viewing_user.add_to_group(group.pk)\n banned_user.add_to_group(group.pk)\n\n # Create a new thread sent to the group we created above\n thread = self.create_thread(group=group)\n\n # Create a reply sent by a soon-to-be-banned user\n message = mommy.make(\n 'connectmessages.Message', thread=thread, sender=banned_user)\n\n # Confirm both users can see the message, as neither is banned\n self.assertTrue(message.visible_to_user(viewing_user))\n self.assertTrue(message.visible_to_user(banned_user))\n\n # Ban the banned user\n banned_user.is_banned = True\n banned_user.save()\n\n # Confirm the non-banned user can no longer see the banned user's reply\n # but the banned user can see his or her own message\n self.assertFalse(message.visible_to_user(viewing_user))\n self.assertTrue(message.visible_to_user(banned_user))", "async def ask_msg(self, ctx, msg: str, timeout: int = 10):\n nene = self.get_cog('Nene')\n p = await ctx.reply(msg)\n nene.no_ai.append(p.id)\n r = None\n # Waits for the time specified\n try:\n reply = await self.wait_for(\n 'message', timeout=timeout,\n # Checks whether mess is replying to p\n check=lambda mess: mess.reference and mess.reference.cached_message == p)\n r = reply.content\n except asyncio.TimeoutError:\n pass\n nene.no_ai.remove(p.id)\n return r", "def replypriv(self, m):\n self.reply(m)", "def test_post_process_forwarder_bad_message(kafka_message_payload):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n\n # Use a version which does not exist to create a bad message\n kafka_message_payload[0] = 100\n mock_message = Mock()\n mock_message.value = MagicMock(return_value=json.dumps(kafka_message_payload))\n mock_message.partition = MagicMock(\"1\")\n\n future = forwarder.process_message(mock_message)\n\n with 
pytest.raises(InvalidVersion):\n forwarder.flush_batch([future])\n\n forwarder.shutdown()", "def simulate_reply(self, data):\n self._data = data[:]", "def _check_message(self, path):\n target = self.targets[0]\n\n self.pause() # only pauses conditionally\n\n message_path = \"->\".join([self.nodes[e.dst].name for e in path])\n\n test_case_name = \"FEATURE-CHECK->{0}\".format(message_path)\n\n self._fuzz_data_logger.open_test_case(\"{0}: {1}\".format(self.total_mutant_index, test_case_name),\n name=test_case_name, index=self.total_mutant_index)\n\n if target.procmon:\n self._fuzz_data_logger.open_test_step('Calling procmon pre_send()')\n target.procmon.pre_send(self.total_mutant_index)\n\n if target.netmon:\n self._fuzz_data_logger.open_test_step('Calling netmon pre_send()')\n target.netmon.pre_send(self.total_mutant_index)\n\n try:\n target.open()\n\n self.pre_send(target)\n\n try:\n for e in path[:-1]:\n node = self.nodes[e.dst]\n callback_data = self._callback_current_node(node=node, edge=e)\n self._fuzz_data_logger.open_test_step(\"Prep Node '{0}'\".format(node.name))\n self.transmit_normal(target, node, e, callback_data=callback_data)\n\n callback_data = self._callback_current_node(node=self.fuzz_node, edge=path[-1])\n except sex.BoofuzzTargetConnectionReset:\n # TODO: Switch _ignore_connection_reset for _ignore_transmission_error, or provide retry mechanism\n if self._ignore_connection_reset:\n self._fuzz_data_logger.log_info(\"Target connection reset.\")\n else:\n self._fuzz_data_logger.log_fail(\"Target connection reset.\")\n except sex.BoofuzzTargetConnectionAborted as e:\n # TODO: Switch _ignore_connection_aborted for _ignore_transmission_error, or provide retry mechanism\n if self._ignore_connection_aborted:\n self._fuzz_data_logger.log_info(\"Target connection lost (socket error: {0} {1}): You may have a \"\n \"network issue, or an issue with firewalls or anti-virus. Try \"\n \"disabling your firewall.\"\n .format(e.socket_errno, e.socket_errmsg))\n else:\n self._fuzz_data_logger.log_fail(\"Target connection lost (socket error: {0} {1}): You may have a \"\n \"network issue, or an issue with firewalls or anti-virus. Try \"\n \"disabling your firewall.\"\n .format(e.socket_errno, e.socket_errmsg))\n\n self._fuzz_data_logger.open_test_step(\"Node Under Test '{0}'\".format(self.fuzz_node.name))\n self.transmit_normal(target, self.fuzz_node, path[-1], callback_data=callback_data)\n\n self._fuzz_data_logger.open_test_step(\"Calling post_send function:\")\n try:\n self.post_send(target=target, fuzz_data_logger=self._fuzz_data_logger, session=self, sock=target)\n except sex.BoofuzzTargetConnectionReset:\n self._fuzz_data_logger.log_fail(\n \"Target connection reset -- considered a failure case when triggered from post_send\")\n except sex.BoofuzzTargetConnectionAborted as e:\n self._fuzz_data_logger.log_info(\"Target connection lost (socket error: {0} {1}): You may have a \"\n \"network issue, or an issue with firewalls or anti-virus. 
Try \"\n \"disabling your firewall.\"\n .format(e.socket_errno, e.socket_errmsg))\n pass\n except sex.BoofuzzTargetConnectionFailedError:\n self._fuzz_data_logger.log_fail(\n \"Cannot connect to target; target presumed down.\"\n \" Note: Normally a failure should be detected, and the target reset.\"\n \" This error may mean you have no restart method configured, or your error\"\n \" detection is not working.\")\n except Exception:\n self._fuzz_data_logger.log_fail(\n \"Custom post_send method raised uncaught Exception.\" + traceback.format_exc())\n\n target.close()\n except sex.BoofuzzTargetConnectionFailedError:\n self._fuzz_data_logger.log_fail(\n \"Cannot connect to target; target presumed down.\"\n \" Note: Normally a failure should be detected, and the target reset.\"\n \" This error may mean you have no restart method configured, or your error\"\n \" detection is not working.\")\n\n self._fuzz_data_logger.open_test_step(\"Sleep between tests.\")\n self._fuzz_data_logger.log_info(\"sleeping for %f seconds\" % self.sleep_time)\n time.sleep(self.sleep_time)\n\n self.poll_pedrpc(target)\n\n if self._process_failures(target=target):\n print(\"FAIL: {0}\".format(test_case_name))\n else:\n print(\"PASS: {0}\".format(test_case_name))\n\n self.export_file()", "def test_handshake_wrong_reply(tchannel_pair, dummy_headers):\n server, client = tchannel_pair\n\n client.initiate_handshake(headers=dummy_headers)\n server.initiate_handshake(headers=dummy_headers)\n with pytest.raises(InvalidMessageException):\n client.await_handshake_reply()", "def test_decision_maker_handle_tx_message(self):\n assert self.decision_maker.message_out_queue.empty()\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -2},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n info=self.info,\n ledger_id=self.ledger_id,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker.ledger_apis, \"token_balance\", return_value=1000000\n ):\n with mock.patch.object(\n self.decision_maker.ledger_apis,\n \"transfer\",\n return_value=\"This is a test digest\",\n ):\n self.decision_maker.handle(tx_message)\n assert not self.decision_maker.message_out_queue.empty()", "def pm_handler(self, msg):\n\t\tif str(msg['from']).split('/')[0] == self.boundjid.bare:\n\t\t\tself.recipient = str(msg['to']).split('/')[0]\n\t\telse:\n\t\t\tself.recipient = str(msg['from']).split('/')[0]\n\t\t# For some reason carbons sent by you come twice (from gajim at least)\n\t\tif self.user().last_msg == msg:\n\t\t\treturn\n\t\tif msg['body'][0] == '!':\n\t\t\tself.parse(msg)\n\t\telif msg['body'].split()[0].lower() in self.qwords \\\n\t\t\t\tor msg['body'][-1] == '?' \\\n\t\t\t\tor self.user().force[str(msg['from']).split('/')[0]]:\n\t\t\tself.assist(msg)\n\t\tself.user().last_msg = msg" ]
[ "0.59258664", "0.59085613", "0.57893336", "0.57723594", "0.5757017", "0.57384187", "0.5699918", "0.5680963", "0.5651382", "0.5646977", "0.5619524", "0.5575746", "0.55438524", "0.5535469", "0.5488948", "0.5482735", "0.5448364", "0.54328763", "0.5418796", "0.5418293", "0.53829753", "0.53691936", "0.53221387", "0.53195757", "0.5319464", "0.530189", "0.52744126", "0.5238729", "0.5237531", "0.5233361", "0.5227128", "0.5210737", "0.5209344", "0.52088463", "0.52072096", "0.5203709", "0.519939", "0.5174716", "0.515441", "0.5153909", "0.5148623", "0.5145878", "0.5143718", "0.51413715", "0.51220226", "0.51157194", "0.5113896", "0.5111509", "0.5090298", "0.5087438", "0.50824356", "0.5081174", "0.50703186", "0.5058858", "0.50578815", "0.5048436", "0.50386465", "0.50302166", "0.501868", "0.5014551", "0.501299", "0.5011", "0.5009957", "0.50087786", "0.5000241", "0.49966785", "0.4994612", "0.49817097", "0.49778283", "0.49732292", "0.49603048", "0.49602556", "0.49519327", "0.49513957", "0.49497655", "0.4944248", "0.49370024", "0.49368775", "0.49353307", "0.49334747", "0.4913339", "0.49090555", "0.49058235", "0.49020165", "0.49006993", "0.49004903", "0.4898074", "0.4897016", "0.48917022", "0.4881202", "0.48771396", "0.4872942", "0.48682976", "0.48664185", "0.48647153", "0.48646966", "0.48645929", "0.48622528", "0.48609656", "0.48531646" ]
0.5759425
4
Implements builtin methods of protocol base class
def _process_message_general(self, context, message):
    f = self._fake_next_op(context, message)
    if f is True:
        return True
    elif f is not False:
        return f
    elif message.method == "__testing__":
        self._general_testing(context, *message.args, **message.kwargs)
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def protocol(self):\n ...", "def protocol(self):\n\n raise NotImplementedError()", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, 
*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(self, *args, **kwargs): # real signature unknown\n pass", "def protocol_factory_method(self):\n pass", "def __call__(self):\r\n raise NotImplementedError('override me')", "def ProtocolType(self) -> ProtocolType:", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __subclasshook__(*args):", "def __dispatch__(interface, *args, **kwds):\n # Override in subclasses.\n return None", "def __subclasshook__(*args,**kw):\n pass", "def protocol(self):\n raise UnsupportedCall(f\"'{self.__class__.__name__}' object has no attribute 'protocol'\")", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __subclasshook__(*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(*args, **kwargs): # real signature unknown\n pass", "def __subclasshook__(*args, **kwargs): # real signature unknown\n pass", "def __init__(self, proto):\n self.proto = proto", "def base_operator(self):\n raise NotImplementedError()", "def __call__(self):\n raise NotImplementedError()", "def gotProtocol(self,p): \n p.send_hello()", "def __call__(self):\n raise NotImplementedError", "def __subclasshook__(self, ???):" ]
[ "0.6998625", "0.66639465", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.62314713", "0.618585", "0.6161645", "0.6112236", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6073936", "0.6070819", "0.6070524", "0.60037136", "0.5924754", "0.5924754", "0.5924754", "0.5894338", "0.5894338", "0.5894338", "0.5894338", "0.5894338", "0.5894338", "0.58852845", "0.58825326", "0.58618736", "0.5852663", "0.58202523", "0.58022755" ]
0.0
-1
Processes incoming message. First tries to process protocol's builtins. If message wasn't processed by builtins then message is passed to protocol's interface object
def process_message(self, context, message):
    r = self._process_message_general(context, message)
    if r is True:
        return
    elif r is not False:
        self._interface.incoming(context, message, r)
    else:
        self._interface.incoming(context, message, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_msg(cls, msg):\n raise NotImplementedError", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def process_message(self, msg, src):", "def handleMessage(msg):", "def _process_message(self, obj):\n pass", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(self._parse_message, string, protocol)\n d.addCallback(self._r_process_message, protocol)\n d.addCallbacks(callback=self._r_send_result, errback=self._r_send_error, callbackArgs=(protocol,), errbackArgs=(protocol,))", "def handle_message(self, message):", "def handle(self, message):", "def processMessage(self, *args, **kwargs):\r\n pass", "def process(self, message: Message, **kwargs: Any) -> None:", "def process(self, msg):\n raise NotImplemented", "def gotProtocol(self,p): \n p.send_hello()", "def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))", "def processmessage(self,pktMessage):\n raise NotImplementedError()", "def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))", "def process_message(self, message):\n processors = {\n \"^org.chicago.cta.stations.\": self._handle_station,\n \"^org.chicago.cta.arrivals.\": self._handle_arrival,\n \"org.chicago.cta.turnstiles\": self._handle_turnstiles\n }\n processor = processors.get(message.topic, False)\n if processor:\n processor(message)\n else:\n logger.debug(\n \"unable to find handler for message from topic %s\", message.topic\n )", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def processMessage(self, msg, binary):\r\n if binary:\r\n self._handleBinary(msg)\r\n else:\r\n try:\r\n msg = json.loads(msg)\r\n except ValueError:\r\n raise InvalidRequest('Message is not in valid JSON format.')\r\n\r\n uris = self._recursiveURISearch(msg)\r\n\r\n if uris:\r\n self._handleString(msg, uris)\r\n else:\r\n self._protocol.processCompleteMessage(msg)", "def on_ofp_message(self, message: IncomingMessage) -> None:\n with message.process():\n log.debug(f\"received [x] {message.routing_key}:{message.body}\")\n (version, msg_type, msg_len, xid) = 
ofproto_parser.header(message.body)\n log.debug(\n f\"msg {version} {msg_type} {msg_len} {xid} {len(message.body)} {type(message.body)}\"\n )\n msg = ofproto_parser.msg(\n version, msg_type, msg_len, xid, message.body[:msg_len]\n )\n if msg_type == self.ofproto.OFPT_PACKET_IN:\n pkt_in = self.ofparser.OFPPacketIn.parser(msg_len, xid, msg.buf)\n pkt_in.serialize()\n dpid = int(message.routing_key.split(\".\")[-1])\n self.loop.create_task(self.handle_pktin(pkt_in, dpid))", "def handle_message(self, msg):\n pass", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def receive_message(self, message):", "def on_receive(self, msg):\n raise NotImplementedError", "def receive(self, message):", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def dispatch(message,var=None):\r\n if message=='set_key':\r\n set_key(var)\r\n elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\")", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "def _process_message(self, json_object):\n\n message = json.loads(json_object)\n if message['type'] == \"relay\":\n self._process_relay(message)\n elif message['type'] == \"control\":\n self._process_control(message)\n else:\n print(\"ERROR Received message has invalid type\\n\")\n return", "def __processMsg(self, sock, msgData):\n\n pass", "def handle_comm_msg(self, message):\n msg = self._unwrap(message)\n\n try:\n self.geonotebook._recv_msg(msg)\n\n except jsonrpc.JSONRPCError as e:\n self.geonotebook._send_msg(\n json_rpc_result(None, e.tojson(), msg['id'])\n )\n self.log.error(u\"JSONRPCError (%s): %s\" % (e.code, e.message))\n\n except Exception as e:\n self.log.error(u\"Error processing msg: {}\".format(str(e)))", "def incoming(self,message):\n #Convert to Dictionary, Whatever the input is\n if isinstance(message, str):\n message = 
json.loads(message)\n elif isinstance(message, bytes):\n message = self.deserialize(message)\n\n op = message.get(\"op\")\n if op == \"publish\":\n message[\"msg\"] = self.decompress(message[\"topic\"],message.get(\"msg\"))\n message[\"topic\"] = self.remap_topic(message[\"topic\"]) \n elif op == \"advertise\":\n message[\"topic\"] = self.remap_topic(message[\"topic\"])\n elif op == \"advertise_service\" or op == \"service_response\":\n message[\"service\"] = self.remap_service(message[\"service\"])\n\n\n message = json.dumps(message)\n #--------\n #replace JSON Null values in float32 types with infinity datatype (changed according to the error for LaserScan values)\n message = message.replace(\"null\", \"Infinity\")\n #--------\n self._protocol.incoming(message)", "def onMessage(self, payload, isBinary):", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "def receive(self) -> Message[ValueType]:", "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def dispatch_message(self, addr, message_dict, kind):\n try:\n yield from self.dispatcher.dispatch_message(addr, message_dict, kind)\n except Exception as e:\n self.logger.error(\n \"Failed to dispatch mochad message {}: {}\".format(\n message_dict, e))", "def process(self, message, callback):\n\n\t\tif message.type == message_type.EMIT:\n\t\t\t# We are in the server, the message has just been built.\n\t\t\t# Forward it nearly \"as is\". Only the message type is changed,\n\t\t\t# to make us know it has been processed one time since emission,\n\t\t\t# and thus the next hop will be the client, which has the task\n\t\t\t# to display it, and eventually get an interactive answer.\n\n\t\t\tassert ltrace(TRACE_MESSAGING, ' MessageProcessor.process(EMIT)')\n\n\t\t\tif message.interaction:\n\n\t\t\t\tif message.interaction == interactions.ASK_FOR_REPAIR:\n\n\t\t\t\t\tmessage.answer = ttyutils.interactive_ask_for_repair(message.data,\n\t\t\t\t\t\tauto_answer=message.auto_answer)\n\n\t\t\t\telif message.interaction == interactions.GET_PASSWORD:\n\n\t\t\t\t\tmessage.answer = getpass.getpass(message.data)\n\n\t\t\t\telse:\n\t\t\t\t\tassert ltrace(TRACE_MESSAGING,\n\t\t\t\t\t\t'unsupported interaction type in message %s.' % message)\n\t\t\t\t\tmessage.answer = None\n\n\t\t\t\tmessage.type = message_type.ANSWER\n\t\t\t\treturn callback.process(message, self.getAttrProxy())\n\n\t\t\telse:\n\t\t\t\tif message.clear_terminal:\n\t\t\t\t\tttyutils.clear_terminal(MessageProcessor.channels[message.channel])\n\n\t\t\t\tchan_flush = MessageProcessor.channels[message.channel].flush\n\t\t\t\tchan_write = MessageProcessor.channels[message.channel].write\n\n\t\t\t\tif message.word_delay:\n\t\t\t\t\tdelay = message.word_delay\n\t\t\t\t\tfor word in message.data.split(' '):\n\t\t\t\t\t\tchan_write(word + ('' if word.endswith('\\n') else ' '))\n\t\t\t\t\t\tchan_flush()\n\t\t\t\t\t\ttime.sleep(delay)\n\n\t\t\t\telif message.char_delay:\n\t\t\t\t\tdelay = message.char_delay\n\t\t\t\t\tfor char in message.data:\n\t\t\t\t\t\tchan_write(char)\n\t\t\t\t\t\tchan_flush()\n\t\t\t\t\t\ttime.sleep(min(delay*4, 0.4) if char == ' ' else delay)\n\n\t\t\t\telse:\n\t\t\t\t\tchan_write(message.data)\n\n\t\t\t\tmessage.answer = None\n\n\t\telif message.type == message_type.ANSWER:\n\t\t\t# We are on the server, this is the answer from the client to\n\t\t\t# ourquestion. Return it directly to the calling process. 
The\n\t\t\t# message loop ends here.\n\n\t\t\tassert ltrace(TRACE_MESSAGING, ' MessageProcessor.process(ANSWER)')\n\n\t\t\t#message.channel.write(message.data)\n\t\t\treturn message.answer\n\t\telif message.type == message_type.PUSH_STATUS:\n\n\t\t\t# FIXME: is this really needed ? will the status be really pushed by this way ?\n\t\t\tfrom licorn.core import LMC\n\t\t\tLMC.machines.update_status(mid=message.sender,\n\t\t\t\tstatus=message.status)\n\n\t\telse:\n\t\t\traise exceptions.LicornRuntimeException('''Unrecognized message '''\n\t\t\t\t'''type %s for message %s.''' % (message.type, message))", "def handler(self, input_message: FSPluginMessageBase, context: FSContext) -> FSPluginOutput:\n raise NotImplementedError()", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. 
Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def _decode1(self, body, data):\r\n if \" \" in body:\r\n evtype,body = body.split(\" \",1)\r\n else:\r\n evtype,body = body,\"\"\r\n evtype = evtype.upper()\r\n if evtype == \"CIRC\":\r\n m = re.match(r\"(\\d+)\\s+(\\S+)(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"CIRC event misformatted.\")\r\n ident,status,path,purpose,reason,remote = m.groups()\r\n ident = int(ident)\r\n if path:\r\n if \"PURPOSE=\" in path:\r\n remote = reason\r\n reason = purpose\r\n purpose=path\r\n path=[]\r\n elif \"REASON=\" in path:\r\n remote = reason\r\n reason = path\r\n purpose = \"\"\r\n path=[]\r\n else:\r\n path_verb = path.strip().split(\",\")\r\n path = []\r\n for p in path_verb:\r\n path.append(p.replace(\"~\", \"=\").split(\"=\")[0])\r\n else:\r\n path = []\r\n\r\n if purpose and \"REASON=\" in purpose:\r\n remote=reason\r\n reason=purpose\r\n purpose=\"\"\r\n\r\n if purpose: purpose = purpose[9:]\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n event = CircuitEvent(evtype, ident, status, path, purpose, reason,\r\n remote, body)\r\n elif evtype == \"STREAM\":\r\n #plog(\"DEBUG\", \"STREAM: \"+body)\r\n m = re.match(r\"(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)?:(\\d+)(\\sREASON=\\S+)?(\\sREMOTE_REASON=\\S+)?(\\sSOURCE=\\S+)?(\\sSOURCE_ADDR=\\S+)?(\\s+PURPOSE=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM event misformatted.\")\r\n ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()\r\n ident,circ = map(int, (ident,circ))\r\n if not target_host: # This can happen on SOCKS_PROTOCOL failures\r\n target_host = \"(none)\"\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n if source: source = source[8:]\r\n if source_addr: source_addr = source_addr[13:]\r\n if purpose:\r\n purpose = purpose.lstrip()\r\n purpose = purpose[8:]\r\n event = StreamEvent(evtype, ident, status, circ, target_host,\r\n int(target_port), reason, remote, source, source_addr,\r\n purpose, body)\r\n elif evtype == \"ORCONN\":\r\n m = re.match(r\"(\\S+)\\s+(\\S+)(\\sAGE=\\S+)?(\\sREAD=\\S+)?(\\sWRITTEN=\\S+)?(\\sREASON=\\S+)?(\\sNCIRCS=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"ORCONN event misformatted.\")\r\n target, status, age, read, wrote, reason, ncircs = m.groups()\r\n\r\n #plog(\"DEBUG\", \"ORCONN: \"+body)\r\n if ncircs: ncircs = int(ncircs[8:])\r\n else: ncircs = 0\r\n if reason: reason = reason[8:]\r\n if age: age = int(age[5:])\r\n else: age = 0\r\n if read: read = int(read[6:])\r\n else: read = 0\r\n if wrote: wrote = int(wrote[9:])\r\n else: wrote = 0\r\n event = ORConnEvent(evtype, 
status, target, age, read, wrote,\r\n reason, ncircs, body)\r\n elif evtype == \"STREAM_BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM_BW event misformatted.\")\r\n event = StreamBwEvent(evtype, body, *m.groups())\r\n elif evtype == \"BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"BANDWIDTH event misformatted.\")\r\n read, written = map(long, m.groups())\r\n event = BWEvent(evtype, read, written, body)\r\n elif evtype in (\"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERR\"):\r\n event = LogEvent(evtype, body)\r\n elif evtype == \"NEWDESC\":\r\n ids_verb = body.split(\" \")\r\n ids = []\r\n for i in ids_verb:\r\n ids.append(i.replace(\"~\", \"=\").split(\"=\")[0].replace(\"$\",\"\"))\r\n event = NewDescEvent(evtype, ids, body)\r\n elif evtype == \"ADDRMAP\":\r\n # TODO: Also parse errors and GMTExpiry\r\n m = re.match(r'(\\S+)\\s+(\\S+)\\s+(\\\"[^\"]+\\\"|\\w+)', body)\r\n if not m:\r\n raise ProtocolError(\"ADDRMAP event misformatted.\")\r\n fromaddr, toaddr, when = m.groups()\r\n if when.upper() == \"NEVER\": \r\n when = None\r\n else:\r\n when = time.strptime(when[1:-1], \"%Y-%m-%d %H:%M:%S\")\r\n event = AddrMapEvent(evtype, fromaddr, toaddr, when, body)\r\n elif evtype == \"NS\":\r\n event = NetworkStatusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"NEWCONSENSUS\":\r\n event = NewConsensusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"BUILDTIMEOUT_SET\":\r\n m = re.match(\r\n r\"(\\S+)\\sTOTAL_TIMES=(\\d+)\\sTIMEOUT_MS=(\\d+)\\sXM=(\\d+)\\sALPHA=(\\S+)\\sCUTOFF_QUANTILE=(\\S+)\",\r\n body)\r\n set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()\r\n event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),\r\n int(timeout_ms), int(xm), float(alpha),\r\n float(quantile), body)\r\n elif evtype == \"GUARD\":\r\n m = re.match(r\"(\\S+)\\s(\\S+)\\s(\\S+)\", body)\r\n entry, guard, status = m.groups()\r\n event = GuardEvent(evtype, entry, guard, status, body)\r\n elif evtype == \"TORCTL_TIMER\":\r\n event = TimerEvent(evtype, data)\r\n else:\r\n event = UnknownEvent(evtype, body)\r\n\r\n return event", "def _parse_message(self, string, protocol):\n #print(\"Parsing message: %s\" % string)\n msg = parse_message_string(string)\n result = MessageResult(original_message=msg)\n\n if isinstance(msg, MethodCallMessage):\n # Handle method call\n res = self._method_call(msg)\n response_msg = ResponseMessage(result_code=0, result=res, response_to=msg.id)\n result.response = create_message_string(response_msg)\n elif isinstance(msg, SubscribeMessage):\n # Handle subscription to event\n response_msg = ResponseMessage(result_code=0, result=None, response_to=msg.id)\n result.response = create_message_string(response_msg)\n else:\n raise MessageHandleError(MessageHandleError.RESULT_UNEXPECTED_MESSAGE, msg)\n\n return result", "def _r_process_message(self, result, protocol):\n if isinstance(result.original_message, SubscribeMessage):\n self._r_subscribe_to_event(result.original_message.service_name,\n result.original_message.event_name,\n protocol)\n \n return result.response", "def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n # Handle a custom 
msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)", "def processMessage(self, msg):\r\n LOG(\"Received message: \" + msg.getId())\r\n \r\n # Process messages incoming from child executor, if any\r\n procId = msg[FIELD_PROC_ID]\r\n if procId != self.procId:\r\n if self.childManager.hasChild():\r\n self.childManager.processChildMessage(msg)\r\n else:\r\n LOG(\"Unexpected child message: \" + msg.getId(), LOG_ERROR)\r\n elif msg.getType() == MSG_TYPE_COMMAND:\r\n if msg.getId() == Messages.MSG_ADD_CLIENT:\r\n self.addClient(msg)\r\n elif msg.getId() == Messages.MSG_REMOVE_CLIENT:\r\n self.removeClient(msg)\r\n elif msg.getId() == Messages.CMD_CLOSE:\r\n self.cleanup()\r\n elif msg.getId() == Messages.CMD_RELOAD:\r\n REGISTRY['CIF'].clearAsRun()\r\n self.cleanup( executionOnly = True )\r\n self.setupResources()\r\n self.prepareExecution()\r\n else:\r\n cmdId = msg[\"Id\"]\r\n if cmdId in [ Messages.CMD_ABORT, Messages.CMD_PAUSE ]:\r\n self.mailbox.push( msg, high_priority = True )\r\n else:\r\n self.mailbox.push( msg )\r\n else:\r\n LOG(\"Unexpected message: \" + msg.getId() + \"/\" + msg.getType(), LOG_ERROR)", "def obj_received(self, obj):\n\n # TODO do something like handler registry\n\n if isinstance(obj, pb.Ping):\n self.handle_ping(obj)\n\n elif isinstance(obj, pb.Pong):\n self.handle_pong(obj)\n\n elif isinstance(obj, pb.ACS):\n if self.factory.config.failure != 'omission':\n res = self.factory.acs.handle(obj, self.remote_vk)\n self.process_acs_res(res, obj)\n\n elif isinstance(obj, pb.TxReq):\n self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.TxResp):\n self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationReq):\n self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationResp):\n self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.SigWithRound):\n self.factory.tc_runner.handle_sig(obj, self.remote_vk)\n\n elif isinstance(obj, pb.CpBlock):\n self.factory.tc_runner.handle_cp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Cons):\n self.factory.tc_runner.handle_cons(obj, self.remote_vk)\n\n elif isinstance(obj, pb.AskCons):\n self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)\n\n # NOTE messages below are for testing, bracha/mo14 is normally handled by acs\n\n elif isinstance(obj, pb.Bracha):\n if self.factory.config.failure != 'omission':\n self.factory.bracha.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Mo14):\n if self.factory.config.failure != 'omission':\n self.factory.mo14.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Dummy):\n logging.info(\"NODE: got dummy message from {}\".format(b64encode(self.remote_vk)))\n\n else:\n raise AssertionError(\"invalid message type {}\".format(obj))\n\n self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()", "def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n 
self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )", "def _forward_message(self, name, message):\n unhashed = self.message_hashes[repr(name)]\n if unhashed in self.handlers:\n for handler in self.handlers[unhashed]:\n handler(message)", "def on_message(client, userdata, msg):\n TOPIC_DISPATCH_DICTIONARY[msg.topic][\"method\"](msg)", "def preprocess(self, message):\n self._call_extension_method('preprocess', message)", "def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)", "def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))", "def receive(self, msg):\n pass", "def start(self):\n while True:\n ident = self.reply_socket.recv()\n assert self.reply_socket.rcvmore(), \"Missing message part.\"\n msg = self.reply_socket.recv_json()\n omsg = Message(msg)\n print>>sys.__stdout__\n print>>sys.__stdout__, omsg\n handler = self.handlers.get(omsg.msg_type, None)\n if handler is None:\n print >> sys.__stderr__, \"UNKNOWN MESSAGE TYPE:\", omsg\n else:\n handler(ident, omsg)", "def onMessage(self, message):\n raise NotImplementedError", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def protocol_output(self, message, req=None):\n try:\n # so, utf16 doubles the size of the FLAP packets, which\n # really limits our max message size. if none of the ordinals\n # are outside the 7bit ascii range, convert to ascii bytes\n if not [ch for ch in message if ord(ch) > 127]:\n message = message.encode('us-ascii')\n\n # i don't know what's going on here anymore.. 
let's try something\n # completely different!\n message = message.replace('&', '&amp;')\n message = message.replace('<', '&lt;')\n message = message.replace('>', '&gt;')\n message = newline_re.sub('<br>', message)\n\n # AIM reacts indignantly to overlong messages, so we need to\n # wrap. try not to break up html tags injected by colorlib.\n if not hasattr(req, 'chat'):\n req.chat = None\n if not hasattr(req, 'aim'):\n req.aim = self.oscar_connection\n\n if req.chat:\n width = 2048\n func = req.chat.sendMessage\n else:\n width = 2545 # longer than chatrooms, who knows...\n func = req.aim.sendMessage\n\n # unicode stuff takes two bytes due to shitty utf-16\n if isinstance(message, unicode):\n width = int(width / 2) - 1\n\n for line in self.xmlwrap(message, width):\n args = [line]\n if not req.chat:\n if not req.nick:\n req.nick = req.sendto\n args.insert(0, req.nick)\n reactor.callFromThread(func, *args)\n\n # don't spam ourselves off the server\n sleep(1)\n\n except Exception, error:\n self.log.exception(error)", "def handle_message(self, data, channel):\n pass", "def process_messages(self):\n pass", "def parse(self, message: Message):\n\t\tpass", "def handle_message(self, msg):\n Logger.debug(\"Slave: Trying to parse\")\n if MessageKeys.command_key in msg.fields:\n Logger.info(\"Slave: Message command: %s\", str(msg.get_command()))\n return self.messagehandler[msg.get_command()](self, msg)\n return self.handle_invalid_command(msg)", "def parse_message(self, message):\n pass", "def handle(self, msg, peer_protocol):\n msg_id = msg[0]\n if msg_id == 0:\n self._handle_handshake(msg, peer_protocol)\n elif msg_id == 1: #update\n print(msg, len(msg))\n self._handle_update(msg)", "def handle_message(self, msg: mqtt.MQTTMessage) -> None:\n payload = json.loads(msg.payload.decode(\"utf-8\"))\n logging.info(f\"Received a new message: {payload}\")\n if \"volume\" in payload:\n validate(payload, schema=self.volume_schema)\n self.volume = payload[\"volume\"]\n elif \"volumeCtrl\" in payload:\n validate(payload, schema=self.volume_ctrl_schema)\n self.volume_up() if payload[\"volumeCtrl\"] == \"+\" else self.volume_down()\n elif \"mute\" in payload:\n validate(payload, schema=self.mute_schema)\n self.mute = payload[\"mute\"]\n elif \"toggle\" in payload:\n validate(payload, schema=self.toggle_schema)\n self.toggle_mute() if payload[\"toggle\"] == \"mute\" else self.toggle_pause()\n elif \"ctrl\" in payload:\n validate(payload, schema=self.ctrl_schema)\n self.skip_forward() if payload[\"ctrl\"] == \">>\" else self.skip_backward()\n else:\n raise ValueError(f\"Cannot handle message: {payload}, not a valid command\")", "def _recv_msg(self, msg):\n # If this is a response, pass it along to the Remote object to be\n # processesd by the correct reply/error handler\n if is_response(msg):\n self._remote.resolve(msg)\n\n # Otherwise process the request from the remote RPC client.\n elif is_request(msg):\n method, params = msg['method'], msg['params']\n if method in self._protocol.keys():\n try:\n args, kwargs = self._reconcile_parameters(method, params)\n\n result = getattr(self, method)(*args, **kwargs)\n self._send_msg(json_rpc_result(result, None, msg['id']))\n except Exception as e:\n if isinstance(e, jsonrpc.JSONRPCError):\n raise e\n else:\n raise jsonrpc.ServerError(str(e))\n else:\n raise jsonrpc.MethodNotFound(\"Method not allowed\")\n else:\n raise jsonrpc.ParseError(\"Could not parse msg: %s\" % msg)", "def comsume_msg(self, msg_type):", "def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n 
worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])", "def _parse_msg(self, msg):\n try:\n self.received_msg += msg.decode()\n except:\n self.log.warning(\"invalid parse frame '%s'\" % msg)\n\n while True:\n pos = self.received_msg.find('\\r')\n if pos == -1: # no full msg\n break\n m = self.received_msg[:pos].strip()\n if not len(m):\n break\n self.platform.process_received_message(m)\n self.received_msg = self.received_msg[pos + 1:]", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass", "def onMessageBegin(self, isBinary):", "def handle_incoming_message(obj, reply_channel):\n if int(obj[message_type_key]) == 0:\n try:\n sub_obj = create_subscriber_object(reply_channel, obj)\n subscribers[reply_channel.name] = sub_obj\n except ApiException as exc:\n send_save_to_channel(reply_channel, str(exc))\n\n elif int(obj[message_type_key]) == 1:\n disconnect_subscriber(reply_channel)\n\n print(\"incoming_msg_handled\")", "def receive(self):\n data = self._read()\n if data == b'':\n return None\n\n type = self.MsgHeader.unpack(data).type\n data = data[self.MsgHeader.size:]\n\n if type == self.SONY_MSG_Common:\n header = self.CommonMsgHeader.unpack(data)\n data = data[self.CommonMsgHeader.size:header.size]\n if header.type == self.SONY_MSG_Common_Hello:\n n = self.ProtocolMsgHeader.unpack(data).numProtocols\n protos = (self.ProtocolMsgProto.unpack(data, self.ProtocolMsgHeader.size+i*self.ProtocolMsgProto.size) for i in range(n))\n return InitResponseMessage([(p.name, p.id) for p in protos])\n elif header.type == self.SONY_MSG_Common_Bye:\n raise Exception('Bye from camera')\n else:\n raise Exception('Unknown common message type: 0x%x' % header.type)\n\n elif type == self.SONY_MSG_Tcp:\n header = self.CommonMsgHeader.unpack(data)\n data = data[self.CommonMsgHeader.size:header.size]\n tcpHeader = self.TcpMsgHeader.unpack(data)\n data = data[self.TcpMsgHeader.size:]\n if header.type == self.SONY_MSG_Tcp_ProxyConnect:\n proxy = self.ProxyConnectMsgHeader.unpack(data)\n host = data[self.ProxyConnectMsgHeader.size:self.ProxyConnectMsgHeader.size+proxy.hostSize]\n return SslStartMessage(tcpHeader.socketFd, host.decode('latin1'), proxy.port)\n elif header.type == self.SONY_MSG_Tcp_ProxyDisconnect:\n return SslEndMessage(tcpHeader.socketFd)\n elif header.type == self.SONY_MSG_Tcp_ProxyData:\n size = self.SslDataMsgHeader.unpack(data).size\n return SslSendDataMessage(tcpHeader.socketFd, data[self.SslDataMsgHeader.size:self.SslDataMsgHeader.size+size])\n else:\n raise Exception('Unknown tcp message type: 0x%x' % header.type)\n\n elif type == self.SONY_MSG_Rest:\n header = self.RestMsgHeader.unpack(data)\n data = data[self.RestMsgHeader.size:self.RestMsgHeader.size+header.size]\n if header.type == self.SONY_MSG_Rest_Out:\n return ResponseMessage(data)\n elif header.type == self.SONY_MSG_Rest_In:\n return RequestMessage(data)\n else:\n raise Exception('Unknown rest message type: 0x%x' % header.type)\n\n else:\n raise Exception('Unknown message type: 0x%x' % type)", "def on_message(data):\n pass", "def 
execute_message_received(self, message_received):\n pass", "def on_receive(self, message):\n\n if message.get('command') == 'dispatch':\n self.dispatch(message['filename'])\n\n else:\n log.error('Dispatcher received unexpected message type: {}'.format(\n message))", "def process_request(self, message):\n return NotImplemented()", "def handle_msg(self, state_id, msg):\n pass", "def parse(self, message, prefix, cmd_list):\n\n self.parse_type = \"\"\n self.parse_msg = []\n for i in message:\n if i[0].isdigit():\n self.parse_number(i, \"w\")\n elif len(i) == 1:\n self.parse_type += \"w\"\n self.parse_msg.append(i)\n elif i == \"@everyone\" or i == \"@here\":\n self.parse_type += \"s\"\n self.parse_msg.append(i)\n elif i[0] == prefix:\n self.parse_command(i, cmd_list)\n elif i[0] == \"-\":\n self.parse_number(i, \"o\")\n elif i[0] == \"<\" and len(i) > 3:\n self.parse_mention(i)\n else:\n self.parse_type += \"w\"\n self.parse_msg.append(i[(i[0] == \"\\\\\"):])", "def process_message(self, message: Message[TPayload]) -> Optional[TResult]:\n pass", "def _process_message(self, message: pubsub_message.Message) -> None:\n # Extract the task proto from the message.\n try:\n task = task_pb2.Task.FromString(message.data)\n except proto_message.DecodeError as e:\n logging.error('Unable to deserialize Task proto: %s', e)\n # If the message is gibberish, nacking keeps putting it back, wasting\n # resources for no reason. If the message is fine but there's a parsing\n # bug, nacking makes it possible to process the message normally after\n # fixing the bug. If the expected format of the message ever changes in an\n # incompatible way and a message with the new format is sent before the\n # worker is updated, nacking makes it possible to process the message\n # normally after updating the worker.\n message.nack()\n return\n\n # Find the registration, based on the type of proto stored in task.args.\n _, _, full_name = task.args.type_url.partition('/')\n try:\n registration = self._message_type_registry[full_name]\n except KeyError:\n logging.warning('Unknown type of task: %s', task.args.type_url)\n # If the task has a bogus type, nacking keeps putting it back, wasting\n # resources for no reason. If a new task type is added and those tasks are\n # requested before the worker code is updated, nacking makes it possible\n # to process the tasks after the worker code is updated. If an existing\n # task type is removed from the running worker code before all tasks of\n # that type have been processed, nacking keeps putting it back, wasting\n # resources.\n message.nack()\n return\n\n # Get the args proto.\n args = registration.task_args_class()\n task.args.Unpack(args)\n\n # Convert the task to a loggable string.\n try:\n task_string = self._task_to_string(task)\n except Exception: # pylint: disable=broad-except\n logging.exception(\n 'Unable to convert task of type %s to a string for logging.',\n full_name)\n # If self._task_to_string() fails for a reason unrelated to the task\n # itself, nacking makes it possible to process the task once\n # self._task_to_string() is working again. If something about the task\n # makes self._task_to_string() fail consistently, nacking makes it\n # possible to process the task once the bug in self._task_to_string() is\n # fixed. 
Additionally, users can catch and ignore exceptions in\n # self._task_to_string() itself if they want to always process tasks\n # regardless of whether it's possible to log the contents of the task.\n message.nack()\n return\n\n # Call the registered callback.\n logging.info('Processing task (message_id=%s):\\n%s', message.message_id,\n task_string)\n try:\n registration.callback(args)\n except Exception: # pylint: disable=broad-except\n logging.exception('Task failed (message_id=%s).', message.message_id)\n # See the comment above about nacking on self._task_to_string() failures\n # for the considerations here.\n message.nack()\n else:\n logging.info('Finished task (message_id=%s).', message.message_id)\n message.ack()", "def parse(self, connection, outgoing, raw, **kwargs):\n try:\n prefix, command, params = parsemsg(raw)\n except IndexError:\n parsed_kwargs = {'action': 'unknown'}\n else:\n parsed_kwargs = {'actor': Hostmask.from_string(prefix)}\n if command in self.functions:\n try:\n parsed_kwargs['action'] = command.lower()\n parsed_kwargs.update(\n self.functions[command](command, params))\n except IndexError:\n del parsed_kwargs['action']\n if 'action' not in parsed_kwargs:\n parsed_kwargs['action'] = 'unknown'\n parsed_kwargs['subaction'] = command\n splits = 2 if raw.startswith(':') else 1\n params = raw.split(None, splits)\n if len(params) > splits:\n parsed_kwargs['content'] = params[splits]\n else:\n parsed_kwargs['content'] = ''\n parsed_kwargs.update(kwargs)\n return Message(connection, outgoing, raw=raw, **parsed_kwargs)", "def received(self, message):\n raise NotImplementedError()", "def onMessageFrame(self, payload):", "def process(self, message):\n try:\n self.messages.remove(message)\n except ValueError:\n pass # nothing to see here, just a message that was already processed and is not on the list any more\n except Exception as e:\n print('error removing message from self.message:', e)\n \n try:\n if message['type'] in [\"ticker\"]:\n self.process_tickers(message)\n elif message['type'] in [\"snapshot\", \"l2update\"]:\n self.process_orderbook(message)\n elif message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] and 'user' in self.data:\n self.process_orders(message)\n except Exception as e:\n raise Exception(\"Process raised an error: {}\\n\\t{}\".format(e,message))", "def receive_message(self, message):\r\n return", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def message_handle(ws, message):\n try:\n data = json.loads(message)\n method = data['method']\n params = data['params']\n except json.JSONDecodeError:\n ws.close((1003, 'Message `{}` is invalid'.format(message)))\n except KeyError:\n keys = str(list(data.keys()))\n ws.close((1003, 'Message keys {} are missing or invalid'.format(keys)))\n else:\n try:\n public[method](ws, **params)\n except KeyError:\n ws.close((1007, 'Method `{}` not found'.format(method)))\n except TypeError:\n ws.close((1007, 'Parameters `{}` are wrong'.format(data['params'])))\n except InstanceNotFound as instance_id:\n ws.close((1007, 'Instance `{}` not found'.format(instance_id)))\n except EnvironmentMalformed as env_id:\n ws.close((1007, 'Environment `{}` is malformed'.format(env_id)))\n except EnvironmentNotFound as env_id:\n ws.close((1007, 'Environment `{}` not found'.format(env_id)))\n except WrongAction as action:\n ws.close((1007, 'Action `{}` is wrong'.format(action)))\n except Exception as err:\n ws.close((1007, 'Unknonwn error: {}'.format(err)))", 
"def __init__(self, *args):\r\n \r\n self.bl = None\r\n self.buddy = None\r\n self.connection = None\r\n \r\n #\r\n # incoming\r\n #\r\n #__init__(self, bl, connection, command, encoded)\r\n if type(args[0]) == BuddyList:\r\n self.bl = args[0]\r\n self.connection = args[1]\r\n if self.connection:\r\n self.buddy = self.connection.buddy\r\n self.command = args[2]\r\n \r\n # decode from line format to raw binary\r\n # and then let the message parse it \r\n self.blob = decodeLF(args[3])\r\n self.parse()\r\n \r\n # the incoming message is now properly initialized and somebody\r\n # could now call its execute() method to trigger its action\r\n return\r\n \r\n \r\n #\r\n # outgoing\r\n #\r\n #__init__(self, connection, blob)\r\n #__init__(self, buddy, blob)\r\n if type(args[0]) in [InConnection, OutConnection, Buddy]:\r\n if type(args[0]) in [InConnection, OutConnection]:\r\n self.connection = args[0]\r\n if self.connection.buddy:\r\n self.buddy = self.connection.buddy\r\n \r\n elif type(args[0]) == Buddy:\r\n self.buddy = args[0]\r\n self.connection = self.buddy.conn_out\r\n \r\n if len(args) > 1:\r\n blob = args[1]\r\n if type(blob) in [list, tuple]:\r\n self.blob = \" \".join(str(part) for part in blob)\r\n else:\r\n self.blob = str(blob)\r\n else:\r\n self.blob = \"\"\r\n \r\n self.command = type(self).__name__[12:]", "def handleModuleMessage(self, data):\n # print (\"got a message in silverline module.\")\n (messageType, content) = self.ofxAgent.unpackModuleMessage(data)\n # print \"\\tmessage type: %s\"%messageType\n if messageType == ENABLEDECLASSIFIER:\n self.enableDeclassifier()\n elif messageType == ADDFLOWPERMISSION:\n self.addFlowPermission(content)", "def handle_incoming_ipc_message(self, ipc_message: CyBldIpcMessage):\n if not self.command_group.codeword_regex_matches(ipc_message.codeword):\n return\n\n if ipc_message.cmd_type == cybld_ipc_message.CyBldIpcMessageType.set_cmd:\n self._change_cmd(ipc_message.cmd_number, ipc_message.setcmd_param)\n elif ipc_message.cmd_type == cybld_ipc_message.CyBldIpcMessageType.exec_cmd:\n self._exec_cmd(ipc_message.cmd_number, ipc_message.nvim_ipc)\n else:\n assert False", "def processItem(self, item):\n\t\tif item[\"type\"] == \"message\":\n\t\t\t# Process the message only if the channel has received a message\n\t\t\t# Decode the message\n\t\t\titem[\"channel\"] = item[\"channel\"].decode(\"utf-8\")\n\n\t\t\t# Make sure the handler exists\n\t\t\tif item[\"channel\"] in self.handlers:\n\t\t\t\tlog.info(\"Redis pubsub: {} <- {} \".format(item[\"channel\"], item[\"data\"]))\n\t\t\t\tif isinstance(self.handlers[item[\"channel\"]], generalPubSubHandler.generalPubSubHandler):\n\t\t\t\t\t# Handler class\n\t\t\t\t\tself.handlers[item[\"channel\"]].handle(item[\"data\"])\n\t\t\t\telse:\n\t\t\t\t\t# Function\n\t\t\t\t\tself.handlers[item[\"channel\"]](item[\"data\"])", "async def on_socket_receive(self, msg: \"Msg | MsgProto\") -> None:" ]
[ "0.6494044", "0.6347405", "0.6252538", "0.61391824", "0.6027273", "0.6013206", "0.59676176", "0.59675497", "0.594518", "0.59192914", "0.5894395", "0.5877927", "0.58471686", "0.5832104", "0.58276415", "0.58168703", "0.5714494", "0.5707053", "0.56793517", "0.567809", "0.5669059", "0.5652435", "0.5649311", "0.56309825", "0.558079", "0.55801326", "0.5561875", "0.5561564", "0.5560558", "0.55570275", "0.5527255", "0.5521355", "0.5471914", "0.54581356", "0.5457532", "0.5431987", "0.5431951", "0.54117376", "0.5387086", "0.5385305", "0.5385268", "0.5373637", "0.5371336", "0.53685987", "0.5366058", "0.5352303", "0.5351542", "0.53481585", "0.5347927", "0.5344916", "0.53413916", "0.5320809", "0.5307867", "0.52946085", "0.529424", "0.5288619", "0.52881277", "0.5271118", "0.5266298", "0.52657557", "0.52640104", "0.52569205", "0.52565616", "0.52519", "0.52460206", "0.5232033", "0.5223843", "0.52213705", "0.52194047", "0.5217889", "0.5216346", "0.52129775", "0.5205437", "0.52011883", "0.5197551", "0.5196818", "0.5193103", "0.51918024", "0.51882297", "0.51763046", "0.5174771", "0.51709944", "0.5170562", "0.5160501", "0.5159684", "0.51533353", "0.51502275", "0.51492274", "0.51420766", "0.5140371", "0.5137564", "0.5127143", "0.5126814", "0.51207745", "0.5115213", "0.5108134", "0.51076764", "0.5104701", "0.5103693", "0.5098624" ]
0.547723
32
Validates that certain fields exist in self._context and have the specified values. Used to make sure the protocol's FSM is in the right state
def _validate_context(self, content):
    result = False
    if self._context is not None:
        for k in content:
            if k not in self._context or self._context[k] != content[k]:
                break
        else:
            # every requested field is present and matches the expected value
            result = True
    return result
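A minimal usage sketch of the validator above (an illustration, not part of the dataset record): the HandshakeProtocol class, its FSM keys, and the handle_data handler are assumptions; only _validate_context itself comes from the snippet.

class HandshakeProtocol:
    def __init__(self):
        # hypothetical FSM context; keys and values are illustrative only
        self._context = {"state": "connected", "version": 2}

    def _validate_context(self, content):
        result = False
        if self._context is not None:
            for k in content:
                if k not in self._context or self._context[k] != content[k]:
                    break
            else:
                result = True
        return result

    def handle_data(self, payload):
        # refuse to act unless the FSM is in the expected state
        if not self._validate_context({"state": "connected"}):
            raise RuntimeError("protocol FSM is not in the expected state")
        return payload

proto = HandshakeProtocol()
assert proto._validate_context({"state": "connected"}) is True
assert proto._validate_context({"state": "closed"}) is False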
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate(self, instance, value):", "def is_valid(self, value):\r\n pass", "def validate(self, instance, value):", "def validate(self, instance, value):", "def _validate_post_fields(self, value, name, result):\n state = result.get(\"state\")\n persistent_state = result.get(\"persistent_state\")\n\n # minimal settings not related to runtime changes\n valid_fields = [\"actions\", \"ignore_errors\", \"name\", \"persistent_state\", \"state\"]\n\n # when type is present, a profile is completely specified (using\n # defaults or other settings)\n if \"type\" in result:\n valid_fields += list(self.nested.keys())\n\n # If there are no runtime changes, \"wait\" and \"force_state_change\" do\n # not make sense\n # FIXME: Maybe this restriction can be removed. Need to make sure that\n # defaults for wait or force_state_change do not interfer\n if not state:\n while \"wait\" in valid_fields:\n valid_fields.remove(\"wait\")\n while \"force_state_change\" in valid_fields:\n valid_fields.remove(\"force_state_change\")\n else:\n valid_fields += [\"force_state_change\", \"wait\"]\n\n # FIXME: Maybe just accept all values, even if they are not\n # needed/meaningful in the respective context\n valid_fields = set(valid_fields)\n for k in result:\n if k not in valid_fields:\n raise ValidationError(\n name + \".\" + k,\n \"property is not allowed for state '%s' and persistent_state '%s'\"\n % (state, persistent_state),\n )\n\n if \"name\" not in result:\n if persistent_state == \"absent\":\n result[\"name\"] = \"\" # set to empty string to mean *absent all others*\n else:\n raise ValidationError(name, \"missing 'name'\")\n\n # FIXME: Seems to be a duplicate check since \"wait\" will be removed from\n # valid_keys when state is considered to be not True\n if \"wait\" in result and not state:\n raise ValidationError(\n name + \".wait\",\n \"'wait' is not allowed for state '%s'\" % (result[\"state\"]),\n )\n\n result[\"state\"] = state\n result[\"persistent_state\"] = persistent_state\n\n self.VALID_FIELDS = valid_fields\n return result", "def _validate_post_fields(self, value, name, result):\n state = result.get(\"state\")\n persistent_state = result.get(\"persistent_state\")\n\n # minimal settings not related to runtime changes\n valid_fields = [\"actions\", \"ignore_errors\", \"name\", \"persistent_state\", \"state\"]\n\n # when type is present, a profile is completely specified (using\n # defaults or other settings)\n if \"type\" in result:\n valid_fields += list(self.nested.keys())\n\n # If there are no runtime changes, \"wait\" and \"force_state_change\" do\n # not make sense\n # FIXME: Maybe this restriction can be removed. 
Need to make sure that\n # defaults for wait or force_state_change do not interfer\n if not state:\n while \"wait\" in valid_fields:\n valid_fields.remove(\"wait\")\n while \"force_state_change\" in valid_fields:\n valid_fields.remove(\"force_state_change\")\n else:\n valid_fields += [\"force_state_change\", \"wait\"]\n\n # FIXME: Maybe just accept all values, even if they are not\n # needed/meaningful in the respective context\n valid_fields = set(valid_fields)\n for k in result:\n if k not in valid_fields:\n raise ValidationError(\n name + \".\" + k,\n \"property is not allowed for state '%s' and persistent_state '%s'\"\n % (state, persistent_state),\n )\n\n if \"name\" not in result:\n if persistent_state == \"absent\":\n result[\"name\"] = \"\" # set to empty string to mean *absent all others*\n else:\n raise ValidationError(name, \"missing 'name'\")\n\n # FIXME: Seems to be a duplicate check since \"wait\" will be removed from\n # valid_keys when state is considered to be not True\n if \"wait\" in result and not state:\n raise ValidationError(\n name + \".wait\",\n \"'wait' is not allowed for state '%s'\" % (result[\"state\"]),\n )\n\n result[\"state\"] = state\n result[\"persistent_state\"] = persistent_state\n\n self.VALID_FIELDS = valid_fields\n return result", "def validate(self):\n if self.trans_rec_id is None:\n raise Exception('trans_rec_id cannot be none')\n if self.packet_id is None:\n raise Exception('packet_id cannot be none')\n if self.type_id is None:\n raise Exception('type_id cannot be none')\n if self.version is None:\n raise Exception('version cannot be none')\n if self.state_id is None:\n raise Exception('state_id cannot be none')\n if self.outgoing_flag is None:\n raise Exception('outgoing_flag cannot be none')\n if self.expected_reply_type is None:\n raise Exception('expected_reply_type cannot be none')", "def check_value(self, value):", "def _validate(self, value):\n return True", "def _check_value(self,val,obj=None):\n if not val in self.objects:\n # CEBALERT: can be called before __init__ has called\n # super's __init__, i.e. 
before attrib_name has been set.\n try:\n attrib_name = self._attrib_name\n except AttributeError:\n attrib_name = \"\"\n raise ValueError(\"%s not in Parameter %s's list of possible objects\" \\\n %(val,attrib_name))", "def validate(self, value, obj=None):\n return True", "def check_validity(self):", "def _check_valid_value(self, value):\n if self._possible_values is None: # validation not defined (profile)\n return\n if value in self._possible_values:\n return\n if value is not None and \"ANY\" in self._possible_values:\n return\n msg = (\"'%s' is not a valid 'options.%s' value.\\nPossible values are %s\"\n % (value, self._name, self._possible_values))\n raise ConanException(msg)", "def _is_initiated(self, context):\n user_data = context.user_data\n has_attr = 'id' in user_data and 'email' in user_data\n has_values = self._id_valid(user_data['id'])\n return has_attr and has_values", "def _validate(self):\n pass", "def _check_validity(self):\n pass", "def is_valid(self, object_to_validate, field_name):\n pass", "def validate(self):\n self.valid = True\n\n if self._value is None and self._strict:\n self.valid = False\n raise self.MissingFieldData\n\n elif self._value is not None:\n self._type_specific_validation()", "def _validate(self):\n assert type(self.cmd) is dict\n assert self.cmd.keys() == {\"operation\",\"data\"}\n assert self.cmd[\"operation\"] == self._class_operation()", "def valid(self):\n pass", "def _check_value(self, value):\n raise NotImplementedError", "def _check_required_fields(self):\n assert self.title\n assert self.format", "def assert_valid(self, instance, value=None):\n valid = super(Instance, self).assert_valid(instance, value)\n if not valid:\n return False\n if value is None:\n value = instance._get(self.name)\n if isinstance(value, HasProperties):\n value.validate()\n return True", "def validate(self, converted_value, context):\n pass", "def validate(self, value):\n return True", "def _validate(self, *_):\n provider = self.provider_entry.get_text()\n username = self.account_name_entry.get_text()\n token = \"\".join(self.token_entry.get_text().split())\n\n if not username:\n self.account_name_entry.get_style_context().add_class(\"error\")\n valid_name = False\n else:\n self.account_name_entry.get_style_context().remove_class(\"error\")\n valid_name = True\n\n if not provider:\n self.provider_combobox.get_style_context().add_class(\"error\")\n valid_provider = False\n else:\n self.provider_combobox.get_style_context().remove_class(\"error\")\n valid_provider = True\n\n if (not token or not OTP.is_valid(token)) and not self.props.is_edit:\n self.token_entry.get_style_context().add_class(\"error\")\n valid_token = False\n else:\n self.token_entry.get_style_context().remove_class(\"error\")\n valid_token = True\n\n self.emit(\"changed\", all([valid_name, valid_provider, valid_token]))", "def __validate(self):\n pass", "def _check_value(self,val,obj=None):\n if not (isinstance(val,self.class_)) and not (val is None and self.allow_None):\n raise ValueError(\n \"Parameter '%s' value must be an instance of %s, not '%s'\" %\n (self._attrib_name, self.class_.__name__, val))", "def _check_vals(self):\n\n try:\n self.is_set = True\n self.pack()\n except Exception as err:\n # Set default values again\n raise ValueError(\"Invalid arguments. 
Could not packed since: {}\".format(err))\n self.__init__()", "def _is_valid_for(self, object, name, value):\n if self.is_valid_for(value):\n return value\n\n self.error(object, name, value)", "def _is_valid_for(self, object, name, value):\n if self.is_valid_for(value):\n return value\n\n self.error(object, name, value)", "def validate(self):\n super(ContextMapping, self).validate()\n self.check_observatory()\n for key, mapping in self.selections.normal_items():\n self._check_nested(\"observatory\", self.observatory, mapping)\n # mapping.validate()", "def validate(self, value):\n\n return True", "def validate_params(self, ctx):\n try:\n return self._validate_params()\n except formencode.Invalid as exc:\n unpack = exc.unpack_errors()\n self.__request.set_property(lambda: unpack,\n self._invalid_params_attr,\n reify=True)\n if self._raise_exc is True:\n self._raise(self._invalid_params_exc, unpack)", "def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_invalid = self.invalid_values\n if value in current_invalid:\n raise self.in_value_error(self.in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_invalid)))", "def validate(self, request):\n\n value = request._get_parameter_value(self)\n\n if value.object is None:\n if self.required:\n self.add_error(request, \"This input is required\")\n\n return\n\n self.do_validate(request, value.object)", "def validate(self, value, clean=True):\n pass", "def validate(self, value, clean=True):\n pass", "def check_state(self):\n pass", "def validate(self, value, model_instance):\r\n # raise validation error if the use of this field says it can't be blank but it is\r\n if not self.blank and value is self.Empty:\r\n raise ValidationError(self.error_messages['blank'])\r\n else:\r\n return super(CourseKeyField, self).validate(value, model_instance)", "def check(self, value):\n raise NotImplementedError", "def valid(self, *args, **kwargs) -> Any:\n pass", "def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()", "def _validate_on_value(self, value: Any) -> None:\n if not self._is_nullable and value is None:\n msg = \"Non-nullable field cannot have None value\"\n if self._resolve_field_name() is not None:\n msg += f\" (field name = '{self._resolve_field_name()}')\"\n raise FieldValueValidationError(msg)", "def validate(self) -> None:\n\n if self.field not in self.model.table_fields:\n raise ValueError(f\"Value field {self.field} not present in {self.model.table}\")\n\n if self.pivot:\n if self.pivot not in self.model.table_fields:\n raise ValueError(\n f\"Pivot field {self.pivot} not present in {self.model.table}\"\n )\n\n if self.connector:\n if self.connector not in self.model.table_fields:\n raise ValueError(\n f\"Connector field {self.connector} not present in {self.model.table}\"\n )\n\n for field in self.selectors:\n if field not in self.model.table_fields:\n raise ValueError(f\"Selector field {field} not present in {self.model.table}\")", "def validate(self,state,data):\n if self.VALIDATE:\n raise NotImplementedError(\"must override self.validate() when self.VALIDATE is defined\")\n return None", "def is_valid(self):\r\n raise NotImplementedError", "def __validate():\n # TODO: implement", "def _check_params_do(name, val):\n if name == 'info_hash':\n return len(val) == 20\n elif name == 'peer_id':\n return len(val) == 20 and STORAGE.check_peer(val)\n elif name == 'numwant':\n return int(val) < 250\n fail(REASON_REQUEST_ERROR)", "def 
validation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def validate(self):\n # name\n assert self.name, \"Empty bitfield name is not allowed!\"\n assert utils.is_first_letter(self.name), \\\n \"Name value '%s' for is wrong! Must start from a letter.\" % (self.name)\n\n # reset\n assert utils.is_non_neg_int(self.reset), \\\n \"Reset value '%s' for '%s' is wrong! Only non-negative integers are allowed.\" % (self.reset, self.name)\n\n # width\n assert self.width, \"Empty bitfield width is not allowed!\"\n assert utils.is_pos_int(self.width), \\\n \"Width value '%s' for '%s' is wrong! Only positive integers are allowed.\" % (self.width, self.name)\n\n # lsb\n assert utils.is_non_neg_int(self.lsb), \\\n \"LSB value '%s' for '%s' is wrong! Only non-negative integers are allowed.\" % (self.lsb, self.name)\n\n # access\n assert self.access in ['rw', 'rw1c', 'rw1s', 'rw1t', 'ro', 'roc', 'roll', 'rolh', 'wo', 'wosc'], \\\n \"Unknown access mode '%s' for '%s' field!\" % (self.access, self.name)\n\n # hardware\n if 'q' in self.hardware or 'n' in self.hardware or 'f' in self.hardware:\n assert len(self.hardware) == 1, \\\n \"Wrong hardware mode '%s' for field '%s'!\" % (self.hardware, self.name)\n else:\n wrong_hw = list(set(self.hardware) - (set(self.hardware) & set('ioecsla')))\n assert wrong_hw == [], \\\n \"Wrong hardware mode(s) '%s' in '%s' for the field '%s'!\" % (wrong_hw, self.hardware, self.name)\n if 'q' in self.hardware:\n q_access_allowed = ['rw', 'ro', 'wo']\n assert self.access in q_access_allowed, \\\n \"Hardware mode 'q' is allowed to use only with '%s'!\" % (q_access_allowed)\n\n # enums\n for enum in self.enums:\n assert enum.value.bit_length() <= self.width, \\\n \"Enum '%s' value %d exceeds bitfield width %d!\" % (enum.name, enum.value, self.width)\n assert self.enum_names.count(enum.name) == 1, \\\n \"Enum '%s' name is not unique!\" % (enum.name)\n assert [e.value for e in self].count(enum.value) == 1, \\\n \"Enum '%s' value is not unique!\" % (enum.value)\n enum.validate()", "def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('object_type', context)\n self.assertEqual(context['object_type'], 'variable')\n self.assertIn('form', context)\n self.assertIsInstance(context['form'], forms.SourceTraitLookupForm)\n self.assertIn('text', context)\n self.assertIsInstance(context['text'], str)", "def validate(self, name, values):\r\n \r\n pass", "def validate_value(self):\n raise NotImplementedError('validate_value() must implement in subclass.')", "def validate(self, source_value):\n errors = defaultdict(list)\n\n for field in self.get_mapping().fields:\n value = get_attribute(source_value, field.name)\n try:\n field.is_valid(value)\n except ValidationError as e:\n errors[field.name].append(e.message)\n\n if errors:\n raise ValidationError(errors)\n else:\n return super(Nested, self).validate(source_value)", "def is_valid(self):\n raise NotImplementedError", "def validate(self, attrs):\n\n errors = {}\n order_obj = Order.objects.get(order_id=attrs['order_id'])\n if order_obj.courier_id.courier_id != attrs['courier_id'].courier_id:\n errors['order_id'] = f'Order with id {order_obj.order_id} is assigned to another courier.'\n unknown = set(self.initial_data) - set(self.fields)\n if unknown:\n errors['Unknown field(s)'] = ''.join(unknown)\n if order_obj.assign_time > 
attrs['complete_time']:\n errors['complete_time'] = 'complete_time cannot be greater then assign_time.'\n if errors:\n raise ValidationError(errors)\n return attrs", "def check_argument(self, struct_class, item, keyword, value):\n pass", "def validate(self, value):\r\n return value", "def check_validity(self) -> None: # pylint: disable=no-self-use # pragma: nocover\n return None", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def _validate(self):\n for p in self.parameters:\n #Check for missing required parameters:\n if p.is_required and not(p.is_set):\n raise ValueError(\"Parameter %s is not set.\" \\\n % p.names[-1])\n #Also repeat the parameter validation here, just in case?", "def _validate_fields(self, change_fields):\n pass", "def __call__(self, value):\n return self.validate(value)", "def validate(cls, value):\n return cls.properties.validate(value)", "def verify(self):\n data = [\"rfc\", \"tel\", \"email\", \"name\", \"use\"]\n state = False\n for item in data:\n if getattr(self, item + \"Input\").text() != \"\":\n state = True\n else:\n return False\n return state", "def _check_params(self):\n pass", "def validate_python(self, value, state):\n if state is not None and hasattr(state, 'session'):\n\n # This will get the list of all of the Clients\n clients =[]\n cli = state.session.query(Clients).all()\n \n for index in range(len(cli)):\n # turn all of the elements into lower case\n clients.append(cli[index].name.lower())\n\n if value.lower() in clients:\n raise formencode.Invalid(self.message(\"client_taken\", state), value, state)\n else:\n # This means a Session object wasn't passed in as the \"state\".\n raise ValueError(\"state object needs session attribute\", value, state)", "def test_empty_other_context(self):\n val = DwcaValidator(yaml.load(self.empty3, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'field_1': ''}\n self.assertFalse(val.validate(document))\n document = {'field_2': ''}\n self.assertFalse(val.validate(document))\n document = {'field_3': ''}\n self.assertFalse(val.validate(document))\n document = {'field_4': ''}\n self.assertFalse(val.validate(document))\n document = {'field_5': ''}\n self.assertFalse(val.validate(document))\n document = {'field_6': ''}\n self.assertFalse(val.validate(document))", "def is_valid(self): # -> bool:\n ...", "def validate_python(self, value, state):\n if state is not None and hasattr(state, 'session'):\n acct = state.session.query(Users).filter_by(email=value).first()\n if acct is not None:\n raise formencode.Invalid(self.message(\"email_taken\", state), value, state)\n \n else:\n # This means a Session object wasn't passed in as the \"state\".\n raise ValueError(\"state object needs session attribute\", value, state)", "def validate(self):\n if self.id == None:\n return False\n return self.validator.validate(self.fields)", "def grok_state(self, obj):\n if 'state' in obj:\n my_state = obj['state'].lower()\n if my_state != 'absent' and my_state != 'present':\n raise aomi \\\n .exceptions \\\n .Validation('state must be either \"absent\" or \"present\"')\n\n self.present = obj.get('state', 'present').lower() == 'present'", "def validate(self, request):\n values = {\n 'robot_match_comments':request.POST['robot_match_comments'],\n 'did_foul':'did_foul' in request.POST,\n 'did_technical_foul':'did_technical_foul' in 
request.POST,\n 'foul_description':request.POST['foul_description'],\n 'did_shoot':'did_shoot' in request.POST,\n 'auto_1':request.POST['auto_1'],\n 'auto_2':request.POST['auto_2'],\n 'auto_3':request.POST['auto_3'],\n 'auto_miss':request.POST['auto_miss'],\n 'teleop_1':request.POST['teleop_1'],\n 'teleop_2':request.POST['teleop_2'],\n 'teleop_3':request.POST['teleop_3'],\n 'teleop_5':request.POST['teleop_5'],\n 'teleop_miss':request.POST['teleop_miss'],\n 'shooting_description':request.POST['shooting_description'],\n 'did_climb':'did_climb' in request.POST,\n 'climb_start':request.POST['climb_start'],\n 'climb_finish':request.POST['climb_finish'],\n 'level_reached':request.POST.get('level_reached'),\n 'frisbees_dumped':request.POST['frisbees_dumped'],\n 'climbing_description':request.POST['climbing_description'],\n 'did_human_load':'did_human_load' in request.POST,\n 'did_ground_load':'did_ground_load' in request.POST,\n 'auto_frisbees_ground_loaded':\\\n request.POST['auto_frisbees_ground_loaded'],\n 'loading_description':request.POST['loading_description'],\n }\n if ((values['did_foul'] or values['did_technical_foul']) and\n not values['foul_description']):\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'Please enter a description of the foul(s) the robot committed',\n new_values\n )\n if values['did_shoot']:\n try:\n values['auto_1'] = int(values['auto_1'])\n values['auto_2'] = int(values['auto_2'])\n values['auto_3'] = int(values['auto_3'])\n values['auto_miss'] = int(values['auto_miss'])\n values['teleop_1'] = int(values['teleop_1'])\n values['teleop_2'] = int(values['teleop_2'])\n values['teleop_3'] = int(values['teleop_3'])\n values['teleop_5'] = int(values['teleop_5'])\n values['teleop_miss'] = int(values['teleop_miss'])\n except ValueError:\n raise ValidationError(\n 'You must enter a number for all of the shooting numbers',\n self.__dict__.copy().update(values)\n )\n if values['did_climb']:\n try:\n values['climb_start'] = int(values['climb_start'])\n values['climb_finish'] = int(values['climb_finish'])\n try:\n values['level_reached'] = int(values['level_reached'])\n except TypeError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'You must select a level the robot climbed too',\n new_values\n )\n values['frisbees_dumped'] = int(values['frisbees_dumped'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All climbing related numbers must be numbers',\n new_values\n )\n if values['did_ground_load']:\n try:\n values['auto_frisbees_ground_loaded'] = int(\n values['auto_frisbees_ground_loaded'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All numbers of frisbees ground loaded must be numbers',\n new_values\n )\n return values", "def _check_fields(self, content: JsonDict) -> None:\n self.assertIn(\"id\", content)\n self.assertIn(\"received_ts\", content)\n self.assertIn(\"room_id\", content)\n self.assertIn(\"event_id\", content)\n self.assertIn(\"user_id\", content)\n self.assertIn(\"sender\", content)\n self.assertIn(\"canonical_alias\", content)\n self.assertIn(\"name\", content)\n self.assertIn(\"event_json\", content)\n self.assertIn(\"score\", content)\n self.assertIn(\"reason\", content)\n self.assertIn(\"auth_events\", content[\"event_json\"])\n self.assertIn(\"type\", content[\"event_json\"])\n self.assertIn(\"room_id\", content[\"event_json\"])\n 
self.assertIn(\"sender\", content[\"event_json\"])\n self.assertIn(\"content\", content[\"event_json\"])", "def _ValidateFields(self, entity):\n # if field_universe is not defined just return true\n if not self._field_universe:\n return True\n\n valid = True\n for field_tuple in entity.local_field_names.values():\n if not self._ValidateField(field_tuple.field, entity):\n valid = False\n return valid", "def _check_missing(self, key: str, value: Any):\n required = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"required\", True)\n\n if required and value is None:\n raise Exception(f\"Value for '{key}' is empty but a value is required\")", "def validate(self, key, val):\n return True", "def validate(self, key, val):\n return True", "def do_validate(self, request, _object):\n\n pass", "def _validate(self, model_instance, value):\r\n if self.empty(value) and self.is_required:\r\n raise ValidationError(\"Field '%s' is required.\", self.name)\r\n\r\n if self._selection and value not in self._selection_list:\r\n raise ValidationError(\r\n _(\"Field '%(name)s' is '%(value)s'; must be one of %(selection)s\",\r\n name=self.name, value=value, selection=self._selection_list))\r\n\r\n if self._validator:\r\n self._validator(model_instance, value)\r\n\r\n if value is None:\r\n return value\r\n\r\n return self.validate(value)", "def validate(self, value):\n if value is None:\n msg = message_factory.get_message('vapi.data.validate.mismatch',\n self.type,\n 'None')\n return [msg]\n return None", "def is_valid_value(self, value):\n raise NotImplementedError(\"subclass must implement is_valid_value()\")", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def validate(self, data):\n if data.has_key('site'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, site=data['site']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n elif data.has_key('project'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, project=data['project']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n return data", "def __validate_input(self, request_data):\n call_id = request_data.get(strings.CALL_ID_KEY)\n request_timestamp = request_data.get(strings.TIMESTAMP_KEY)\n request_start = request_data.get(strings.START_KEY)\n validation = None\n if call_id and request_timestamp and request_start is not None:\n call_detail_query = CallDetail.objects.filter(call_id=call_id)\n if call_detail_query:\n if len(call_detail_query) < CALL_DETAILS_LIMIT:\n stored_call_detail = call_detail_query[0]\n if isinstance(request_start, str):\n if request_start in strings.TRUE_VALUES:\n request_start = True\n else:\n request_start = False\n if stored_call_detail.start == request_start:\n validation = {strings.INPUT_ERROR_KEY:\n strings.START_END_ERROR}\n stored_timestamp = standardize_date(\n stored_call_detail.timestamp,\n strings.COMPLETE_DATE_PATTERN)\n request_timestamp = standardize_date(request_timestamp,\n strings.\n COMPLETE_DATE_PATTERN)\n if stored_timestamp == request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.EQUAL_TIMESTAMPS_ERROR}\n if stored_call_detail.start and not request_start:\n if stored_timestamp > request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n elif not stored_call_detail.start and request_start:\n if 
stored_timestamp < request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n else:\n validation = {strings.INPUT_ERROR_KEY:\n strings.CALL_LIMIT_ERROR}\n\n return validation", "def validation_statement(self, request, value=None, **kwargs):\n raise NotImplementedError", "def validate(self):\n raise NotImplementedError", "def validate(self):\n raise NotImplementedError", "def _validate(self, value, **options):\n\n super()._validate(value, **options)\n\n current_valid = self.valid_values\n if value not in current_valid:\n raise self.not_in_value_error(self.not_in_value_message.format(\n param_name=self._get_field_name(**options),\n values=self._get_list_representation(current_valid)))", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def validate(self):\n if self._data is None:\n raise BadRequest(\"Malformed request\")\n\n missing = []\n for field in self.MANDATORY:\n if self.get_parameter(field) is None:\n missing.append(field)\n\n if missing:\n raise BadRequest(\"Missing mandatory fields: {}\".format(missing))\n\n return True", "def is_valid(self, user_specific_config: Any, factor: str) -> bool:" ]
[ "0.63824505", "0.6050479", "0.60258853", "0.60258853", "0.5951332", "0.5951332", "0.5933391", "0.5919774", "0.5896552", "0.5887481", "0.5868621", "0.58041704", "0.57922333", "0.57876384", "0.5754241", "0.57441854", "0.5739001", "0.5719968", "0.57187736", "0.569956", "0.5681051", "0.5676943", "0.5662414", "0.5648677", "0.5637717", "0.5636188", "0.5622192", "0.5616914", "0.56052285", "0.55920315", "0.55920315", "0.558599", "0.55538255", "0.5548589", "0.55261475", "0.55206406", "0.55021065", "0.55021065", "0.549093", "0.54759884", "0.547345", "0.5470062", "0.5462208", "0.546108", "0.5455256", "0.5401603", "0.53985274", "0.5396973", "0.5386745", "0.5384246", "0.5382686", "0.5379654", "0.53596485", "0.53559345", "0.5355034", "0.5353111", "0.5343416", "0.5336629", "0.5335194", "0.53315073", "0.53304505", "0.53304505", "0.53304505", "0.53304505", "0.53304505", "0.53304505", "0.53304505", "0.53304505", "0.5327225", "0.53222346", "0.5316221", "0.5313677", "0.5303978", "0.5299016", "0.5296311", "0.52957934", "0.5295201", "0.5293231", "0.5290071", "0.5280273", "0.5279088", "0.5278655", "0.5277354", "0.5277241", "0.527267", "0.527267", "0.52690065", "0.52636105", "0.526024", "0.5252478", "0.5247606", "0.52390033", "0.5238941", "0.5234587", "0.5234176", "0.5234176", "0.523327", "0.5232527", "0.52305555", "0.5229137" ]
0.55769426
32
Exposes protected data to a caller. Be extremely careful with it: it contains originals, not copies
def expose_data(self): return _ExposedFarmData(self._platforms, self._awaiting, self._channels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_data_protected(self): \n pass", "def protected(_):\n return False # This protects nothing", "def write_protected(cls, **kwargs: Any) -> \"DataSchema[ObjType]\":\n return super().write_protected(**kwargs) # type: ignore", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def Secure(self,passphrase=None,public_attributes=[]):\n\n\t\tif passphrase == None:\n\t\t\treturn self.Dictify()\n\t\telse:\n\t\t\tself.data = Encrypting.Symmetric.Encrypt(json.dumps(self.Dictify()).encode('utf-8'),passphrase).decode('utf-8')\n\t\t\t\n\t\t#secure data and dictify\n\t\tmy_secure_dict = self.Dictify()\n\n\t\t#new obfuscated obj\n\t\tnew_me = {'data':my_secure_dict['data']}\n\n\t\tfor pub_att in public_attributes:\n\t\t\tnew_me[pub_att] = my_secure_dict[pub_att]\n\n\t\treturn new_me", "def _make_information_storable( self, data ):\n\t\tpass", "def write_protected(cls, **kwargs: Any) -> \"DataSchemaConcrete\":\n this_exclude = kwargs.get(\"exclude\", list())\n this_exclude.extend(cls.WRITE_PROTECTED)\n kwargs[\"exclude\"] = this_exclude\n return cls(**kwargs)", "def build_private_data(self, instance, private_data_dir):", "def modify_user(user_data):\r\n raise NotImplementedError()", "def _localWhatDoINeed(self):\n needDict = super()._localWhatDoINeed()\n\n return needDict", "def Modified(self, *args):\n return _BRepAlgo.BRepAlgo_DSAccess_Modified(self, *args)", "def is_private():", "def public(*args):\n def public_wrapper():\n pass\n return public_wrapper", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def data(self, int_role=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\r\n pass", "def Unprotected():\n def wrapper(original_class):\n orig_init = original_class.__init__\n\n @functools.wraps(original_class)\n def __init__(self, *args, **kws):\n self.falcon_security__roles = []\n self.falcon_security__unprotected = True\n orig_init(self, *args, **kws)\n\n original_class.__init__ = __init__\n return original_class\n return wrapper", "def changes_data(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n self.modified = True\n return f(self, *args, **kwargs)\n return wrapper", "def NewData(self, data):\n return data", "def exclude_protected(cls):\n cls.decls(pd.access_type_matcher_t(pd.ACCESS_TYPES.PROTECTED),allow_empty=True).exclude()", "def is_protected(self, is_protected):\n \"\"\"\n if is_protected is None:\n raise ValueError(\"Invalid value for `is_protected`, must not be `None`\")\n \"\"\"\n\n self.container['is_protected'] = is_protected", "def private(func):\n func._private_ = True\n return func", "def data(self):\n retval = copy.deepcopy(self.__dict__)\n retval[\"_Serializable_classname\"] = type(self).__name__\n retval[\"_Serializable_version\"] = \"1.0\"\n return retval", "def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data", "def inject_data_hook(self, data):\n return data", "def _transform_data(self, *args, **kwargs) -> None:\n raise 
NotImplementedError", "def manipulate_data(data):\n log.info(\"Doing some fun stuff here!\")\n return data", "def original(self) -> Any:\n raise NotImplementedError", "def update_original_data(self):\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def unprotected_method():\n return {\"message\": \"Anyone access this function\"}", "def public(self, view):\n view.public = True\n return view", "def bound_data_with_bug_19611_patch(original_function, self, data, initial):\n return initial", "def get_pure_data_copy(self):\n import copy\n data=copy.copy(self)\n data.xp = data.xp.get_pure_data_copy()\n data.timetable = data.timetable.get_pure_data_copy() \n return data", "def dump(self, data: dict, original_data=None):\n return data", "def data(self):", "def Expose(obj_as_dict: dict,passphrase=None):\n\t\tif passphrase == None:\n\t\t\treturn obj_as_dict\n\t\telse:\n\t\t\tinterpreted = json.loads(Encrypting.Symmetric.Decrypt(obj_as_dict['data'].encode('utf-8'),passphrase).decode('utf-8'))\n\n\t\tinterpreted['data'] = None #so its clear need to obfuscate again\n\n\t\treturn interpreted", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def safe_data(self):\r\n hide = ['_password', 'password', 'is_admin', 'api_key']\r\n return dict(\r\n [(k, v) for k, v in dict(self).iteritems() if k not in hide]\r\n )", "def get_data(self):\r\n return self.data.copy()", "def test_anon_private(self):\n self.do_visible(True, None, False)", "def access():", "def protected(self) -> bool:\n return pulumi.get(self, \"protected\")", "def get_restricted_data(self, full_data, user):\n if (self.can_retrieve(user) and\n (not full_data['is_hidden'] or\n user.has_perm('agenda.can_see_hidden_items'))):\n data = full_data\n else:\n data = None\n return data", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def data(self, data):\n self.__data = data", "def prepare_data(self):", "def get_sharable_data(self):\n raise NotImplementedError", "def private(self):\n return self._private", "def make_priv(self, handler):\n self.handler = handler", "def data(self, key=None):\n pass # pragma: no cover", "def private(self, private):\n\n self._private = private", "def private(self, private):\n\n self._private = private", "def test_anon_public(self):\n self.do_visible(True, None, True)", "def me(self, data, *args, **kwargs):\n return self._me(data, *args, **kwargs)", "def get_data(self):\r\n pass", "def _to_be_wrapped(self) -> None:", "def data(self):\r\n raise NotImplementedError", "def get_data():\n pass", "def _get_user_data(self):\n return {\"key\": self._key}", "def data_out(f):\n @functools.wraps(f)\n def dec(*args, **kwargs):\n rtn, dout = f(*args, **kwargs)\n _stash_set_data(dout)\n return rtn\n return dec", "def replace_user_data(self, new_data):\n self._user_data = new_data", "def __call__(self, data, **kwargs):", "def data(self):\n pass", "def data(self):\n pass", "def _extract_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def hide(obj):\n obj._spec__is_private = True\n return obj", "def copy (a_data) :\n return a_data.copy()", "def __setitem__(self,key,value):\n if self._extract:\n raise RuntimeError('This archive is read-only!')\n else:\n self._setitem(key,value)", "def data(self):\r\n print('method BaseDataLogger.data() is not implemented, to be done in subclass ' 
+ str(type(self)))", "def data(self):\r\n print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self)))", "def test_anon_public_owned(self):\n self.do_visible(True, 'pattieblack', True)", "def private(function):\n if not getattr(function, \"private\", False):\n function.private = True\n return function", "def _get_data(self):\n raise NotImplementedError()", "def Obfuscate(self,passphrase=None,public_attributes=[]):\n\t\t\n\t\tnew_me = self.Secure(passphrase,['ID','status'])\n\n\t\treturn new_me", "def __call__(self, data, keep):\n return self.transform(data, keep)", "def secretstore():\n pass", "def get_data():\n return", "def Obfuscate(self,passphrase=None,public_attributes=[]):\n\t\t\n\t\tnew_me = self.Secure(passphrase,['nowDN','status']+public_attributes)\n\n\t\treturn new_me", "def mock_rdata(): \n return {\n \"authors\": [{\"full_name\": \"N. Ame\"}],\n \"owners\": [{\"full_name\": \"N. Ame\"}],\n \"submitter\": {\"full_name\": \"N. Ame\"},\n \"paper_id\": \"1234.56789\",\n \"title\": \"some title\",\n \"abstract\": \"An abstract with math $/alpha * /alpha$ for you.\",\n }", "def get_data(self):\n pass", "def get_data(self):\n pass", "def encrypt_data(self, params):\n raise NotImplementedError", "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "def get_data(self):\n\n return super().get_data()", "def __copy__(self):\n raise NotImplementedError", "def private(self, private: bool):\n\n self._private = private", "def wrapper():\n return get_data(keychain_file)", "def __init__(self, data, origin=None, raw_data=None,\n field_offset_map=None):\n self._data = data\n if raw_data is None:\n raw_data = data\n self._raw_data = raw_data\n if origin is None:\n origin = Origin.get_caller_origin()\n self._origin = origin\n self._field_offset_map = field_offset_map", "def data_provider(fn_data_provider):\n def test_decorator(fn):\n def repl(self):\n for i in fn_data_provider():\n try:\n if hasattr(i, '_asdict'):\n fn(self, **i._asdict())\n else:\n fn(self, *i)\n except AssertionError as e:\n print(i)\n raise e\n return repl\n return test_decorator", "def ExtraInfo(self) -> object:", "def getPublicUserInfo(self, username):\r\n pass", "def package_data(self, data):\n pass", "def request_data(self):\n pass", "def initiatePassingData(self, ):\n\t\tpdata = PassingData(xValue2yValueLs={}, x_ls=[], y_ls=[], invariantPData=self.invariantPData)\n\t\t#2012.8.16 pass to global data\n\t\tself.invariantPData.y_ls = pdata.y_ls\n\t\tself.invariantPData.x_ls = pdata.x_ls\n\t\treturn pdata" ]
[ "0.7407494", "0.6308666", "0.597678", "0.5667471", "0.5667471", "0.5665087", "0.5659497", "0.56185216", "0.5615482", "0.55298316", "0.5414067", "0.53964454", "0.53828067", "0.53819466", "0.53760314", "0.5359521", "0.535882", "0.5358776", "0.5326243", "0.5324696", "0.529068", "0.5287715", "0.5287565", "0.52839047", "0.52630347", "0.5258382", "0.5257336", "0.52556413", "0.52551126", "0.52265966", "0.52265966", "0.52265966", "0.52222216", "0.5209851", "0.5190195", "0.5184954", "0.5164821", "0.51599914", "0.51526463", "0.51525635", "0.51525635", "0.51525635", "0.5151663", "0.5151468", "0.51348233", "0.5129233", "0.51213956", "0.5100016", "0.5095447", "0.5094879", "0.50935775", "0.50911784", "0.50898784", "0.5076855", "0.5076187", "0.5060919", "0.5060919", "0.5058736", "0.50563705", "0.5049241", "0.50421304", "0.5030069", "0.50267863", "0.50256014", "0.50193405", "0.5018628", "0.50080955", "0.5007344", "0.5007344", "0.49968007", "0.4994073", "0.4992453", "0.49799368", "0.4968081", "0.4968081", "0.49577376", "0.4947126", "0.4944187", "0.49378744", "0.4922834", "0.49122953", "0.49112678", "0.4907122", "0.48941368", "0.4892133", "0.4892133", "0.48866087", "0.48744056", "0.48744056", "0.48714212", "0.486209", "0.48520014", "0.48492876", "0.4832427", "0.48309198", "0.4826887", "0.48260874", "0.48237807", "0.48236924", "0.4817002" ]
0.481632
100
Returns running state for specified platform
def is_running(self, platform): if platform not in self._platforms: raise ValueError("Platform {} is not registered".format(platform)) return self._platforms[platform].running
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state(self):\n data = self.coordinator.data[self._host_name][self._node_name][self._vm_id]\n if data[\"status\"] == \"running\":\n return STATE_ON\n return STATE_OFF", "def running_state(self) -> int | None:\n return self.cluster.get(\"running_state\")", "def running(self):\n info = self.info()\n return info['running']", "async def read_running_state(self):\n # PROTECTED REGION ID(AsyncTabata.running_state_read) ENABLED START #\n return self._running_state\n # PROTECTED REGION END # // AsyncTabata.running_state_read", "def get_state(self):\n\t\treturn Job(SDK.PrlVm_GetState(self.handle)[0])", "def get_running_ver(self):\n module = 'version/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n runningver = response.json()['version']['oper']['sw-version']\n currentpart = response.json()['version']['oper']['boot-from']\n print(self.device + ' The current running version is: ' + runningver)\n print(self.device + ' The device is currently booted from: ' + currentpart)\n return runningver", "def _get_run_state(self):\n if not self._run_state:\n state = self.lookup_enum([\"State\", \"state\"])\n if not state:\n self._run_state = STATE_DISHWASHER_POWER_OFF\n else:\n self._run_state = state\n return self._run_state", "def platform(self) -> Platform:\n _args: list[Arg] = []\n _ctx = self._select(\"platform\", _args)\n return _ctx.execute_sync(Platform)", "def running_state(self):\n self.toggle_components(self.event)\n\n if self.event == 'e_stop':\n status = 'e_stopped'\n self.event_out.publish(status)\n self.reset_component_data(status)\n return 'INIT'\n if self.pose_shifter_status == 'e_failure':\n status = 'e_failure'\n self.event_out.publish(status)\n self.reset_component_data(status)\n return 'INIT'\n if self.pose_shifter_status == 'e_success':\n status = 'e_success'\n self.event_out.publish(status)\n self.reset_component_data(status)\n return 'INIT'\n else:\n return 'RUNNING'", "def status(name='default'):\n machine_states = dict(_status())\n return machine_states[name]", "def runtime_status(self):\n try:\n return self.yarn_api.state(self.app_id)\n except:\n return \"NONE\"", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def is_running(self):\n data = self._poll()\n return data.get('building', False)", "def platform_status(**params):\n endpoint = 'platform/status'\n return request(authenticate=False, version=2, endpoint=endpoint, method='GET', query_params=params)", "def get_arch():\n with settings(hide('running', 'stdout')):\n arch = run('uname -m')\n return arch", "def get_working_status(self):\n #TODO: fix some issue on restarting and so on about current status\n return self.working_map[self.get_status()]", "def systemd_running_state(name, path=None):\n try:\n ret = run_all(\n name, \"systemctl is-system-running\", path=path, ignore_retcode=True\n )[\"stdout\"]\n except CommandExecutionError:\n ret = \"\"\n return ret", "def get_provisioning_state(self):\n url = \"/api/v1/machine/{}\".format(self.machine_id)\n return self.urlhandler.get(url)", "def get_running_status(self):\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n elif proc_val.search_result >=1:\n 
dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n return dict_processor\n else:\n return False", "def _get_state(self):\n print(\"GET STATE\")\n res = self._send_command(\n \"RS;\",\n fb_required=True,\n res_pattern=\"STATE:\")\n # The received answer is supposed to be something like\n # STATE:0|1|-1\n state = int(res.split(':')[1])\n if state == PVDriver.IDLE:\n return \"IDLE\"\n elif state == PVDriver.MOVING:\n return \"MOVING\"\n else:\n return \"ERROR\"", "def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def running(self): # type: () -> bool\n return self.state['Running']", "def get_platform():\n platforms = [\n \"Android\",\n \"Linux.RaspberryPi\",\n \"Linux\",\n \"XBOX\",\n \"Windows\",\n \"ATV2\",\n \"IOS\",\n \"OSX\",\n \"Darwin\",\n ]\n\n for platform in platforms:\n if xbmc.getCondVisibility('System.Platform.'+platform):\n return platform\n return \"Unknown\"", "def all_is_running(self):\r\n return all(p.running for p in self._platforms.values())", "def status(self):\n return self.microblaze.state", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n \"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def get_state(self):\n\t\treturn call_sdk_function('PrlVmInfo_GetState', self.handle)", "def is_running(self):\n return self.type_id == STATE_RUNNING", "def project_state(self, project):\n r = self.client.get(project)\n self.assertCode(r, 200)\n return r.data['state']", "def get_state(self):\n return self.env.sim.get_state()", "def next_task_state(self):\n return MODULE_RUNNING", "def state(self):\n return self.device.status(station=self.station_number)", "def getPlatform(self):\n\t\treturn None", "def update_state(self):\n if not self.platforms:\n self.state = self.states['Win']\n elif self.player.y > 389:\n self.state = self.states['Lose'] \n else:\n self.state = self.states['Game']", "def state(self):\n state = 'Unknown'\n closest = 0.5\n for device, method_name in self._methods:\n if method_name.startswith('wm_'):\n state_name = method_name.replace('wm_', '', 1)\n wm_state = getattr(device, method_name)\n diff = wm_state()\n if diff < closest:\n state = state_name\n closest = diff\n return state", "def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")", "def is_running(self):\n return self.current_state == self.States.RUNNING", "def Platforms():\n return platforms", "def get_state(self):\n return self._env.get_state()", "def get(self):\n if not self.__name in g_platform_variables:\n raise RuntimeError(\"unknown platform variable '%s'\" % (self.__name))\n current_var = g_platform_variables[self.__name]\n combinations = get_platform_combinations()\n for ii in combinations:\n if ii in current_var:\n return current_var[ii]\n raise RuntimeError(\"current platform %s not supported for variable '%s'\" % (str(combinations), self.__name))", "def 
get_current(cls):\n plt = platform.platform()\n distname = platform.linux_distribution()[0]\n if not distname:\n raise RuntimeError('Unsupported platform %s' % plt)\n LOG.debug('Looking for distro data for %s (%s)', plt, distname)\n for p in cls.load_all():\n if p.supports_distro(plt):\n LOG.info('Using distro \"%s\" for platform \"%s\"', p.name, plt)\n return p\n else:\n raise RuntimeError(\n 'No platform configuration data for %s (%s)' %\n (plt, distname))", "def get_current_auto_os_patch_state(self):\n self.composite_logger.log(\"Fetching the current automatic OS patch state on the machine...\")\n\n current_auto_os_patch_state_for_yum_cron = self.__get_current_auto_os_patch_state_for_yum_cron()\n current_auto_os_patch_state_for_dnf_automatic = self.__get_current_auto_os_patch_state_for_dnf_automatic()\n current_auto_os_patch_state_for_packagekit = self.__get_current_auto_os_patch_state_for_packagekit()\n\n self.composite_logger.log(\"OS patch state per auto OS update service: [yum-cron={0}] [dnf-automatic={1}] [packagekit={2}]\"\n .format(str(current_auto_os_patch_state_for_yum_cron), str(current_auto_os_patch_state_for_dnf_automatic), str(current_auto_os_patch_state_for_packagekit)))\n\n if current_auto_os_patch_state_for_yum_cron == Constants.AutomaticOSPatchStates.ENABLED \\\n or current_auto_os_patch_state_for_dnf_automatic == Constants.AutomaticOSPatchStates.ENABLED \\\n or current_auto_os_patch_state_for_packagekit == Constants.AutomaticOSPatchStates.ENABLED:\n current_auto_os_patch_state = Constants.AutomaticOSPatchStates.ENABLED\n elif current_auto_os_patch_state_for_yum_cron == Constants.AutomaticOSPatchStates.DISABLED \\\n and current_auto_os_patch_state_for_dnf_automatic == Constants.AutomaticOSPatchStates.DISABLED \\\n and current_auto_os_patch_state_for_packagekit == Constants.AutomaticOSPatchStates.DISABLED:\n current_auto_os_patch_state = Constants.AutomaticOSPatchStates.DISABLED\n else:\n current_auto_os_patch_state = Constants.AutomaticOSPatchStates.UNKNOWN\n\n self.composite_logger.log_debug(\"Overall Auto OS Patch State based on all auto OS update service states [OverallAutoOSPatchState={0}]\".format(str(current_auto_os_patch_state)))\n return current_auto_os_patch_state", "def get_chromeos_platform_name():\r\n try:\r\n platform = cros_config.call_cros_config_get_output('/ name', utils.run)\r\n if platform == '':\r\n platform = get_board()\r\n return platform\r\n except:\r\n logging.info(\"Not found\")\r\n return -1", "def get_platform():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/platform\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_current_kernel_arch():\r\n try:\r\n return os.popen('uname -m').read().rstrip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def GetCurrentPlatform():\n if sys.platform == 'darwin':\n return 'mac'\n if sys.platform == 'win32':\n return 'win'\n if sys.platform == 'linux2':\n return 'linux'\n raise RuntimeError('Unknown platform')", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def get_state(self) -> Any:\n raise NotImplementedError(\n 'This environment has not implemented `get_state()`.'\n )", "def query_running(self):\n qp=win32com.client.constants.__dicts__[0]['EXP_RUNNING_EXPERIMENT']\n return 
self.app.GetParam(qp)[0]", "def get_platform(self):\n return self._platform", "def get_current_state(self):\n return self.game.get_current_state()", "def get_ha_state():\n\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><high-availability><state></state></high-availability></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")", "def _get_desired_state(schedule):\n current_hour = int(time.strftime(\"%H\", time.gmtime()))\n current_week_day = time.strftime(\"%A\", time.gmtime()).lower()\n start = schedule[current_week_day]['start']\n stop = schedule[current_week_day]['stop']\n\n state = 'stop'\n if current_hour >= start and current_hour < stop:\n state = 'start'\n\n return state", "def _get_current_game_state(board):\n return np.concatenate((_get_pieces_one_hot(board, color=False),\n _get_pieces_one_hot(board, color=True)),\n axis=-1)", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def _is_running(self):\n return self._run_state.is_running()", "def get_game_state(self):\n return self._game_status", "def state(self) -> RunState:\n return self._async_scheduler.state", "def get_game_state(board):\n if not is_valid_board(board):\n return config.ERROR, 'The given board is not valid.'\n if player_has_won(board, config.COMPUTER):\n return config.CPU_WINS, 'Computer wins.'\n elif player_has_won(board, config.HUMAN):\n return config.USER_WINS, 'User wins.'\n elif is_full_board(board):\n return config.CATS_GAME, \"Cat's game\"\n return config.ONGOING, 'Ongoing.'", "def getStatus(self):\n pid = self._getPid()\n if pid:\n if q.system.process.isPidAlive(pid):\n return AppStatusType.RUNNING\n return AppStatusType.HALTED", "def state(self):\n if 'power' in self._status and self._status['power'] == '0':\n return STATE_OFF\n if 'mode' in self._status:\n if self._status['mode'] == 'pause':\n return STATE_PAUSED\n if self._status['mode'] == 'play':\n return STATE_PLAYING\n if self._status['mode'] == 'stop':\n return STATE_IDLE\n return STATE_UNKNOWN", "def is_running(self):\n\t\treturn self._running", "def getPlatform(self, name):\r\n if self.platforms.has_key(name):\r\n return self.platforms[name]\r\n else:\r\n self.platforms[name] = Platform(name)\r\n return self.platforms[name]", "def get_state(self, run_id):\n raise NotImplementedError()", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def _get_state(self):\n # gst's get_state function returns a 3-tuple; we just want the\n # status flag in position 1.\n return self.pipeline.get_state(Gst.CLOCK_TIME_NONE)[1]", "def _get_state(self):\n fw_wp_en = (self._interface.get('fw_wp_en') == 'on')\n fw_wp = (self._interface.get('fw_wp') == 'on')\n if fw_wp_en:\n return self._STATE_FORCE_ON if fw_wp else self._STATE_FORCE_OFF\n else:\n return self._STATE_ON if fw_wp else self._STATE_OFF", "def this_host():\n host_os = platform.system()\n print('This platform OS is: ', host_os)\n return", "def build_state(device, config):\n capaRAW = device.capabilities(absinfo=False)\n state = {}\n\n for code in capaRAW[1]:\n state[config[code]] = 0\n\n for code in capaRAW[3]:\n state[config[code]] = 0\n\n print(\"State Dict: \" + str(state))\n return state", "def get_game_state(self):\n return self._current_state", "async def 
fetch_status(self, params={}):\n #\n # [1] # operative\n # [0] # maintenance\n #\n response = await self.publicGetPlatformStatus(params)\n statusRaw = self.safe_string(response, 0)\n return {\n 'status': self.safe_string({'0': 'maintenance', '1': 'ok'}, statusRaw, statusRaw),\n 'updated': None,\n 'eta': None,\n 'url': None,\n 'info': response,\n }", "def process_state(self):\n process = self._get_process()\n if not self.is_on:\n process = StateOptions.NONE\n return self._update_feature(WashDeviceFeatures.PROCESS_STATE, process)", "def get_platform():\n try:\n import RPi.GPIO\n return PI\n except ImportError:\n pass\n\n if platform.system() == 'Linux':\n return LINUX\n else:\n return UNKNOWN", "def state(self):\n return self.coordinator.data[PVS_DEVICE_TYPE][self.base_unique_id][PVS_STATE]", "def get_status(self):\n\n return self._system", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def getplatform():\n\n # Return the system platform\n return sys.platform", "def run_state(self):\n run_state = self._get_run_state()\n if STATE_DISHWASHER_POWER_OFF in run_state:\n run_state = StateOptions.NONE\n return self._update_feature(WashDeviceFeatures.RUN_STATE, run_state)", "def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def get_platform(self, name):\n if name in self.platforms:\n return name\n else:\n try:\n p = self.platforms['name'] = Platform.load(self, name)\n return p\n except IOError as e:\n print('Failed loading platform: {0}'.format(str(e)))\n return None", "def GetCurrent():\n global ENV\n return ENV[threading.current_thread().ident]", "def in_runtime(self):\n\n return self.is_valid_platform() and self['ENVIRONMENT']", "def state(self):\n # We can't actually tell if it's actively running in any of these\n # modes, just what mode is set\n if (self._device.mode == self._device.MODE_OFF) and (\n self._device.fan == self._device.FAN_ON):\n return STATE_FAN_ONLY\n elif self._device.mode == self._device.MODE_OFF:\n return STATE_IDLE\n elif (self._device.mode == self._device.MODE_HEAT) or (\n self._device.mode == self._device.MODE_HEAT_EMERGENCY):\n return STATE_HEAT\n elif self._device.mode == self._device.MODE_COOL:\n return STATE_COOL\n elif self._device.mode == self._device.MODE_AUTO:\n return STATE_AUTO\n return STATE_UNKNOWN", "def state(self):\n return self.device.value()", "def state(self):\n\t\tif self._state in JOB_PS:\n\t\t\treturn JOB_PS[self._state]\n\t\telse:\n\t\t\treturn str(self._state)", "def onboarding_state(self) -> Optional[str]:\n return pulumi.get(self, \"onboarding_state\")", "def onboarding_state(self) -> Optional[str]:\n return pulumi.get(self, \"onboarding_state\")", "def onboarding_state(self) -> Optional[str]:\n return pulumi.get(self, \"onboarding_state\")", "def is_running(self):\n\n return self._state == \"RUNNING\"", "def get_sequencing_platform(self):\n platform = self.data[\"platform\"]\n if platform == \"miseq\":\n platform = \"MiSeq\"\n elif platform == \"hiseq4000\":\n platform == \"HiSeq4000\"\n elif platform == \"hiseq2000\":\n platform == \"HiSeq2000\"\n else:\n raise Exception(\"Unknown platform {platform} for sequencing run {run}\".format(platform=platform,run=self.run))\n return platform", "def get_status(self, run_id):\n return self.client._perform_json(\n \"GET\", 
\"/projects/%s/runnables/%s/state/%s\" % (self.project_key, self.runnable_type, run_id))", "def platform():\n return ['linux']", "def platforms(self):\n return self.rpc.call(MsfRpcMethod.ModulePlatforms)", "def get_home_state(self):\n raw_status = self.get_raw_status()\n is_home = raw_status & self.STATUS_HOMED\n is_homing = raw_status & self.STATUS_HOMING\n if is_homing:\n return 2\n if not is_home:\n return 1\n return 0", "def getSystemState(self, serialNum, internal=False):\r\n\r\n self._logger.debug(\"in API getSystemStatus()...\")\r\n\r\n # check the auth tokens and TTL unless this is a get state call (a non-polling call)\r\n if not internal:\r\n self._checkTokens()\r\n\r\n # format url parameters\r\n params = {\r\n \"actionID\": \"command\",\r\n \"command\": _SESSION_COMMAND_GET_HOME,\r\n \"serial\": serialNum,\r\n \"sessionID\": self._sessionID,\r\n } \r\n\r\n # call the session API with the parameters\r\n response = self._call_api(_API_SESSION, params=params)\r\n \r\n # if data returned, format system state and return\r\n if response and response.status_code == 200:\r\n\r\n respData = response.json()\r\n return self._buildSystemState(respData)\r\n \r\n # otherwise return error (False)\r\n else:\r\n return False" ]
[ "0.61530626", "0.60770965", "0.6040421", "0.60027874", "0.5839394", "0.58060783", "0.5787307", "0.57715446", "0.57025546", "0.56921524", "0.56680083", "0.55934256", "0.55586237", "0.553357", "0.5515652", "0.55087674", "0.5503442", "0.5490447", "0.5443907", "0.54368734", "0.5435472", "0.54352564", "0.5406959", "0.53855395", "0.5379651", "0.53771764", "0.5367942", "0.5356819", "0.5356088", "0.5349455", "0.5347072", "0.53406835", "0.53344625", "0.53289413", "0.532462", "0.5324138", "0.5318588", "0.53118616", "0.53117037", "0.5291515", "0.52789205", "0.52725995", "0.5264951", "0.52592856", "0.5257664", "0.5257336", "0.5254901", "0.524495", "0.5239555", "0.5215608", "0.5214081", "0.5213396", "0.5209197", "0.5208639", "0.52029526", "0.5194786", "0.51922566", "0.5186713", "0.51854604", "0.5181424", "0.51792735", "0.517877", "0.5166195", "0.5165893", "0.5158751", "0.51548505", "0.5151074", "0.51397765", "0.5137088", "0.51350623", "0.51338077", "0.5129453", "0.51286536", "0.51247275", "0.51214415", "0.5118434", "0.5108656", "0.5099136", "0.5089191", "0.50888175", "0.5075637", "0.5073296", "0.5073296", "0.5073296", "0.50730443", "0.50675887", "0.50576824", "0.5057622", "0.50519323", "0.5045994", "0.50411975", "0.50411975", "0.50411975", "0.5038037", "0.5030919", "0.5030526", "0.5021979", "0.5009387", "0.5007121", "0.49999383" ]
0.6958679
0
Checks whether all platforms are running or not
def all_is_running(self): return all(p.running for p in self._platforms.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_platform():\n system = platform.system()\n distro = platform.platform()\n is_raspberry_pi = False\n try:\n info = open(\"/proc/cpuinfo\").read()\n except FileNotFoundError:\n is_raspberry_pi = False\n else:\n # bcm2708: Raspberry Pi 1\n # bcm2709: Raspberry Pi 2\n # bcm2710: Raspberry Pi 3\n is_raspberry_pi = 'BCM27' in info or 'ODROID' in info\n\n return system == \"Linux\" and (\n os.path.isfile('/proc/device-tree/hat/uuid') or\n 'boot2docker' in distro.lower() or\n is_raspberry_pi or\n os.path.isfile('/sys/hypervisor/uuid') or\n os.path.isdir('/var/lib/digitalocean')\n )", "def platform_supported(self):\n return platform.system().lower() in self.platforms if self.platforms else False", "def win():\n if platform.system() in WINDOWS:\n return True\n return False", "def check_platform(target_platform):\n if target_platform == PLATFORM_LINUX:\n pass\n elif target_platform == PLATFORM_WINDOWS:\n # requires wine\n try:\n subprocess.run([\"wine\", \"--help\"], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n except:\n log_error(\"wine needs to be installed\")\n else:\n log_error(f\"something is strange with the platform type '{target_platform}'\")", "def _os_supported(self, plugin):\r\n return sys.platform in plugin.plugin_object.get_supported_os()", "def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values", "def Platforms():\n return platforms", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def is_running(self, platform):\r\n if platform not in self._platforms:\r\n raise ValueError(\"Platform {} is not registered\".format(platform))\r\n return self._platforms[platform].running", "def _check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")", "def os_is_pi():\n return \"raspberrypi\" in platform.uname()", "def is_system(self) -> bool:", "def _check_all_systems_ready(self):\n \n self._check_all_sensors_ready()\n #self._check_joint_states_ready()\n self._check_cmd_vel_pub()\n \n return True", "def all_is_stopped(self):\r\n return all(not p.running for p in self._platforms.values())", "def race_detector_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"amd64\", \"ppc64le\", \"arm64\", \"s390x\")\n elif goroot.goos == \"darwin\":\n return goroot.goarch in (\"amd64\", \"arm64\")\n elif goroot.goos in (\"freebsd\", \"netbsd\", \"openbsd\", \"windows\"):\n return goroot.goarch == \"amd64\"\n else:\n return False", "def platforms(self):\n return self.rpc.call(MsfRpcMethod.ModulePlatforms)", "def check_os():\n\n if platform.system() != \"Darwin\":\n print \"This script only works on macos system\"\n exit(1)", "def get_platforms(self):\n if self.platform == 'All':\n return PLATFORMS\n else:\n return self.platform.split(':')", "def os_is_linux():\n return platform.system() == \"Linux\" and \"raspberrypi\" not in platform.uname()", "def is_valid_platform(self):\n\n return 'APPLICATION_NAME' in self", "def has_platform(self, platform_name):\n return platform_name in self.platform_list", "def is_on_pi() -> bool:\n return os.name != \"nt\" and os.uname()[4][:3] == 
\"arm\"", "def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True", "def is_system(self) -> undefined.UndefinedOr[bool]:", "def check_connected(self):\n return\\\n (self.setup is not None) and\\\n (self.design is not None) and\\\n (self.project is not None) and\\\n (self.desktop is not None) and\\\n (self.app is not None)", "def available_platforms(verbose=True):\n\n\n import simtk.openmm as mm\n\n platforms_available = []\n\n for ii in range(mm.Platform.getNumPlatforms()):\n platform_name = mm.Platform.getPlatform(ii).getName()\n platform = mm.Platform.getPlatformByName(platform_name)\n platform_speed = platform.getSpeed()\n platforms_available.append(platform_name)\n if verbose:\n print('Platform {} with speed {}'.format(platform_name,platform_speed))\n del(platform_name, platform, platform_speed)\n\n if verbose is False:\n return platforms_available", "def in_runtime(self):\n\n return self.is_valid_platform() and self['ENVIRONMENT']", "def os_is_windows():\n return platform.system() == \"Windows\"", "def _in_wsl():\n return \"microsoft-standard\" in uname().release", "def platforms(self):\n logger.debug(\"Get platforms\")\n return self._raw_api.platforms.get()", "def has_platform(cls, plist, platforms):\n if platforms is None:\n raise Exception(\"no platforms?\")\n\n return (\n 'CFBundleSupportedPlatforms' in plist and\n any(map(lambda p: p in plist['CFBundleSupportedPlatforms'], platforms))\n )", "def evaluate_for_platform(self, **kwargs):\n return False", "def msan_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"amd64\", \"arm64\")\n elif goroot.goos == \"freebsd\":\n return goroot.goarch == \"amd64\"\n else:\n return False", "def is_xpu_available():\n xpu_count = int(os.getenv(\"FLAGS_selected_xpus\", \"-1\"))\n if xpu_count < 0:\n return False\n\n if _HAS_FLUID:\n from paddle import fluid\n if not fluid.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n if _HAS_PADDLE:\n import paddle\n if not paddle.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n return True", "def system_valid(self):\n return self.udev.devices_exist", "def platform():\n return ['linux']", "def _check_all_systems_ready(self):\n for r in self.robots:\n r.joints = None\n while r.joints is None and not rospy.is_shutdown():\n try:\n r.joints = rospy.wait_for_message(\n r.ns + '/joint_states', JointState, timeout=3.0)\n except:\n rospy.logerr(\"Current /joint_states not ready yet.\\n\\\n Do you spawn the robot and launch ros_control?\")\n try:\n r.model_index = rospy.wait_for_message('/gazebo/model_states', ModelStates, 3).name.index(r.ns[1:])\n except rospy.exceptions.ROSException:\n rospy.logerr(\"Robot model does not exist.\")\n\n # rospy.logdebug(\"ALL SYSTEMS READY\")\n return True", "def check_os():\n if sys.platform == \"win32\":\n print(\"WARNING:\")\n print(\"This program use Scapy. 
Scapy is primarily being developed for Unix-like systems and works best on those platforms.\")\n print(\"You should to change your OS, because some Scapy functions may not be available.\")\n time.sleep(5)", "def test_os_system(self):\n self.assertEqual(self.settings.OS_SYSTEM, platform.system())", "def is_system_ready_for_benchmarking():\n\n # check if scaling_governor is set to 'performance' for all cpu cores\n cpu_governors = glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')\n if not cpu_governors:\n logger.error('no scaling_governor found. Do you run on a Linux System?')\n return False\n for governor in sorted(cpu_governors):\n with open(governor, 'r') as f:\n line = f.read().splitlines()[0]\n logger.debug('%s is set to \\\"%s\\\"', governor, line)\n if line != 'performance':\n logger.warning('please set all scaling_governor to \\\"performance\\\" (using \"sudo ./ondemand.sh start\")')\n return False\n\n return True", "def evaluate_for_platform(self, **kwargs):\n return True", "def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n \"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def _on_windows() -> bool:\n return os.name == \"nt\"", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )", "def asan_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"arm64\", \"amd64\", \"riscv64\", \"ppc64le\")\n else:\n return False", "def is_available():", "def check_sysapps():\n return sysapps.test", "def get_platform():\n platforms = [\n \"Android\",\n \"Linux.RaspberryPi\",\n \"Linux\",\n \"XBOX\",\n \"Windows\",\n \"ATV2\",\n \"IOS\",\n \"OSX\",\n \"Darwin\",\n ]\n\n for platform in platforms:\n if xbmc.getCondVisibility('System.Platform.'+platform):\n return platform\n return \"Unknown\"", "def is_linux():\r\n return sys.platform.startswith('linux')", "def check_supported_features(self):", "def is_linux() -> bool:\n\n return sys.platform.startswith('linux')", "def is_windows():\n return sys.platform == \"win32\"", "def in_build(self):\n\n return self.is_valid_platform() and not self['ENVIRONMENT']", "def is_linux():\n return sys.platform[:5] == \"linux\"", "def is_windows() -> bool:\n return sys.platform == \"win32\"", "def test_os_processor(self):\n self.assertEqual(self.settings.OS_PROCESSOR, platform.processor())", "def check_build(self, bld_num):\n # QQQ In future this should be replaced with a query to the\n # build database\n bld_dir = os.path.join(self.ver_dir, str(bld_num))\n for plat in self.plats.keys():\n if self.plats[plat]:\n # QQQ Assumes format of filename unique to couchbase-server\n files = glob.glob(\"{}/couchbase-server-enterprise?{}*{}*\".format(\n bld_dir, self.version, plat\n ))\n files = [x for x in files if not (x.endswith(\".md5\") or x.endswith(\".sha256\"))]\n if len(files) == 0:\n print (\"Platform {} is missing\".format(plat))\n return False\n return True", "def test_guest_os(self):\n self.check_guest_os()", "def 
test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def check_platform():\n if os.getcwd() != os.path.dirname(os.path.abspath(__file__)):\n error = \"must be ran in the directory it's located at\"\n if os.path.sep != '/':\n error = \"a unix-like operating system is required\"\n elif not shutil.which('dpkg-deb'):\n error = \"cannot find dpkg-deb\"\n elif os.getuid() != 0:\n error = \"must be ran as root (or with fakeroot)\"\n else:\n return\n sys.exit(\"{}: error: {}\".format(sys.argv[0], error))", "def is_windows() -> bool:\n\n return sys.platform == 'win32'", "def isOnNao():\n szCpuInfo = \"/proc/cpuinfo\";\n if not os.path.exists( szCpuInfo ): # already done by the getFileContents\n return False;\n szAllFile = getFileContents( szCpuInfo, bQuiet = True );\n if( szAllFile.find( \"Geode\" ) == -1 and szAllFile.find( \"Intel(R) Atom(TM)\" ) == -1 ):\n return False;\n return True;", "def platforms(self) -> Any:\n\n return search_api(\"platforms\")", "def test_os_machine(self):\n self.assertEqual(self.settings.OS_MACHINE, platform.machine())", "def is_win():\n return sys.platform[:3] == \"win\"", "def is_windows():\n if os.name == \"nt\":\n return True\n return False", "def is_windows():\r\n return sys.platform == \"win32\"", "def _check_linux():\n\n # Only run checks on Linux\n if platform.system() != \"Linux\":\n return\n\n # Skip check for root as there will be no issue for this user\n if os.getuid() == 0:\n return\n\n # Skip check if user is trying to update udev rules:\n if \"--update-udev\" in sys.argv:\n return\n\n if not os.path.isfile(udev.RULES_FILE_PATH):\n print(\"W: udev rules are not installed. 
You may not be able to open the device using a regular user.\") # noqa\n print(\"\\n Run 'rivalcfg --update-udev' as root to fix.\\n\")\n elif not udev.is_rules_file_up_to_date():\n print(\"W: Installed udev rules were generated by an other rivalcfg version.\") # noqa\n print(\"\\n Run 'rivalcfg --update-udev' as root to update.\\n\")", "def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)", "def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)", "def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True", "def check_and_set(self):\n self.validate_paths()\n if not self.slirp_enabled:\n check_tun()\n # Check audio\n if self.audio_enabled:\n if not self.get('QB_AUDIO_DRV'):\n raise Exception(\"QB_AUDIO_DRV is NULL, this board doesn't support audio\")\n if not self.get('QB_AUDIO_OPT'):\n logger.warn('QB_AUDIO_OPT is NULL, you may need define it to make audio work')\n else:\n self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT')\n os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV'))\n else:\n os.putenv('QEMU_AUDIO_DRV', 'none')\n\n self.check_kvm()\n self.check_fstype()\n self.check_rootfs()\n self.check_ovmf()\n self.check_kernel()\n self.check_biosdir()\n self.check_mem()\n self.check_tcpserial()", "def get_platforms(self):\n _log.debug(\"Passing platforms back: {}\".format(\n self._registered_platforms.keys()))\n return self._registered_platforms.values()", "def check_environment():\n if 'OS_USERNAME' not in os.environ:\n print \"Error gathering facts! Please ensure that the openstack\" +\\\n \" credentials of an admin user are set as environment\" + \\\n \" variables.\"\n sys.exit(-1)\n if not find_executable('nova'):\n return False\n if not find_executable('openstack'):\n return False\n if not find_executable('glance'):\n return False\n if not find_executable('cinder'):\n return False\n return True", "def _check_requirements(self): # pylint: disable=too-many-branches, too-many-statements\n self._dev_emu = False\n self._is_root = False\n self._is_su = False\n self._alternate_su_binary = False\n\n if not self._device_serial:\n return\n\n if self._adb_available:\n # try some command\n date_res = self._do_adb_command('shell date')\n # adb not authorized\n if date_res and 'device unauthorized' in date_res:\n # kill adb daemon\n utils.do_shell_command('adb kill-server')\n utils.show_message_box(\n 'device not authorized! 
allow access from this computer on the device'\n )\n\n if date_res and 'no devices/emulators' in date_res:\n self._dev_emu = False\n return\n elif date_res and 'device not found' in date_res:\n self._dev_emu = False\n return\n else:\n self._dev_emu = True\n\n if self._dev_emu and date_res:\n try:\n # if date was fine it should end with year\n # Thu Feb 8 16:47:32 MST 2001\n date_res = date_res.split(' ')\n res_year = int(date_res[len(date_res) - 1])\n except ValueError:\n return # TODO: raise exceptions\n\n # try some su command to check for su binary\n res = self._do_adb_command('shell su -c date')\n if res and 'invalid' in res:\n res = self._do_adb_command('shell su 0 date')\n if res:\n self._alternate_su_binary = True\n\n if res:\n try:\n # if su date was fine it should end with year\n # Thu Feb 8 16:47:32 MST 2001\n su_res = res.split(' ')\n res_year = int(su_res[len(su_res) - 1])\n if res_year:\n # su cmd is available\n self._is_su = True\n\n # check if both date results matches otherwise its no valid result\n res_len = len(su_res)\n date_len = len(date_res)\n if su_res[res_len - 1] == date_res[date_len -\n 1]: # year\n if su_res[res_len - 2] == date_res[date_len -\n 2]: # timezone\n if su_res[res_len - 4] == date_res[date_len -\n 4]: # day\n if su_res[res_len - 5] == date_res[\n date_len - 5]: # month\n self._is_root = True\n\n except ValueError:\n pass\n\n res = self._do_adb_command('shell mount | grep system')\n if '/sbin/.magisk/block/system /' in res:\n self._syspart_name = '/sbin/.magisk/mirror/system'\n if '/system_root' in res:\n self._syspart_name = '/system_root'\n if '/sbin/.magisk/block/system_root /' in res:\n self._syspart_name = '/sbin/.magisk/mirror/system_root'\n\n # check status of selinux\n res = self._do_adb_command('shell getenforce')\n if res:\n res = res.join(res.split())\n if res != 'Permissive' and res != 'Disabled':\n self._do_adb_command('shell setenforce 0')\n\n # nox fix\n res = self.su_cmd('mount -o ro,remount ' + self._syspart_name)\n if res and 'invalid' in res:\n self._alternate_su_binary = True\n\n # no su -> try if the user is already root\n # on some emulators user is root\n if not self._is_su and self._dev_emu:\n res = self._do_adb_command('shell mount -o ro,remount ' + self._syspart_name)\n if res or res == '':\n if res and 'not user mountable' in res:\n # no root user\n self._is_root = False\n elif res == '':\n # cmd executed fine\n self._is_root = True\n else:\n # dont know some other output\n self._is_root = False\n # check for uid 0\n res = self._do_adb_command('shell id')\n # root should be 0\n # https://superuser.com/questions/626843/does-the-root-account-always-have-uid-gid-0/626845#626845\n self._is_root = 'uid=0' in res\n\n if self._dev_emu:\n # get some infos about the device and keep for later\n self._sdk_version = self._do_adb_command(\n 'shell getprop ro.build.version.sdk')\n if self._sdk_version:\n self._sdk_version = self._sdk_version.join(\n self._sdk_version.split()) # cleans '\\r\\n'\n self._android_version = self._do_adb_command(\n 'shell getprop ro.build.version.release')\n if self._android_version:\n self._android_version = self._android_version.join(\n self._android_version.split())\n\n try:\n self._oreo_plus = (int(\n self._android_version.split('.')[0]) >= 8)\n except ValueError:\n try:\n self._oreo_plus = (int(self._sdk_version) > 25)\n except ValueError:\n pass\n\n # fix some frida server problems\n # frida default port: 27042\n utils.do_shell_command('adb forward tcp:27042 tcp:27042')\n\n # check if we have pidof\n res = 
self._do_adb_command('shell pidof -s pidof')\n self._have_pidof = 'not found' not in res\n res = self._do_adb_command('shell killall')\n self._have_killall = 'not found' not in res\n\n # check for correct userid\n if self._is_root:\n res = self.su_cmd('id')\n # root should be 0\n # https://superuser.com/questions/626843/does-the-root-account-always-have-uid-gid-0/626845#626845\n self._is_root = 'uid=0' in res", "def check_subsystem_commands(self):\n self.communications.check_controls()\n self.__check_video()\n self.__check_picture()\n self.__check_ping()\n self.__check_motion()", "def is_gpu_available():\n ret = get_gpu_count() > 0\n if _HAS_PADDLE:\n import paddle\n if ret is True and not paddle.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n if _HAS_FLUID:\n from paddle import fluid\n if ret is True and not fluid.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n return ret", "def is_vrpd():\n cpu_type = platform.machine()\n\n if cpu_type in ['i686', 'i386', 'x86_64', 'armv7l']:\n return True\n\n return False", "def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0", "def is_linux():\n (sysname, nodename, release, version, machine) = os.uname()\n return sysname == 'Linux'", "def on_powerpc():\n return processor() == 'powerpc' or machine().startswith('ppc')", "def available(self):\n\t\treturn self.executable(self.path[0]) and \\\n\t\t\tself.executable(self.path[1])" ]
[ "0.7448283", "0.7354531", "0.69025147", "0.68859076", "0.6782263", "0.6772269", "0.67584515", "0.6750433", "0.6750433", "0.67330426", "0.6725543", "0.6633071", "0.6604187", "0.65855706", "0.65599746", "0.6514661", "0.65100974", "0.6507308", "0.650659", "0.6505434", "0.643089", "0.642224", "0.64124227", "0.64059955", "0.6380827", "0.6378887", "0.6324887", "0.6312129", "0.6283708", "0.62826306", "0.62652814", "0.62611413", "0.62326", "0.6224753", "0.6224689", "0.621491", "0.61942196", "0.6193636", "0.61927915", "0.61709535", "0.61540484", "0.6142406", "0.61370707", "0.6133077", "0.61139125", "0.61099213", "0.6108401", "0.6105029", "0.6070656", "0.60632163", "0.6062767", "0.60623455", "0.6061422", "0.604048", "0.6026308", "0.60173994", "0.60153794", "0.601168", "0.60050976", "0.60030985", "0.59939516", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5985922", "0.5982697", "0.5968078", "0.5963587", "0.5953623", "0.59339064", "0.59329927", "0.5924453", "0.5918684", "0.59161264", "0.591293", "0.5909134", "0.5908837", "0.5898613", "0.58971876", "0.5895011", "0.5884582", "0.5877904", "0.58466464", "0.5842154", "0.58199096", "0.5806713", "0.58037037", "0.5800918" ]
0.7654313
0
Checks whether all platforms are stopped or not
def all_is_stopped(self): return all(not p.running for p in self._platforms.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def emergency_stop(self):\r\n eprint(\"Emergency platforms stop\")\r\n stop_list = []\r\n for p in self._platforms:\r\n stop_list.append(self._platforms[p])\r\n\r\n success = True\r\n while len(stop_list) > 0: # NOTE: stop platforms in reverse order\r\n p = stop_list.pop(-1)\r\n vprint(\"Emergency stop for {}\".format(p))\r\n try:\r\n r = p._stop([])\r\n except Exception as e:\r\n success = False\r\n eprint(\"Exception occurred while stopping platform {} emergently: {}\".format(p, e))\r\n exprint()\r\n continue\r\n if not r.success:\r\n success = False\r\n return success", "def all_is_running(self):\r\n return all(p.running for p in self._platforms.values())", "def stopEngines():\n pass", "def platform_stop(self):\n self.platform.stop()", "def check_stop_flag(con):\n k, v = con.kv.get(\"service/rebootmgr/stop\")\n if v:\n return True\n return False", "def lysis(self) :\n self.kill()\n return True", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def should_stop(self):\n return self._cmaes.should_stop()", "def stop(self):\n # Cleanup platform first.\n self.cleanup()\n\n if self.init_lhost:\n self._lhost.stop()\n\n self.status = False # pylint: disable=attribute-defined-outside-init", "def check_device_state(self):", "def stop_all():\n subprocess.check_call(\n ['./run.py --down'], shell=True,\n cwd=orc8_docker_path,\n )\n subprocess.check_call(\n 'docker-compose down', shell=True,\n cwd=feg_docker_integ_test_path,\n )\n subprocess.check_call(\n 'vagrant halt magma', shell=True,\n cwd=agw_path,\n )", "def stopped(self):\r\n return self._stop.isSet()", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def stopped(self):\n return self.state == 'stop'", "def shooting(self):\r\n return not self.stopped", "def must_stop(self):\n global_stop = self.data_persister.exists(path=self.get_worker_path(), key=\"stop\", write_type=SavingType.txt)\n if global_stop:\n return True, \"global_stop\"\n\n worker_stop = self.data_persister.exists(\n path=self.get_worker_path(), key=str(self.worker_informations[\"id\"]) + \"_stop\", write_type=SavingType.txt\n )\n if worker_stop:\n return True, \"worker_stop\"\n\n if self._keyboard_funct() == 17:\n return True, \"ctrq_pressed\"\n\n return False, None", "def __bool__(self):\n return not self._stopped", "def __check_stop(self):\n if not self.__parent_thread.is_alive():\n global _iom_shutdown\n self.__logger.info(\"Parent thread ended. Stopping IOManager.\")\n _iom_shutdown = True\n self.__running = False\n\n if not self.__wrappers and not self.__disconnected_wrappers and time.time() > self.__empty_time:\n self.__logger.info(\"No IOWrappers registered. 
Stopping IOManager\")\n self.__running = False\n elif self.__wrappers or self.__disconnected_wrappers:\n self.__empty_time = time.time() + 30", "def stopped(self):\n return self.stop_event.is_set()", "def stop_all():\r\n motors.stop_all_motors()\r\n led.set_colour_solid(0)\r\n display.clear()", "def check_events():\n\tfor event in pygame.event.get():\n \tif event.type == pygame.QUIT:\n \t\tsys.exit()", "def check_off_screen(self):\n for bullet in self.bullets:\n if bullet.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\n self.bullets.remove(bullet)\n\n for target in self.targets:\n if target.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\n self.targets.remove(target)\n\n for cloud in self.clouds:\n if cloud.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\n self.clouds.remove(cloud)", "def stopped_check(self, timeout=None):", "def user_is_disconnected_from_all_devices():\n assert web_app.disconnect_from_device()", "def stopped(self):\n return self._stop_event.is_set()", "def stopped(self):\n return self._stop_event.is_set()", "def stopped(self):\n return self._stop_event.is_set()", "def stopped(self):\n return self._stop_event.is_set()", "def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()", "def check_main_stop(notifier):\n pass", "def is_running(self, platform):\r\n if platform not in self._platforms:\r\n raise ValueError(\"Platform {} is not registered\".format(platform))\r\n return self._platforms[platform].running", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def _thread_check_stop_event(self):\n self._require_controller_modes(['running_as_thread','running_as_blocking_call'])\n return self.thread.check_stop_event()", "def stopAll():\n \n # Get paired and connected devices\n pairedDevices, connectedDevices = getPairConDevices()\n \n print('\\nStoping bluetooth profiles\\n')\n \n if connectedDevices:\n \n # Power on bluetooth controller\n bluePoweronStdout = execCommand('bluetoothctl power on')\n \n for device in connectedDevices:\n\n # Disconnect bluetooth device\n blueDisconnectStdout = execCommand('bluetoothctl disconnect {}'.format(device[1]))\n\n if not 'Successful disconnected' in blueDisconnectStdout:\n print(blueDisconnectStdout)\n print('Is device connected?\\n')\n else:\n print('Device {} was successfully stopped\\n'.format(device[0]))\n \n return", "def stopped(self):\n return self._stopper.isSet()", "def clean_shutdown(self):\n if not self._done:\n rospy.logwarn('Aborting: Shutting down safely...')\n if any(self._arm_state['collide'].values()):\n while self._rs.state().enabled != False:\n [pub.publish(Empty()) for pub in self._disable_pub.values()]\n self._enable_pub.publish(False)\n self._tuck_rate.sleep()", "def is_stopped(self) -> bool:\n return self._stop_event.is_set()", "def _check_all_systems_ready(self):\n for r in self.robots:\n r.joints = None\n while r.joints is None and not rospy.is_shutdown():\n try:\n r.joints = rospy.wait_for_message(\n r.ns + '/joint_states', JointState, timeout=3.0)\n except:\n rospy.logerr(\"Current /joint_states not ready yet.\\n\\\n Do you spawn the robot and launch ros_control?\")\n try:\n r.model_index = rospy.wait_for_message('/gazebo/model_states', ModelStates, 3).name.index(r.ns[1:])\n except rospy.exceptions.ROSException:\n rospy.logerr(\"Robot model does not exist.\")\n\n # rospy.logdebug(\"ALL SYSTEMS READY\")\n return True", "def 
_check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")", "def _check_all_systems_ready(self):\n \n self._check_all_sensors_ready()\n #self._check_joint_states_ready()\n self._check_cmd_vel_pub()\n \n return True", "def stop(self):\n logging.debug(\"footprint/stop entered\")\n logging.info(\"Stopping cloud instances\")\n print \"Stopping machines\"\n for machine in self.machines:\n logging.debug(\"stopping %s\" % machine)\n server = self.machines[machine]\n server.stop()\n \n # monitor until all the machines are down\n active_machines = 1\n while active_machines:\n running = 0\n active_machines = 0\n for machine in self.machines:\n server = self.machines[machine]\n try:\n tmp = cs.servers.get(self.machines[machine].id)\n active_machines = 1\n running = running + 1 \n except novaclient.exceptions.NotFound:\n continue\n # if running == 0:\n # break\n time.sleep(10)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n logging.info(\"Stopping Networks\")\n print\n print \"Stopping networks\"\n \n for network in self.networks:\n logging.debug(\"stopping %s\" % str(network))\n n = self.networks[network]\n n.stop()\n \n while True:\n running = 0\n # print self.networks\n for network in self.networks:\n n = self.networks[network]\n\n try:\n tmp = cn.find(id=n.id)\n running = running + 1\n except pyrax.exceptions.NotFound:\n continue\n if running == 0:\n break\n time.sleep(1)\n sys.stdout.write(\".\")\n sys.stdout.flush()", "def _stop(self):\n return True", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def running(self):\n return not self._kill_event.is_set()", "def check_subsystem_commands(self):\n self.communications.check_controls()\n self.__check_video()\n self.__check_picture()\n self.__check_ping()\n self.__check_motion()", "def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def kill_all():\n compose_kill_all()", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def is_stop(self):\n return self.p_state._getvalue()['stop']", "async def get_stopped(self) -> bool:\n play_state = await self.get_play_state()\n return play_state == models.player.PlayState.Stopped", "def system_valid(self):\n return self.udev.devices_exist", "def stopall(pidevice, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' 
% type(pidevice).__name__)\n\n pidevice.StopAll(noraise=True)\n waitonready(pidevice, **kwargs) # there are controllers that need some time to halt all axes", "def shutdown_simulators():\n logging.info(\"Shutting down all simulators...\")\n\n try:\n subprocess.Popen(\n \"xcrun simctl shutdown all\",\n shell=True\n ).wait()\n\n except Exception as e:\n logging.error(\"Shutting down the simulators failed with error '{ERROR}'\".format(ERROR=e))\n return False\n\n logging.info(\"Simulators shut down!\")\n return True", "def stopped(self) -> bool:\n return self._stopped.is_set()", "def available_platforms(verbose=True):\n\n\n import simtk.openmm as mm\n\n platforms_available = []\n\n for ii in range(mm.Platform.getNumPlatforms()):\n platform_name = mm.Platform.getPlatform(ii).getName()\n platform = mm.Platform.getPlatformByName(platform_name)\n platform_speed = platform.getSpeed()\n platforms_available.append(platform_name)\n if verbose:\n print('Platform {} with speed {}'.format(platform_name,platform_speed))\n del(platform_name, platform, platform_speed)\n\n if verbose is False:\n return platforms_available", "def is_container_stopped(driver):\n return driver.find_element_by_css_selector(\".flex>.Stopped\").is_displayed()", "def test_conditions(self):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()", "def is_on(self):\n run_state = self._get_run_state()\n return STATE_DISHWASHER_POWER_OFF not in run_state", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def stop(self):\n return self.setup.stop", "def stop(self):\n res = {}\n for i in range(len(self)):\n if self.kernels[i].stop():\n res[i] = self.kernels[i].stop()\n else:\n return False\n return res", "def stop() -> None:", "def _platformix_stop(self, context, fake_reply): # TODO: Force parameter\r\n assert fake_reply is None, \"platformix_stop replies shouldn't be faked!\"\r\n\r\n stopping = self._worker.stopping # Store current stopping state\r\n need_stop = self._worker.stopping = self._worker.running or self._worker.starting\r\n self._worker.stopping = True # Set _stopping right in the beginning\r\n\r\n new_thread = False\r\n if not stopping and self._context is not None: # Break startup process if necessary\r\n self._reply_all(self._context[\"reply_to\"], proto_failure(\"interrupted by stop\"), None)\r\n if self._worker.starting:\r\n self._worker.starting = False\r\n self._worker.start_in_progress = False\r\n self._context = None\r\n if not stopping and not need_stop: # If not running and not starting - do nothing more\r\n self._worker.stopping = False\r\n self._reply(context, proto_success(\"already stopped\", \"state\"), None)\r\n return\r\n if stopping: # If were already stopping - update reply list\r\n if context not in self._context[\"reply_to\"]:\r\n new_thread = True\r\n self._context[\"reply_to\"].append(context)\r\n else: # Otherwise initiate context\r\n new_thread = True\r\n self._context = {\"action\": \"stop\", \"reply_to\": [context],\r\n \"waiting_for\": [], \"wait_ignore\": []}\r\n self._notify(context, \"received stop signal\")\r\n # TODO: do recursive stop? parent->childs? 
and call only root platforms stop?\r\n assert self._worker.stopping, \"At this point stopping should be True\"\r\n # Update waiting list\r\n # TODO: also wait those that are depends on this one\r\n self._context[\"waiting_for\"] = [w.name for w in self.host.subplatforms + self.host.depended\r\n if w.running is True or w.stopping is True\r\n and w.name not in self._context[\"wait_ignore\"]]\r\n\r\n # If there is some platforms to wait - notify about this\r\n if self.waiting_count > 0 and new_thread:\r\n self._worker.register_reply_handler(context,\r\n self._platformix_stop_reply_handler, [], {},\r\n timeout=self._worker.stop_max_wait, force=True)\r\n self._notify_all(self._context[\"reply_to\"], \"waiting\")\r\n # If no one left to wait for - do stop at last\r\n elif not self._worker.stop_in_progress and self.waiting_count == 0:\r\n for c in self._context[\"reply_to\"]:\r\n self._worker.unregister_reply_handler(c, True, {}, dont_check=True)\r\n self._worker.running = False\r\n self._worker.stop_in_progress = True\r\n self._notify_all(self._context[\"reply_to\"], \"stopping\")\r\n result = self._worker.stop(self._context[\"reply_to\"])\r\n reply_to = self._context[\"reply_to\"]\r\n self._context = None\r\n assert isinstance(result, ProtocolReply), \"Worker should return result as ProtocolReply instance\"\r\n if result.success:\r\n self._reply_all(reply_to, proto_success(None), None)\r\n else:\r\n self._reply_all(reply_to, result, None)", "def stop():", "def stop():", "def stop():", "def stop():", "async def sun_down(self) -> bool:\n return await self.AD.sched.sun_down()", "def stopAllMotors():\n return RoboCaller().call(\"stopAllMotors\", \"void\")", "def isstarted():", "def check_loss(self):\n return POKEMON in self.get_game()", "def test_conditions(self):\n if not CalculatorUtils.clear_calc(self.device):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()", "def is_running(program):\n return program in get_running()", "def is_stop(self) -> bool:\n return self.__stop", "def stopCond(self):\n\t\treturn False", "def shutdown(self):\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n self.running = False\n print('Good Bye')\n ev3.Sound.speak(\"Good Bye\")", "def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0", "def killAll(controller=False):", "def stop_check(self):\n pass", "def shutdown(self):\n self.running = False\n ev3.Leds.all_off()\n self.left_motor.stop()\n self.right_motor.stop()", "def check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.menu_running = False\n if event.key == pygame.K_RETURN:\n self.START_KEY = True\n if event.key == pygame.K_BACKSPACE:\n self.BACK_KEY = True \n if event.key == pygame.K_DOWN:\n self.DOWN_KEY = True\n if event.key == pygame.K_UP:\n self.UP_KEY = True", "def detect_infrared():\n try:\n count = 0\n while True:\n if GPIO.input(PIN_NO) == True:\n count += 1\n print('[+] Detected ' + str(count))\n output_sound()\n send_message()\n time.sleep(2)\n except Exception as e:\n GPIO.cleanup()", "def check(self, context):\r\n return context.config.stopAt is not None", "def all_services_running():\n return all(['RUNNING' in line or 'EXITED' in line\n for line in supervisor_status()])", "def stop(self):\n self.running = False\n 
self.clear_game_objects()\n print 'GAME STOPPED'", "def check_game_end(self):\n\n return any([i != 0 for i in self.board[0]])", "def __fullBlackBoxExit(self):\r\n\r\n if not core.FW_conf['blackbox'].isVideoRecorderAvailable():\r\n raise Exception('Exit: No video camera available. Check settings.')\r\n\r\n mcStart = core.FW_conf['blackbox'].getCountMotionFrames()\r\n mcAfter = None\r\n\r\n self.inIdle = False\r\n\r\n #exitmethods = ['single back','3 back presses', 'swipe', 'keylock']\r\n\r\n for i in range(5):\r\n\r\n# for method in exitmethods:\r\n import sys\r\n sys.path.append('..')\r\n from override.Exit import ExitOverrides\r\n exitOverrides = ExitOverrides(self.phone)\r\n exitMethods = [m for m in dir(exitOverrides) if m.startswith('EXIT')]\r\n debug.brf(exitMethods)\r\n\r\n for method in exitMethods:\r\n\r\n mcBefore = core.FW_conf['blackbox'].getCountMotionFrames()\r\n\r\n if self._isInIdle():\r\n continue\r\n\r\n self.phone.comment('Exit: Try %s to get idle..' % method)\r\n getattr(exitOverrides,method)()\r\n\r\n #if method == 'single back': # if in some application, this could close it\r\n # self.phone.select.long('KBD_KEY_BACK',doNotReport=True)\r\n\r\n #elif method == '3 back presses': # if in some application, this could close it\r\n # self.phone.select.long('KBD_KEY_BACK',doNotReport=True)\r\n # self.phone.delay(1000)\r\n # self.phone.select('KBD_KEY_BACK', doNotReport=True)\r\n # self.phone.delay(1000)\r\n # self.phone.select('KBD_KEY_BACK', doNotReport=True)\r\n\r\n #elif method == 'swipe':\r\n # self.__backToIdleWithSwipe() # close application with swipe\r\n\r\n #elif method == 'keylock':\r\n # self.phone.select('KBD_KEY_KEYLOCK_TOGGLE', doNotReport=True)\r\n\r\n #else:\r\n # self.phone.fail('Exit: %s is not handled' % method)\r\n\r\n self.phone.delay(2000, False)\r\n\r\n if self._isInIdle():\r\n continue\r\n\r\n mcAfter = core.FW_conf['blackbox'].getCountMotionFrames()\r\n if mcBefore != mcAfter:\r\n #self.phone.comment('Exit: %s caused motion' % method)\r\n\r\n if i >= 2: # keylock?\r\n self.phone.input('201426\\n', mode='pin', delayBetweenPresses=1000)\r\n self.phone.delay(3000)\r\n if self._isInIdle():\r\n continue\r\n\r\n elif i > 3 and mcBefore == mcAfter: # enough tries, crash note might be on the screen\r\n self.phone.comment('try closing crash note')\r\n self.phone.select((260, 490))\r\n self.phone.delay(3000)\r\n mcAfter = core.FW_conf['blackbox'].getCountMotionFrames() # if nothing happened, check ui freeze\r\n if mcBefore == mcAfter:\r\n self._checkUiFreeze()\r\n\r\n if mcAfter == None and self.inIdle: # nothing was done, check freeze if phone has freezed in idle state\r\n self._checkUiFreeze()\r\n return True\r\n\r\n elif self.inIdle:\r\n return True\r\n\r\n self._checkUiFreeze()\r\n return False", "def verify_all_threads_closed(allowable_threads=None):\n if allowable_threads is None:\n allowable_threads = []\n allowable_threads += ['pydevd.Writer',\n 'pydevd.Reader',\n 'pydevd.CommandThread',\n 'profiler.Reader',\n 'MainThread']\n\n open_threads = [t.name for t in threading.enumerate()\n if t.name not in allowable_threads]\n\n if len(open_threads) != 0:\n raise EnvironmentError(\n \"Not all threads were shut down! 
Currently running threads: \" +\n str(open_threads))", "def status(self, *args):\n for k, v in self.processers.items():\n if v:\n if v.poll() is None:\n status = 'running'\n else:\n status = 'dead'\n else:\n status = 'stoped'\n print '%s - %s' % (k, status)", "def emergency_stop(self):\r\n return self._arm.emergency_stop()", "def check4kill(self,threadPID = -1):\n #TODO: check in the kill log if my main or my thred PID are there.\n # In case True, kill all. /var/log/check_status/check_kills.log\n # kill $TOPPID\n # /var/log/check_status/check_off.log\n off_log = \"/var/log/check_status/check_off.log\"\n kill_log = \"/var/log/check_status/check_kills.log\"\n try:\n f = open (off_log, \"r\")\n l = f.read(self.conn.data_buf_size)\n while (l or self.status != Modem.Status.KILL):\n if l == \"poweroff\":\n self.status = Modem.Status.KILL\n l = f.read(self.conn.data_buf_size)\n f.close()\n except IOError:\n print off_log + \" not found\"\n try:\n f = open (kill_log, \"r\")\n l = f.read(self.conn.data_buf_size)\n while (l or self.status != Modem.Status.KILL):\n if (l == (\"kill \" + str(threadPID)) or \\\n l == (\"kill \" + str(self.mainPID))):\n self.status = Modem.Status.KILL\n l = f.read(self.conn.data_buf_size)\n f.close()\n except IOError:\n print kill_log + \" not found\"", "def stop_all(self):\n print(\"Stopping Clipper and all running models...\")\n with hide(\"output\", \"warnings\", \"running\"):\n self._execute_root(\"docker-compose stop\", warn_only=True)\n self._execute_root(\"docker stop $(docker ps -a -q)\", warn_only=True)\n self._execute_root(\"docker rm $(docker ps -a -q)\", warn_only=True)", "def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False", "def is_running(self):\n\t\treturn self in _running", "def _check_for_events(self):\n\n for event in pygame.event.get():\n # if user exits out of window during pause menu\n if event.type == pygame.QUIT:\n sys.exit()\n\n # if user presses escape to unpause\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.__pauseMenuActive = False\n\n # if exit game button is pressed\n if self.__exitGameButton.isButton_pressed_event_handler(event) == True:\n sys.exit()\n\n # if resume button is pressed\n if self.__resumeButton.isButton_pressed_event_handler(event) == True:\n self.__pauseMenuActive = False\n\n # if main menu button is pressed\n if self.__mainMenuButton.isButton_pressed_event_handler(event) == True:\n self.__pauseMenuActive = False\n self.__toMainMenu = True", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # if the exit button on screen is clicked close the program\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def shutting_down(self):\n return self._shutdown.is_set()" ]
[ "0.75852925", "0.7146642", "0.6602631", "0.65657175", "0.628147", "0.62783736", "0.61895704", "0.61452276", "0.60707706", "0.60059714", "0.5987051", "0.5946374", "0.5929576", "0.5919849", "0.5919849", "0.5917849", "0.58654445", "0.58415127", "0.582378", "0.58181137", "0.57904536", "0.57837963", "0.5751704", "0.5743883", "0.5736359", "0.5727888", "0.571863", "0.571863", "0.571863", "0.571863", "0.56977344", "0.5691368", "0.56854534", "0.567509", "0.56698877", "0.56671596", "0.5664803", "0.56563526", "0.5638323", "0.5635146", "0.5632464", "0.5630372", "0.5625811", "0.56211454", "0.5614558", "0.5601485", "0.5598462", "0.5596306", "0.557137", "0.55674416", "0.55553913", "0.5554962", "0.555448", "0.55433303", "0.5541579", "0.5532311", "0.55316913", "0.552836", "0.5517372", "0.5514315", "0.55099595", "0.5497108", "0.54901695", "0.5487454", "0.54826385", "0.5480151", "0.5474956", "0.5474956", "0.5474956", "0.5474956", "0.54695076", "0.5464701", "0.54627246", "0.54591405", "0.5454348", "0.54474807", "0.54442334", "0.544098", "0.54351836", "0.54316247", "0.54242456", "0.5419189", "0.54086906", "0.53948116", "0.5391797", "0.5386591", "0.5378037", "0.5376792", "0.53741556", "0.53715485", "0.53657585", "0.53648067", "0.53626466", "0.53534997", "0.5353246", "0.5352763", "0.5352727", "0.53506577", "0.53505516", "0.534634" ]
0.82884616
0
Registers a new platform (or at least tries to). If the new platform depends on platforms that have not been registered yet, its registration is deferred; registration continues once all the platforms it depends on have been registered.
def register_platform(self, factory, kind, parent=None, wait=None):
    self._try_register_platform(factory, kind, parent, wait)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _register_hardware_platform(\n hass: HomeAssistant, integration_domain: str, platform: HardwareProtocol\n) -> None:\n if integration_domain == DOMAIN:\n return\n if not hasattr(platform, \"async_info\"):\n raise HomeAssistantError(f\"Invalid hardware platform {platform}\")\n hass.data[DOMAIN][\"hardware_platform\"][integration_domain] = platform", "def _try_register_platform(self, factory, kind, parent, wait, awaiting=False):\r\n name = factory.name\r\n assert kind is not None, \"instance kind can't be None (instance name is {})\".format(name)\r\n\r\n if factory.name is None:\r\n factory.name = name = \"random_name\" # TODO: use GUID\r\n\r\n assert name not in self._platforms and (awaiting or name not in self._awaiting),\\\r\n \"encountered second platform with name {}\".format(name)\r\n\r\n # TODO: analyze args and update wait if there are references to other platforms\r\n assert wait is None or name not in wait, \"platform {} can't wait for self!\".format(name)\r\n\r\n # If all necessary parent and co-platforms are already created - finish registration of this one\r\n if (parent is None or parent in self._platforms) \\\r\n and (wait is None or all(w in self._platforms for w in wait)):\r\n np = factory.finish_registration()\r\n self._platforms[name] = np\r\n if parent is not None:\r\n assert np not in self._platforms[parent].subplatforms, \"Subplatform {} is already within \" \\\r\n \"parent's ({}) subplatforms list, \" \\\r\n \"but shouldn't be\".format(name, parent)\r\n np.parent = self._platforms[parent]\r\n self._platforms[parent].subplatforms.append(np)\r\n if wait is not None:\r\n for w in wait:\r\n assert np not in self._platforms[w].depended, \"Subplatform {} is already within \" \\\r\n \"depended's list of {}, \" \\\r\n \"but shouldn't be\".format(name, w)\r\n self._platforms[w].depended.append(np)\r\n if awaiting:\r\n del self._awaiting[name]\r\n self._check_awaiting()\r\n # Otherwise put it into waiting list\r\n else:\r\n self._awaiting[name] = {\r\n \"instance\": factory,\r\n \"kind\": kind,\r\n \"parent\": parent,\r\n \"wait\": wait}", "def add_platform(self, platform: KetraPlatformBase):\n self.platforms.append(platform)", "async def async_process_hardware_platforms(hass: HomeAssistant) -> None:\n hass.data[DOMAIN][\"hardware_platform\"] = {}\n\n await async_process_integration_platforms(hass, DOMAIN, _register_hardware_platform)", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def register_platform(self, address, serverkey=None, display_name=None):\n _log.info('Attempting registration of vcp at address: '\n '{} display_name: {}, serverkey: {}'.format(address,\n display_name,\n serverkey))\n parsed = urlparse(address)\n if 
parsed.scheme not in ('tcp', 'ipc'):\n raise ValueError(\n 'Only ipc and tpc addresses can be used in the '\n 'register_platform method.')\n try:\n connection = self._build_connection(address, serverkey)\n except gevent.Timeout:\n _log.error(\"Initial building of connection not found\")\n raise\n\n try:\n if connection is None:\n raise ValueError(\"Connection was not able to be found\")\n manager_key = connection.call('get_manager_key')\n except gevent.Timeout:\n _log.error(\"Couldn't retrieve managment key from platform\")\n raise\n\n try:\n if manager_key is not None:\n if manager_key == self.core.publickey:\n _log.debug('Platform is already managed and connected.')\n return\n else:\n _log.warn(\n 'Platform is registered with a different vc key.'\n 'This could be expected.')\n\n if parsed.scheme == 'tcp':\n self.core.publickey\n _log.debug(\n 'TCP calling manage. my serverkey: {}, my publickey: {}'.format(\n self._serverkey, self.core.publickey))\n pk = connection.call(\n 'manage', self._external_addresses[0], self._serverkey,\n self.core.publickey)\n else:\n pk = connection.call('manage', self.core.address)\n except gevent.Timeout:\n _log.error('RPC call to manage did not return in a timely manner.')\n raise\n # If we were successful in calling manage then we can add it to\n # our list of managed platforms.\n if pk is not None and len(pk) == 43:\n try:\n address_uuid = self._address_to_uuid.get(address)\n time_now = format_timestamp(get_aware_utc_now())\n\n if address_uuid is not None:\n _log.debug('Attempting to get instance id to reconfigure '\n 'the agent on the remote instance.')\n current_uuid = connection.call('get_instance_uuid')\n\n if current_uuid != address_uuid:\n _log.debug('Reconfiguring with new uuid. {}'.format(\n address_uuid\n ))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n if self._registered_platforms.get(address_uuid) is None:\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n else:\n address_uuid = str(uuid.uuid4())\n _log.debug(\"New platform with uuid: {}\".format(\n address_uuid))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n self._address_to_uuid[address] = address_uuid\n if display_name is None:\n display_name = address\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n self._platform_connections[address_uuid] = connection\n self._registered_platforms.sync()\n except gevent.Timeout:\n _log.error(\n 'Call to reconfigure did not return in a timely manner.')\n raise", "def add_platforms(project, env_spec_name, platforms):\n return _modify_platforms(project, env_spec_name, additions=platforms, removals=[])", "async def _register_system_health_platform(hass, integration_domain, platform):\n platform.async_register(hass, RegisterSystemHealth(hass, integration_domain))", "def register_platform_services(platform: entity_platform.EntityPlatform) -> None:\n platform.async_register_entity_service(\n SERVICE_ENABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_DISABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TOGGLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n 
SERVICE_CANCEL, ENTITY_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TIME_ADJUST, TIME_ADJUST_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_MANUAL_RUN, MANUAL_RUN_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_SUSPEND, SUSPEND_SCHEMA, async_entity_service_handler\n )", "async def reload_platform(self) -> None:", "def setup_platform(hass, config, add_devices, discovery_info=None):\n token = load_token(hass)\n \n if not token:\n request_app_setup(hass, config, add_devices, discovery_info)\n else:\n continue_setup_platform(hass, config, token, add_devices, discovery_info)", "def new_platform(self, id):\n p = Platform(self, id, [])\n self.platforms[id] = p\n return p", "def add_to_platform_start(\n self,\n hass: HomeAssistant,\n platform: EntityPlatform,\n parallel_updates: asyncio.Semaphore | None,\n ) -> None:\n super().add_to_platform_start(hass, platform, parallel_updates)\n\n # Bail out if the sensor doesn't have a unique_id or a device class\n if self.unique_id is None or self.device_class is None:\n return\n registry = er.async_get(self.hass)\n\n # Bail out if the entity is not yet registered\n if not (\n entity_id := registry.async_get_entity_id(\n platform.domain, platform.platform_name, self.unique_id\n )\n ):\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._sensor_option_unit_of_measurement = self._get_initial_suggested_unit()\n return\n\n registry_entry = registry.async_get(entity_id)\n assert registry_entry\n\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self.registry_entry = registry_entry\n self._async_read_entity_options()\n\n # If the sensor has 'unit_of_measurement' in its sensor options, the user has\n # overridden the unit.\n # If the sensor has 'sensor.private' in its entity options, it already has a\n # suggested_unit.\n registry_unit = registry_entry.unit_of_measurement\n if (\n (\n (sensor_options := registry_entry.options.get(DOMAIN))\n and CONF_UNIT_OF_MEASUREMENT in sensor_options\n )\n or f\"{DOMAIN}.private\" in registry_entry.options\n or self.unit_of_measurement == registry_unit\n ):\n return\n\n # Make sure we can convert the units\n if (\n (unit_converter := UNIT_CONVERTERS.get(self.device_class)) is None\n or registry_unit not in unit_converter.VALID_UNITS\n or self.unit_of_measurement not in unit_converter.VALID_UNITS\n ):\n return\n\n # Set suggested_unit_of_measurement to the old unit to enable automatic\n # conversion\n self.registry_entry = registry.async_update_entity_options(\n entity_id,\n f\"{DOMAIN}.private\",\n {\"suggested_unit_of_measurement\": registry_unit},\n )\n # Update _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._async_read_entity_options()", "def platform_start(self):\n self.platform.start()", "async def _async_setup_platform(\n opp: OpenPeerPower,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict],\n) -> None:\n if integration_platform not in opp.data:\n await async_setup_component(\n opp, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component = opp.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "async 
def _async_setup_platform(\n hass: HomeAssistant,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict[str, Any]],\n) -> None:\n if integration_platform not in hass.data:\n await async_setup_component(\n hass, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component: EntityComponent[Entity] = hass.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "def _check_awaiting(self):\r\n # TODO: check for wait loops\r\n for w in list(self._awaiting.values()):\r\n self._try_register_platform(w[\"instance\"], w[\"kind\"], w[\"parent\"], w[\"wait\"], awaiting=True)", "def add_to_platforms(\n self, data_store, platform_name, platform_type, nationality, privacy, change_id\n ):\n print(\"Ok, adding new platform.\")\n\n platform_name = prompt(\"Please enter a name: \", default=platform_name)\n trigraph = prompt(\n \"Please enter trigraph (optional): \", default=platform_name[:3]\n )\n quadgraph = prompt(\n \"Please enter quadgraph (optional): \", default=platform_name[:4]\n )\n pennant_number = prompt(\"Please enter pennant number (optional): \", default=\"\")\n\n # Choose Nationality\n if nationality:\n chosen_nationality = data_store.add_to_nationalities(nationality, change_id)\n else:\n chosen_nationality = self.resolve_nationality(\n data_store, platform_name, change_id\n )\n\n if chosen_nationality is None:\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )\n\n # Choose Platform Type\n if platform_type:\n chosen_platform_type = data_store.add_to_platform_types(\n platform_type, change_id\n )\n else:\n chosen_platform_type = self.resolve_platform_type(\n data_store, platform_name, change_id\n )\n\n if chosen_platform_type is None:\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )\n\n # Choose Privacy\n if privacy:\n chosen_privacy = data_store.add_to_privacies(privacy, change_id)\n else:\n chosen_privacy = self.resolve_privacy(data_store, change_id)\n\n if chosen_privacy is None:\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )\n\n print(\"-\" * 61)\n print(\"Input complete. 
About to create this platform:\")\n print(f\"Name: {platform_name}\")\n print(f\"Trigraph: {trigraph}\")\n print(f\"Quadgraph: {quadgraph}\")\n print(f\"Pennant Number: {pennant_number}\")\n print(f\"Nationality: {chosen_nationality.name}\")\n print(f\"Class: {chosen_platform_type.name}\")\n print(f\"Classification: {chosen_privacy.name}\")\n\n choice = create_menu(\n \"Create this platform?: \",\n [\"Yes\", \"No, make further edits\"],\n validate_method=is_valid,\n )\n\n if choice == str(1):\n return (\n platform_name,\n trigraph,\n quadgraph,\n pennant_number,\n chosen_platform_type,\n chosen_nationality,\n chosen_privacy,\n )\n elif choice == str(2):\n return self.add_to_platforms(\n data_store, platform_name, None, None, None, change_id\n )\n elif choice == \".\":\n print(\"-\" * 61, \"\\nReturning to the previous menu\\n\")\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )", "def setup_platform(opp, config, add_entities, discovery_info=None):\n\n for scene in pywink.get_scenes():\n _id = scene.object_id() + scene.name()\n if _id not in opp.data[DOMAIN][\"unique_ids\"]:\n add_entities([WinkScene(scene, opp)])", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "def setup_platform(hass, config, add_devices, discovery_info=None):\n thread1 = QQ(config[QQ_NUMBER])\n thread1.start()\n object_qq = Qqsensor(hass, QQ_NUMBER, thread1)\n add_devices([object_qq])", "def find_hardware(self, device_info=None):\n if os.name is not 'nt': # If not on a Windows system, just set up soundcard\n self.setup_soundcard()\n self.hardware.append('Soundcard')\n self.out_samplefreq = 44100\n else:\n if 'NIDAQ' in self.required_hardware and self.setup_nidaq(device_info):\n self.hardware.append('NIDAQ')\n if 'RP21' in self.required_hardware and self.setup_RP21('c:\\pystartle\\startle.rco'):\n self.hardware.append('RP21')\n if 'PA5' in self.required_hardware and self.setup_PA5():\n self.hardware.append('PA5')\n if 'RZ5D' in self.required_hardware and self.setup_RZ5D():\n self.hardware.append('RZ5D')", "async def async_setup_platform(hass, config, async_add_devices,\n discovery_info=None):\n return True", "async def async_setup_platforms(hass, config):\n tasks = [\n hass.helpers.discovery.async_load_platform(component, DOMAIN, {}, config)\n for component in INSTEON_COMPONENTS\n ]\n await asyncio.gather(*tasks)\n\n for address in devices:\n device = devices[address]\n platforms = get_device_platforms(device)\n if ON_OFF_EVENTS in platforms:\n add_on_off_event_device(hass, device)\n\n _LOGGER.debug(\"Insteon device count: %s\", len(devices))\n register_new_device_callback(hass, config)\n async_register_services(hass)\n\n # Cannot be done concurrently due to issues with the underlying protocol.\n for address in devices:\n await devices[address].async_status()\n await async_id_unknown_devices(hass.config.config_dir)", "def _update_device_registry(self):\n try:\n if not self._flag_updating_deviceregistry:\n _log.debug(\"Updating device registry\")\n self._flag_updating_deviceregistry = True\n self._sync_connected_platforms()\n unreachable = []\n # Loop over the connections to the registered agent platforms.\n for k, v in self._platform_connections.items():\n _log.debug('updating for {}'.format(k))\n # Only attempt update if we have a connection to 
the\n # agent instance.\n if v is not None:\n try:\n devices = v.agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM,\n 'get_devices').get(timeout=30)\n\n anon_devices = defaultdict(dict)\n\n # for each device returned from the query to\n # get_devices we need to anonymize the k1 in the\n # anon_devices dictionary.\n for k1, v1 in devices.items():\n _log.debug(\n \"before anon: {}, {}\".format(k1, v1))\n # now we need to do a search/replace on the\n # self._topic_list so that the devices are\n # known as the correct itme nin the tree.\n anon_topic = self._topic_replace_map[k1]\n\n # if replaced has not already been replaced\n if not anon_topic:\n anon_topic = k1\n for sr in self._topic_replace_list:\n anon_topic = anon_topic.replace(\n sr['from'], sr['to'])\n\n self._topic_replace_map[k1] = anon_topic\n\n anon_devices[anon_topic] = v1\n\n _log.debug('Anon devices are: {}'.format(\n anon_devices))\n\n self._registry.update_devices(k, anon_devices)\n except (gevent.Timeout, Unreachable) as e:\n _log.error(\n 'Error getting devices from platform {}'\n .format(k))\n unreachable.append(k)\n for k in unreachable:\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]\n\n finally:\n self._flag_updating_deviceregistry = False", "def continue_setup_platform(hass, config, token, add_devices, discovery_info=None):\n if \"trakt\" in _CONFIGURING:\n hass.components.configurator.request_done(_CONFIGURING.pop(\"trakt\"))\n \n add_devices([TraktMyShowCalendarSensor(hass, config, token)], True)", "def get_matched_platforms(self, platform):\n raise NotImplemented", "def remote_registerEngine(self, engineReference):", "async def async_setup_platform(hass, config, async_add_devices, _discovery_info=None):\n pass", "def test_new_platform_appear_after_create(self):\n\n #\n response = self.client.get(reverse('webapp:platform_list'))\n check_response_is_ok(self, response)\n platform_count = len(response.context['object_list'])\n\n platform = create_platform(\"testplatform\", \"https://siteurl.com\", self.user)\n platform.save()\n\n response = self.client.get(reverse('webapp:platform_list'))\n check_response_is_ok(self, response)\n self.assertIn(platform, response.context['object_list'])\n\n expected_count = platform_count + 1\n self.assertEqual(expected_count, len(response.context['object_list']))", "def _new_device(device):\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )", "def getPlatform(self, name):\r\n if self.platforms.has_key(name):\r\n return self.platforms[name]\r\n else:\r\n self.platforms[name] = Platform(name)\r\n return self.platforms[name]", "def setup_platform(hass, config: ConfigType,\n add_devices: Callable[[list], None], discovery_info=None):\n devices = []\n\n for node in hass.data[ISY994_NODES][DOMAIN]:\n _LOGGER.debug(\"Loading %s\", node.name)\n devices.append(ISYSensorDevice(node))\n\n for node in hass.data[ISY994_WEATHER]:\n devices.append(ISYWeatherDevice(node))\n\n add_devices(devices)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # Only act if loaded via mysensors by discovery event.\n # Otherwise gateway is not setup.\n if discovery_info is None:\n return\n\n for gateway in mysensors.GATEWAYS.values():\n # Define the S_TYPES and V_TYPES that the platform should handle as\n # states. 
Map them in a dict of lists.\n pres = gateway.const.Presentation\n set_req = gateway.const.SetReq\n map_sv_types = {\n pres.S_TEMP: [set_req.V_TEMP],\n pres.S_HUM: [set_req.V_HUM],\n pres.S_BARO: [set_req.V_PRESSURE, set_req.V_FORECAST],\n pres.S_WIND: [set_req.V_WIND, set_req.V_GUST],\n pres.S_RAIN: [set_req.V_RAIN, set_req.V_RAINRATE],\n pres.S_UV: [set_req.V_UV],\n pres.S_WEIGHT: [set_req.V_WEIGHT, set_req.V_IMPEDANCE],\n pres.S_POWER: [set_req.V_WATT, set_req.V_KWH],\n pres.S_DISTANCE: [set_req.V_DISTANCE],\n pres.S_LIGHT_LEVEL: [set_req.V_LIGHT_LEVEL],\n pres.S_IR: [set_req.V_IR_RECEIVE],\n pres.S_WATER: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_CUSTOM: [set_req.V_VAR1,\n set_req.V_VAR2,\n set_req.V_VAR3,\n set_req.V_VAR4,\n set_req.V_VAR5],\n pres.S_SCENE_CONTROLLER: [set_req.V_SCENE_ON,\n set_req.V_SCENE_OFF],\n }\n if float(gateway.protocol_version) < 1.5:\n map_sv_types.update({\n pres.S_AIR_QUALITY: [set_req.V_DUST_LEVEL],\n pres.S_DUST: [set_req.V_DUST_LEVEL],\n })\n if float(gateway.protocol_version) >= 1.5:\n map_sv_types.update({\n pres.S_COLOR_SENSOR: [set_req.V_RGB],\n pres.S_MULTIMETER: [set_req.V_VOLTAGE,\n set_req.V_CURRENT,\n set_req.V_IMPEDANCE],\n pres.S_SOUND: [set_req.V_LEVEL],\n pres.S_VIBRATION: [set_req.V_LEVEL],\n pres.S_MOISTURE: [set_req.V_LEVEL],\n pres.S_AIR_QUALITY: [set_req.V_LEVEL],\n pres.S_DUST: [set_req.V_LEVEL],\n })\n map_sv_types[pres.S_LIGHT_LEVEL].append(set_req.V_LEVEL)\n\n if float(gateway.protocol_version) >= 2.0:\n map_sv_types.update({\n pres.S_INFO: [set_req.V_TEXT],\n pres.S_GAS: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_GPS: [set_req.V_POSITION],\n pres.S_WATER_QUALITY: [set_req.V_TEMP, set_req.V_PH,\n set_req.V_ORP, set_req.V_EC]\n })\n map_sv_types[pres.S_CUSTOM].append(set_req.V_CUSTOM)\n map_sv_types[pres.S_POWER].extend(\n [set_req.V_VAR, set_req.V_VA, set_req.V_POWER_FACTOR])\n\n devices = {}\n gateway.platform_callbacks.append(mysensors.pf_callback_factory(\n map_sv_types, devices, add_devices, MySensorsSensor))", "async def _async_reconfig_platform(\n platform: EntityPlatform, platform_configs: list[dict[str, Any]]\n) -> None:\n await platform.async_reset()\n tasks = [platform.async_setup(p_config) for p_config in platform_configs]\n await asyncio.gather(*tasks)", "async def _async_reconfig_platform(\n platform: EntityPlatform, platform_configs: list[dict]\n) -> None:\n await platform.async_reset()\n tasks = [platform.async_setup(p_config) for p_config in platform_configs]\n await asyncio.gather(*tasks)", "async def async_setup_platform(\n hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None\n) -> None:\n if discovery_info is None:\n return\n\n broker = hass.data[DOMAIN][\"broker\"]\n\n async_add_entities(\n [\n GeniusSwitch(broker, z)\n for z in broker.client.zone_objs\n if z.data[\"type\"] == GH_ON_OFF_ZONE\n ]\n )", "def register_device():\n payload = request.get_json()\n return _register_device(payload)", "async def setup_platform(hass, platform: str, *,\n devices=None, scenes=None):\n hass.config.components.add(DOMAIN)\n config_entry = ConfigEntry(2, DOMAIN, \"Test\",\n {CONF_INSTALLED_APP_ID: str(uuid4())},\n SOURCE_USER, CONN_CLASS_CLOUD_PUSH)\n broker = DeviceBroker(hass, config_entry, Mock(), Mock(),\n devices or [], scenes or [])\n\n hass.data[DOMAIN] = {\n DATA_BROKERS: {\n config_entry.entry_id: broker\n }\n }\n await hass.config_entries.async_forward_entry_setup(\n config_entry, platform)\n await hass.async_block_till_done()\n return config_entry", "def set_platform(identifier):\n 
global _PLATFORM\n _PLATFORM = identifier", "def get_platform(self, name):\n if name in self.platforms:\n return name\n else:\n try:\n p = self.platforms['name'] = Platform.load(self, name)\n return p\n except IOError as e:\n print('Failed loading platform: {0}'.format(str(e)))\n return None", "def registerMachine(self, machine):\n if machine in self._machines:\n raise InternalError('Tried to register the same machine twice.')\n \n # Inform container manager about the CommID of the relay manager.\n msg = Message()\n msg.msgType = msgTypes.COMM_INFO\n msg.dest = machine.containerID\n msg.content = machine.relayID\n self._commManager.sendMessage(msg)\n \n # Order the new machine to connect to all the other existing machines.\n order = [(m.relayID, m.ip) for m in self._machines]\n \n if order:\n msg = Message()\n msg.msgType = msgTypes.CONNECT\n msg.dest = machine.relayID\n msg.content = order\n self._commManager.sendMessage(msg)\n \n self._machines.add(machine)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n binary_sensors = []\n for name in hass.data[DOMAIN]:\n if name in BINARY_SENSORS:\n binary_sensors.append(NextcloudBinarySensor(name))\n add_entities(binary_sensors, True)", "def register(self, filesystem_cls: Type[Filesystem]) -> None:\n with self._registration_lock:\n for scheme in filesystem_cls.SUPPORTED_SCHEMES:\n current_preferred = self._filesystems.get(scheme)\n if current_preferred is not None:\n # TODO: [LOW] Decide what to do here. Do we overwrite,\n # give out a warning or do we fail?\n pass\n self._filesystems[scheme] = filesystem_cls", "def async_register_device(self, device):\n self._entities.append(device)\n\n @asyncio.coroutine\n def async_shutdown(event):\n \"\"\"Stop ffmpeg process.\"\"\"\n yield from device.async_stop_ffmpeg()\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_STOP, async_shutdown)\n\n # start on startup\n if device.initial_state:\n @asyncio.coroutine\n def async_start(event):\n \"\"\"Start ffmpeg process.\"\"\"\n yield from device.async_start_ffmpeg()\n yield from device.async_update_ha_state()\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_START, async_start)", "def register(self):\n logger.info(\"Registering with Hub...\")\n register_complete = Event()\n\n def on_register_complete(result=None, error=None):\n # This could be a failed/successful registration result from the HUB\n # or a error from polling machine. 
Response should be given appropriately\n if result is not None:\n if result.status == \"assigned\":\n logger.info(\"Successfully registered with Hub\")\n else: # There be other statuses\n logger.error(\"Failed registering with Hub\")\n if error is not None: # This can only happen when the polling machine runs into error\n logger.info(error)\n\n register_complete.set()\n\n self._polling_machine.register(callback=on_register_complete)\n\n register_complete.wait()", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def unregister_platform(self, name, recursive=False):\r\n if name in dict(self._platforms):\r\n self.unregister_platform_instance(self._platforms[name], recursive)", "def setup_platform(hass, config, add_devices, discovery_info=None) -> None:\n friendly_name = config.get(CONF_FRIENDLY_NAME)\n mac_addr = config.get(CONF_MAC)\n add_devices([Switchmate(mac_addr, friendly_name)], True)", "def register(self):\n\n registerables = []\n registerables.append(swf.Domain(name=self.domain))\n registerables.append(swf.WorkflowType(\n domain=self.domain,\n name=self.task_list,\n version=self.version,\n task_list=self.task_list))\n\n for current_activity in self.activities:\n registerables.append(\n swf.ActivityType(\n domain=self.domain,\n name=current_activity.name,\n version=self.version,\n task_list=current_activity.task_list))\n\n for swf_entity in registerables:\n try:\n swf_entity.register()\n except (SWFDomainAlreadyExistsError, SWFTypeAlreadyExistsError):\n print(\n swf_entity.__class__.__name__, swf_entity.name,\n 'already exists')", "def platform(aliased=0, terse=0):\n result = _platform_cache.get((aliased, terse), None)\n if result is not None:\n return result\n\n # Get uname information and then apply platform specific cosmetics\n # to it...\n system, node, release, version, machine, processor = uname()\n if machine == processor:\n processor = ''\n if aliased:\n system, release, version = system_alias(system, release, version)\n\n if True: ###\n # Generic handler\n if terse:\n platform = _platform(system, release)\n else:\n bits, linkage = architecture() ###\n platform = _platform(system, release, machine,\n processor, bits, linkage)\n\n _platform_cache[(aliased, terse)] = platform\n return platform", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n monitored_conditions = config[CONF_MONITORED_CONDITIONS]\n\n api = GoogleWifiAPI(host, monitored_conditions)\n entities = [\n GoogleWifiSensor(api, name, description)\n for description in SENSOR_TYPES\n if description.key in monitored_conditions\n ]\n add_entities(entities, True)", "async def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n\n if discovery_info is None:\n _LOGGER.warning(\n \"To use this you need to configure the 'docker_monitor' component\")\n return\n\n host_name = discovery_info[CONF_NAME]\n api = hass.data[DOMAIN][host_name]\n\n switches = [ContainerSwitch(host_name, api, name)\n for name in discovery_info[CONF_CONTAINERS].keys()\n if discovery_info[CONF_CONTAINERS][name][CONF_CONTAINER_SWITCH]]\n\n if switches:\n async_add_entities(switches)\n else:\n _LOGGER.info(\"No containers setup\")", "def get_platforms():\n # Get all CL platforms\n platforms = cl.get_platforms()\n for p in platforms:\n # Not a safe way\n p.__class__ = Platform\n p.devices = p.get_devices()\n for d in p.devices:\n 
d.__class__ = Device\n return platforms", "async def async_discover(discovery_info: DiscoveryInfo) -> None:\n mysensors.setup_mysensors_platform(\n hass,\n DOMAIN,\n discovery_info,\n MySensorsSensor,\n async_add_entities=async_add_entities,\n )", "async def async_setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n\n entities = []\n for entity in discovery_info[CONF_CLIMATES]:\n hub: ModbusHub = get_hub(hass, discovery_info[CONF_NAME])\n entities.append(ModbusThermostat(hub, entity))\n\n async_add_entities(entities)", "def platform(self) -> Platform:\n _args: list[Arg] = []\n _ctx = self._select(\"platform\", _args)\n return _ctx.execute_sync(Platform)", "def create_platforms(plat_map):\n platform_group = set()\n for plat in plat_map:\n platform_group.add(Platform([(plat[1] + 0.5) * TILE_DIM, (plat[0] + 0.5)\n * TILE_DIM, plat[2], plat[3]]))\n return platform_group", "def async_register_os_in_dev_reg(\n entry_id: str, dev_reg: dr.DeviceRegistry, os_dict: dict[str, Any]\n) -> None:\n params = DeviceInfo(\n identifiers={(DOMAIN, \"OS\")},\n manufacturer=\"Home Assistant\",\n model=SupervisorEntityModel.OS,\n sw_version=os_dict[ATTR_VERSION],\n name=\"Home Assistant Operating System\",\n entry_type=dr.DeviceEntryType.SERVICE,\n )\n dev_reg.async_get_or_create(config_entry_id=entry_id, **params)", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def set_platform(self, platform_dict):\n if not os.path.exists(self.file_path):\n print(\"netCDF file does not exist, exiting without saving Platform group...\")\n elif self.format == '.nc':\n with netCDF4.Dataset(self.file_path, 'a', format='NETCDF4') as ncfile:\n plat = ncfile.createGroup('Platform')\n [plat.setncattr(k, v) for k, v in platform_dict.items()]\n elif self.format == '.zarr' and not self.append_zarr: # Do not save platform if appending\n zarrfile = zarr.open(self.file_path, mode='a')\n plat = zarrfile.create_group('Platform')\n for k, v in platform_dict.items():\n plat.attrs[k] = v", "def get_platforms(self):\n _log.debug(\"Passing platforms back: {}\".format(\n self._registered_platforms.keys()))\n return self._registered_platforms.values()", "def Platforms():\n return platforms", "async def async_setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n registry = er.async_get(hass)\n wrapped_switch = registry.async_get(config[CONF_ENTITY_ID])\n unique_id = wrapped_switch.unique_id if wrapped_switch else None\n\n async_add_entities(\n [\n LightSwitch(\n config[CONF_NAME],\n config[CONF_ENTITY_ID],\n unique_id,\n )\n ]\n )", "def has_platform(self, platform_name):\n return platform_name in self.platform_list", "def setPlatform(self):\n\t\treturn None", "async def load_platform(self, discovery_info, platform):\n load_info = list(self.filter_discovered(discovery_info, platform))\n if load_info:\n await discovery.async_load_platform(\n self.hass,\n platform,\n DOMAIN,\n load_info,\n self.config)\n\n \"\"\"Return entity id of all objects, even skipped\"\"\"\n return [\n '{}.{}'.format(platform, x[CONF_OBJECTID])\n for x in discovery_info\n ]", "def filesys_register(self, message, 
socket):\n addr = message.payload[\"addr\"]\n ping_addr = message.payload[\"ping_addr\"]\n answer_message = Message()\n if addr not in self._filesystem:\n self._filesystem.append((addr, ping_addr))\n answer_message.set_message(\"OK\", {\"info\": \"Registration completed\"})\n else:\n answer_message.set_message(\"OK\", {\"info\": \"You were already registered\"})\n socket.send_string(str(answer_message))", "def register(self):\n raise NotImplementedError()", "def setup_platform(hass, config, add_entities, discovery_info=None):\n hass.data.setdefault(DOMAIN, {})\n\n def service_set_override(call):\n \"\"\"Handle the service call.\"\"\"\n entity_id = call.data.get(ATTR_ENTITY_ID)\n temperature = call.data.get(ATTR_TEMPERATURE)\n until = call.data.get(\n ATTR_UNTIL, (datetime.now() + timedelta(hours=1)).strftime(\"%H:%M\")\n )\n target_devices = [\n dev for dev in hass.data[DOMAIN][\"entities\"] if dev.entity_id in entity_id\n ]\n target_device: WarmupThermostat\n for target_device in target_devices:\n target_device.set_override(temperature, until)\n target_device.schedule_update_ha_state(True)\n\n _LOGGER.info(\"Setting up platform for Warmup component\")\n user = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n warmup = Warmup4IE(user, password)\n\n if warmup is None or not warmup.setup_finished:\n raise PlatformNotReady\n warmup_client = WarmupClient(warmup)\n to_add = []\n for device in warmup.get_all_devices().values():\n to_add.append(WarmupThermostat(hass, device, warmup_client))\n add_entities(to_add)\n hass.data[DOMAIN][\"entities\"] = to_add\n hass.services.register(DOMAIN, \"set_override\", service_set_override)\n return True", "def platform(cls, name):\n\n for platform in cls.all_platforms:\n if platform.name == name:\n return platform\n\n raise UserException(\"'{0}' is not a supported platform\".format(name))", "def add_entity(device: SmartPlug, async_add_entities):\n # Attempt to get the sysinfo. 
If it fails, it will raise an\n # exception that is caught by async_add_entities_retry which\n # will try again later.\n device.get_sysinfo()\n\n async_add_entities([SmartPlugSwitch(device)], update_before_add=True)", "async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):\n if discovery_info is None:\n return\n\n name = discovery_info[habitica.CONF_NAME]\n sensors = discovery_info[habitica.CONF_SENSORS]\n sensor_data = HabitipyData(hass.data[habitica.DOMAIN][name])\n await sensor_data.update()\n async_add_devices(\n [HabitipySensor(name, sensor, sensor_data) for sensor in sensors], True\n )", "async def async_setup_platform(hass, config, async_add_entities,\n discovery_info=None):\n\n name = config.get(CONF_NAME)\n\n devices = []\n unique_id = None\n\n device = XiaomiPlugGenericSwitch(name, unique_id, hass, config)\n devices.append(device)\n\n async_add_entities(devices, update_before_add=True)", "def test_add_default_modules_compatible_platforms(mock_get_dm, require_empty_database):\n # Create compatible base modules.\n mmd = load_mmd(read_staged_data(\"platform\"))\n for stream in [\"f27\", \"f28\"]:\n mmd = mmd.copy(\"platform\", stream)\n\n # Set the virtual stream to \"fedora\" to make these base modules compatible.\n xmd = mmd.get_xmd()\n xmd[\"mbs\"][\"virtual_streams\"] = [\"fedora\"]\n xmd[\"mbs\"][\"use_default_modules\"] = True\n mmd.set_xmd(xmd)\n import_mmd(db_session, mmd)\n\n mmd = load_mmd(read_staged_data(\"formatted_testmodule.yaml\"))\n xmd_brs = mmd.get_xmd()[\"mbs\"][\"buildrequires\"]\n assert set(xmd_brs.keys()) == {\"platform\"}\n\n platform_f27 = ModuleBuild.get_build_from_nsvc(\n db_session, \"platform\", \"f27\", \"3\", \"00000000\")\n assert platform_f27\n\n # Create python default module which requires platform:f27 and therefore cannot be used\n # as default module for platform:f28.\n dependencies = [\n {\"requires\": {\"platform\": [\"f27\"]},\n \"buildrequires\": {\"platform\": [\"f27\"]}}]\n make_module_in_db(\"python:3:12345:1\", base_module=platform_f27, dependencies=dependencies)\n\n # Create nodejs default module which requries any platform stream and therefore can be used\n # as default module for platform:f28.\n dependencies[0][\"requires\"][\"platform\"] = []\n make_module_in_db(\"nodejs:11:2345:2\", base_module=platform_f27, dependencies=dependencies)\n db_session.commit()\n\n mock_get_dm.return_value = {\n \"nodejs\": \"11\",\n \"python\": \"3\",\n \"ruby\": \"2.6\",\n }\n defaults_added = default_modules.add_default_modules(mmd)\n # Make sure that the default modules were added. 
ruby:2.6 will be ignored since it's not in\n # the database\n assert set(mmd.get_xmd()[\"mbs\"][\"buildrequires\"].keys()) == {\"nodejs\", \"platform\"}\n mock_get_dm.assert_called_once_with(\n \"f28\",\n \"https://pagure.io/releng/fedora-module-defaults.git\",\n )\n assert defaults_added is True", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])", "def test_setup_platform_name(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.test_local\")", "def add_command_to_implementing_integrations_mapping(self):\n command_name_to_implemented_integration_map = (\n self.create_command_to_implemented_integration_map()\n )\n\n playbooks_list = self.id_set[\"playbooks\"]\n for playbook_dict in playbooks_list:\n playbook_name = list(playbook_dict.keys())[0]\n playbook_data = playbook_dict[playbook_name]\n commands_to_integration = playbook_data.get(\"command_to_integration\", {})\n for command in commands_to_integration:\n if commands_to_integration[command]:\n # only apply this logic when there is no specific brand\n continue\n is_command_implemented_in_integration = (\n command in command_name_to_implemented_integration_map\n )\n if (\n is_command_implemented_in_integration\n and command not in GENERIC_COMMANDS_NAMES\n ):\n implemented_integration = (\n command_name_to_implemented_integration_map[command]\n )\n commands_to_integration[command] = implemented_integration", "def claim_specific_hardware(cls, vendor_id, product_id):\n cls._create_mpdev_key_if_missing()\n cls.execute([\"-n\", \"-i\", \"-d\", cls._get_hardware_id(vendor_id, product_id)],\n not is_windows_2008())", "async def async_setup_platform(\n hass: HomeAssistantType,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n):\n\n component_config = hass.data[DOMAIN][DOMAIN_CONFIG]\n\n source_entity = config[CONF_ENTITY_ID]\n source_entity_domain, source_object_id = split_entity_id(source_entity)\n\n entity_registry = await er.async_get_registry(hass)\n entity_entry = entity_registry.async_get(source_entity)\n\n unique_id = None\n if entity_entry:\n source_entity_name = entity_entry.name or entity_entry.original_name\n source_entity_domain = entity_entry.domain\n unique_id = entity_entry.unique_id\n else:\n source_entity_name = source_object_id.replace(\"_\", \" \")\n\n entity_state = hass.states.get(source_entity)\n if entity_state:\n source_entity_name = entity_state.name\n\n capabilities = entity_entry.capabilities if entity_entry else []\n source_entity = SourceEntity(\n unique_id,\n source_object_id,\n source_entity,\n source_entity_name,\n source_entity_domain,\n capabilities,\n )\n\n try:\n power_sensor = await create_power_sensor(\n hass, entity_entry, config, component_config, source_entity\n )\n except (ModelNotSupported, StrategyConfigurationError) as err:\n pass\n\n entities_to_add = [power_sensor]\n\n should_create_energy_sensor = 
component_config.get(CONF_CREATE_ENERGY_SENSORS)\n if CONF_CREATE_ENERGY_SENSOR in config:\n should_create_energy_sensor = config.get(CONF_CREATE_ENERGY_SENSOR)\n\n if should_create_energy_sensor:\n energy_sensor = await create_energy_sensor(\n hass, component_config, config, power_sensor, source_entity\n )\n entities_to_add.append(energy_sensor)\n\n if component_config.get(CONF_CREATE_UTILITY_METERS):\n meter_types = component_config.get(CONF_UTILITY_METER_TYPES)\n for meter_type in meter_types:\n entities_to_add.append(\n create_utility_meter_sensor(energy_sensor, meter_type)\n )\n\n async_add_entities(entities_to_add)", "def platform(self, platform):\n # type: (string_types) -> None\n\n if platform is not None:\n if not isinstance(platform, string_types):\n raise TypeError(\"Invalid type for `platform`, type has to be `string_types`\")\n\n self._platform = platform", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def setup_platform(\n hass: HomeAssistant,\n config: Dict,\n add_devices: Callable,\n discovery_info: Optional[Dict] = None,\n) -> None:\n havdalah = config[HAVDALAH_MINUTES]\n candle_light = config[CANDLE_LIGHT_MINUTES]\n cities = config[GEONAMES]\n cities_list = cities.split(\",\")\n\n add_devices(\n [\n ShabbatTimes(\n hass,\n city,\n \"Shabbat Times {}\".format(city.replace(\"-\", \"_\")),\n havdalah,\n candle_light,\n )\n for city in cities_list\n ]\n )", "async def async_setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n time_zone = dt_util.get_time_zone(config[CONF_TIME_ZONE])\n async_add_entities(\n [\n WorldClockSensor(\n time_zone,\n config[CONF_NAME],\n config[CONF_TIME_FORMAT],\n )\n ],\n True,\n )", "def platform_init(self):\n if isinstance(self.imu, MockImuController) or isinstance(self.pwm_controller, MockPWMController):\n print(\"Mock components detected, creating mock antenna controller\")\n platform = MockPlatformController(self.azimuth_servo, self.elevation_servo, self.imu)\n else:\n print(\"Initializing PIDAntennaController class\")\n platform = PIDPlatformController(\n self.azimuth_servo,\n self.elevation_servo,\n self.imu,\n pid_output_limits=self.pid_config.get(\"output_limits\"),\n pid_frequency=self.pid_config.get(\"period\"),\n p=self.pid_config.get(\"p\"),\n i=self.pid_config.get(\"i\"),\n d=self.pid_config.get(\"d\")\n )\n \n self.platform = platform\n\n if not isinstance(self.gps, MockGPSController):\n self.gps_update_loop = GPSLocationController(self.gps)\n self.gps_update_loop.start()\n else:\n self.gps_update_loop = None\n \n return platform", "def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n if self.generation < 1:\n raise ValidationError(\"Generation must be greater than 0.\")\n\n for platform in Platform.objects.filter(family=self.family):\n if platform == self:\n continue\n if platform.generation == self.generation:\n raise ValidationError(\"Cannot have duplicate generations in family.\")\n\n super(Platform, self).save(force_insert, force_update, using, update_fields)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n if discovery_info is None:\n return\n\n devices = []\n for vin, datastore in hass.data[DATA_LEAF].items():\n _LOGGER.debug(\"Adding binary_sensors for vin=%s\", vin)\n devices.append(LeafPluggedInSensor(datastore))\n devices.append(LeafChargingSensor(datastore))\n\n 
add_entities(devices, True)", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def register(self):\n if self.registered:\n return\n\n config = current_app.config.get('TERMINAL_CONFIGS', {})\n apps = config.get('apps', [])\n\n for app in apps:\n cls, mod = app.rsplit('.', maxsplit=1)\n imported = import_module(cls)\n instance = getattr(imported, mod)()\n\n if getattr(instance, 'name', None) is None:\n continue\n\n if getattr(instance, 'hidden', False):\n self.hidden[getattr(instance, 'name')] = instance\n else:\n self.apps[getattr(instance, 'name')] = instance\n\n self.__set_apps_aliases(getattr(instance, 'name'), getattr(instance, 'aliases'))\n\n self.registered = True", "async def async_setup_legacy(\n self,\n hass: HomeAssistant,\n tracker: DeviceTracker,\n discovery_info: dict[str, Any] | None = None,\n ) -> None:\n assert self.type == PLATFORM_TYPE_LEGACY\n full_name = f\"{DOMAIN}.{self.name}\"\n LOGGER.info(\"Setting up %s\", full_name)\n with async_start_setup(hass, [full_name]):\n try:\n scanner = None\n setup: bool | None = None\n if hasattr(self.platform, \"async_get_scanner\"):\n scanner = await self.platform.async_get_scanner(\n hass, {DOMAIN: self.config}\n )\n elif hasattr(self.platform, \"get_scanner\"):\n scanner = await hass.async_add_executor_job(\n self.platform.get_scanner,\n hass,\n {DOMAIN: self.config},\n )\n elif hasattr(self.platform, \"async_setup_scanner\"):\n setup = await self.platform.async_setup_scanner(\n hass, self.config, tracker.async_see, discovery_info\n )\n elif hasattr(self.platform, \"setup_scanner\"):\n setup = await hass.async_add_executor_job(\n self.platform.setup_scanner,\n hass,\n self.config,\n tracker.see,\n discovery_info,\n )\n else:\n raise HomeAssistantError(\"Invalid legacy device_tracker platform.\")\n\n if scanner is not None:\n async_setup_scanner_platform(\n hass, self.config, scanner, tracker.async_see, self.type\n )\n\n if not setup and scanner is None:\n LOGGER.error(\n \"Error setting up platform %s %s\", self.type, self.name\n )\n return\n\n hass.config.components.add(full_name)\n\n except Exception: # pylint: disable=broad-except\n LOGGER.exception(\n \"Error setting up platform %s %s\", self.type, self.name\n )", "def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n pin = config.get(CONF_PIN)\n\n add_devices([ProgtimeSwitch(mac, pin, name)])", "async def async_setup_platform(hass: HomeAssistantType,\n config: ConfigType,\n async_add_entities,\n discovery_info=None) -> None:\n _LOGGER.info(\"Startup Youjia platform configuration.\")\n\n if (discovery_info is not None and config is None) or len(config) == 0:\n config = discovery_info\n\n if config is None:\n return\n\n if discovery_info is None:\n return\n\n if config['names'] is None:\n return\n\n for index, name in config['names'].items():\n entry_id = \"{0}{1:0>2}\".format(config['entity_id'], index)\n _LOGGER.info(\"Adding brightness light {} of {} into HA.\".format(name, entry_id))\n async_add_entities([YoujiaX160(name,\n entry_id,\n config['entity_id'],\n index,\n config['total_solts'],\n config['host_name']\n )], True)\n if config['auto'] is True:\n thread = threading.Thread(target=auto_checking_switch_state,\n args=(get_host(config['host_name']), config['entity_id']))\n thread.daemon = True\n SWITCH_STATUS_CHECKING_THREAD[config['name']] = thread\n thread.start()", "def evaluate_for_platform(self, **kwargs):\n macro = Macro(self.identifier, self.args, 
self.value)\n kwargs['platform'].define(self.identifier.as_str(), macro)\n return False", "def register(self, predictor):\n assert self.round == 0, \"Simulation is already running: \" + \\\n \"Predictors can not be added any more!\"\n#würde \"register\" abbrechen, wenn self.round > 0 ist,\n# dient nur der Programmsicherheit\n\n if isinstance(predictor, MetaInductivist):\n self.miList.append(predictor)\n else:\n self.non_miList.append(predictor)\n self.favN[predictor.name] = 0\n self.absucc[predictor.name] = 0\n self.nonultdeceivers.append(predictor)\n self.deceiveCount[predictor.name] = 0\n self.deceiveState[predictor.name] = False\n \n predictor.registeredBy(self)\n #print str(predictor)", "def register(self, data=None):\n returnvalue = False\n try:\n module = Module.get_by_name(data['name'])\n\n except DoesNotExist:\n module = Module.add(name=data['name'], queue=data['queue'])\n for rpc in data['rpc']:\n argslist = []\n if \"args\" in rpc:\n argslist = [(arg['name'],\n arg['type'],\n arg.get('optional', False),\n arg.get('descr', None)) for arg in rpc['args']]\n ModuleRPC.add(module, rpc['key'], rpc['type'], argslist)\n else:\n self.logger.debug(\"Sending sensors to module\")\n sensors = Sensor.get_by_module(module)\n module.active = True\n module.save()\n sensorlist = []\n for sensor in sensors:\n sensordict = SensorArg.get_dict(sensor)\n sensordict['rpc'] = sensor.modulerpc.key\n sensorlist.append(sensordict)\n return sensorlist", "def setup_platform(hass, config, add_entities, discovery_info=None):\n pass", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()" ]
[ "0.71603966", "0.7015166", "0.6779148", "0.65924436", "0.6380313", "0.6261706", "0.6097711", "0.5934751", "0.58920646", "0.5801316", "0.5635861", "0.56350684", "0.5605743", "0.55982506", "0.5503871", "0.5495337", "0.5468214", "0.54491985", "0.5402476", "0.536305", "0.5344545", "0.5318322", "0.52866143", "0.526513", "0.5237042", "0.5219462", "0.5202575", "0.5197274", "0.5189287", "0.51855505", "0.5173318", "0.5160491", "0.51602787", "0.51523036", "0.51408935", "0.51358855", "0.51323986", "0.513069", "0.5128181", "0.51192826", "0.5092178", "0.5090035", "0.5085675", "0.508513", "0.50558585", "0.50549406", "0.5044966", "0.5034115", "0.50305116", "0.5030343", "0.4996837", "0.49947813", "0.49614656", "0.49503133", "0.49503037", "0.49403664", "0.49389377", "0.49368107", "0.49294454", "0.49265853", "0.49244988", "0.4919109", "0.4919109", "0.4914637", "0.49133337", "0.4905524", "0.48907596", "0.4882664", "0.4864841", "0.48601854", "0.48460928", "0.4845195", "0.4838718", "0.4836147", "0.48238757", "0.48216918", "0.48185155", "0.4818148", "0.48179808", "0.48144138", "0.48127532", "0.48121634", "0.48102856", "0.4809686", "0.48082665", "0.47990304", "0.4790241", "0.47895712", "0.47894406", "0.47861376", "0.4785595", "0.47798038", "0.47789657", "0.4778587", "0.47762233", "0.47733596", "0.47707108", "0.4762487", "0.47506636", "0.47455296" ]
0.7204478
0
Worker method that actually registers the platform
def _try_register_platform(self, factory, kind, parent, wait, awaiting=False): name = factory.name assert kind is not None, "instance kind can't be None (instance name is {})".format(name) if factory.name is None: factory.name = name = "random_name" # TODO: use GUID assert name not in self._platforms and (awaiting or name not in self._awaiting),\ "encountered second platform with name {}".format(name) # TODO: analyze args and update wait if there are references to other platforms assert wait is None or name not in wait, "platform {} can't wait for self!".format(name) # If all necessary parent and co-platforms are already created - finish registration of this one if (parent is None or parent in self._platforms) \ and (wait is None or all(w in self._platforms for w in wait)): np = factory.finish_registration() self._platforms[name] = np if parent is not None: assert np not in self._platforms[parent].subplatforms, "Subplatform {} is already within " \ "parent's ({}) subplatforms list, " \ "but shouldn't be".format(name, parent) np.parent = self._platforms[parent] self._platforms[parent].subplatforms.append(np) if wait is not None: for w in wait: assert np not in self._platforms[w].depended, "Subplatform {} is already within " \ "depended's list of {}, " \ "but shouldn't be".format(name, w) self._platforms[w].depended.append(np) if awaiting: del self._awaiting[name] self._check_awaiting() # Otherwise put it into waiting list else: self._awaiting[name] = { "instance": factory, "kind": kind, "parent": parent, "wait": wait}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_process_hardware_platforms(hass: HomeAssistant) -> None:\n hass.data[DOMAIN][\"hardware_platform\"] = {}\n\n await async_process_integration_platforms(hass, DOMAIN, _register_hardware_platform)", "async def _register_hardware_platform(\n hass: HomeAssistant, integration_domain: str, platform: HardwareProtocol\n) -> None:\n if integration_domain == DOMAIN:\n return\n if not hasattr(platform, \"async_info\"):\n raise HomeAssistantError(f\"Invalid hardware platform {platform}\")\n hass.data[DOMAIN][\"hardware_platform\"][integration_domain] = platform", "async def reload_platform(self) -> None:", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def _init_hardware(self):\n return", "def _register(self, comm, handler):", "def _platform_compatible():\r\n raise NotImplementedError", "def register_platform(self, factory, kind, parent=None, wait=None):\r\n self._try_register_platform(factory, kind, parent, wait)", "def platform_start(self):\n self.platform.start()", "def remote_registerEngine(self, engineReference):", "async def _register_system_health_platform(hass, integration_domain, platform):\n platform.async_register(hass, RegisterSystemHealth(hass, integration_domain))", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def register_platform(self, address, serverkey=None, display_name=None):\n _log.info('Attempting registration of vcp at address: '\n '{} display_name: {}, serverkey: {}'.format(address,\n display_name,\n serverkey))\n parsed = urlparse(address)\n if parsed.scheme not in ('tcp', 'ipc'):\n raise ValueError(\n 'Only ipc and tpc addresses can be used in the '\n 'register_platform method.')\n try:\n connection = self._build_connection(address, serverkey)\n except gevent.Timeout:\n _log.error(\"Initial building of connection not found\")\n raise\n\n try:\n if connection is None:\n raise ValueError(\"Connection was not able to be found\")\n manager_key = connection.call('get_manager_key')\n except gevent.Timeout:\n _log.error(\"Couldn't retrieve managment key from platform\")\n raise\n\n try:\n if manager_key is not None:\n if manager_key == self.core.publickey:\n _log.debug('Platform is already managed and connected.')\n return\n else:\n _log.warn(\n 'Platform is registered with a different vc key.'\n 'This could be expected.')\n\n if parsed.scheme == 'tcp':\n self.core.publickey\n _log.debug(\n 'TCP calling manage. 
my serverkey: {}, my publickey: {}'.format(\n self._serverkey, self.core.publickey))\n pk = connection.call(\n 'manage', self._external_addresses[0], self._serverkey,\n self.core.publickey)\n else:\n pk = connection.call('manage', self.core.address)\n except gevent.Timeout:\n _log.error('RPC call to manage did not return in a timely manner.')\n raise\n # If we were successful in calling manage then we can add it to\n # our list of managed platforms.\n if pk is not None and len(pk) == 43:\n try:\n address_uuid = self._address_to_uuid.get(address)\n time_now = format_timestamp(get_aware_utc_now())\n\n if address_uuid is not None:\n _log.debug('Attempting to get instance id to reconfigure '\n 'the agent on the remote instance.')\n current_uuid = connection.call('get_instance_uuid')\n\n if current_uuid != address_uuid:\n _log.debug('Reconfiguring with new uuid. {}'.format(\n address_uuid\n ))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n if self._registered_platforms.get(address_uuid) is None:\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n else:\n address_uuid = str(uuid.uuid4())\n _log.debug(\"New platform with uuid: {}\".format(\n address_uuid))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n self._address_to_uuid[address] = address_uuid\n if display_name is None:\n display_name = address\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n self._platform_connections[address_uuid] = connection\n self._registered_platforms.sync()\n except gevent.Timeout:\n _log.error(\n 'Call to reconfigure did not return in a timely manner.')\n raise", "def regs() -> None:", "def register_worker(self):\n raise Exception('not implemented')", "def run(self, registry):", "def hardware(*args, brdType: bool=True, cpuType: bool=True, graphicsType: bool=True, megaHertz:\n bool=True, numProcessors: bool=True, **kwargs)->AnyStr:\n pass", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def register(self):\n raise NotImplementedError()", "def _check_awaiting(self):\r\n # TODO: check for wait loops\r\n for w in list(self._awaiting.values()):\r\n self._try_register_platform(w[\"instance\"], w[\"kind\"], w[\"parent\"], w[\"wait\"], awaiting=True)", "def usefulFunction():\n# I think the uname platform is a func. 
for findout out the information of the computer\n print(platform.uname())", "def _post_init(self):\n if WIN:\n self._find_devices_win()\n elif MAC:\n self._find_devices_mac()\n else:\n self._find_devices()\n self._update_all_devices()\n if NIX:\n self._find_leds()", "def find_hardware(self, device_info=None):\n if os.name is not 'nt': # If not on a Windows system, just set up soundcard\n self.setup_soundcard()\n self.hardware.append('Soundcard')\n self.out_samplefreq = 44100\n else:\n if 'NIDAQ' in self.required_hardware and self.setup_nidaq(device_info):\n self.hardware.append('NIDAQ')\n if 'RP21' in self.required_hardware and self.setup_RP21('c:\\pystartle\\startle.rco'):\n self.hardware.append('RP21')\n if 'PA5' in self.required_hardware and self.setup_PA5():\n self.hardware.append('PA5')\n if 'RZ5D' in self.required_hardware and self.setup_RZ5D():\n self.hardware.append('RZ5D')", "def port_maker(self, platform):\n raise NotImplementedError()", "def load_devices():", "async def async_setup(self):\n dev_reg = await device_registry.async_get_registry(self.hass)\n model_type = self.device.settings[\"device\"][\"type\"]\n dev_reg.async_get_or_create(\n config_entry_id=self.entry.entry_id,\n name=self.name,\n connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},\n # This is duplicate but otherwise via_device can't work\n identifiers={(DOMAIN, self.mac)},\n manufacturer=\"Shelly\",\n model=aioshelly.MODEL_NAMES.get(model_type, model_type),\n sw_version=self.device.settings[\"fw\"],\n )", "def install_platform_tweaks(self, worker):\n if self.app.IS_OSX:\n self.osx_proxy_detection_workaround()\n\n # Install signal handler so SIGHUP restarts the worker.\n if not self._isatty:\n # only install HUP handler if detached from terminal,\n # so closing the terminal window doesn't restart celeryd\n # into the background.\n if self.app.IS_OSX:\n # OS X can't exec from a process using threads.\n # See http://github.com/celery/celery/issues#issue/152\n install_HUP_not_supported_handler(worker)\n else:\n install_worker_restart_handler(worker)\n install_worker_term_handler(worker)\n install_worker_term_hard_handler(worker)\n install_worker_int_handler(worker)\n install_cry_handler()\n install_rdb_handler()", "def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool", "def on_register(cls):", "def setPlatform(self):\n\t\treturn None", "def set_platform(identifier):\n global _PLATFORM\n _PLATFORM = identifier", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def test_register_cloud(self):\n pass", "def register(cls, L):\r\n ...", "def run():\r\n plugin_ins = EmPluginCgwshDeviceControl()\r\n plugin_ins.merge_cgwsh_device()", "async def async_setup_legacy(\n self,\n hass: HomeAssistant,\n tracker: DeviceTracker,\n discovery_info: dict[str, Any] | None = None,\n ) -> None:\n assert self.type == PLATFORM_TYPE_LEGACY\n full_name = f\"{DOMAIN}.{self.name}\"\n LOGGER.info(\"Setting up %s\", full_name)\n with async_start_setup(hass, [full_name]):\n try:\n scanner = None\n setup: bool | None = None\n if hasattr(self.platform, \"async_get_scanner\"):\n scanner = await self.platform.async_get_scanner(\n hass, {DOMAIN: self.config}\n )\n elif hasattr(self.platform, \"get_scanner\"):\n scanner = await hass.async_add_executor_job(\n self.platform.get_scanner,\n hass,\n {DOMAIN: self.config},\n )\n elif 
hasattr(self.platform, \"async_setup_scanner\"):\n setup = await self.platform.async_setup_scanner(\n hass, self.config, tracker.async_see, discovery_info\n )\n elif hasattr(self.platform, \"setup_scanner\"):\n setup = await hass.async_add_executor_job(\n self.platform.setup_scanner,\n hass,\n self.config,\n tracker.see,\n discovery_info,\n )\n else:\n raise HomeAssistantError(\"Invalid legacy device_tracker platform.\")\n\n if scanner is not None:\n async_setup_scanner_platform(\n hass, self.config, scanner, tracker.async_see, self.type\n )\n\n if not setup and scanner is None:\n LOGGER.error(\n \"Error setting up platform %s %s\", self.type, self.name\n )\n return\n\n hass.config.components.add(full_name)\n\n except Exception: # pylint: disable=broad-except\n LOGGER.exception(\n \"Error setting up platform %s %s\", self.type, self.name\n )", "def __init__(self):\n thisType = type(self)\n if not thisType._initialized:\n thisType._initialized = True\n self._embedded_device_registry = {}\n self._root_device_registry = {}\n self._service_registry = {}\n self._scan_for_device_extensions_under_code_container(dynamic_extensions)\n self._scan_for_device_extensions_under_code_container(standard_extensions)\n self._scan_for_service_extensions_under_code_container(dynamic_extensions)\n self._scan_for_service_extensions_under_code_container(standard_extensions)\n return", "def register(blk):\n pass", "async def async_setup(self):\n self._unsub_stop = self.hass.bus.async_listen(\n EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop\n )\n dev_reg = await device_registry.async_get_registry(self.hass)\n model_type = self.device.settings[\"device\"][\"type\"]\n dev_reg.async_get_or_create(\n config_entry_id=self.entry.entry_id,\n name=self.name,\n connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},\n # This is duplicate but otherwise via_device can't work\n identifiers={(DOMAIN, self.mac)},\n manufacturer=\"Shelly\",\n model=aioshelly.MODEL_NAMES.get(model_type, model_type),\n sw_version=self.device.settings[\"fw\"],\n )", "def test_get_node_hardware_fast(self):\n pass", "def consul_register(self):\n self.log.debug(\"consul-register\")\n self.consul.agent.service.register(\n self.svc_name,\n address=self.this_host,\n check=consulate.models.agent.Check(\n name=\"qemu-process\",\n args=[\n \"/bin/sh\",\n \"-c\",\n \"test -e /proc/$(< /run/qemu.{}.pid )/mem || exit 2\".format(\n self.name\n ),\n ],\n interval=\"5s\",\n ),\n )", "def load_device():", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "async def async_setup_platforms(hass, config):\n tasks = [\n hass.helpers.discovery.async_load_platform(component, DOMAIN, {}, config)\n for component in INSTEON_COMPONENTS\n ]\n await asyncio.gather(*tasks)\n\n for address in devices:\n device = devices[address]\n platforms = get_device_platforms(device)\n if ON_OFF_EVENTS in platforms:\n add_on_off_event_device(hass, device)\n\n _LOGGER.debug(\"Insteon device count: %s\", len(devices))\n register_new_device_callback(hass, config)\n async_register_services(hass)\n\n # Cannot be done concurrently due to issues with the underlying protocol.\n for address in devices:\n await devices[address].async_status()\n await async_id_unknown_devices(hass.config.config_dir)", "def 
_check_all_systems_ready(self):\n raise NotImplementedError()", "def add_platform(self, platform: KetraPlatformBase):\n self.platforms.append(platform)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n binary_sensors = []\n for name in hass.data[DOMAIN]:\n if name in BINARY_SENSORS:\n binary_sensors.append(NextcloudBinarySensor(name))\n add_entities(binary_sensors, True)", "def register(self):\n self._register_dockyard()\n self._register_docker()", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def setupRegistrationForFeat(self, wait_for_execute = True):\n\t\tfor condition in ['WMM']:\n\t\t\tfor run in self.conditionDict[condition]:\n\t\t\t\t\n\t\t\t\tfeat_directory = self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf','sgtf'], extension='.feat')\n\t\t\t\t\n\n\t\t\t\ttry:\n\t\t\t\t\tos.mkdir(os.path.join(feat_directory,'reg'))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\n\t\t\t\tif not os.path.isdir(self.stageFolder(stage = 'processed/mri/reg/feat/')):\n\t\t\t\t\tself.registerSession(prepare_register = True, bb = False, MNI = True)\n\t\t\n\t\t\t\tos.system('cp ' + self.stageFolder(stage = 'processed/mri/reg/feat/') + '* ' + os.path.join(feat_directory,'reg/') )\n\t\t\t\tif wait_for_execute:\n\t\t\t\t\tos.system('featregapply ' + feat_directory )\n\t\t\t\telse:\n\t\t\t\t\tos.system('featregapply ' + feat_directory + ' & ' )", "def setup(hass: HomeAssistant, base_config: ConfigType) -> bool: # noqa: C901\n\n hass.data[DOMAIN] = {}\n\n # Parse configuration into a dict of device name to physical address\n # represented as a list of four elements.\n device_aliases = {}\n devices = base_config[DOMAIN].get(CONF_DEVICES, {})\n _LOGGER.debug(\"Parsing config %s\", devices)\n device_aliases.update(parse_mapping(devices))\n _LOGGER.debug(\"Parsed devices: %s\", device_aliases)\n\n platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)\n\n loop = (\n # Create own thread if more than 1 CPU\n hass.loop\n if multiprocessing.cpu_count() < 2\n else None\n )\n host = base_config[DOMAIN].get(CONF_HOST)\n display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)\n if host:\n adapter = TcpAdapter(host, name=display_name, activate_source=False)\n else:\n adapter = CecAdapter(name=display_name[:12], activate_source=False)\n hdmi_network = HDMINetwork(adapter, loop=loop)\n\n def _adapter_watchdog(now=None):\n _LOGGER.debug(\"Reached _adapter_watchdog\")\n event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n if not adapter.initialized:\n _LOGGER.info(\"Adapter not initialized; Trying to restart\")\n hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)\n adapter.init()\n\n _adapter_watchdog_job = HassJob(_adapter_watchdog, cancel_on_shutdown=True)\n\n @callback\n def _async_initialized_callback(*_: Any):\n \"\"\"Add watchdog on initialization.\"\"\"\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n\n hdmi_network.set_initialized_callback(_async_initialized_callback)\n\n def _volume(call: ServiceCall) -> None:\n \"\"\"Increase/decrease volume and mute/unmute system.\"\"\"\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n 
elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)\n\n def _process_volume(cmd, att):\n if isinstance(att, (str,)):\n att = att.strip()\n if att == CMD_PRESS:\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n elif att == CMD_RELEASE:\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n else:\n att = 1 if att == \"\" else int(att)\n for _ in range(0, att):\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n\n def _tx(call: ServiceCall) -> None:\n \"\"\"Send CEC command.\"\"\"\n data = call.data\n if ATTR_RAW in data:\n command = CecCommand(data[ATTR_RAW])\n else:\n src = data.get(ATTR_SRC, ADDR_UNREGISTERED)\n dst = data.get(ATTR_DST, ADDR_BROADCAST)\n if ATTR_CMD in data:\n cmd = data[ATTR_CMD]\n else:\n _LOGGER.error(\"Attribute 'cmd' is missing\")\n return\n if ATTR_ATT in data:\n if isinstance(data[ATTR_ATT], (list,)):\n att = data[ATTR_ATT]\n else:\n att = reduce(lambda x, y: f\"{x}:{y:x}\", data[ATTR_ATT])\n else:\n att = \"\"\n command = CecCommand(cmd, dst, src, att)\n hdmi_network.send_command(command)\n\n def _standby(call: ServiceCall) -> None:\n hdmi_network.standby()\n\n def _power_on(call: ServiceCall) -> None:\n hdmi_network.power_on()\n\n def _select_device(call: ServiceCall) -> None:\n \"\"\"Select the active device.\"\"\"\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)\n\n def _update(call: ServiceCall) -> None:\n \"\"\"Update if device update is needed.\n\n Called by service, requests CEC network to update data.\n \"\"\"\n hdmi_network.scan()\n\n def _new_device(device):\n \"\"\"Handle new devices which are detected by HDMI network.\"\"\"\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )\n\n def _shutdown(call):\n hdmi_network.stop()\n\n def _start_cec(callback_event):\n \"\"\"Register services and start HDMI network to watch for devices.\"\"\"\n hass.services.register(\n DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA\n )\n hass.services.register(\n DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA\n )\n hass.services.register(\n DOMAIN,\n SERVICE_UPDATE_DEVICES,\n _update,\n schema=SERVICE_UPDATE_DEVICES_SCHEMA,\n )\n hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)\n hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)\n hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, 
_select_device)\n\n hdmi_network.set_new_device_callback(_new_device)\n hdmi_network.start()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)\n return True", "def getPlatform(self):\n\t\treturn None", "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def test_os_processor(self):\n self.assertEqual(self.settings.OS_PROCESSOR, platform.processor())", "def register():\n \n global _registered\n if not _registered:\n _registered = True\n sys.path_hooks.insert(0, VFSImporter)", "def on_register(self, data: Any = None):\n raise NotImplementedError", "def init():\n\n global registry, fsk_router, ook_router\n\n radio.init()\n OpenThings.init(Devices.CRYPT_PID)\n\n fsk_router = Registry.Router(\"fsk\")\n\n #OOK receive not yet written\n #It will be used to be able to learn codes from Energenie legacy hand remotes\n ##ook_router = Registry.Router(\"ook\")\n\n registry = Registry.DeviceRegistry()\n registry.set_fsk_router(fsk_router)\n ##registry.set_ook_router(ook_router\n\n path = os.path.join(sys.path[0], registry.DEFAULT_FILENAME)\n if os.path.isfile(path):\n registry.load_from(path)\n print(\"loaded registry from file\")\n registry.list()\n fsk_router.list()\n\n # Default discovery mode, unless changed by app\n ##discovery_none()\n ##discovery_auto()\n ##discovery_ask(ask)\n discovery_autojoin()\n ##discovery_askjoin(ask)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()", "def Platforms():\n return platforms", "def evaluate_for_platform(self, **kwargs):\n macro = Macro(self.identifier, self.args, self.value)\n kwargs['platform'].define(self.identifier.as_str(), macro)\n return False", "def test_get_node_hardware(self):\n pass", "def register_platform_services(platform: entity_platform.EntityPlatform) -> None:\n platform.async_register_entity_service(\n SERVICE_ENABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_DISABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TOGGLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_CANCEL, ENTITY_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TIME_ADJUST, TIME_ADJUST_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_MANUAL_RUN, MANUAL_RUN_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_SUSPEND, SUSPEND_SCHEMA, async_entity_service_handler\n )", "def setup_platform(hass, config, add_devices, discovery_info=None):\n token = load_token(hass)\n \n if not token:\n request_app_setup(hass, config, add_devices, discovery_info)\n else:\n continue_setup_platform(hass, config, token, add_devices, discovery_info)", "def platform():\n return ['linux']", "def get_matched_platforms(self, 
platform):\n raise NotImplemented", "async def _async_setup_platform(\n opp: OpenPeerPower,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict],\n) -> None:\n if integration_platform not in opp.data:\n await async_setup_component(\n opp, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component = opp.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()", "def platform():\n return \"micaz\"", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def setup_platform(hass, config, add_devices, discovery_info=None):\n thread1 = QQ(config[QQ_NUMBER])\n thread1.start()\n object_qq = Qqsensor(hass, QQ_NUMBER, thread1)\n add_devices([object_qq])", "def _update_device_registry(self):\n try:\n if not self._flag_updating_deviceregistry:\n _log.debug(\"Updating device registry\")\n self._flag_updating_deviceregistry = True\n self._sync_connected_platforms()\n unreachable = []\n # Loop over the connections to the registered agent platforms.\n for k, v in self._platform_connections.items():\n _log.debug('updating for {}'.format(k))\n # Only attempt update if we have a connection to the\n # agent instance.\n if v is not None:\n try:\n devices = v.agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM,\n 'get_devices').get(timeout=30)\n\n anon_devices = defaultdict(dict)\n\n # for each device returned from the query to\n # get_devices we need to anonymize the k1 in the\n # anon_devices dictionary.\n for k1, v1 in devices.items():\n _log.debug(\n \"before anon: {}, {}\".format(k1, v1))\n # now we need to do a search/replace on the\n # self._topic_list so that the devices are\n # known as the correct itme nin the tree.\n anon_topic = self._topic_replace_map[k1]\n\n # if replaced has not already been replaced\n if not anon_topic:\n anon_topic = k1\n for sr in self._topic_replace_list:\n anon_topic = anon_topic.replace(\n sr['from'], sr['to'])\n\n self._topic_replace_map[k1] = anon_topic\n\n anon_devices[anon_topic] = v1\n\n _log.debug('Anon devices are: {}'.format(\n anon_devices))\n\n self._registry.update_devices(k, anon_devices)\n except (gevent.Timeout, Unreachable) as e:\n _log.error(\n 'Error getting devices from platform {}'\n .format(k))\n unreachable.append(k)\n for k in unreachable:\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]\n\n finally:\n self._flag_updating_deviceregistry = False", "def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]", "def platform_map(op):\n while True:\n found = platform_map_iterate(op)\n if not found:\n break\n op = found\n return op", "def _register(cls):\n 
clsid_path = \"Software\\\\Classes\\\\CLSID\\\\\" + cls._reg_clsid_\n progid_path = \"Software\\\\Classes\\\\\" + cls._reg_progid_\n spec = cls.__module__ + \".\" + cls.__name__\n\n # register the class information\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\ProgID\", win32con.REG_SZ, cls._reg_progid_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\PythonCOM\", win32con.REG_SZ, spec)\n hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\InprocServer32\")\n win32api.RegSetValueEx(hkey, None, None, win32con.REG_SZ, pythoncom.__file__)\n win32api.RegSetValueEx(hkey, \"ThreadingModel\", None, win32con.REG_SZ, \"Both\")\n\n # and add the progid\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path + \"\\\\CLSID\", win32con.REG_SZ, cls._reg_clsid_)", "def Setup(self):\n raise NotImplementedError(\n 'No runtime setup defined for %s' % self.__class__.__name__)", "def test_setup_platform_name(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.test_local\")", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # Only act if loaded via mysensors by discovery event.\n # Otherwise gateway is not setup.\n if discovery_info is None:\n return\n\n for gateway in mysensors.GATEWAYS.values():\n # Define the S_TYPES and V_TYPES that the platform should handle as\n # states. 
Map them in a dict of lists.\n pres = gateway.const.Presentation\n set_req = gateway.const.SetReq\n map_sv_types = {\n pres.S_TEMP: [set_req.V_TEMP],\n pres.S_HUM: [set_req.V_HUM],\n pres.S_BARO: [set_req.V_PRESSURE, set_req.V_FORECAST],\n pres.S_WIND: [set_req.V_WIND, set_req.V_GUST],\n pres.S_RAIN: [set_req.V_RAIN, set_req.V_RAINRATE],\n pres.S_UV: [set_req.V_UV],\n pres.S_WEIGHT: [set_req.V_WEIGHT, set_req.V_IMPEDANCE],\n pres.S_POWER: [set_req.V_WATT, set_req.V_KWH],\n pres.S_DISTANCE: [set_req.V_DISTANCE],\n pres.S_LIGHT_LEVEL: [set_req.V_LIGHT_LEVEL],\n pres.S_IR: [set_req.V_IR_RECEIVE],\n pres.S_WATER: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_CUSTOM: [set_req.V_VAR1,\n set_req.V_VAR2,\n set_req.V_VAR3,\n set_req.V_VAR4,\n set_req.V_VAR5],\n pres.S_SCENE_CONTROLLER: [set_req.V_SCENE_ON,\n set_req.V_SCENE_OFF],\n }\n if float(gateway.protocol_version) < 1.5:\n map_sv_types.update({\n pres.S_AIR_QUALITY: [set_req.V_DUST_LEVEL],\n pres.S_DUST: [set_req.V_DUST_LEVEL],\n })\n if float(gateway.protocol_version) >= 1.5:\n map_sv_types.update({\n pres.S_COLOR_SENSOR: [set_req.V_RGB],\n pres.S_MULTIMETER: [set_req.V_VOLTAGE,\n set_req.V_CURRENT,\n set_req.V_IMPEDANCE],\n pres.S_SOUND: [set_req.V_LEVEL],\n pres.S_VIBRATION: [set_req.V_LEVEL],\n pres.S_MOISTURE: [set_req.V_LEVEL],\n pres.S_AIR_QUALITY: [set_req.V_LEVEL],\n pres.S_DUST: [set_req.V_LEVEL],\n })\n map_sv_types[pres.S_LIGHT_LEVEL].append(set_req.V_LEVEL)\n\n if float(gateway.protocol_version) >= 2.0:\n map_sv_types.update({\n pres.S_INFO: [set_req.V_TEXT],\n pres.S_GAS: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_GPS: [set_req.V_POSITION],\n pres.S_WATER_QUALITY: [set_req.V_TEMP, set_req.V_PH,\n set_req.V_ORP, set_req.V_EC]\n })\n map_sv_types[pres.S_CUSTOM].append(set_req.V_CUSTOM)\n map_sv_types[pres.S_POWER].extend(\n [set_req.V_VAR, set_req.V_VA, set_req.V_POWER_FACTOR])\n\n devices = {}\n gateway.platform_callbacks.append(mysensors.pf_callback_factory(\n map_sv_types, devices, add_devices, MySensorsSensor))", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. 
Name {} and uri {} \".format(self._name,uri))", "def setup_platform(hass, config, add_devices, discovery_info=None) -> None:\n friendly_name = config.get(CONF_FRIENDLY_NAME)\n mac_addr = config.get(CONF_MAC)\n add_devices([Switchmate(mac_addr, friendly_name)], True)", "async def async_setup(self) -> None:\n await self.hass.async_add_executor_job(self._setup)\n\n # set already known devices to away instead of unavailable\n device_registry = dr.async_get(self.hass)\n devices = dr.async_entries_for_config_entry(device_registry, self.entry_id)\n for device_entry in devices:\n if device_entry.via_device_id is None:\n continue # do not add the router itself\n\n device_mac = dict(device_entry.connections).get(dr.CONNECTION_NETWORK_MAC)\n self.devices[device_mac] = {\n \"mac\": device_mac,\n \"name\": device_entry.name,\n \"active\": False,\n \"last_seen\": dt_util.utcnow() - timedelta(days=365),\n \"device_model\": None,\n \"device_type\": None,\n \"type\": None,\n \"link_rate\": None,\n \"signal\": None,\n \"ip\": None,\n }\n\n await self.async_update_device_trackers()\n self.entry.async_on_unload(\n async_track_time_interval(\n self.hass, self.async_update_device_trackers, SCAN_INTERVAL\n )\n )\n\n async_dispatcher_send(self.hass, self.signal_device_new)", "def device_reg(hass):\n return mock_device_registry(hass)", "def add_to_platform_start(\n self,\n hass: HomeAssistant,\n platform: EntityPlatform,\n parallel_updates: asyncio.Semaphore | None,\n ) -> None:\n super().add_to_platform_start(hass, platform, parallel_updates)\n\n # Bail out if the sensor doesn't have a unique_id or a device class\n if self.unique_id is None or self.device_class is None:\n return\n registry = er.async_get(self.hass)\n\n # Bail out if the entity is not yet registered\n if not (\n entity_id := registry.async_get_entity_id(\n platform.domain, platform.platform_name, self.unique_id\n )\n ):\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._sensor_option_unit_of_measurement = self._get_initial_suggested_unit()\n return\n\n registry_entry = registry.async_get(entity_id)\n assert registry_entry\n\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self.registry_entry = registry_entry\n self._async_read_entity_options()\n\n # If the sensor has 'unit_of_measurement' in its sensor options, the user has\n # overridden the unit.\n # If the sensor has 'sensor.private' in its entity options, it already has a\n # suggested_unit.\n registry_unit = registry_entry.unit_of_measurement\n if (\n (\n (sensor_options := registry_entry.options.get(DOMAIN))\n and CONF_UNIT_OF_MEASUREMENT in sensor_options\n )\n or f\"{DOMAIN}.private\" in registry_entry.options\n or self.unit_of_measurement == registry_unit\n ):\n return\n\n # Make sure we can convert the units\n if (\n (unit_converter := UNIT_CONVERTERS.get(self.device_class)) is None\n or registry_unit not in unit_converter.VALID_UNITS\n or self.unit_of_measurement not in unit_converter.VALID_UNITS\n ):\n return\n\n # Set suggested_unit_of_measurement to the old unit to enable automatic\n # conversion\n self.registry_entry = registry.async_update_entity_options(\n entity_id,\n f\"{DOMAIN}.private\",\n {\"suggested_unit_of_measurement\": registry_unit},\n )\n # Update _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._async_read_entity_options()", "def _post_init(self):\n self._led_type_code = 
self.manager.get_typecode('LED')\n self.device_path = os.path.realpath(os.path.join(self.path, 'device'))\n if '::' in self.name:\n chardev, code_name = self.name.split('::')\n if code_name in self.manager.codes['LED_type_codes']:\n self.code = self.manager.codes['LED_type_codes'][code_name]\n try:\n event_number = chardev.split('input')[1]\n except IndexError:\n print(\"Failed with\", self.name)\n raise\n else:\n self._character_device_path = '/dev/input/event' + event_number\n self._match_device()", "def boot(self):\n\n pass", "def run_genie(platform, cc=None, cxx=None):\n\n cmd = PLATFORMS_GENIE[platform]\n\n subprocess.check_call(cmd)", "def async_setup_scanner_platform(\n hass: HomeAssistant,\n config: ConfigType,\n scanner: DeviceScanner,\n async_see_device: Callable[..., Coroutine[None, None, None]],\n platform: str,\n) -> None:\n interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)\n update_lock = asyncio.Lock()\n scanner.hass = hass\n\n # Initial scan of each mac we also tell about host name for config\n seen: set[str] = set()\n\n async def async_device_tracker_scan(now: datetime | None) -> None:\n \"\"\"Handle interval matches.\"\"\"\n if update_lock.locked():\n LOGGER.warning(\n (\n \"Updating device list from %s took longer than the scheduled \"\n \"scan interval %s\"\n ),\n platform,\n interval,\n )\n return\n\n async with update_lock:\n found_devices = await scanner.async_scan_devices()\n\n device_name_uses_executor = (\n scanner.async_get_device_name.__func__ # type: ignore[attr-defined]\n is DeviceScanner.async_get_device_name\n )\n extra_attributes_uses_executor = (\n scanner.async_get_extra_attributes.__func__ # type: ignore[attr-defined]\n is DeviceScanner.async_get_extra_attributes\n )\n host_name_by_mac: dict[str, str | None] = {}\n extra_attributes_by_mac: dict[str, dict[str, Any]] = {}\n if device_name_uses_executor or extra_attributes_uses_executor:\n (\n host_name_by_mac,\n extra_attributes_by_mac,\n ) = await hass.async_add_executor_job(\n _load_device_names_and_attributes,\n scanner,\n device_name_uses_executor,\n extra_attributes_uses_executor,\n seen,\n found_devices,\n )\n\n for mac in found_devices:\n if mac in seen:\n host_name = None\n else:\n host_name = host_name_by_mac.get(\n mac, await scanner.async_get_device_name(mac)\n )\n seen.add(mac)\n\n try:\n extra_attributes = extra_attributes_by_mac.get(\n mac, await scanner.async_get_extra_attributes(mac)\n )\n except NotImplementedError:\n extra_attributes = {}\n\n kwargs: dict[str, Any] = {\n \"mac\": mac,\n \"host_name\": host_name,\n \"source_type\": SourceType.ROUTER,\n \"attributes\": {\n \"scanner\": scanner.__class__.__name__,\n **extra_attributes,\n },\n }\n\n zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)\n if zone_home is not None:\n kwargs[\"gps\"] = [\n zone_home.attributes[ATTR_LATITUDE],\n zone_home.attributes[ATTR_LONGITUDE],\n ]\n kwargs[\"gps_accuracy\"] = 0\n\n hass.async_create_task(async_see_device(**kwargs))\n\n cancel_legacy_scan = async_track_time_interval(\n hass,\n async_device_tracker_scan,\n interval,\n name=f\"device_tracker {platform} legacy scan\",\n )\n hass.async_create_task(async_device_tracker_scan(None))\n\n @callback\n def _on_hass_stop(_: Event) -> None:\n \"\"\"Cleanup when Home Assistant stops.\n\n Cancel the legacy scan.\n \"\"\"\n cancel_legacy_scan()\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _on_hass_stop)", "def register(self,registerable):\n result = self.registry.register(registerable)\n if result.reg_info.index is None:\n 
raise RuntimeError(\"failed to register {}\".format(str(registerable)))\n return result", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_hardware_sound_systems'] = True", "async def async_setup_platform(hass, config, async_add_devices, _discovery_info=None):\n pass", "def launch ():\n #core.addListenerByName(\"UpEvent\", _go_up)\n core.registerNew(MAC_Filter)", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])", "def platform(aliased=0, terse=0):\n result = _platform_cache.get((aliased, terse), None)\n if result is not None:\n return result\n\n # Get uname information and then apply platform specific cosmetics\n # to it...\n system, node, release, version, machine, processor = uname()\n if machine == processor:\n processor = ''\n if aliased:\n system, release, version = system_alias(system, release, version)\n\n if True: ###\n # Generic handler\n if terse:\n platform = _platform(system, release)\n else:\n bits, linkage = architecture() ###\n platform = _platform(system, release, machine,\n processor, bits, linkage)\n\n _platform_cache[(aliased, terse)] = platform\n return platform", "async def _async_setup_platform(\n hass: HomeAssistant,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict[str, Any]],\n) -> None:\n if integration_platform not in hass.data:\n await async_setup_component(\n hass, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component: EntityComponent[Entity] = hass.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "async def _async_setup_hardware_integration(_: datetime | None = None) -> None:\n if (os_info := get_os_info(hass)) is None:\n # os info not yet fetched from supervisor, retry later\n async_call_later(\n hass,\n HASSIO_UPDATE_INTERVAL,\n async_setup_hardware_integration_job,\n )\n return\n if (board := os_info.get(\"board\")) is None:\n return\n if (hw_integration := HARDWARE_INTEGRATIONS.get(board)) is None:\n return\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n hw_integration, context={\"source\": \"system\"}\n )\n )", "def _setup(self):" ]
[ "0.67203164", "0.6423017", "0.62060404", "0.61684966", "0.6153153", "0.5944612", "0.59082896", "0.5862393", "0.5726266", "0.5700211", "0.56732666", "0.5646277", "0.5646277", "0.5623879", "0.56002146", "0.5568672", "0.5550284", "0.5511287", "0.54954875", "0.54954875", "0.54710275", "0.54399604", "0.54374117", "0.54218215", "0.5389259", "0.5386897", "0.5380801", "0.5380435", "0.53759766", "0.5364215", "0.5346935", "0.53144026", "0.53038996", "0.529395", "0.52905935", "0.5275916", "0.527498", "0.52611244", "0.52594864", "0.5257225", "0.52557874", "0.525429", "0.5241784", "0.52402586", "0.5239693", "0.523599", "0.5233892", "0.5233447", "0.52216804", "0.5217928", "0.5210053", "0.5190824", "0.51857644", "0.5173383", "0.51719743", "0.516857", "0.5151815", "0.5150487", "0.51472175", "0.5145434", "0.5144817", "0.51427656", "0.5134451", "0.51323", "0.51159793", "0.5102731", "0.50951713", "0.50921726", "0.50919247", "0.50847346", "0.508168", "0.50790274", "0.50790274", "0.5058763", "0.5057705", "0.5055347", "0.50452495", "0.50442106", "0.5039123", "0.5035224", "0.5023533", "0.5018497", "0.50163275", "0.5006806", "0.4985548", "0.49834487", "0.49817193", "0.4979084", "0.4971129", "0.4970957", "0.49688637", "0.49683705", "0.4961169", "0.49563137", "0.49484038", "0.49439633", "0.49418417", "0.49410602", "0.49364716", "0.49302572" ]
0.5712664
9
Runs through platforms whose registration was deferred and tries to register them again
def _check_awaiting(self):
    # TODO: check for wait loops
    for w in list(self._awaiting.values()):
        self._try_register_platform(w["instance"], w["kind"], w["parent"], w["wait"], awaiting=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_process_hardware_platforms(hass: HomeAssistant) -> None:\n hass.data[DOMAIN][\"hardware_platform\"] = {}\n\n await async_process_integration_platforms(hass, DOMAIN, _register_hardware_platform)", "def _try_register_platform(self, factory, kind, parent, wait, awaiting=False):\r\n name = factory.name\r\n assert kind is not None, \"instance kind can't be None (instance name is {})\".format(name)\r\n\r\n if factory.name is None:\r\n factory.name = name = \"random_name\" # TODO: use GUID\r\n\r\n assert name not in self._platforms and (awaiting or name not in self._awaiting),\\\r\n \"encountered second platform with name {}\".format(name)\r\n\r\n # TODO: analyze args and update wait if there are references to other platforms\r\n assert wait is None or name not in wait, \"platform {} can't wait for self!\".format(name)\r\n\r\n # If all necessary parent and co-platforms are already created - finish registration of this one\r\n if (parent is None or parent in self._platforms) \\\r\n and (wait is None or all(w in self._platforms for w in wait)):\r\n np = factory.finish_registration()\r\n self._platforms[name] = np\r\n if parent is not None:\r\n assert np not in self._platforms[parent].subplatforms, \"Subplatform {} is already within \" \\\r\n \"parent's ({}) subplatforms list, \" \\\r\n \"but shouldn't be\".format(name, parent)\r\n np.parent = self._platforms[parent]\r\n self._platforms[parent].subplatforms.append(np)\r\n if wait is not None:\r\n for w in wait:\r\n assert np not in self._platforms[w].depended, \"Subplatform {} is already within \" \\\r\n \"depended's list of {}, \" \\\r\n \"but shouldn't be\".format(name, w)\r\n self._platforms[w].depended.append(np)\r\n if awaiting:\r\n del self._awaiting[name]\r\n self._check_awaiting()\r\n # Otherwise put it into waiting list\r\n else:\r\n self._awaiting[name] = {\r\n \"instance\": factory,\r\n \"kind\": kind,\r\n \"parent\": parent,\r\n \"wait\": wait}", "async def reload_platform(self) -> None:", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def _update_device_registry(self):\n try:\n if not self._flag_updating_deviceregistry:\n _log.debug(\"Updating device registry\")\n self._flag_updating_deviceregistry = True\n self._sync_connected_platforms()\n unreachable = []\n # Loop over the connections to the registered agent platforms.\n for k, v in self._platform_connections.items():\n _log.debug('updating for {}'.format(k))\n # Only attempt update if we have a connection to the\n # agent instance.\n if v is not None:\n try:\n devices = v.agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM,\n 'get_devices').get(timeout=30)\n\n anon_devices = defaultdict(dict)\n\n # for each device returned from the query to\n # get_devices we need to anonymize the k1 in the\n # anon_devices dictionary.\n for k1, v1 in devices.items():\n _log.debug(\n \"before anon: {}, {}\".format(k1, v1))\n # now we need to do a search/replace on the\n # self._topic_list so that the devices are\n # known as the correct itme nin the tree.\n anon_topic = self._topic_replace_map[k1]\n\n # if replaced has not already been replaced\n if not anon_topic:\n anon_topic = k1\n for sr in self._topic_replace_list:\n anon_topic = anon_topic.replace(\n sr['from'], sr['to'])\n\n self._topic_replace_map[k1] = anon_topic\n\n anon_devices[anon_topic] = v1\n\n _log.debug('Anon devices are: {}'.format(\n anon_devices))\n\n self._registry.update_devices(k, anon_devices)\n except (gevent.Timeout, Unreachable) as e:\n _log.error(\n 'Error getting devices from 
platform {}'\n .format(k))\n unreachable.append(k)\n for k in unreachable:\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]\n\n finally:\n self._flag_updating_deviceregistry = False", "def _post_init(self):\n if WIN:\n self._find_devices_win()\n elif MAC:\n self._find_devices_mac()\n else:\n self._find_devices()\n self._update_all_devices()\n if NIX:\n self._find_leds()", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def _register_fallback_implementations_by_process_graph(process_registry: ProcessRegistry = process_registry_100):\n for name in process_registry.list_predefined_specs():\n spec = process_registry.load_predefined_spec(name)\n if \"process_graph\" in spec and not process_registry.contains(name):\n _log.info(f\"Registering fallback implementation of {name!r} by process graph ({process_registry})\")\n custom_process_from_process_graph(process_spec=spec, process_registry=process_registry)", "async def async_setup_platforms(hass, config):\n tasks = [\n hass.helpers.discovery.async_load_platform(component, DOMAIN, {}, config)\n for component in INSTEON_COMPONENTS\n ]\n await asyncio.gather(*tasks)\n\n for address in devices:\n device = devices[address]\n platforms = get_device_platforms(device)\n if ON_OFF_EVENTS in platforms:\n add_on_off_event_device(hass, device)\n\n _LOGGER.debug(\"Insteon device count: %s\", len(devices))\n register_new_device_callback(hass, config)\n async_register_services(hass)\n\n # Cannot be done concurrently due to issues with the underlying protocol.\n for address in devices:\n await devices[address].async_status()\n await async_id_unknown_devices(hass.config.config_dir)", "def unregister_platform(self, name, recursive=False):\r\n if name in dict(self._platforms):\r\n self.unregister_platform_instance(self._platforms[name], recursive)", "def load_devices():", "async def _async_reconfig_platform(\n platform: EntityPlatform, platform_configs: list[dict]\n) -> None:\n await platform.async_reset()\n tasks = [platform.async_setup(p_config) for p_config in platform_configs]\n await asyncio.gather(*tasks)", "async def _async_reconfig_platform(\n platform: EntityPlatform, platform_configs: list[dict[str, Any]]\n) -> None:\n await platform.async_reset()\n tasks = [platform.async_setup(p_config) for p_config in platform_configs]\n await asyncio.gather(*tasks)", "def reset_registries():\n StepRegistry().clear()\n HookRegistry().reset()\n ExtensionRegistry().reset()", "async def async_reload_integration_platforms(\n hass: HomeAssistant, integration_name: str, integration_platforms: Iterable[str]\n) -> None:\n try:\n unprocessed_conf = await 
conf_util.async_hass_config_yaml(hass)\n except HomeAssistantError as err:\n _LOGGER.error(err)\n return\n\n tasks = [\n _resetup_platform(\n hass, integration_name, integration_platform, unprocessed_conf\n )\n for integration_platform in integration_platforms\n ]\n\n await asyncio.gather(*tasks)", "async def _register_hardware_platform(\n hass: HomeAssistant, integration_domain: str, platform: HardwareProtocol\n) -> None:\n if integration_domain == DOMAIN:\n return\n if not hasattr(platform, \"async_info\"):\n raise HomeAssistantError(f\"Invalid hardware platform {platform}\")\n hass.data[DOMAIN][\"hardware_platform\"][integration_domain] = platform", "async def _register_system_health_platform(hass, integration_domain, platform):\n platform.async_register(hass, RegisterSystemHealth(hass, integration_domain))", "def plugins_ready():\n\n for plugin in registerorder:\n plugin.ready()", "def register_platform(self, factory, kind, parent=None, wait=None):\r\n self._try_register_platform(factory, kind, parent, wait)", "async def async_reload_integration_platforms(\n opp: OpenPeerPower, integration_name: str, integration_platforms: Iterable\n) -> None:\n try:\n unprocessed_conf = await conf_util.async_opp_config_yaml(opp)\n except OpenPeerPowerError as err:\n _LOGGER.error(err)\n return\n\n tasks = [\n _resetup_platform(opp, integration_name, integration_platform, unprocessed_conf)\n for integration_platform in integration_platforms\n ]\n\n await asyncio.gather(*tasks)", "def _init_hardware(self):\n return", "def registration_started(self):\n self.resolution = 0", "def tear_down_registry(registry):\n for reg_adp in list(registry.registeredAdapters()):\n registry.unregisterAdapter(factory=reg_adp.factory,\n required=reg_adp.required,\n provided=reg_adp.provided,\n name=reg_adp.name)\n for reg_ut in list(registry.registeredUtilities()):\n registry.unregisterUtility(component=reg_ut.component,\n provided=reg_ut.provided,\n name=reg_ut.name)", "async def async_setup(self) -> None:\n await self.hass.async_add_executor_job(self._setup)\n\n # set already known devices to away instead of unavailable\n device_registry = dr.async_get(self.hass)\n devices = dr.async_entries_for_config_entry(device_registry, self.entry_id)\n for device_entry in devices:\n if device_entry.via_device_id is None:\n continue # do not add the router itself\n\n device_mac = dict(device_entry.connections).get(dr.CONNECTION_NETWORK_MAC)\n self.devices[device_mac] = {\n \"mac\": device_mac,\n \"name\": device_entry.name,\n \"active\": False,\n \"last_seen\": dt_util.utcnow() - timedelta(days=365),\n \"device_model\": None,\n \"device_type\": None,\n \"type\": None,\n \"link_rate\": None,\n \"signal\": None,\n \"ip\": None,\n }\n\n await self.async_update_device_trackers()\n self.entry.async_on_unload(\n async_track_time_interval(\n self.hass, self.async_update_device_trackers, SCAN_INTERVAL\n )\n )\n\n async_dispatcher_send(self.hass, self.signal_device_new)", "def _sync_connected_platforms(self):\n _log.debug(\"len pa_agents {}\".format(len(self._platform_connections)))\n pakeys = set(self._platform_connections.keys())\n _log.debug(\"Syncing with {}\".format(pakeys))\n for p in self._registry.get_platforms():\n if p.instance_uuid in pakeys:\n pakeys.remove(p.instance_uuid)\n\n for k in pakeys:\n _log.debug('Removing {} from pa_agents'.format(k))\n if k in self._platform_connections.keys():\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]", "def 
test_device_registration(self):\n sensor = self._get_dummy_sensor()\n responses = []\n sensor.set_response_callback(func=lambda response: responses.append(response))\n sensor.register()\n time.sleep(wait_seconds)\n for response in responses:\n print(json.loads(response.content.decode()))\n assert len(responses) > 0\n assert json.loads(responses[0].content.decode())['module_name'] == 'test_get_sensor'\n sensor.stopped.set()", "async def _async_setup_platform(\n opp: OpenPeerPower,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict],\n) -> None:\n if integration_platform not in opp.data:\n await async_setup_component(\n opp, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component = opp.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "async def test_device_registry_calls(opp):\n dev_reg = async_get(opp)\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test\",\n \"slug\": \"test\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"test\",\n \"url\": \"https://github.com/openpeerpower/addons/test\",\n },\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n os_mock_data = {\n \"board\": \"odroid-n2\",\n \"boot\": \"A\",\n \"update_available\": False,\n \"version\": \"5.12\",\n \"version_latest\": \"5.12\",\n }\n\n with patch.dict(os.environ, MOCK_ENVIRON), patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)\n config_entry.add_to_opp(opp)\n assert await opp.config_entries.async_setup(config_entry.entry_id)\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 3\n\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n\n # Test that when addon is removed, next update will remove the add-on and subsequent updates won't\n with patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=1))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 2\n\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=2))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 2\n\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n {\n \"name\": \"test3\",\n \"slug\": \"test3\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n\n # Test that when addon is added, next update will 
reload the entry so we register\n # a new device\n with patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=3))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 3", "def clean_registries(self):\n registry = self.connection.get_finished_registry(name=self.name)\n registry.cleanup()\n registry = self.connection.get_started_registry(name=self.name)\n registry.cleanup()", "def test_checks_registered_on_ready(self):\n app_config = apps.get_app_config('ironcage')\n register_path = 'ironcage.apps.register'\n with mock.patch(register_path) as register:\n app_config.ready()\n\n register.assert_called_once_with(env_vars_check, deploy=True)", "def continue_setup_platform(hass, config, token, add_devices, discovery_info=None):\n if \"trakt\" in _CONFIGURING:\n hass.components.configurator.request_done(_CONFIGURING.pop(\"trakt\"))\n \n add_devices([TraktMyShowCalendarSensor(hass, config, token)], True)", "def auto_setup(self):\n if self.mot_type == \"xps8p\":\n return\n if self.get_par(\"err_sevr\") == 3:\n print \"Reinitializing motor {}...\".format(self.name)\n self.reinit()\n ok = self.wait_par(\"err_sevr\", 3, match_value=False, timeout=20)\n if ok:\n print \"Successfully reinitialized {}.\".format(self.name)\n time.sleep(0.5)\n else:\n print \"Reinitializing {} timed out. Aborting auto_setup.\".format(self.name)\n return\n\n for i in range(3):\n for clear, name in ((self.clear_pu, \"powerup\"),\n (self.clear_stall, \"stall flag\"),\n (self.clear_error, \"error flag\")):\n clear(check=True, wait=False)\n\n ok = []\n for bit, mask in ((RA_POWERUP, 1), (RA_STALL, 1), (RA_ERR, RA_ERR_MASK)):\n ok.append(self._wait_msta_bit(bit, 0, mask, timeout=10))\n if not all(ok):\n print \"Issues with clearing flags for {}\".format(self.name)\n\n try: # Not every environment has pmgr access\n self.pmgr.apply_config(dumb_config=self.name)\n except:\n pass", "def do_deregistrations(registration_info_modulename):\n\timport importlib\n\tc = importlib.import_module(registration_info_modulename, package=None)\n\tdevices = c.devices\n\tapps = c.apps\n\tlogger.info(\"DE-REGISTER: de-registering all devices from file {}....\".format(registration_info_modulename))\n\tsetup_entities.deregister_entities(devices)\n\tlogger.info(\"DE-REGISTER: de-registering all apps from file {}....\".format(registration_info_modulename))\n\tsetup_entities.deregister_entities(apps)\n\tlogger.info(\"DE-REGISTER: done.\")", "async def async_setup_platform(hass, config, async_add_devices, _discovery_info=None):\n pass", "def no_platforms():\n with patch(\"homeassistant.components.mqtt.PLATFORMS\", []):\n yield", "async def _async_setup_platform(\n hass: HomeAssistant,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict[str, Any]],\n) -> None:\n if integration_platform not in hass.data:\n await async_setup_component(\n hass, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component: EntityComponent[Entity] = hass.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "async def async_setup_platform(\n hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None\n) -> None:\n if discovery_info is 
None:\n return\n\n broker = hass.data[DOMAIN][\"broker\"]\n\n async_add_entities(\n [\n GeniusSwitch(broker, z)\n for z in broker.client.zone_objs\n if z.data[\"type\"] == GH_ON_OFF_ZONE\n ]\n )", "async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):\n data = hass.data[UPV_DATA]\n if not data:\n return\n\n switches = []\n for switch_type in config.get(CONF_MONITORED_CONDITIONS):\n for camera in data.devices:\n switches.append(UnifiProtectSwitch(data, camera, switch_type))\n\n async_add_entities(switches, True)", "def test_stepregistry_should_gracefully_accept_double_registration():\n # given\n registry = StepRegistry()\n registry.register(\"Given\", \"pattern\", None)\n\n # when\n registry.register(\"Given\", \"pattern\", None)\n\n # then\n assert registry.step_implementations(\"Given\") == [\n StepImpl(\"Given\", \"pattern\", None)\n ]", "async def disable_platforms(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.demo.COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM\",\n [],\n ):\n yield", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "async def async_id_unknown_devices(config_dir):\n await devices.async_load(id_devices=1)\n for addr in devices:\n device = devices[addr]\n flags = True\n for name in device.operating_flags:\n if not device.operating_flags[name].is_loaded:\n flags = False\n break\n if flags:\n for name in device.properties:\n if not device.properties[name].is_loaded:\n flags = False\n break\n\n # Cannot be done concurrently due to issues with the underlying protocol.\n if not device.aldb.is_loaded or not flags:\n await device.async_read_config()\n\n await devices.async_save(workdir=config_dir)", "def device_reg(hass):\n return mock_device_registry(hass)", "def test_already_registered_002(self):\n\n class MyChecker(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_long_code():\n \"\"\"Do nothing.\"\"\"\n return \"something\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return []\n\n class MyContext(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return\n\n registry.register_checker(MyChecker)\n\n with self.assertRaises(EnvironmentError):\n registry.register_checker(MyChecker)\n\n registry.register_context(MyContext)\n\n with self.assertRaises(EnvironmentError):\n registry.register_context(MyContext)", "async def test_service_setup_and_unload_not_called_if_multiple_integrations_detected(\n register_service_mock,\n remove_service_mock,\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n register_service_mock.reset_mock()\n config_entry_2 = await setup_deconz_integration(hass, aioclient_mock, entry_id=2)\n register_service_mock.assert_not_called()\n\n register_service_mock.assert_not_called()\n assert await hass.config_entries.async_unload(config_entry_2.entry_id)\n remove_service_mock.assert_not_called()\n assert await hass.config_entries.async_unload(config_entry.entry_id)\n assert remove_service_mock.call_count == 3", "def register_platform_services(platform: entity_platform.EntityPlatform) -> None:\n platform.async_register_entity_service(\n SERVICE_ENABLE, 
ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_DISABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TOGGLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_CANCEL, ENTITY_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TIME_ADJUST, TIME_ADJUST_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_MANUAL_RUN, MANUAL_RUN_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_SUSPEND, SUSPEND_SCHEMA, async_entity_service_handler\n )", "def get_platforms(self):\n _log.debug(\"Passing platforms back: {}\".format(\n self._registered_platforms.keys()))\n return self._registered_platforms.values()", "def on_first_registration(self):\n pass", "def setup_platform(opp, config, add_entities, discovery_info=None):\n\n for scene in pywink.get_scenes():\n _id = scene.object_id() + scene.name()\n if _id not in opp.data[DOMAIN][\"unique_ids\"]:\n add_entities([WinkScene(scene, opp)])", "def _check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")", "def test_deregister_post_import_hook_after_register_multiple_all(self):\n test_hook = mock.MagicMock()\n test_hook2 = mock.MagicMock()\n register_post_import_hook('tests.utils.test_module', test_hook)\n register_post_import_hook('tests.utils.test_module', test_hook2)\n\n outcome = deregister_post_import_hook('tests.utils.test_module', test_hook)\n self.assertTrue(outcome)\n outcome = deregister_post_import_hook('tests.utils.test_module', test_hook2)\n self.assertTrue(outcome)\n import tests.utils.test_module # noqa\n self.assertEqual(test_hook.call_count, 0, 'hook has been deregistered and should be removed')\n self.assertEqual(test_hook2.call_count, 0, 'hook has been deregistered and should be removed')", "def post_hook(self):\n self.mk_rg1()\n self.mk_rg2()\n self.mk_rg3()", "def async_setup_scanner_platform(\n hass: HomeAssistant,\n config: ConfigType,\n scanner: DeviceScanner,\n async_see_device: Callable[..., Coroutine[None, None, None]],\n platform: str,\n) -> None:\n interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)\n update_lock = asyncio.Lock()\n scanner.hass = hass\n\n # Initial scan of each mac we also tell about host name for config\n seen: set[str] = set()\n\n async def async_device_tracker_scan(now: datetime | None) -> None:\n \"\"\"Handle interval matches.\"\"\"\n if update_lock.locked():\n LOGGER.warning(\n (\n \"Updating device list from %s took longer than the scheduled \"\n \"scan interval %s\"\n ),\n platform,\n interval,\n )\n return\n\n async with update_lock:\n found_devices = await scanner.async_scan_devices()\n\n device_name_uses_executor = (\n scanner.async_get_device_name.__func__ # type: ignore[attr-defined]\n is DeviceScanner.async_get_device_name\n )\n extra_attributes_uses_executor = (\n scanner.async_get_extra_attributes.__func__ # type: ignore[attr-defined]\n is DeviceScanner.async_get_extra_attributes\n )\n host_name_by_mac: dict[str, str | None] = {}\n extra_attributes_by_mac: dict[str, dict[str, Any]] = {}\n if device_name_uses_executor or extra_attributes_uses_executor:\n (\n host_name_by_mac,\n 
extra_attributes_by_mac,\n ) = await hass.async_add_executor_job(\n _load_device_names_and_attributes,\n scanner,\n device_name_uses_executor,\n extra_attributes_uses_executor,\n seen,\n found_devices,\n )\n\n for mac in found_devices:\n if mac in seen:\n host_name = None\n else:\n host_name = host_name_by_mac.get(\n mac, await scanner.async_get_device_name(mac)\n )\n seen.add(mac)\n\n try:\n extra_attributes = extra_attributes_by_mac.get(\n mac, await scanner.async_get_extra_attributes(mac)\n )\n except NotImplementedError:\n extra_attributes = {}\n\n kwargs: dict[str, Any] = {\n \"mac\": mac,\n \"host_name\": host_name,\n \"source_type\": SourceType.ROUTER,\n \"attributes\": {\n \"scanner\": scanner.__class__.__name__,\n **extra_attributes,\n },\n }\n\n zone_home = hass.states.get(hass.components.zone.ENTITY_ID_HOME)\n if zone_home is not None:\n kwargs[\"gps\"] = [\n zone_home.attributes[ATTR_LATITUDE],\n zone_home.attributes[ATTR_LONGITUDE],\n ]\n kwargs[\"gps_accuracy\"] = 0\n\n hass.async_create_task(async_see_device(**kwargs))\n\n cancel_legacy_scan = async_track_time_interval(\n hass,\n async_device_tracker_scan,\n interval,\n name=f\"device_tracker {platform} legacy scan\",\n )\n hass.async_create_task(async_device_tracker_scan(None))\n\n @callback\n def _on_hass_stop(_: Event) -> None:\n \"\"\"Cleanup when Home Assistant stops.\n\n Cancel the legacy scan.\n \"\"\"\n cancel_legacy_scan()\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _on_hass_stop)", "def ensure_autodiscover():\n if not (form_element_plugin_registry._registry\n and form_handler_plugin_registry._registry\n and theme_registry._registry):\n autodiscover()", "async def async_setup_reload_service(\n opp: OpenPeerPower, domain: str, platforms: Iterable\n) -> None:\n if opp.services.has_service(domain, SERVICE_RELOAD):\n return\n\n async def _reload_config(call: Event) -> None:\n \"\"\"Reload the platforms.\"\"\"\n await async_reload_integration_platforms(opp, domain, platforms)\n opp.bus.async_fire(f\"event_{domain}_reloaded\", context=call.context)\n\n opp.helpers.service.async_register_admin_service(\n domain, SERVICE_RELOAD, _reload_config\n )", "async def async_setup_legacy(\n self,\n hass: HomeAssistant,\n tracker: DeviceTracker,\n discovery_info: dict[str, Any] | None = None,\n ) -> None:\n assert self.type == PLATFORM_TYPE_LEGACY\n full_name = f\"{DOMAIN}.{self.name}\"\n LOGGER.info(\"Setting up %s\", full_name)\n with async_start_setup(hass, [full_name]):\n try:\n scanner = None\n setup: bool | None = None\n if hasattr(self.platform, \"async_get_scanner\"):\n scanner = await self.platform.async_get_scanner(\n hass, {DOMAIN: self.config}\n )\n elif hasattr(self.platform, \"get_scanner\"):\n scanner = await hass.async_add_executor_job(\n self.platform.get_scanner,\n hass,\n {DOMAIN: self.config},\n )\n elif hasattr(self.platform, \"async_setup_scanner\"):\n setup = await self.platform.async_setup_scanner(\n hass, self.config, tracker.async_see, discovery_info\n )\n elif hasattr(self.platform, \"setup_scanner\"):\n setup = await hass.async_add_executor_job(\n self.platform.setup_scanner,\n hass,\n self.config,\n tracker.see,\n discovery_info,\n )\n else:\n raise HomeAssistantError(\"Invalid legacy device_tracker platform.\")\n\n if scanner is not None:\n async_setup_scanner_platform(\n hass, self.config, scanner, tracker.async_see, self.type\n )\n\n if not setup and scanner is None:\n LOGGER.error(\n \"Error setting up platform %s %s\", self.type, self.name\n )\n return\n\n 
hass.config.components.add(full_name)\n\n except Exception: # pylint: disable=broad-except\n LOGGER.exception(\n \"Error setting up platform %s %s\", self.type, self.name\n )", "def registration_started(self):\n pass", "async def async_discover(discovery_info: DiscoveryInfo) -> None:\n mysensors.setup_mysensors_platform(\n hass,\n DOMAIN,\n discovery_info,\n MySensorsSensor,\n async_add_entities=async_add_entities,\n )", "async def async_setup_platform(hass, config, async_add_devices,\n discovery_info=None):\n return True", "def test_register_cloud(self):\n pass", "def get_matched_platforms(self, platform):\n raise NotImplemented", "def forceRegister(self, name, value):\n pass", "def add_to_platform_start(\n self,\n hass: HomeAssistant,\n platform: EntityPlatform,\n parallel_updates: asyncio.Semaphore | None,\n ) -> None:\n super().add_to_platform_start(hass, platform, parallel_updates)\n\n # Bail out if the sensor doesn't have a unique_id or a device class\n if self.unique_id is None or self.device_class is None:\n return\n registry = er.async_get(self.hass)\n\n # Bail out if the entity is not yet registered\n if not (\n entity_id := registry.async_get_entity_id(\n platform.domain, platform.platform_name, self.unique_id\n )\n ):\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._sensor_option_unit_of_measurement = self._get_initial_suggested_unit()\n return\n\n registry_entry = registry.async_get(entity_id)\n assert registry_entry\n\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self.registry_entry = registry_entry\n self._async_read_entity_options()\n\n # If the sensor has 'unit_of_measurement' in its sensor options, the user has\n # overridden the unit.\n # If the sensor has 'sensor.private' in its entity options, it already has a\n # suggested_unit.\n registry_unit = registry_entry.unit_of_measurement\n if (\n (\n (sensor_options := registry_entry.options.get(DOMAIN))\n and CONF_UNIT_OF_MEASUREMENT in sensor_options\n )\n or f\"{DOMAIN}.private\" in registry_entry.options\n or self.unit_of_measurement == registry_unit\n ):\n return\n\n # Make sure we can convert the units\n if (\n (unit_converter := UNIT_CONVERTERS.get(self.device_class)) is None\n or registry_unit not in unit_converter.VALID_UNITS\n or self.unit_of_measurement not in unit_converter.VALID_UNITS\n ):\n return\n\n # Set suggested_unit_of_measurement to the old unit to enable automatic\n # conversion\n self.registry_entry = registry.async_update_entity_options(\n entity_id,\n f\"{DOMAIN}.private\",\n {\"suggested_unit_of_measurement\": registry_unit},\n )\n # Update _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._async_read_entity_options()", "def register_platform(self, address, serverkey=None, display_name=None):\n _log.info('Attempting registration of vcp at address: '\n '{} display_name: {}, serverkey: {}'.format(address,\n display_name,\n serverkey))\n parsed = urlparse(address)\n if parsed.scheme not in ('tcp', 'ipc'):\n raise ValueError(\n 'Only ipc and tpc addresses can be used in the '\n 'register_platform method.')\n try:\n connection = self._build_connection(address, serverkey)\n except gevent.Timeout:\n _log.error(\"Initial building of connection not found\")\n raise\n\n try:\n if connection is None:\n raise ValueError(\"Connection was not able to be found\")\n manager_key = connection.call('get_manager_key')\n except 
gevent.Timeout:\n _log.error(\"Couldn't retrieve managment key from platform\")\n raise\n\n try:\n if manager_key is not None:\n if manager_key == self.core.publickey:\n _log.debug('Platform is already managed and connected.')\n return\n else:\n _log.warn(\n 'Platform is registered with a different vc key.'\n 'This could be expected.')\n\n if parsed.scheme == 'tcp':\n self.core.publickey\n _log.debug(\n 'TCP calling manage. my serverkey: {}, my publickey: {}'.format(\n self._serverkey, self.core.publickey))\n pk = connection.call(\n 'manage', self._external_addresses[0], self._serverkey,\n self.core.publickey)\n else:\n pk = connection.call('manage', self.core.address)\n except gevent.Timeout:\n _log.error('RPC call to manage did not return in a timely manner.')\n raise\n # If we were successful in calling manage then we can add it to\n # our list of managed platforms.\n if pk is not None and len(pk) == 43:\n try:\n address_uuid = self._address_to_uuid.get(address)\n time_now = format_timestamp(get_aware_utc_now())\n\n if address_uuid is not None:\n _log.debug('Attempting to get instance id to reconfigure '\n 'the agent on the remote instance.')\n current_uuid = connection.call('get_instance_uuid')\n\n if current_uuid != address_uuid:\n _log.debug('Reconfiguring with new uuid. {}'.format(\n address_uuid\n ))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n if self._registered_platforms.get(address_uuid) is None:\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n else:\n address_uuid = str(uuid.uuid4())\n _log.debug(\"New platform with uuid: {}\".format(\n address_uuid))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n self._address_to_uuid[address] = address_uuid\n if display_name is None:\n display_name = address\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n self._platform_connections[address_uuid] = connection\n self._registered_platforms.sync()\n except gevent.Timeout:\n _log.error(\n 'Call to reconfigure did not return in a timely manner.')\n raise", "async def test_manually_configured_platform(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass, LIGHT_DOMAIN, {LIGHT_DOMAIN: {\"platform\": HMIPC_DOMAIN}}\n )\n assert not hass.data.get(HMIPC_DOMAIN)", "def setup_reload_service(opp: OpenPeerPower, domain: str, platforms: Iterable) -> None:\n asyncio.run_coroutine_threadsafe(\n async_setup_reload_service(opp, domain, platforms),\n opp.loop,\n ).result()", "async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):\n cameradata = hass.data.get(DATA_UFP)\n if not cameradata:\n return\n\n sensors = []\n for sensor_type in config.get(CONF_MONITORED_CONDITIONS):\n for camera in cameradata.cameras:\n name = \"{0} {1}\".format(SENSOR_TYPES[sensor_type][0], camera[\"name\"])\n sensors.append(UnifiProtectSensor(name, camera, sensor_type, cameradata))\n\n async_add_entities(sensors, True)", "def test_deregister_post_import_hook_after_register_multiple(self):\n # Enforce a spec so that hasattr doesn't vacuously return True.\n test_hook = mock.MagicMock(spec=[])\n test_hook2 = mock.MagicMock(spec=[])\n register_post_import_hook('tests.utils.test_module', test_hook)\n register_post_import_hook('tests.utils.test_module', 
test_hook2)\n\n outcome = deregister_post_import_hook('tests.utils.test_module', test_hook)\n self.assertTrue(outcome)\n import tests.utils.test_module # noqa\n self.assertEqual(test_hook.call_count, 0, 'hook has been deregistered and should be removed')\n self.assertEqual(test_hook2.call_count, 1, 'hook should have been called')", "def registration_resolution_changed(self):\n pass", "def _platform_compatible():\r\n raise NotImplementedError", "def flush(platform):\n devices = jax.devices(platform)\n\n for device in devices:\n # as suggested in jax#4335\n noop = jax.device_put(0, device=device) + 0\n noop.block_until_ready()", "async def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None\n): # pylint: disable=unused-argument\n # Only single instance allowed\n if FKFGarbageCollectionCalendar.instances == 0:\n async_add_entities([FKFGarbageCollectionCalendar(hass)], True)", "async def test_setup_platform(hass):\n added_entities = []\n\n zone_defs = [{\n 'id': '1',\n 'type': '3',\n 'zone_name': 'Kitchen smoke',\n 'area': '1',\n 'area_name': 'House',\n 'input': '0',\n 'status': '0',\n }, {\n 'id': '3',\n 'type': '0',\n 'zone_name': 'Hallway PIR',\n 'area': '1',\n 'area_name': 'House',\n 'input': '0',\n 'status': '0',\n }, {\n 'id': '5',\n 'type': '1',\n 'zone_name': 'Front door',\n 'area': '1',\n 'area_name': 'House',\n 'input': '1',\n 'status': '0',\n }]\n\n def add_entities(entities):\n nonlocal added_entities\n added_entities = list(entities)\n\n from pyspcwebgw import Zone\n\n zones = [Zone(area=None, spc_zone=z) for z in zone_defs]\n\n await spc.async_setup_platform(hass=hass,\n config={},\n async_add_entities=add_entities,\n discovery_info={'devices': zones})\n\n assert len(added_entities) == 3\n assert added_entities[0].device_class == 'smoke'\n assert added_entities[0].state == 'off'\n assert added_entities[1].device_class == 'motion'\n assert added_entities[1].state == 'off'\n assert added_entities[2].device_class == 'opening'\n assert added_entities[2].state == 'on'\n assert all(d.hidden for d in added_entities)", "async def _async_setup_hardware_integration(_: datetime | None = None) -> None:\n if (os_info := get_os_info(hass)) is None:\n # os info not yet fetched from supervisor, retry later\n async_call_later(\n hass,\n HASSIO_UPDATE_INTERVAL,\n async_setup_hardware_integration_job,\n )\n return\n if (board := os_info.get(\"board\")) is None:\n return\n if (hw_integration := HARDWARE_INTEGRATIONS.get(board)) is None:\n return\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n hw_integration, context={\"source\": \"system\"}\n )\n )", "def identify_smart_sensors(serial_conns):\n def recv_subscription_response(conn, uid_queue, stop_event):\n \"\"\"\n Place received subscription response UIDs from CONN into UID_QUEUE,\n stopping when STOP_EVENT is set.\n \"\"\"\n try:\n for packet in hm.blocking_read_generator(conn, stop_event):\n msg_type = packet.get_message_id()\n if msg_type == hm.MESSAGE_TYPES[\"SubscriptionResponse\"]:\n _, _, uid = hm.parse_subscription_response(packet)\n uid_queue.put(uid)\n except serial.SerialException:\n pass\n\n\n device_map = {}\n candidates = []\n for conn in serial_conns:\n old_timeout = conn.write_timeout\n conn.write_timeout = IDENTIFY_TIMEOUT\n try:\n hm.send(conn, hm.make_ping())\n except serial.SerialTimeoutException:\n continue\n finally:\n conn.write_timeout = old_timeout\n maybe_device = namedtuple(\"MaybeDevice\", [\"serial_conn\", \"queue\", \"event\", \"thread\"])\n maybe_device.queue = 
queue.Queue()\n maybe_device.event = threading.Event()\n maybe_device.serial_conn = conn\n maybe_device.thread = threading.Thread(target=recv_subscription_response,\n args=(conn, maybe_device.queue, maybe_device.event))\n candidates.append(maybe_device)\n for cand in candidates:\n cand.thread.start()\n for cand in candidates:\n try:\n uid = cand.queue.get(block=True, timeout=IDENTIFY_TIMEOUT)\n device_map[cand.serial_conn.name] = uid\n # Shut device up\n hm.send(cand.serial_conn, hm.make_subscription_request(uid, [], 0))\n except queue.Empty:\n pass\n for cand in candidates:\n cand.event.set()\n cand.thread.join()\n return device_map", "def setup_reload_service(\n hass: HomeAssistant, domain: str, platforms: Iterable[str]\n) -> None:\n asyncio.run_coroutine_threadsafe(\n async_setup_reload_service(hass, domain, platforms),\n hass.loop,\n ).result()", "def setupRegistrationForFeat(self, wait_for_execute = True):\n\t\tfor condition in ['WMM']:\n\t\t\tfor run in self.conditionDict[condition]:\n\t\t\t\t\n\t\t\t\tfeat_directory = self.runFile(stage = 'processed/mri', run = self.runList[run], postFix = ['mcf','sgtf'], extension='.feat')\n\t\t\t\t\n\n\t\t\t\ttry:\n\t\t\t\t\tos.mkdir(os.path.join(feat_directory,'reg'))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\n\t\t\t\tif not os.path.isdir(self.stageFolder(stage = 'processed/mri/reg/feat/')):\n\t\t\t\t\tself.registerSession(prepare_register = True, bb = False, MNI = True)\n\t\t\n\t\t\t\tos.system('cp ' + self.stageFolder(stage = 'processed/mri/reg/feat/') + '* ' + os.path.join(feat_directory,'reg/') )\n\t\t\t\tif wait_for_execute:\n\t\t\t\t\tos.system('featregapply ' + feat_directory )\n\t\t\t\telse:\n\t\t\t\t\tos.system('featregapply ' + feat_directory + ' & ' )", "async def async_setup_integration(hass: HomeAssistant, config: ConfigType) -> None:\n tracker = await get_tracker(hass, config)\n\n legacy_platforms = await async_extract_config(hass, config)\n\n setup_tasks = [\n asyncio.create_task(legacy_platform.async_setup_legacy(hass, tracker))\n for legacy_platform in legacy_platforms\n ]\n\n if setup_tasks:\n await asyncio.wait(setup_tasks)\n\n async def async_platform_discovered(\n p_type: str, info: dict[str, Any] | None\n ) -> None:\n \"\"\"Load a platform.\"\"\"\n platform = await async_create_platform_type(hass, config, p_type, {})\n\n if platform is None or platform.type != PLATFORM_TYPE_LEGACY:\n return\n\n await platform.async_setup_legacy(hass, tracker, info)\n\n discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)\n\n # Clean up stale devices\n cancel_update_stale = async_track_utc_time_change(\n hass, tracker.async_update_stale, second=range(0, 60, 5)\n )\n\n async def async_see_service(call: ServiceCall) -> None:\n \"\"\"Service to see a device.\"\"\"\n # Temp workaround for iOS, introduced in 0.65\n data = dict(call.data)\n data.pop(\"hostname\", None)\n data.pop(\"battery_status\", None)\n await tracker.async_see(**data)\n\n hass.services.async_register(\n DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA\n )\n\n # restore\n await tracker.async_setup_tracked_device()\n\n @callback\n def _on_hass_stop(_: Event) -> None:\n \"\"\"Cleanup when Home Assistant stops.\n\n Cancel the async_update_stale schedule.\n \"\"\"\n cancel_update_stale()\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _on_hass_stop)", "def Platforms():\n return platforms", "def init():\n\n global registry, fsk_router, ook_router\n\n radio.init()\n OpenThings.init(Devices.CRYPT_PID)\n\n fsk_router = 
Registry.Router(\"fsk\")\n\n #OOK receive not yet written\n #It will be used to be able to learn codes from Energenie legacy hand remotes\n ##ook_router = Registry.Router(\"ook\")\n\n registry = Registry.DeviceRegistry()\n registry.set_fsk_router(fsk_router)\n ##registry.set_ook_router(ook_router\n\n path = os.path.join(sys.path[0], registry.DEFAULT_FILENAME)\n if os.path.isfile(path):\n registry.load_from(path)\n print(\"loaded registry from file\")\n registry.list()\n fsk_router.list()\n\n # Default discovery mode, unless changed by app\n ##discovery_none()\n ##discovery_auto()\n ##discovery_ask(ask)\n discovery_autojoin()\n ##discovery_askjoin(ask)", "def registration_iteration_ended(self):", "def reconnect(self):\n\t\t# TODO: Make sure the remote devices are actually found?\n\t\tself.setup()\n\t\tself.patch()", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)", "def test_patch_pci_switch(self):\n pass", "def load_kernel_modules():\n if not os.path.isdir(W1ThermSensor.BASE_DIRECTORY):\n os.system(\"modprobe w1-gpio >/dev/null 2>&1\")\n os.system(\"modprobe w1-therm >/dev/null 2>&1\")\n\n for _ in range(W1ThermSensor.RETRY_ATTEMPTS):\n if os.path.isdir(\n W1ThermSensor.BASE_DIRECTORY\n ): # w1 therm modules loaded correctly\n break\n time.sleep(W1ThermSensor.RETRY_DELAY_SECONDS)\n else:\n raise KernelModuleLoadError()", "async def test_unsupported_sensors(hass, mock_bridge):\n response_with_unsupported = dict(SENSOR_RESPONSE)\n response_with_unsupported['7'] = UNSUPPORTED_SENSOR\n mock_bridge.mock_sensor_responses.append(response_with_unsupported)\n await setup_bridge(hass, mock_bridge)\n assert len(mock_bridge.mock_requests) == 1\n # 2 \"physical\" sensors with 3 virtual sensors each\n assert len(hass.states.async_all()) == 6", "async def async_setup_reload_service(\n hass: HomeAssistant, domain: str, platforms: Iterable[str]\n) -> None:\n if hass.services.has_service(domain, SERVICE_RELOAD):\n return\n\n async def _reload_config(call: ServiceCall) -> None:\n \"\"\"Reload the platforms.\"\"\"\n await async_reload_integration_platforms(hass, domain, platforms)\n hass.bus.async_fire(f\"event_{domain}_reloaded\", context=call.context)\n\n async_register_admin_service(hass, domain, SERVICE_RELOAD, _reload_config)", "def _shutdown(self):\n self.logger.debug(\"Unregistering feature modules.\")\n for feature in self._features.values():\n try:\n if hasattr(feature.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(feature.handle.module_unregister())\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering feature module '{feature.name}'.\")\n self.logger.debug(\"Unregistering protocol modules.\")\n for protocol in self._protocols.values():\n try:\n if hasattr(protocol.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(\n protocol.handle.module_unregister(protocol.contexts, self._shutdown_reason)\n )\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering protocol module '{protocol.name}'.\")\n self.eventloop.run_until_complete(self.database.close())\n if len(self._db_connections) > 0:\n self.logger.debug(\"Cleaning up unclosed database connections\")\n for module in list(self._db_connections):\n self.eventloop.run_until_complete(self.database_disconnect(module))", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await 
opp.config_entries.async_unload_platforms(entry, PLATFORMS)", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)", "def __init__(self):\n thisType = type(self)\n if not thisType._initialized:\n thisType._initialized = True\n self._embedded_device_registry = {}\n self._root_device_registry = {}\n self._service_registry = {}\n self._scan_for_device_extensions_under_code_container(dynamic_extensions)\n self._scan_for_device_extensions_under_code_container(standard_extensions)\n self._scan_for_service_extensions_under_code_container(dynamic_extensions)\n self._scan_for_service_extensions_under_code_container(standard_extensions)\n return", "async def test_registered_devices(hass: HomeAssistant) -> None:\n integration_matchers = [\n {\"domain\": \"not-matching\", \"registered_devices\": True},\n {\"domain\": \"mock-domain\", \"registered_devices\": True},\n ]\n\n packet = Ether(RAW_DHCP_RENEWAL)\n\n registry = dr.async_get(hass)\n config_entry = MockConfigEntry(domain=\"mock-domain\", data={})\n config_entry.add_to_hass(hass)\n registry.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"50147903852c\")},\n name=\"name\",\n )\n # Not enabled should not get flows\n config_entry2 = MockConfigEntry(domain=\"mock-domain-2\", data={})\n config_entry2.add_to_hass(hass)\n registry.async_get_or_create(\n config_entry_id=config_entry2.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"50147903852c\")},\n name=\"name\",\n )\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n # Ensure no change is ignored\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.1.120\",\n hostname=\"irobot-ae9ec12dd3b04885bcbfa36afb01e1cc\",\n macaddress=\"50147903852c\",\n )", "def regs() -> None:", "def find_hardware(self, device_info=None):\n if os.name is not 'nt': # If not on a Windows system, just set up soundcard\n self.setup_soundcard()\n self.hardware.append('Soundcard')\n self.out_samplefreq = 44100\n else:\n if 'NIDAQ' in self.required_hardware and self.setup_nidaq(device_info):\n self.hardware.append('NIDAQ')\n if 'RP21' in self.required_hardware and self.setup_RP21('c:\\pystartle\\startle.rco'):\n self.hardware.append('RP21')\n if 'PA5' in self.required_hardware and self.setup_PA5():\n self.hardware.append('PA5')\n if 'RZ5D' in self.required_hardware and self.setup_RZ5D():\n self.hardware.append('RZ5D')", "def on_registered(self):\r\n super().on_registered()\r\n\r\n # Register type information\r\n cls = self.__class__\r\n\r\n subclass_cache = cls._of_subclass_cache\r\n type_cache = cls._of_type_cache\r\n\r\n # Cache subtypes\r\n for base_cls in cls.__mro__:\r\n try:\r\n instances = subclass_cache[base_cls]\r\n\r\n except KeyError:\r\n instances = subclass_cache[base_cls] = set()\r\n\r\n instances.add(self)\r\n\r\n # Cache the type\r\n try:\r\n instances = type_cache[cls]\r\n\r\n except KeyError:\r\n instances = type_cache[cls] = set()\r\n\r\n instances.add(self)\r\n\r\n ReplicableRegisteredSignal.invoke(target=self)", 
"def emergency_stop(self):\r\n eprint(\"Emergency platforms stop\")\r\n stop_list = []\r\n for p in self._platforms:\r\n stop_list.append(self._platforms[p])\r\n\r\n success = True\r\n while len(stop_list) > 0: # NOTE: stop platforms in reverse order\r\n p = stop_list.pop(-1)\r\n vprint(\"Emergency stop for {}\".format(p))\r\n try:\r\n r = p._stop([])\r\n except Exception as e:\r\n success = False\r\n eprint(\"Exception occurred while stopping platform {} emergently: {}\".format(p, e))\r\n exprint()\r\n continue\r\n if not r.success:\r\n success = False\r\n return success", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")" ]
[ "0.667766", "0.6472627", "0.64608395", "0.6125786", "0.6103623", "0.585542", "0.5823268", "0.5784557", "0.57517254", "0.56858265", "0.5681627", "0.5622832", "0.5608007", "0.55615294", "0.5553192", "0.55258495", "0.5492156", "0.54448736", "0.54345196", "0.5424958", "0.53986436", "0.536904", "0.53196496", "0.53193444", "0.5313927", "0.5300347", "0.5176856", "0.51619375", "0.51548874", "0.5151162", "0.51301074", "0.5127501", "0.5110918", "0.51040804", "0.5101444", "0.50946283", "0.5093933", "0.5091361", "0.5074292", "0.5067601", "0.5055463", "0.5055463", "0.5029818", "0.5026264", "0.5024336", "0.5018196", "0.50162625", "0.5011799", "0.5011358", "0.500465", "0.49965453", "0.499527", "0.4993567", "0.4989406", "0.4954374", "0.49507192", "0.49463373", "0.49453533", "0.49327454", "0.49254036", "0.49198192", "0.4916881", "0.49129337", "0.49099594", "0.4909069", "0.4904882", "0.48851913", "0.48835474", "0.48833197", "0.48817387", "0.48699018", "0.4864484", "0.4863781", "0.48624143", "0.48569283", "0.4840333", "0.48390776", "0.48332375", "0.48307067", "0.48294008", "0.48279086", "0.48264074", "0.4822126", "0.48182437", "0.48155633", "0.48132232", "0.48125505", "0.48124382", "0.48108596", "0.48075178", "0.48075178", "0.47999614", "0.47993693", "0.47978196", "0.4790297", "0.47811064", "0.47795224", "0.47736797", "0.47736797", "0.4772852" ]
0.6857937
0
Stops platforms as best it can
def emergency_stop(self):
    eprint("Emergency platforms stop")
    stop_list = []
    for p in self._platforms:
        stop_list.append(self._platforms[p])

    success = True
    while len(stop_list) > 0:  # NOTE: stop platforms in reverse order
        p = stop_list.pop(-1)
        vprint("Emergency stop for {}".format(p))
        try:
            r = p._stop([])
        except Exception as e:
            success = False
            eprint("Exception occurred while stopping platform {} emergently: {}".format(p, e))
            exprint()
            continue
        if not r.success:
            success = False
    return success
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def platform_stop(self):\n self.platform.stop()", "def stopEngines():\n pass", "def stop(self):\n # Cleanup platform first.\n self.cleanup()\n\n if self.init_lhost:\n self._lhost.stop()\n\n self.status = False # pylint: disable=attribute-defined-outside-init", "def stop():", "def stop():", "def stop():", "def stop():", "def stop() -> None:", "def stop(self):\n self.turnOffMotors()", "def shutdown(self):\n self.running = False\n ev3.Leds.all_off()\n self.left_motor.stop()\n self.right_motor.stop()", "def shutdown(self):\n self.left_motor.stop()\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n self.running = False\n print('Good Bye')\n ev3.Sound.speak(\"Good Bye\")", "def stop(self):\n GameEngine().stop()\n self.on_stop()", "def exit_engine(self):\n self.stop_flag = True", "def stop():\n pygame.quit() # stop pygame\n exit() # stop python", "def stop():\n pygame.quit() # stop pygame\n exit() # stop python", "def stop(self):\n self.scion_sh('stop')", "def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0", "def _stop(self):", "def stop(self) -> None:\n turnOffMotors()", "def stop(self):", "def stop(self):", "def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()", "def stop_all():\r\n motors.stop_all_motors()\r\n led.set_colour_solid(0)\r\n display.clear()", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def stop_full(self):\n self.systems[\"driver\"].move(0, 0)", "def stop(self) -> None:", "def stop(self) -> None:", "def __stop_game(self) -> None:\n # delete objects\n self.pipes = []\n self.player = []\n \n # stop timer\n pygame.time.set_timer(PIPE_SPAWN, 0)", "def stop(self):\n self.stopAngMovementAll()\n self.stopMovementAll()", "def stop(self):\n self.microblaze.reset()", "def _control_stop(self):\n self.player.stop()", "def __fullBlackBoxExit(self):\r\n\r\n if not core.FW_conf['blackbox'].isVideoRecorderAvailable():\r\n raise Exception('Exit: No video camera available. Check settings.')\r\n\r\n mcStart = core.FW_conf['blackbox'].getCountMotionFrames()\r\n mcAfter = None\r\n\r\n self.inIdle = False\r\n\r\n #exitmethods = ['single back','3 back presses', 'swipe', 'keylock']\r\n\r\n for i in range(5):\r\n\r\n# for method in exitmethods:\r\n import sys\r\n sys.path.append('..')\r\n from override.Exit import ExitOverrides\r\n exitOverrides = ExitOverrides(self.phone)\r\n exitMethods = [m for m in dir(exitOverrides) if m.startswith('EXIT')]\r\n debug.brf(exitMethods)\r\n\r\n for method in exitMethods:\r\n\r\n mcBefore = core.FW_conf['blackbox'].getCountMotionFrames()\r\n\r\n if self._isInIdle():\r\n continue\r\n\r\n self.phone.comment('Exit: Try %s to get idle..' 
% method)\r\n getattr(exitOverrides,method)()\r\n\r\n #if method == 'single back': # if in some application, this could close it\r\n # self.phone.select.long('KBD_KEY_BACK',doNotReport=True)\r\n\r\n #elif method == '3 back presses': # if in some application, this could close it\r\n # self.phone.select.long('KBD_KEY_BACK',doNotReport=True)\r\n # self.phone.delay(1000)\r\n # self.phone.select('KBD_KEY_BACK', doNotReport=True)\r\n # self.phone.delay(1000)\r\n # self.phone.select('KBD_KEY_BACK', doNotReport=True)\r\n\r\n #elif method == 'swipe':\r\n # self.__backToIdleWithSwipe() # close application with swipe\r\n\r\n #elif method == 'keylock':\r\n # self.phone.select('KBD_KEY_KEYLOCK_TOGGLE', doNotReport=True)\r\n\r\n #else:\r\n # self.phone.fail('Exit: %s is not handled' % method)\r\n\r\n self.phone.delay(2000, False)\r\n\r\n if self._isInIdle():\r\n continue\r\n\r\n mcAfter = core.FW_conf['blackbox'].getCountMotionFrames()\r\n if mcBefore != mcAfter:\r\n #self.phone.comment('Exit: %s caused motion' % method)\r\n\r\n if i >= 2: # keylock?\r\n self.phone.input('201426\\n', mode='pin', delayBetweenPresses=1000)\r\n self.phone.delay(3000)\r\n if self._isInIdle():\r\n continue\r\n\r\n elif i > 3 and mcBefore == mcAfter: # enough tries, crash note might be on the screen\r\n self.phone.comment('try closing crash note')\r\n self.phone.select((260, 490))\r\n self.phone.delay(3000)\r\n mcAfter = core.FW_conf['blackbox'].getCountMotionFrames() # if nothing happened, check ui freeze\r\n if mcBefore == mcAfter:\r\n self._checkUiFreeze()\r\n\r\n if mcAfter == None and self.inIdle: # nothing was done, check freeze if phone has freezed in idle state\r\n self._checkUiFreeze()\r\n return True\r\n\r\n elif self.inIdle:\r\n return True\r\n\r\n self._checkUiFreeze()\r\n return False", "def stop():\n current_event_loop().stop()", "def stop(self):\n self.kc.stop_channels()\n self.km.shutdown_kernel(now=True)\n del self.km", "def stop(self):\n self.idle = True\n # pass", "def stop(self):\r\n self.terminating = True", "def stop_engine(self) -> None:\n self.engine.dispose()\n self.engine_state = \"stopped\"", "def stop(self) -> None:\n ...", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def _platformix_stop(self, context, fake_reply): # TODO: Force parameter\r\n assert fake_reply is None, \"platformix_stop replies shouldn't be faked!\"\r\n\r\n stopping = self._worker.stopping # Store current stopping state\r\n need_stop = self._worker.stopping = self._worker.running or self._worker.starting\r\n self._worker.stopping = True # Set _stopping right in the beginning\r\n\r\n new_thread = False\r\n if not stopping and self._context is not None: # Break startup process if necessary\r\n self._reply_all(self._context[\"reply_to\"], proto_failure(\"interrupted by stop\"), None)\r\n if self._worker.starting:\r\n self._worker.starting = False\r\n self._worker.start_in_progress = False\r\n self._context = None\r\n if not stopping and not need_stop: # If not running and not starting - do nothing more\r\n self._worker.stopping = False\r\n self._reply(context, proto_success(\"already stopped\", \"state\"), None)\r\n return\r\n if stopping: # If were already stopping - update reply list\r\n if context not in self._context[\"reply_to\"]:\r\n new_thread = True\r\n self._context[\"reply_to\"].append(context)\r\n else: # Otherwise initiate context\r\n new_thread = True\r\n 
self._context = {\"action\": \"stop\", \"reply_to\": [context],\r\n \"waiting_for\": [], \"wait_ignore\": []}\r\n self._notify(context, \"received stop signal\")\r\n # TODO: do recursive stop? parent->childs? and call only root platforms stop?\r\n assert self._worker.stopping, \"At this point stopping should be True\"\r\n # Update waiting list\r\n # TODO: also wait those that are depends on this one\r\n self._context[\"waiting_for\"] = [w.name for w in self.host.subplatforms + self.host.depended\r\n if w.running is True or w.stopping is True\r\n and w.name not in self._context[\"wait_ignore\"]]\r\n\r\n # If there is some platforms to wait - notify about this\r\n if self.waiting_count > 0 and new_thread:\r\n self._worker.register_reply_handler(context,\r\n self._platformix_stop_reply_handler, [], {},\r\n timeout=self._worker.stop_max_wait, force=True)\r\n self._notify_all(self._context[\"reply_to\"], \"waiting\")\r\n # If no one left to wait for - do stop at last\r\n elif not self._worker.stop_in_progress and self.waiting_count == 0:\r\n for c in self._context[\"reply_to\"]:\r\n self._worker.unregister_reply_handler(c, True, {}, dont_check=True)\r\n self._worker.running = False\r\n self._worker.stop_in_progress = True\r\n self._notify_all(self._context[\"reply_to\"], \"stopping\")\r\n result = self._worker.stop(self._context[\"reply_to\"])\r\n reply_to = self._context[\"reply_to\"]\r\n self._context = None\r\n assert isinstance(result, ProtocolReply), \"Worker should return result as ProtocolReply instance\"\r\n if result.success:\r\n self._reply_all(reply_to, proto_success(None), None)\r\n else:\r\n self._reply_all(reply_to, result, None)", "def stop(self):\n self.running = False\n self.clear_game_objects()\n print 'GAME STOPPED'", "def shutdown(self):\n cv2.destroyAllWindows()\n rospy.loginfo(\"Stop\")\n # wait for robot to stop before shutdown\n rospy.sleep(5)", "def stop(self):\n self.running = False\n self.cam.stop()\n self.amplifier.stop()\n pass", "def stop(self):\n self.active = False", "def stop(self):\n self.stopped = True", "def stop(self):\r\n pass", "def shutdown():\n\trospy.loginfo(\"Stopping the robot...\")\n\tglobal_vars.move_base.cancel_all_goals()\n\n\tglobal_vars.cmd_vel_pub.publish(Twist())\n\n\trospy.sleep(1)", "def kill(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].stop()", "def ShutDown(self):\n self.stop = True", "def stop (self):\n pass", "def stop (self):\n pass", "def Stop(self) :\n\t\t...", "def handle_stop(_):\n loop.force_unmute()", "def stop_telescope_move():\n\ttarget = send_command('stoptel')", "def stop(self):\n\n self.active = False", "def stop(self):\n\t\tpass", "def _stop(self):\n self._pi.stop()", "def stop(self):\r\n self.stopped = True", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\r\n check_mixer()\r\n sdl.Mix_HaltGroup(self._chunk_tag)", "def stop(self, **kwargs):\n self.turn_off()", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "def stopAllMotors():\n return RoboCaller().call(\"stopAllMotors\", \"void\")", "def stop(self):\n logging.debug(\"footprint/stop entered\")\n logging.info(\"Stopping cloud instances\")\n print \"Stopping machines\"\n for machine in self.machines:\n logging.debug(\"stopping %s\" 
% machine)\n server = self.machines[machine]\n server.stop()\n \n # monitor until all the machines are down\n active_machines = 1\n while active_machines:\n running = 0\n active_machines = 0\n for machine in self.machines:\n server = self.machines[machine]\n try:\n tmp = cs.servers.get(self.machines[machine].id)\n active_machines = 1\n running = running + 1 \n except novaclient.exceptions.NotFound:\n continue\n # if running == 0:\n # break\n time.sleep(10)\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n logging.info(\"Stopping Networks\")\n print\n print \"Stopping networks\"\n \n for network in self.networks:\n logging.debug(\"stopping %s\" % str(network))\n n = self.networks[network]\n n.stop()\n \n while True:\n running = 0\n # print self.networks\n for network in self.networks:\n n = self.networks[network]\n\n try:\n tmp = cn.find(id=n.id)\n running = running + 1\n except pyrax.exceptions.NotFound:\n continue\n if running == 0:\n break\n time.sleep(1)\n sys.stdout.write(\".\")\n sys.stdout.flush()", "def _kill_kernel(self):", "def stop(self):\n self._run = False\n self.IA.stop()", "def stop(self):\n self.running = False\n self.join()", "def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass", "def stop(self):\n print(\"Stopping accessory.\")", "def end_game(self):\n self.game.stop_running()", "def stop(self):\n self.api.stop()", "def _stop_bot(_event):\n pass", "def terminate():\n leds.on()\n time.sleep(1)\n leds.off()\n\n GPIO.cleanup()", "def stop(self):\n\n self._bounce.clear()\n self._dir.clear()\n self._b_names.clear()\n\n self.hardware_interfaces[self._gpio].close()", "def do_stop(self,line):\n print \"Trying to stop the robot\"\n self.robot.tank(0,0)", "def shutdown(self):\n\t\trospy.loginfo(\"Stopping the robot...\")\n\t\tself.cmd_vel.publish(Twist())\n\t\trospy.sleep(1)", "def replace_off_screen_meteors(self):\n for meteor in self.meteor_sprites:\n if meteor.is_off_screen():\n meteor.kill()\n self.spawn_meteors(1)\n del meteor", "def stop(self):\n self.stopped = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def stop(self):\n self.stopped = True", "def stop(self):\n self.stopped = True", "def stop(self):\n print_message_received(\"stop\")\n self.robot.drive_system.stop()", "def stop(self):\n self.send_stop()\n self.join()", "def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)", "def stopall(pidevice, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n pidevice.StopAll(noraise=True)\n waitonready(pidevice, **kwargs) # there are controllers that need some time to halt all axes", "def lysis(self) :\n self.kill()\n return True" ]
[ "0.8364842", "0.75134593", "0.7007069", "0.6861968", "0.6861968", "0.6861968", "0.6861968", "0.6804117", "0.6767265", "0.67454034", "0.6689028", "0.66532433", "0.66270936", "0.65178484", "0.65178484", "0.6510709", "0.64955664", "0.6493137", "0.6477048", "0.6451692", "0.6451692", "0.64327776", "0.64298165", "0.64177316", "0.64177316", "0.64175844", "0.64163077", "0.64163077", "0.639853", "0.6394725", "0.63906026", "0.6384955", "0.6373993", "0.6367556", "0.636125", "0.6354148", "0.6352829", "0.63482374", "0.63376576", "0.6322011", "0.6322011", "0.6322011", "0.6322011", "0.6322011", "0.6319695", "0.63079846", "0.6303186", "0.62725955", "0.6271932", "0.62706095", "0.6261444", "0.62612975", "0.62551874", "0.62512267", "0.6249664", "0.6249664", "0.6245877", "0.6243443", "0.6239858", "0.62367725", "0.6229957", "0.622418", "0.6223741", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.62187374", "0.6205744", "0.62045795", "0.62006176", "0.61940604", "0.6184191", "0.6166996", "0.61492735", "0.61440945", "0.614267", "0.6130997", "0.6129041", "0.6122511", "0.6121007", "0.6118889", "0.6118058", "0.61144", "0.61124784", "0.60994184", "0.6095775", "0.6090756", "0.6090756", "0.60898656", "0.6088857", "0.60829914", "0.6078694", "0.6076054" ]
0.7356
2
Unregisters a platform instance. Can recursively unregister all of the instance's nested platforms. If recursion is not used and the instance still has nested platforms when unregister is called, an exception is raised
def unregister_platform_instance(self, instance, recursive=False):
    platform_to_remove = None
    for k, v in self._platforms.items():
        if v == instance:
            platform_to_remove = k
            break
    if platform_to_remove is None:
        raise ValueError("No platform instance have been found to unregister")
    if len(instance.subplatforms) > 0:
        if recursive:
            for sp in list(instance.subplatforms):
                self.unregister_platform_instance(sp, recursive)
        else:
            raise ValueError("Can't unregister platform with subplatforms. Set recursive to True")
    if instance.parent is not None:
        if instance in instance.parent.subplatforms:
            instance.parent.subplatforms.remove(instance)
            if instance in instance.parent.subplatforms:
                raise IndexError("Instance were registered multiple times in parent's subplatforms list")
        else:
            raise IndexError("Instance is not found in parent's subplatforms list")
    del self._platforms[platform_to_remove]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unregister_platform(self, name, recursive=False):\r\n if name in dict(self._platforms):\r\n self.unregister_platform_instance(self._platforms[name], recursive)", "def unregister_platform(self, platform_uuid):\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)", "def tear_down_registry(registry):\n for reg_adp in list(registry.registeredAdapters()):\n registry.unregisterAdapter(factory=reg_adp.factory,\n required=reg_adp.required,\n provided=reg_adp.provided,\n name=reg_adp.name)\n for reg_ut in list(registry.registeredUtilities()):\n registry.unregisterUtility(component=reg_ut.component,\n provided=reg_ut.provided,\n name=reg_ut.name)", "def deconfigure(self):\n\n self.platform.deconfigure()", "def remove_platforms(project, env_spec_name, platforms):\n return _modify_platforms(project, env_spec_name, additions=[], removals=platforms)", "def unregister(self) -> None:\n for child in self._children:\n child.unregister()\n\n actions_registry.unregister(self)", "def platform_stop(self):\n self.platform.stop()", "def deregister_instance(InstanceId=None):\n pass", "def unregister(self):\r\n self._unregister()", "def on_deregistered(self):\r\n self.unpossessed()\r\n\r\n # Register type information\r\n cls = self.__class__\r\n\r\n subclass_cache = cls._of_subclass_cache\r\n type_cache = cls._of_type_cache\r\n\r\n # Uncache subtypes\r\n for base_cls in cls.__mro__:\r\n instances = subclass_cache[base_cls]\r\n instances.remove(self)\r\n\r\n if not instances:\r\n subclass_cache.pop(base_cls)\r\n\r\n # Uncache the type\r\n instances = type_cache[cls]\r\n instances.remove(self)\r\n\r\n if not instances:\r\n type_cache.pop(cls)\r\n\r\n ReplicableUnregisteredSignal.invoke(target=self)\r\n\r\n super().on_deregistered()", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def unregister(self):\n if self._registered:\n try:\n log.info(\"Attempting to unregister simulator.\")\n self._client.session.delete(\n self._config.workspace,\n session_id=self._registered.session_id,\n )\n\n if (\n self._last_event is not None\n and self._last_event.type == EventType.unregister.value\n and self._last_event.unregister\n ):\n self.unregistered(self._last_event.unregister.reason)\n\n log.info(\"Successfully unregistered simulator.\")\n except Exception as err:\n log.error(\"Unregister simulator failed with error: {}\".format(err))", "def destroy(self):\r\n if self._group:\r\n if self._machine:\r\n self._group.unregisterContainer(self)\r\n self._machine.unregisterContainer(self)\r\n self._machine = None\r\n\r\n self._group = None\r\n\r\n super(Container, self).destroy()\r\n else:\r\n print('container.Container destroy() called multiple times...')", "def 
kill(self):\n\t\tself.kill_subcomponents()\n\t\tself._subcomponents.clear()\n\t\tself.bug_world = None\n\n\t\ttry:\n\t\t\tself.ci.deregister_all()\n\t\texcept:\n\t\t\tpass", "def uninstall():\n log.info(\"Deregistering NukeStudio plug-ins..\")\n pyblish.deregister_host(\"nukestudio\")\n pyblish.deregister_plugin_path(PUBLISH_PATH)\n avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)\n avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)", "def deregister_class(self, name):\n isbase = name in self.base_types\n if not isbase and name not in self.template_types:\n _raise_type_error(name)\n if isbase:\n self.base_types.remove(name)\n else:\n self.template_types.pop(name, None)\n\n self.cython_ctypes.pop(name, None)\n self.cython_cytypes.pop(name, None)\n self.cython_pytypes.pop(name, None)\n self.from_pytypes.pop(name, None)\n self.cpp_types.pop(name, None)\n self.humannames.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_classnames.pop(name, None)\n\n self.clearmemo()", "def destroy(self):\n self.remove()\n for inst in reversed(self.insts[:]):\n uses = inst.uses()\n for tmp_inst in uses:\n if tmp_inst.op_name == 'OpPhi':\n IRError('Not implemented: remove from phi node') # XXX\n inst.destroy()\n self.module = None", "def unregister_node(name):\n if name in NODE_REGISTRY:\n del NODE_REGISTRY[name]", "def cleanUp(self):\r\n for group in self._groups.values():\r\n group.destroy()\r\n\r\n assert len(self._groups) == 0\r\n\r\n for machine in self._machines.copy():\r\n self.destroyMachine(machine)\r\n\r\n assert len(self._machines) == 0\r\n\r\n self.unregisterIAASHook()", "def unregister_router(self, hostname):", "async def reload_platform(self) -> None:", "def unregister(self):\r\n self.__screen.unregister_asteroid(self)", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)", "def __del__(self):\r\n if self.coreInst:\r\n for mod in self.coreInst.keys():\r\n self.coreInst[mod][plug_key].stop()", "def unregister ():\n dsf_prop_export.unregister ()\n dsf_geom_export.unregister ()\n dsf_wm_import.unregister ()\n dsf_pose_import.unregister ()\n dsf_arm_import.unregister ()\n dsf_uvset_import.unregister ()\n dsf_morph_export.unregister ()\n dsf_morph_import.unregister ()\n dsf_geom_import.unregister ()", "def unregister(self, name):\r\n raise NotImplementedError", "def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")", "def reset_registries():\n StepRegistry().clear()\n HookRegistry().reset()\n ExtensionRegistry().reset()", "def unregister(self, parent):\n parent.unregisterCommand('delete')\n parent.unregisterCommand('meshcreated')", "def unregister(self):\n if self in self.board.timed_objects:\n self.board.timed_objects.remove(self)\n del (self)", "def _unregister(self):\r\n if hasattr(self, '_registered') and self._registered:\r\n self._conn.unregisterInterface(self._iTag, self)\r\n self._registered = False", "async def async_will_remove_from_hass(self):\n self._unsub_dispatcher()", "def _stopping(self, sender, **kwargs):\n for v in self._platform_connections.values():\n v.kill()\n\n self._platform_connections.clear()\n\n self.vip.rpc.call(MASTER_WEB, 'unregister_all_agent_routes',\n self.core.identity).get(timeout=30)", "def __del__(self):\n if 
hasattr(self, 'dev'):\n kernel32.CloseHandle(self.dev)", "def unregister(self, parent):\n parent.unregisterCommand('LayerData')\n parent.unregisterCommand('LayerDataDecoded')", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "def unregister(self, model_or_iterable):\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model in self._registry:\n del self._registry[model]", "def _unregister_from_server(self):\n self.remote_controller.unregister()", "def TearDown(self):\n raise NotImplementedError(\n 'No runtime tear down defined for %s' % self.__class__.__name__)", "def deregister_specialization(self, t):\n t = self.canon(t)\n self.cython_ctypes.pop(t, None)\n self.cython_cytypes.pop(t, None)\n self.cython_pytypes.pop(t, None)\n self.cython_cimports.pop(t, None)\n self.cython_cyimports.pop(t, None)\n self.cython_pyimports.pop(t, None)\n self.clearmemo()", "def do_deregistrations(registration_info_modulename):\n\timport importlib\n\tc = importlib.import_module(registration_info_modulename, package=None)\n\tdevices = c.devices\n\tapps = c.apps\n\tlogger.info(\"DE-REGISTER: de-registering all devices from file {}....\".format(registration_info_modulename))\n\tsetup_entities.deregister_entities(devices)\n\tlogger.info(\"DE-REGISTER: de-registering all apps from file {}....\".format(registration_info_modulename))\n\tsetup_entities.deregister_entities(apps)\n\tlogger.info(\"DE-REGISTER: done.\")", "async def unregister(self):\n\t\tif self.group != None:\n\t\t\tif self.group.in_game:\n\t\t\t\tfor team in self.group.game.teams:\n\t\t\t\t\tif self in team:\n\t\t\t\t\t\tself.group.game.teams.remove(team)\n\t\t\t\t\t\tbreak\n\n\t\t\tawait self.group.remove(self)\n\n\t\tshared.users.remove(self)", "def remote_destroy(self):\r\n if self._receivers:\r\n for interface in reduce(set.union, self._receivers.itervalues()):\r\n interface.unregisterProtocol(self)\r\n\r\n self._receivers = None\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None", "def destroy(self):\r\n self._namespace.unregisterNode(self)\r\n self._namespace = None\r\n\r\n super(Node, self).destroy()", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n return await hass.config_entries.async_unload_platforms(\n entry, (entry.options[\"group_type\"],)\n )", "async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):\n\n tasks = []\n\n for platform in KETRA_PLATFORMS:\n tasks.append(hass.config_entries.async_forward_entry_unload(entry, platform))\n\n await asyncio.gather(*tasks)\n\n common_platform = hass.data[DOMAIN][entry.unique_id][\"common_platform\"]\n await common_platform.shutdown()\n\n 
return True", "def reset_global_registry(obj_type: ObjectType):\n global registry\n registry.pop(obj_type, None)", "def unregister(self):\n from arkouda.util import unregister\n\n if not self.registered_name:\n raise RegistrationError(\n \"This item does not have a name and does not appear to be registered.\"\n )\n\n unregister(self.registered_name)\n\n self.registered_name = None # Clear our internal GroupBy object name", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def shutdown(self,):\n if hasattr(self, 'gpu_process_ids'):\n for ppid in self.gpu_process_ids:\n kill_proc_tree(ppid)\n for dispatcher in self.dispatchers:\n silence_function(1, dispatcher.shutdown)\n print('DistributedBackend shutdown.')", "def remote_destroy(self):\r\n for interface in self._interfaces.values():\r\n interface.remote_destroy()\r\n\r\n assert len(self._interfaces) == 0\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterNamespace(self)\r\n self._endpoint = None", "def _update_device_registry(self):\n try:\n if not self._flag_updating_deviceregistry:\n _log.debug(\"Updating device registry\")\n self._flag_updating_deviceregistry = True\n self._sync_connected_platforms()\n unreachable = []\n # Loop over the connections to the registered agent platforms.\n for k, v in self._platform_connections.items():\n _log.debug('updating for {}'.format(k))\n # Only attempt update if we have a connection to the\n # agent instance.\n if v is not None:\n try:\n devices = v.agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM,\n 'get_devices').get(timeout=30)\n\n anon_devices = defaultdict(dict)\n\n # for each device returned from the query to\n # get_devices we need to anonymize the k1 in the\n # anon_devices dictionary.\n for k1, v1 in devices.items():\n _log.debug(\n \"before anon: {}, {}\".format(k1, v1))\n # now we need to do a search/replace on the\n # self._topic_list so that the devices are\n # known as the correct itme nin the tree.\n anon_topic = self._topic_replace_map[k1]\n\n # if replaced has not already been replaced\n if not anon_topic:\n anon_topic = k1\n for sr in self._topic_replace_list:\n anon_topic = anon_topic.replace(\n sr['from'], sr['to'])\n\n self._topic_replace_map[k1] = anon_topic\n\n anon_devices[anon_topic] = v1\n\n _log.debug('Anon devices are: {}'.format(\n anon_devices))\n\n self._registry.update_devices(k, anon_devices)\n except (gevent.Timeout, Unreachable) as e:\n _log.error(\n 'Error getting devices from platform {}'\n .format(k))\n unreachable.append(k)\n for k in unreachable:\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]\n\n finally:\n self._flag_updating_deviceregistry = False", "def unassign_instance(InstanceId=None):\n pass", "def remove(hub: pop.hub.Hub, subname: str):\n if hasattr(hub, subname):\n sub = getattr(hub, subname)\n if hasattr(sub, \"init\"):\n mod = getattr(sub, \"init\")\n if hasattr(mod, \"shutdown\"):\n mod.shutdown()\n hub._remove_subsystem(subname)", "def unregister_factory(self, instance):\r\n to_remove = None\r\n for k, v in self._awaiting.items():\r\n if v[\"instance\"] == instance:\r\n to_remove = k\r\n break\r\n if to_remove is not None:\r\n del self._awaiting[to_remove]", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n return unload_ok", "def 
unregister():\n for name in _registered_ops:\n try:\n torch.onnx.unregister_custom_op_symbolic(name, _OPSET_VERSION)\n except AttributeError:\n # The symbolic_registry module was removed in PyTorch 1.13.\n # We are importing it here for backwards compatibility\n # because unregister_custom_op_symbolic is not available before PyTorch 1.12\n from torch.onnx import symbolic_registry\n\n namespace, kind = name.split(\"::\")\n for version in symbolic_helper._onnx_stable_opsets:\n if version >= _OPSET_VERSION and symbolic_registry.is_registered_op(kind, namespace, version):\n del symbolic_registry._registry[(namespace, version)][kind]", "def unregister_resource(resource):\n del _name_to_resources[resource.name]\n del _name_to_resources[resource.name_plural]\n del _class_to_resources[resource.__class__]", "def unregister(self, model_or_iterable):\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n del self._registry[model]", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):\n unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n opp.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "def _shutdown(self):\n self.logger.debug(\"Unregistering feature modules.\")\n for feature in self._features.values():\n try:\n if hasattr(feature.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(feature.handle.module_unregister())\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering feature module '{feature.name}'.\")\n self.logger.debug(\"Unregistering protocol modules.\")\n for protocol in self._protocols.values():\n try:\n if hasattr(protocol.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(\n protocol.handle.module_unregister(protocol.contexts, self._shutdown_reason)\n )\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering protocol module '{protocol.name}'.\")\n self.eventloop.run_until_complete(self.database.close())\n if len(self._db_connections) > 0:\n self.logger.debug(\"Cleaning up unclosed database connections\")\n for module in list(self._db_connections):\n self.eventloop.run_until_complete(self.database_disconnect(module))", "def deactivate(self):\n if self.parents[0].type == 'dm-multipath':\n devmap = block.getMap(major=self.major, minor=self.minor)\n if devmap:\n try:\n block.removeDeviceMap(devmap)\n except Exception as e:\n raise errors.DeviceTeardownError(\"failed to tear down device-mapper partition %s: %s\" % (self.name, e))\n udev.settle()", "def _sync_connected_platforms(self):\n _log.debug(\"len pa_agents {}\".format(len(self._platform_connections)))\n pakeys = set(self._platform_connections.keys())\n _log.debug(\"Syncing with {}\".format(pakeys))\n for p in self._registry.get_platforms():\n if p.instance_uuid in pakeys:\n pakeys.remove(p.instance_uuid)\n\n for k in pakeys:\n _log.debug('Removing {} from pa_agents'.format(k))\n if k in self._platform_connections.keys():\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]", "def SBMLResolverRegistry_deleteResolerRegistryInstance():\n return _libsbml.SBMLResolverRegistry_deleteResolerRegistryInstance()", "async def disable_platforms(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.demo.COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM\",\n [],\n ):\n yield", "def __del__(self):\n GPIO.cleanup()", "def remote_destroy(self):\r\n 
# TODO: WHY ???\r\n if not self._owner:\r\n return\r\n\r\n self.stop()\r\n\r\n if self._owner:\r\n self._owner.unregisterInterface(self)\r\n self._owner = None", "def unload(self, shutdown=False):\n olof.core.Plugin.unload(self)\n for ss in self.scanSetups.values():\n ss.unload()", "def CleanupInstance(self, instance_name):\n root_dir = self._InstanceDir(instance_name)\n\n if not os.path.exists(root_dir):\n return\n\n if self._IsDirLive(root_dir):\n raise HypervisorError(\"Processes are still using the chroot\")\n\n for mpath in self._GetMountSubdirs(root_dir):\n utils.RunCmd([\"umount\", mpath])\n\n result = utils.RunCmd([\"umount\", root_dir])\n if result.failed:\n msg = (\"Processes still alive in the chroot: %s\" %\n utils.RunCmd(\"fuser -vm %s\" % root_dir).output)\n logging.error(msg)\n raise HypervisorError(\"Can't umount the chroot dir: %s (%s)\" %\n (result.output, msg))", "def unregister(self, module):\n if module in self.modules.values():\n module.unregister_daemon(self)\n delattr(self.state, module.name)\n del self.modules[module.name]\n else:\n raise ValueError(\"this module is not registered\")", "def unregister(self) -> None:\n actions_registry.unregister(self)", "def destroy(self):\n for item in self.resources:\n if item[\"status\"] == REGISTER:\n item[\"resource\"].destroy()\n item[\"status\"] = UNREGISTER", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "def unregister_device(device_id):\n DllHandle = hikvision_global_value.DllHandle\n cur_device_lists = hikvision_global_value.get_device_lists()\n cur_status_device_lists = hikvision_global_value.get_device_status_lists()\n if device_id in cur_device_lists:\n if DllHandle.NET_DVR_Logout(cur_device_lists.get(device_id).session_id):\n logger.debug(\"deviceID:{0} logout success\".format(device_id, cur_device_lists.get(device_id).session_id))\n else:\n logger.error(\"deviceID:{0} logout failed\".format(device_id, cur_device_lists.get(device_id).session_id))\n cur_device_lists.pop(device_id)\n for key in cur_status_device_lists.keys():\n if device_id in key:\n cur_status_device_lists.pop(key)", "def unregister(appname, qualified_name):\n if Registry.has_appname(appname):\n if qualified_name in Registry.monomers[appname]:\n cls = Registry.monomers[appname].pop(qualified_name)\n if qualified_name in sys.modules:\n del sys.modules[qualified_name]\n if hasattr(cls, consts.EXPORTER_NAME):\n getattr(cls, consts.EXPORTER_NAME).unregister(qualified_name)\n importlib.invalidate_caches()\n return bool(cls)\n return False", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n 
hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n del hass.data[DOMAIN]\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "def unregister(self, name):\r\n\r\n if name in self.components:\r\n logger.debug(\"Unregistering Component: %s\", name)\r\n self.stop([name])\r\n del self.components[name]", "def shutdown_system():\n yield None\n active = active_children()\n for child in active:\n child.kill()", "def remove_device(hass: HomeAssistant, mac: str):\n registry = dr.async_get(hass)\n device = registry.async_get_device({(DOMAIN, mac)}, None)\n if device:\n registry.async_remove_device(device.id)", "def remove_machine_from_config(machine):\n cfg = _get_config()\n cfg[\"frontends\"].pop(machine.id, None)\n cfg[\"backends\"].pop(machine.id, None)\n return _set_config(cfg)", "def destroy(self):\r\n self._tidy()\r\n self.stop()\r\n try:\r\n self.opengl.destroy(self)\r\n except:\r\n pass\r\n if self.external_mouse:\r\n try:\r\n self.external_mouse.stop()\r\n except:\r\n pass_\r\n try:\r\n self.mouse.stop()\r\n except:\r\n pass\r\n try:\r\n self.tkwin.destroy()\r\n except:\r\n pass\r\n Display.INSTANCE = None", "async def async_will_remove_from_hass(self):\n await super().async_will_remove_from_hass()\n for service in self._device.device_services:\n service.unsubscribe_callback(self.entity_id)", "def destroy(self):\r\n for interface in self._interfaces.itervalues():\r\n interface.dontNotifyOnDeath(self._interfaceDied)\r\n\r\n self._interfaces = None\r\n\r\n super(Robot, self).destroy()", "def clean_registries(self):\n registry = self.connection.get_finished_registry(name=self.name)\n registry.cleanup()\n registry = self.connection.get_started_registry(name=self.name)\n registry.cleanup()", "def destroy(self):\n\t\tfor team in range(len(self.dots)): #will cycle through each team\n\t\t\tfor i in range(len(self.dots[team])): #will cycle through each member of the team\n\t\t\t\tdot = self.dots[team][i]\n\t\t\t\tdot.removeNode()\n\t\tself.mousePosition.removeNode()\n\t\tself.mapimage.removeNode()\n\t\tself.map.removeNode()", "def do_dbus_unregister(self, connection, object_path):\n logger.debug('::dbus_unregister')\n Gio.Application.do_dbus_unregister(self, connection, object_path)\n if self._dbus_id is not None:\n logger.info('removing DBus registration for name %s', object_path)\n connection.unregister_object(self._dbus_id)\n self._dbus_id = None", "def cleanup_resources(self, kernel_id, restart=False):", "def unregister(provider):\n _DEFAULT_PROVIDER.remove_provider(provider)", "def unregister_publisher(self, hostname):", "def tearDown(self):\n self.event_out.unregister()\n self.pub_current_joints.unregister()\n self.component_output.unregister()", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok" ]
[ "0.8009664", "0.6706928", "0.6103871", "0.5728736", "0.57112795", "0.568827", "0.5645297", "0.5567026", "0.5547235", "0.55387", "0.55205977", "0.5519004", "0.5469608", "0.54393387", "0.5368441", "0.5361209", "0.53546786", "0.53507906", "0.5317688", "0.52915245", "0.5263275", "0.5251263", "0.5239264", "0.5238416", "0.52313554", "0.5203153", "0.5187019", "0.51736414", "0.5169626", "0.5161241", "0.5157384", "0.5152975", "0.5148992", "0.51422966", "0.51396674", "0.513871", "0.513871", "0.51294065", "0.5116223", "0.5095783", "0.5085", "0.5082387", "0.50817454", "0.5079083", "0.5073912", "0.50622356", "0.5061049", "0.5053264", "0.5050554", "0.50406635", "0.50206953", "0.50129074", "0.49991012", "0.49988177", "0.49881616", "0.49643195", "0.49555507", "0.49546492", "0.49540722", "0.49536335", "0.49477637", "0.49453557", "0.49451393", "0.49396154", "0.4939032", "0.49380395", "0.49224657", "0.49203658", "0.49164563", "0.4910666", "0.49103546", "0.4903871", "0.48979932", "0.48934796", "0.4888527", "0.48878643", "0.48878643", "0.4879377", "0.48789996", "0.4878947", "0.4878947", "0.4878947", "0.4878947", "0.48715052", "0.48711014", "0.48585042", "0.4856118", "0.48521927", "0.4851278", "0.48332524", "0.48316246", "0.482748", "0.48224226", "0.48213828", "0.4812415", "0.48107055", "0.4806438", "0.48013267", "0.47954187", "0.47926402" ]
0.8235111
0
Wrapper for the unregister_platform_instance method
def unregister_platform(self, name, recursive=False):
    if name in dict(self._platforms):
        self.unregister_platform_instance(self._platforms[name], recursive)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deregister_instance(InstanceId=None):\n pass", "def unregister_platform_instance(self, instance, recursive=False):\r\n platform_to_remove = None\r\n for k, v in self._platforms.items():\r\n if v == instance:\r\n platform_to_remove = k\r\n break\r\n if platform_to_remove is None:\r\n raise ValueError(\"No platform instance have been found to unregister\")\r\n if len(instance.subplatforms) > 0:\r\n if recursive:\r\n for sp in list(instance.subplatforms):\r\n self.unregister_platform_instance(sp, recursive)\r\n else:\r\n raise ValueError(\"Can't unregister platform with subplatforms. Set recursive to True\")\r\n if instance.parent is not None:\r\n if instance in instance.parent.subplatforms:\r\n instance.parent.subplatforms.remove(instance)\r\n if instance in instance.parent.subplatforms:\r\n raise IndexError(\"Instance were registered multiple times in parent's subplatforms list\")\r\n else:\r\n raise IndexError(\"Instance is not found in parent's subplatforms list\")\r\n del self._platforms[platform_to_remove]", "def unregister_platform(self, platform_uuid):\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)", "def unregister(self, name):\r\n raise NotImplementedError", "def unregister(self):\r\n self._unregister()", "def unassign_instance(InstanceId=None):\n pass", "def unregister_publisher(self, hostname):", "def unregister_factory(self, instance):\r\n to_remove = None\r\n for k, v in self._awaiting.items():\r\n if v[\"instance\"] == instance:\r\n to_remove = k\r\n break\r\n if to_remove is not None:\r\n del self._awaiting[to_remove]", "def unregister(provider):\n _DEFAULT_PROVIDER.remove_provider(provider)", "def unregister(self, target, hostname, listener_type):", "def _unregister(self):\r\n if hasattr(self, '_registered') and self._registered:\r\n self._conn.unregisterInterface(self._iTag, self)\r\n self._registered = False", "def unregister(self, old):\n raise NotImplementedError", "def unregister(target: str) -> bool:\n ...", "def stop(self, context):\n # Unregister the service\n self.__registration.unregister()\n self.__registration = None", "def uninstall(self, provider):\n pass # pragma: no cover", "def __del__(self):\n if hasattr(self, 'dev'):\n kernel32.CloseHandle(self.dev)", "def TearDown(self):\n raise NotImplementedError(\n 'No runtime tear down defined for %s' % self.__class__.__name__)", "def deregister(ext_id):\n extensions.pop(ext_id, None)", "def __del__(self):\r\n if self.coreInst:\r\n for mod in self.coreInst.keys():\r\n self.coreInst[mod][plug_key].stop()", "async def unregister_system_shared_memory(\n self, name: str = ..., headers: dict[str, t.Any] = ...\n ) -> None:", "def unregisterProducer():", "def tear_down_registry(registry):\n for reg_adp in list(registry.registeredAdapters()):\n registry.unregisterAdapter(factory=reg_adp.factory,\n required=reg_adp.required,\n provided=reg_adp.provided,\n name=reg_adp.name)\n for reg_ut in list(registry.registeredUtilities()):\n registry.unregisterUtility(component=reg_ut.component,\n provided=reg_ut.provided,\n name=reg_ut.name)", "def _unregister_from_server(self):\n self.remote_controller.unregister()", "def unregister(self):\r\n self.__screen.unregister_asteroid(self)", "def on_deregistered(self):\r\n self.unpossessed()\r\n\r\n # Register type information\r\n cls = self.__class__\r\n\r\n subclass_cache = cls._of_subclass_cache\r\n type_cache = cls._of_type_cache\r\n\r\n # Uncache subtypes\r\n for base_cls in cls.__mro__:\r\n instances = subclass_cache[base_cls]\r\n 
instances.remove(self)\r\n\r\n if not instances:\r\n subclass_cache.pop(base_cls)\r\n\r\n # Uncache the type\r\n instances = type_cache[cls]\r\n instances.remove(self)\r\n\r\n if not instances:\r\n type_cache.pop(cls)\r\n\r\n ReplicableUnregisteredSignal.invoke(target=self)\r\n\r\n super().on_deregistered()", "def unregister_router(self, hostname):", "def unregister():\n ip.events.unregister('post_run_cell', help_with_exception)", "def platform_stop(self):\n self.platform.stop()", "def __del__(self):\n GPIO.cleanup()", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )", "def SBMLResolverRegistry_deleteResolerRegistryInstance():\n return _libsbml.SBMLResolverRegistry_deleteResolerRegistryInstance()", "def deregister(self):\n self.callback = None", "def test_hook_unregister(self):\n self.assertEqual(list(self.registry), [])\n item = self.DummyItem(123)\n self.hook_cls(self.extension, item)\n\n self.extension.shutdown()\n self.assertEqual(list(self.registry), [])", "def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")", "def unregister(self):\n if self._registered:\n try:\n log.info(\"Attempting to unregister simulator.\")\n self._client.session.delete(\n self._config.workspace,\n session_id=self._registered.session_id,\n )\n\n if (\n self._last_event is not None\n and self._last_event.type == EventType.unregister.value\n and self._last_event.unregister\n ):\n self.unregistered(self._last_event.unregister.reason)\n\n log.info(\"Successfully unregistered simulator.\")\n except Exception as err:\n log.error(\"Unregister simulator failed with error: {}\".format(err))", "def deregister(self):\n if self.debug:\n print(\"%r.deregister()\" % (self,))\n if not self.func:\n if self.debug:\n print(\"already deregistered\")\n return\n try:\n self.tkApp.deletecommand(self.tclFuncName)\n except tkinter.TclError as e:\n if self.debug:\n print(\"deregistering failed: %r\" % (e,))\n pass\n self.func = None", "def deleteResolerRegistryInstance():\n return _libsbml.SBMLResolverRegistry_deleteResolerRegistryInstance()", "def unregister( key ):\n global callbacks\n del callbacks[ key ]", "def __del__(self):\n self.motorStop()\n GPIO.cleanup()", "def unregister_server():\n (code, message) = rest_api.unregister_server(request)\n if (code == 200):\n return message\n else:\n abort(code)", "def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]", "def remote_destroy(self):\r\n # TODO: WHY ???\r\n if not self._owner:\r\n return\r\n\r\n self.stop()\r\n\r\n if self._owner:\r\n self._owner.unregisterInterface(self)\r\n self._owner = None", "def test_remove_vm(self, instance_name):\n self.instances.pop(instance_name)", "def unregister(self, alias):\n delattr(self, alias)", "def cmd_unregister(self, app_name=None):\n rc = self.socket_command_with_project('unregister', app_name)\n return rc", "def unregister_resource(resource):\n del _name_to_resources[resource.name]\n del _name_to_resources[resource.name_plural]\n del _class_to_resources[resource.__class__]", "def unregister(self):\n if self in self.board.timed_objects:\n self.board.timed_objects.remove(self)\n del (self)", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n 
acc_client.kill_instance(sig_name)", "def unregister ():\n dsf_prop_export.unregister ()\n dsf_geom_export.unregister ()\n dsf_wm_import.unregister ()\n dsf_pose_import.unregister ()\n dsf_arm_import.unregister ()\n dsf_uvset_import.unregister ()\n dsf_morph_export.unregister ()\n dsf_morph_import.unregister ()\n dsf_geom_import.unregister ()", "def unregisterSimulationEvent(self, handle):\r\n raise NotImplementedError()", "def unregister(self, parent):\n parent.unregisterCommand('delete')\n parent.unregisterCommand('meshcreated')", "def __del__(self):\n self.shutdown()", "def unregister(self) -> None:\n actions_registry.unregister(self)", "def unreg(self):\n\t\treturn Job(SDK.PrlVm_Unreg(self.handle)[0])", "def __del__(self):\n GPIO.cleanup()\n logging.info('GPIO Cleanup Complete')", "def __delete__(self, obj):\n self._instances.pop(obj, None)", "def _destroy(self):", "def __del__(self):\n self.p.sleep()\n GPIO.cleanup()", "def stop_instance(InstanceId=None, Force=None):\n pass", "def ProcessUnregister(self, msg):\n # Check the management token.\n token, response = self.CheckToken()\n if not token:\n return response\n\n # Unregister the device.\n self.server.UnregisterDevice(token['device_token'])\n\n # Prepare and send the response.\n response = dm.DeviceManagementResponse()\n response.unregister_response.CopyFrom(dm.DeviceUnregisterResponse())\n\n return (200, response)", "def SafeBridgeDestroy(bridge):\n #try:\n # bridge.destroy()\n #except requests.HTTPError as e:\n # if e.response.status_code != requests.codes.not_found:\n # print e.response.status_code\n try:\n bridge.destroy()\n except:\n return", "def unregister(self):\n self._executor.unregister_publisher(self)", "def unsubscribe(self):\r\n self._unregister()", "async def async_will_remove_from_hass(self):\n await super().async_will_remove_from_hass()\n for service in self._device.device_services:\n service.unsubscribe_callback(self.entity_id)", "def __unregister(self, args = []):\n\n try:\n self.__cm.send(p.T_LEAVE,[])\n reply = self.__cm.receive()\n if (reply.type != p.T_ACK):\n raise Exception, \"Unregistering from server was not successfull. Disconnecting anyway!\"\n \n except Exception,e:\n self.__handleError('Leave', e)", "def remote_destroy(self):\r\n if self._receivers:\r\n for interface in reduce(set.union, self._receivers.itervalues()):\r\n interface.unregisterProtocol(self)\r\n\r\n self._receivers = None\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None", "def perform_destroy(self, instance):\n pass", "def unregister(self, name: str, opset: OpsetVersion) -> None:\n if name not in self._registry:\n return\n self._registry[name].remove_custom(opset)", "def __del__(self):\n self.DcMotor.run(Adafruit_MotorHAT.RELEASE) # changed rightMotor to DcMotor , RFMH_2019_02_28\n del self.motorhat", "def _stop(self):\n\n if self._daemon_id:\n pyro_proxy_name = 'PySwitchLib.' 
+ self._daemon_id\n uri = None\n\n try:\n with Pyro4.locateNS(host='localhost', port=self._pyro_ns_port) as ns:\n try:\n uri = ns.lookup(pyro_proxy_name)\n except:\n pass\n\n if uri:\n ns.remove(pyro_proxy_name)\n except:\n pass\n finally:\n ns_daemon_dict = ConfigFileUtil().read(filename=pyswitchlib_ns_daemon_file)\n\n if self._daemon_id in ns_daemon_dict:\n uri = ns_daemon_dict[self._daemon_id]\n del ns_daemon_dict[self._daemon_id]\n\n if len(ns_daemon_dict):\n ConfigFileUtil().write(filename=pyswitchlib_ns_daemon_file, conf_dict=ns_daemon_dict, do_merge=False)\n else:\n try:\n os.unlink(pyswitchlib_ns_daemon_file)\n except:\n pass\n\n if uri:\n try:\n with Pyro4.Proxy(uri) as pyro_proxy:\n pyro_proxy.shutdown()\n pyro_proxy._pyroRelease()\n except:\n pass\n\n super(PySwitchLibApiDaemonRunner, self)._stop()", "def __del__(self):\n self.uninstall_handle_input()", "def deconfigure(self):\n\n self.platform.deconfigure()", "def unregister_standard(self, event_name, callback):\n\n raise NotImplementedError()", "def unregister(self, old):\n if old is not None and old is not Uninitialized:\n try:\n active = self.active.pop(old, None)\n if active is not None:\n for name, type in active:\n getattr(self, type)(old, name, True)\n except TypeError:\n # An error can occur if 'old' is a list or other object for\n # which a weakref cannot be created and used an a key for\n # 'self.active':\n pass", "async def async_will_remove_from_hass(self) -> None:\n self._nobo.deregister_callback(self._after_update)", "def unregister(self):\n idaapi.unregister_action(self.get_name())", "def unreg_event_handler(self, callback_func, userData):\n\t\tcall_sdk_function('PrlVm_UnregEventHandler', self.handle, callback_func, userData)", "def uninstall():\n log.info(\"Deregistering NukeStudio plug-ins..\")\n pyblish.deregister_host(\"nukestudio\")\n pyblish.deregister_plugin_path(PUBLISH_PATH)\n avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)\n avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)", "def deregister_class(self, name):\n isbase = name in self.base_types\n if not isbase and name not in self.template_types:\n _raise_type_error(name)\n if isbase:\n self.base_types.remove(name)\n else:\n self.template_types.pop(name, None)\n\n self.cython_ctypes.pop(name, None)\n self.cython_cytypes.pop(name, None)\n self.cython_pytypes.pop(name, None)\n self.from_pytypes.pop(name, None)\n self.cpp_types.pop(name, None)\n self.humannames.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_classnames.pop(name, None)\n\n self.clearmemo()", "def unregister(self, service_name, service_addr):\n raise NotImplementedError", "def delete_app_instance(self, instance_id):\n self.stop_app_instance(instance_id)\n aic = self.get_app_instances_configs(instance_id=instance_id)\n # invoking on_uninstall callback , so app can run cleanup routines .\n ai_obj = self.get_app_instance_obj(instance_id)\n try:\n if hasattr(ai_obj,\"on_uninstall\"):\n ai_obj.on_uninstall()\n except Exception as ex:\n log.exception(ex)\n if aic:\n self.app_instances_configs.remove(aic[0])\n self.serialize_instances_config()", "def unregister(self, parent):\n parent.unregisterCommand('LayerData')\n parent.unregisterCommand('LayerDataDecoded')", "def shutdown_plugin(self):\n pass", "def deregister_specialization(self, t):\n t = self.canon(t)\n self.cython_ctypes.pop(t, None)\n 
self.cython_cytypes.pop(t, None)\n self.cython_pytypes.pop(t, None)\n self.cython_cimports.pop(t, None)\n self.cython_cyimports.pop(t, None)\n self.cython_pyimports.pop(t, None)\n self.clearmemo()", "def unload_plugin(self, handle):\n ckresult(_dll.FMOD_System_UnloadPlugin(self._ptr, handle))", "def destroy(self):\r\n self._namespace.unregisterParameter(self)\r\n self._namespace = None\r\n\r\n super(Parameter, self).destroy()", "def unregister(url):\n return Client.get_client().unregister(url)", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def _stopping(self, sender, **kwargs):\n for v in self._platform_connections.values():\n v.kill()\n\n self._platform_connections.clear()\n\n self.vip.rpc.call(MASTER_WEB, 'unregister_all_agent_routes',\n self.core.identity).get(timeout=30)", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def cleanup_resources(self, kernel_id, restart=False):", "def msg_unregister(channel, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(UNREGISTER, channel, \"\", version, order)", "def shutdown(self):\n\n raise NotImplementedError", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfClient.KILL_STRING)", "def do_dbus_unregister(self, connection, object_path):\n logger.debug('::dbus_unregister')\n Gio.Application.do_dbus_unregister(self, connection, object_path)\n if self._dbus_id is not None:\n logger.info('removing DBus registration for name %s', object_path)\n connection.unregister_object(self._dbus_id)\n self._dbus_id = None", "def destroy(self):\n self.unbindAllWidgets()\n self.__func = None\n self.__instance = None", "def remote_destroy(self):\r\n if self._registered:\r\n try:\r\n rospy.delete_param(self._name)\r\n except rospy.ROSException:\r\n pass\r\n\r\n self._registered = False\r\n\r\n if self._owner:\r\n self._owner.unregisterParameter(self)\r\n self._owner = None", "def uninstall_mac_processor(interface, mac_profile):\n pass", "def destroy(self):\n self.remove()\n for inst in reversed(self.insts[:]):\n uses = inst.uses()\n for tmp_inst in uses:\n if tmp_inst.op_name == 'OpPhi':\n IRError('Not implemented: remove from phi node') # XXX\n inst.destroy()\n self.module = None", "def _plugin_stop(handle):\n GPIO.cleanup()\n _LOGGER.info('MAX31865 (async) Disconnected.')" ]
[ "0.7437885", "0.722585", "0.71067107", "0.6713975", "0.67006856", "0.65715706", "0.6490679", "0.6454682", "0.6433686", "0.64128125", "0.6409299", "0.6344683", "0.6314368", "0.6292651", "0.6288555", "0.62754303", "0.62084085", "0.6114578", "0.609524", "0.6088872", "0.6082098", "0.6078305", "0.6077322", "0.60758", "0.6066138", "0.60634255", "0.6045683", "0.6025446", "0.6022367", "0.60037965", "0.60024863", "0.5965115", "0.59352934", "0.59171873", "0.5907156", "0.589513", "0.58668625", "0.58377266", "0.58339965", "0.58291566", "0.58241445", "0.5818526", "0.58183813", "0.5811532", "0.5805835", "0.57944506", "0.57938474", "0.5773655", "0.5770684", "0.5747017", "0.5741251", "0.5739098", "0.57325923", "0.5720166", "0.57170534", "0.57054704", "0.5702002", "0.56981814", "0.56980133", "0.5693464", "0.5692399", "0.5689923", "0.5686703", "0.5681853", "0.5674596", "0.5671023", "0.56658036", "0.56645834", "0.56549394", "0.5644654", "0.56414914", "0.5636201", "0.5623834", "0.5622692", "0.5622042", "0.56214416", "0.56144416", "0.56127244", "0.56109565", "0.56038296", "0.5603709", "0.56000113", "0.55866325", "0.55851185", "0.55796033", "0.5578506", "0.5571449", "0.5555727", "0.5553324", "0.5547616", "0.554482", "0.55402493", "0.5535819", "0.55351394", "0.55336446", "0.55311435", "0.5528607", "0.55268997", "0.55205965", "0.55162126" ]
0.6979844
3
Unregisters platforms factory. Usually happens after successful platform registration
def unregister_factory(self, instance):\r\n    to_remove = None\r\n    for k, v in self._awaiting.items():\r\n        if v["instance"] == instance:\r\n            to_remove = k\r\n            break\r\n    if to_remove is not None:\r\n        del self._awaiting[to_remove]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unregister_platform(self, name, recursive=False):\r\n if name in dict(self._platforms):\r\n self.unregister_platform_instance(self._platforms[name], recursive)", "def tear_down_registry(registry):\n for reg_adp in list(registry.registeredAdapters()):\n registry.unregisterAdapter(factory=reg_adp.factory,\n required=reg_adp.required,\n provided=reg_adp.provided,\n name=reg_adp.name)\n for reg_ut in list(registry.registeredUtilities()):\n registry.unregisterUtility(component=reg_ut.component,\n provided=reg_ut.provided,\n name=reg_ut.name)", "def unregister_platform(self, platform_uuid):\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)", "def deconfigure(self):\n\n self.platform.deconfigure()", "def remove_platforms(project, env_spec_name, platforms):\n return _modify_platforms(project, env_spec_name, additions=[], removals=platforms)", "def uninstall():\n log.info(\"Deregistering NukeStudio plug-ins..\")\n pyblish.deregister_host(\"nukestudio\")\n pyblish.deregister_plugin_path(PUBLISH_PATH)\n avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)\n avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)", "def unregister_platform_instance(self, instance, recursive=False):\r\n platform_to_remove = None\r\n for k, v in self._platforms.items():\r\n if v == instance:\r\n platform_to_remove = k\r\n break\r\n if platform_to_remove is None:\r\n raise ValueError(\"No platform instance have been found to unregister\")\r\n if len(instance.subplatforms) > 0:\r\n if recursive:\r\n for sp in list(instance.subplatforms):\r\n self.unregister_platform_instance(sp, recursive)\r\n else:\r\n raise ValueError(\"Can't unregister platform with subplatforms. Set recursive to True\")\r\n if instance.parent is not None:\r\n if instance in instance.parent.subplatforms:\r\n instance.parent.subplatforms.remove(instance)\r\n if instance in instance.parent.subplatforms:\r\n raise IndexError(\"Instance were registered multiple times in parent's subplatforms list\")\r\n else:\r\n raise IndexError(\"Instance is not found in parent's subplatforms list\")\r\n del self._platforms[platform_to_remove]", "def removeAllFactories(self) -> None:\n ...", "def reset_registries():\n StepRegistry().clear()\n HookRegistry().reset()\n ExtensionRegistry().reset()", "def unregister ():\n dsf_prop_export.unregister ()\n dsf_geom_export.unregister ()\n dsf_wm_import.unregister ()\n dsf_pose_import.unregister ()\n dsf_arm_import.unregister ()\n dsf_uvset_import.unregister ()\n dsf_morph_export.unregister ()\n dsf_morph_import.unregister ()\n dsf_geom_import.unregister ()", "async def reload_platform(self) -> None:", "def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")", "def cleanUp(self):\r\n for group in self._groups.values():\r\n group.destroy()\r\n\r\n assert len(self._groups) == 0\r\n\r\n for machine in self._machines.copy():\r\n self.destroyMachine(machine)\r\n\r\n assert len(self._machines) == 0\r\n\r\n self.unregisterIAASHook()", "def platform_stop(self):\n self.platform.stop()", "def unregister(self):\r\n self._unregister()", "def uninitializePlugin(obj):\n print(\"Removing Azure Batch plug-in\")\n plugin = MFnPlugin(obj)\n plugin.deregisterCommand(cmd_name)\n try:\n mel.eval('deleteShelfTab %s' % \"AzureBatch\")\n except:\n print(\"Couldn't delete shelf\")\n global fMayaExitingCB\n if (fMayaExitingCB is not None):\n OpenMaya.MSceneMessage.removeCallback(fMayaExitingCB)\n if cmds.window(\"AzureBatch\", exists=1):\n 
cmds.deleteUI(\"AzureBatch\")\n AzureBatchSetup.remove_environment()\n print(\"Finished clearing up all Azure Batch components\")", "def tearDown(self):\n self.framework.stop()\n FrameworkFactory.delete_framework()", "def destroy(self):\r\n for interface in self._interfaces.itervalues():\r\n interface.dontNotifyOnDeath(self._interfaceDied)\r\n\r\n self._interfaces = None\r\n\r\n super(Robot, self).destroy()", "def unregister(self):\n if self._registered:\n try:\n log.info(\"Attempting to unregister simulator.\")\n self._client.session.delete(\n self._config.workspace,\n session_id=self._registered.session_id,\n )\n\n if (\n self._last_event is not None\n and self._last_event.type == EventType.unregister.value\n and self._last_event.unregister\n ):\n self.unregistered(self._last_event.unregister.reason)\n\n log.info(\"Successfully unregistered simulator.\")\n except Exception as err:\n log.error(\"Unregister simulator failed with error: {}\".format(err))", "def unregister(provider):\n _DEFAULT_PROVIDER.remove_provider(provider)", "def do_deregistrations(registration_info_modulename):\n\timport importlib\n\tc = importlib.import_module(registration_info_modulename, package=None)\n\tdevices = c.devices\n\tapps = c.apps\n\tlogger.info(\"DE-REGISTER: de-registering all devices from file {}....\".format(registration_info_modulename))\n\tsetup_entities.deregister_entities(devices)\n\tlogger.info(\"DE-REGISTER: de-registering all apps from file {}....\".format(registration_info_modulename))\n\tsetup_entities.deregister_entities(apps)\n\tlogger.info(\"DE-REGISTER: done.\")", "def unload(self, shutdown=False):\n olof.core.Plugin.unload(self)\n for ss in self.scanSetups.values():\n ss.unload()", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def tearDown(self):\n # Stop the framework\n pelix.framework.FrameworkFactory.delete_framework()\n\n self.framework = None\n self.dispatcher = None", "async def disable_platforms(hass: HomeAssistant) -> None:\n with patch(\n \"homeassistant.components.demo.COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM\",\n [],\n ):\n yield", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "async def async_process_hardware_platforms(hass: HomeAssistant) -> None:\n hass.data[DOMAIN][\"hardware_platform\"] = {}\n\n await async_process_integration_platforms(hass, DOMAIN, _register_hardware_platform)", "def clean_up_factories():\n for name, obj in inspect.getmembers(factories):\n if inspect.isclass(obj) and \"factory\" in name.lower():\n obj.reset_sequence(0)", "def __del__(self):\n GPIO.cleanup()", "def 
tear_down_cleanup(self):\n self.hass.stop()", "def unregister(self):\r\n self.__screen.unregister_asteroid(self)", "def uninstall(self, provider):\n pass # pragma: no cover", "def _shutdown(self):\n self.logger.debug(\"Unregistering feature modules.\")\n for feature in self._features.values():\n try:\n if hasattr(feature.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(feature.handle.module_unregister())\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering feature module '{feature.name}'.\")\n self.logger.debug(\"Unregistering protocol modules.\")\n for protocol in self._protocols.values():\n try:\n if hasattr(protocol.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(\n protocol.handle.module_unregister(protocol.contexts, self._shutdown_reason)\n )\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering protocol module '{protocol.name}'.\")\n self.eventloop.run_until_complete(self.database.close())\n if len(self._db_connections) > 0:\n self.logger.debug(\"Cleaning up unclosed database connections\")\n for module in list(self._db_connections):\n self.eventloop.run_until_complete(self.database_disconnect(module))", "def cleanup_resources_and_bundles_in_registry(context=None):\n registry = getUtility(IRegistry)\n\n # We need to upgrade staticresources first.\n # Otherwise the bundles we delete will come back to haunt us\n context.upgradeProfile(\"plone.staticresources:default\", dest=\"208\")\n\n # Also reregister the newly defined plone.session bundle if it is installed.\n installer = get_installer(context)\n if installer.is_profile_installed(\"plone.session:default\"):\n loadMigrationProfile(\n context, \"profile-plone.session:default\", steps=[\"plone.app.registry\"]\n )\n\n # Remove obsolete records from the registry\n removed_keys = [\n \"plone.resources/\",\n \"plone.lessvariables\",\n \"plone.resources.configjs\",\n \"plone.resources.last_legacy_import\",\n \"plone.resources.less-modify\",\n \"plone.resources.less-variables\",\n \"plone.resources.lessc\",\n \"plone.resources.requirejs\",\n \"plone.resources.rjs\",\n ]\n to_delete = []\n for key in registry.records:\n for removed_key in removed_keys:\n if key.startswith(removed_key):\n to_delete.append(key)\n logger.debug(f\"Removed record {key}\")\n break\n for key in to_delete:\n del registry.records[key]\n logger.info(f\"Removed {len(to_delete)} records from registry\")\n\n # make sure they are all gone\n try:\n from Products.CMFPlone.interfaces import IResourceRegistry\n\n records = registry.collectionOfInterface(\n IResourceRegistry, prefix=\"plone.resources\", check=False\n )\n assert len(records) == 0\n except ImportError:\n # the interface may be removed at some point\n pass\n\n # Remove obsolete bundles and reload the default bundles\n # The default bundles are reloaded in v60/profiles/to6003/registry.xml\n removed_bundles = [\n \"filemanager\",\n \"plone-base\",\n \"plone-datatables\",\n \"plone-editor-tools\",\n \"plone-fontello\",\n \"plone-glyphicons\",\n \"plone-moment\",\n \"plone-tinymce\",\n \"resourceregistry\",\n \"thememapper\",\n \"plone-legacy\",\n \"plone-logged-in\",\n \"plone-session-pseudo-css\",\n \"plone-session-js\",\n ]\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False\n )\n for name in removed_bundles:\n if name in bundles:\n del bundles[name]\n logger.info(f\"Removed bundle {name}\")\n\n # Remove deprecated bundle fields\n removed_fields = [\n 
\"compile\",\n \"develop_css\",\n \"develop_javascript\",\n \"last_compilation\",\n \"merge_with\",\n \"resources\",\n \"stub_js_modules\",\n \"conditionalcomment\",\n ]\n to_delete = []\n for key in registry.records:\n for removed_field in removed_fields:\n if key.startswith(\"plone.bundles/\") and key.endswith(removed_field):\n to_delete.append(key)\n logger.debug(f\"Removed record {key}\")\n for key in to_delete:\n del registry.records[key]\n logger.info(f\"Removed {len(to_delete)} deprecated bundle attributes from registry\")\n\n # local default controlpanel icons\n loadMigrationProfile(\n context, \"profile-Products.CMFPlone:plone\", steps=[\"controlpanel\"]\n )\n if installer.is_profile_installed(\"plone.app.theming:default\"):\n loadMigrationProfile(\n context, \"profile-plone.app.theming:default\", steps=[\"controlpanel\"]\n )\n if installer.is_profile_installed(\"plone.app.registry:default\"):\n loadMigrationProfile(\n context, \"profile-plone.app.registry:default\", steps=[\"controlpanel\"]\n )\n if installer.is_profile_installed(\"plone.app.caching:default\"):\n loadMigrationProfile(\n context, \"profile-plone.app.caching:default\", steps=[\"controlpanel\"]\n )\n if installer.is_profile_installed(\"Products.CMFPlacefulWorkflow:base\"):\n loadMigrationProfile(\n context, \"profile-Products.CMFPlacefulWorkflow:base\", steps=[\"controlpanel\"]\n )", "def clean():\n GPIO.cleanup()", "def __del__(self):\n if hasattr(self, 'dev'):\n kernel32.CloseHandle(self.dev)", "def _unregister(self):\r\n if hasattr(self, '_registered') and self._registered:\r\n self._conn.unregisterInterface(self._iTag, self)\r\n self._registered = False", "def unregister(self, name):\r\n raise NotImplementedError", "def clean_registries(self):\n registry = self.connection.get_finished_registry(name=self.name)\n registry.cleanup()\n registry = self.connection.get_started_registry(name=self.name)\n registry.cleanup()", "def destroy(self):\n for item in self.resources:\n if item[\"status\"] == REGISTER:\n item[\"resource\"].destroy()\n item[\"status\"] = UNREGISTER", "def remote_destroy(self):\r\n for interface in self._interfaces.values():\r\n interface.remote_destroy()\r\n\r\n assert len(self._interfaces) == 0\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterNamespace(self)\r\n self._endpoint = None", "def on_shutdown(self):\n self.factory.core.master_local_token.cleanup()\n self.api.stop()", "def __del__(self):\n GPIO.cleanup()\n logging.info('GPIO Cleanup Complete')", "def unload_all():\n module_utils.unload_package_modules(__name__)", "def destroy(self):\n\n self.cmapTexture.destroy()\n\n for tex in (self.modulateTexture,\n self.clipTexture,\n self.colourTexture):\n tex.deregister(self.name)\n glresources.delete(tex.getTextureName())\n\n self.removeListeners()\n self.deregisterAuxImage('modulate')\n self.deregisterAuxImage('clip')\n self.deregisterAuxImage('colour')\n\n self.modulateTexture = None\n self.clipTexture = None\n self.colourTexture = None\n self.modulateImage = None\n self.clipImage = None\n self.colourImage = None\n self.modulateOpts = None\n self.clipOpts = None\n self.colourOpts = None\n\n glimageobject.GLImageObject.destroy(self)", "def TearDown(self):\n raise NotImplementedError(\n 'No runtime tear down defined for %s' % self.__class__.__name__)", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)", "def _teardown_modules(self):\r\n if self._state != self.SHUTDOWN:\r\n raise 
self.Error('Expected application to be in SHUTDOWN state!')\r\n module_registry = AppModule.module_registry()\r\n for module_label in reversed(self._init_modules):\r\n assert module_label in module_registry\r\n module = module_registry[module_label]\r\n self._debug_log('Running exit function for %s (%s)' % (module_label, module.description()))\r\n try:\r\n module.teardown_function()\r\n except AppModule.Unimplemented:\r\n pass", "def cleanup(self):\n print('GPIO cleanup...')\n GPIO.cleanup()", "def tear_down_ui():\n from .gen import icons_rc\n from .contextmenus import unregister_context_menu\n\n close_all_editors()\n unregister_context_menu()\n icons_rc.qCleanupResources()", "def deregister_class(self, name):\n isbase = name in self.base_types\n if not isbase and name not in self.template_types:\n _raise_type_error(name)\n if isbase:\n self.base_types.remove(name)\n else:\n self.template_types.pop(name, None)\n\n self.cython_ctypes.pop(name, None)\n self.cython_cytypes.pop(name, None)\n self.cython_pytypes.pop(name, None)\n self.from_pytypes.pop(name, None)\n self.cpp_types.pop(name, None)\n self.humannames.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_classnames.pop(name, None)\n\n self.clearmemo()", "def tearDown(self):\n self.pose_in_pub.unregister()\n self.event_out.unregister()\n self.component_output.unregister()", "def unregister_router(self, hostname):", "def __del__(self):\r\n if self.coreInst:\r\n for mod in self.coreInst.keys():\r\n self.coreInst[mod][plug_key].stop()", "def cleanup_dpdk_framework(node, if1, if2):\n if node[u\"type\"] == NodeType.DUT:\n pci_address1 = Topology.get_interface_pci_addr(node, if1)\n pci_address2 = Topology.get_interface_pci_addr(node, if2)\n # We are not supporting more than one driver yet.\n nic_driver = Topology.get_interface_driver(node, if1)\n\n command = f\"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}\"\\\n f\"/entry/cleanup_dpdk.sh \" \\\n f\"{nic_driver} {pci_address1} {pci_address2}\"\n message = u\"Cleanup the DPDK failed!\"\n exec_cmd_no_error(node, command, timeout=1200, message=message)", "def uninstall_window_hook(self):\n\n\t\tif cls._interface:\n\t\t\tcls._interface.uninstall_window_hook()\n\t\telse:\n\t\t\traise NotImplementedError('Unsupported platform')", "def cleanResources(self, shuttingDown = False ):\r\n try:\r\n if self.initialized == False: return\r\n LOG(\"Cleaning up driver\")\r\n DriverManager.instance().cleanup( shutdown = shuttingDown )\r\n self.initialized = False\r\n except SpellException,ex:\r\n traceback.print_exc( file = sys.stderr )\r\n LOG(\"Could not cleanup driver: \" + repr(ex), LOG_ERROR)\r\n self.errorMessage = ex.message\r\n self.errorReason = ex.reason\r\n if not shuttingDown: raise ex", "def tearDown(self):\n self.event_out.unregister()\n self.pub_current_joints.unregister()\n self.component_output.unregister()", "def _unregister_from_server(self):\n self.remote_controller.unregister()", "def close_registrar(self):\n self.registrar.destroy()", "def unload_plugin(self):\n pass", "def cleanup(self):\n self.sensor.cleanup()", "def remote_destroy(self):\r\n if self._receivers:\r\n for interface in reduce(set.union, self._receivers.itervalues()):\r\n interface.unregisterProtocol(self)\r\n\r\n self._receivers = None\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None", "def 
reset_global_registry(obj_type: ObjectType):\n global registry\n registry.pop(obj_type, None)", "def destroy(self):\r\n self._tidy()\r\n self.stop()\r\n try:\r\n self.opengl.destroy(self)\r\n except:\r\n pass\r\n if self.external_mouse:\r\n try:\r\n self.external_mouse.stop()\r\n except:\r\n pass_\r\n try:\r\n self.mouse.stop()\r\n except:\r\n pass\r\n try:\r\n self.tkwin.destroy()\r\n except:\r\n pass\r\n Display.INSTANCE = None", "def teardown(self):\n self.bot.unregister_commands(tag=self)", "def shutdown(self):\n for review_ui in self.review_uis:\n unregister_ui(review_ui)", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Create xyzrgb from Mosaic/DSM'),\n action)\n self.iface.removeToolBarIcon(action)", "def cleanup_resources(self, kernel_id, restart=False):", "def destroy_ui_model_instances():\n from .core import BlueprintUIModel\n\n BlueprintUIModel.delete()", "def teardown(self) -> None:\n self._unregister_service()\n self._unregister_agent()", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&EU Mapper'),\n action)\n self.iface.removeToolBarIcon(action)", "def cleanup(self):\n GPIO.cleanup(self.chanlist)", "def unload(self):\n for action in self.actions:\n self.iface.removePluginRasterMenu(\n self.tr(u'&Hybriddekning'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unregister_publisher(self, hostname):", "def tearDown(self):\n self.platform = None\n self.tag_outdoor = None", "def gpio_cleanup(self):\n self.motor.gpio_cleanup()", "def destroy():\n pass", "def destroy():\n pass", "def prepareUninstall(self):\n e5App().unregisterPluginObject(pluginTypename)", "def cleanup_revpi(self):\n self.rpi.core.a1green.value = False\n self.rpi.core.a1red.value = False\n self.rpi.io.main_relay.value = False\n self.rpi.io.relay_1.value = False\n self.rpi.io.relay_2.value = False\n self.rpi = revpimodio2.RevPiModIO(autorefresh=False)\n self.opc_server.stop()\n print(\"exit\")\n # self.master.destroy()\n exit(1)", "async def async_will_remove_from_hass(self):\n await super().async_will_remove_from_hass()\n for service in self._device.device_services:\n service.unsubscribe_callback(self.entity_id)", "async def async_reload_integration_platforms(\n hass: HomeAssistant, integration_name: str, integration_platforms: Iterable[str]\n) -> None:\n try:\n unprocessed_conf = await conf_util.async_hass_config_yaml(hass)\n except HomeAssistantError as err:\n _LOGGER.error(err)\n return\n\n tasks = [\n _resetup_platform(\n hass, integration_name, integration_platform, unprocessed_conf\n )\n for integration_platform in integration_platforms\n ]\n\n await asyncio.gather(*tasks)", "def deregister(self):\n if self.debug:\n print(\"%r.deregister()\" % (self,))\n if not self.func:\n if self.debug:\n print(\"already deregistered\")\n return\n try:\n self.tkApp.deletecommand(self.tclFuncName)\n except tkinter.TclError as e:\n if self.debug:\n print(\"deregistering failed: %r\" % (e,))\n pass\n self.func = None", "def stopFactory(self):\n self.watchdog.stop()\n super().stopFactory()", "def destroy(self):\n for window in self.windows:\n try:\n destroy_window(window)\n except:\n pass", "def unregister_finders():\r\n\r\n global __PREVIOUS_FINDER\r\n if not __PREVIOUS_FINDER:\r\n return\r\n\r\n pkg_resources.register_finder(zipimport.zipimporter, __PREVIOUS_FINDER)\r\n _remove_finder(pkgutil.ImpImporter, find_wheels_on_path)\r\n\r\n if importlib_bootstrap is not None:\r\n 
_remove_finder(importlib_bootstrap.FileFinder, find_wheels_on_path)\r\n\r\n __PREVIOUS_FINDER = None", "def __del__(self):\n self.uninstall_handle_input()", "def clearScanSetups(self, value=None):\n for ss in self.scanSetups.values():\n ss.unload()\n self.scanSetups = {}", "def unregisterProducer():", "def unload(self):\n self.removeSketchesAction()\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr('&Red Layer'),\n action)\n self.iface.removeToolBarIcon(action)\n del self.toolbar", "def _installed_apps_remove(self):\n config.remove_plugin(self.module_path)", "def destroy(self):\n self.unbindAllWidgets()\n self.__func = None\n self.__instance = None", "def Destroy(self):\n self.Disconnected()\n self._io_loop.remove_handler(self._fd)\n os.close(self._fd)\n self._gadget = None\n self._fd = None", "def unload(self):\n \n ImajnetLog.info(\"** UNLOAD QGisImajnetPlugin\")\n\n if self.pluginIsActive:\n self.onClosePlugin(False)\n \n for action in self.actions:\n self.iface.removePluginWebMenu(\n self.tr(u'&Imajnet'),\n action)\n self.iface.removeToolBarIcon(action)\n \n \n # remove the toolbar\n del self.toolbar\n \n if self.aboutDialog :\n self.aboutDialog = None\n #del self.aboutDialog\n \n self.pluginLayerRegistry.removePluginLayerType(ImajnetOpenlayersLayer.LAYER_TYPE)\n #ImajnetLog.error(\"** UNLOADED\")\n ImajnetLog.close()", "def destroy(self):\r\n for interface in self._interfaces.copy():\r\n interface.destroy()\r\n\r\n assert len(self._interfaces) == 0\r\n\r\n self._endpoint.unregisterNamespace(self)\r\n self._endpoint = None\r\n\r\n super(Namespace, self).destroy()", "def shutdown(self):\n for param in [self._device_name_param, self._lm_param, self._dic_param]:\n if rospy.has_param(param):\n rospy.delete_param(param)\n\n \"\"\" Shutdown the GTK thread. \"\"\"\n gtk.main_quit()", "def __del__(self):\n self.motorStop()\n GPIO.cleanup()", "def destroy(self):\r\n if self._group:\r\n if self._machine:\r\n self._group.unregisterContainer(self)\r\n self._machine.unregisterContainer(self)\r\n self._machine = None\r\n\r\n self._group = None\r\n\r\n super(Container, self).destroy()\r\n else:\r\n print('container.Container destroy() called multiple times...')", "def destroyBridge(self, name):\r\n return self._ref.callRemote('destroyBridge', name)" ]
[ "0.74393696", "0.6878564", "0.6781992", "0.67377704", "0.6643691", "0.65110755", "0.63454175", "0.6340024", "0.6208858", "0.62054634", "0.61191297", "0.60082805", "0.6006337", "0.59341156", "0.59182423", "0.58983845", "0.5870383", "0.5865489", "0.586217", "0.5841826", "0.5835734", "0.5814838", "0.57546085", "0.5744748", "0.574073", "0.5692978", "0.56892014", "0.5686241", "0.5665445", "0.5658949", "0.5656606", "0.5638658", "0.56312144", "0.56174314", "0.55965656", "0.5588861", "0.55645365", "0.5550046", "0.5543728", "0.5530921", "0.5526612", "0.54920465", "0.54919666", "0.54784256", "0.547799", "0.5474175", "0.54680693", "0.5467551", "0.5462832", "0.5454812", "0.5450305", "0.54376155", "0.54328924", "0.5426004", "0.54163295", "0.5411415", "0.5402127", "0.5401875", "0.53933215", "0.5392553", "0.5381746", "0.5376642", "0.53687704", "0.5366198", "0.5359785", "0.5355408", "0.5353066", "0.53514725", "0.5346277", "0.5344452", "0.5341615", "0.53410923", "0.5340652", "0.53390044", "0.5335477", "0.5321941", "0.53158426", "0.531533", "0.531533", "0.53106266", "0.5310563", "0.5304145", "0.5296783", "0.5289638", "0.52830917", "0.5279959", "0.5275678", "0.5271566", "0.52697", "0.5262316", "0.52585775", "0.52538764", "0.5251853", "0.5251794", "0.52515656", "0.5245449", "0.5244646", "0.524082", "0.5238581", "0.52371687" ]
0.55174696
41
Says whether or not specified platform (by instance) is subscribed to channel
def is_subscribed(self, inst, channel):\r\n    if channel not in self._channels:\r\n        return False\r\n    return inst in self._channels[channel].subscribers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_channel(self):\n return True", "def single_channel():\n return True", "def is_event_channel(channel: discord.TextChannel) -> bool:\n return get_active_feature(channel) == ActivationState.EVENT", "def check_event_channel(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.EVENT:\n raise error_handling.InactiveChannelError()\n else:\n return True", "def channel_connected(self):\n self.update_status()", "def onSubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount += 1\n\t\treturn True", "def event_detected(self, channel):\n self._check_mode()\n # mit `bool()` kann aus einer Zahl ohne grossen Aufwand ein bool (Wahrheitswert) erstellt werden\n # dabei werden alle Zahlen zu `True`, nur 0 wird zu `False`\n return bool(randint(0, 1))", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "async def is_listened(self, ctx, id: int = None):\n if id is None:\n id = ctx.channel.id\n if id in self.etrigs['channels']:\n await ctx.send(f'Channel {self.bot.get_channel(id).mention} *is* being listened to for etriggers')\n else:\n await ctx.send(f'Channel {self.bot.get_channel(id).mention} *is **not*** being listened to for etriggers')", "def is_on(self):\n return self.car.data[DATA_PLUGGED_IN]", "def is_party_channel(channel: discord.TextChannel) -> bool:\n return get_active_feature(channel) == ActivationState.PARTY", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def is_active(self, channel):\n return bool(int(self.bus.ask('sel:%s?' % channel)))", "def vc_only():\n\n async def check(ctx):\n if ctx.guild and ctx.author.voice:\n if not ctx.guild.me.voice or ctx.author.voice.channel == ctx.guild.me.voice.channel:\n return True\n await ctx.reply(\"I'm already in another voice channel!\")\n return False\n await ctx.reply('You must join a server voice channel first!')\n return False\n\n return commands.check(check)", "def is_incall_connected(self) -> bool:", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "def is_day(self, day_channel: discord.TextChannel, player: discord.Role):\n\n if day_channel.overwrites_for(player).send_messages:\n return True", "def test_is_subscribed(self):\n manager_root = ISubscriptionManager(self.root)\n manager_root.subscribability = SUBSCRIBABLE\n manager_root.subscribe('[email protected]')\n self.assertEqual(manager_root.is_subscribed('[email protected]'), True)\n self.assertEqual(manager_root.is_subscribed('[email protected]'), False)\n\n manager = ISubscriptionManager(self.root.folder)\n self.assertEqual(manager.is_subscribed('[email protected]'), True)\n self.assertEqual(manager.is_subscribed('[email protected]'), False)\n\n # If you turn off subscription off at the folder level, you\n # are no longer subscribed\n manager.subscribability = NOT_SUBSCRIBABLE\n self.assertEqual(manager.is_subscribed('[email protected]'), False)\n\n # That didn't changed anything on the parent\n self.assertEqual(manager_root.is_subscribed('[email protected]'), True)", "def is_channel(target, channel_prefixes='!&#+'):\n return len(target) > 1 and target[0] in channel_prefixes", "def test_add_channel_starts_loop(self):\n self.notifier.add_channel(Mock())\n self.notifier_start_mock.assert_called_once()", "def __bool__(self) -> bool:\n return self._connected_event.is_set()", "def is_connected(self):\n return self.connected_channel is not None", "def subscribe(self, client, 
api_key, channel):\n if channel not in self.clients:\n return False\n pair = (client, api_key)\n if pair in self.clients[channel]:\n return False\n\n self.clients[channel].append(pair)\n return True", "def check_party_channel(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.PARTY:\n raise error_handling.InactiveChannelError()\n else:\n return True", "def _subscribed(self, account_id):\n sql = \"\"\"SELECT 1 FROM hive_subscriptions\n WHERE community_id = :community_id\n AND account_id = :account_id\"\"\"\n return bool(DB.query_one(\n sql, community_id=self.community_id, account_id=account_id))", "def is_on(self) -> bool:\n return self._client.get_circ_pump()", "def is_subscribed(self) -> bool:\n return bool(self._subscriptions)", "def poll(self):\r\n if self.channel.is_available():\r\n self.serve()\r\n return True\r\n else:\r\n return False", "def CanHandle(self, message):\n return (isinstance(message, messages.ChannelMessage)\n and message.content.startswith(TRIGGER))", "def can_push(self) -> bool:\n return pulumi.get(self, \"can_push\")", "async def on_member_join(member: discord.Member):\n for channel in member.server.channels:\n print(channel)\n if channel == \"general\":\n await member.send(f\"\"\"Welcome to the server {member.mention}!\"\"\")", "def func(self):\n from evennia.comms.models import ChannelDB\n\n caller = self.caller\n if self.args not in (\"on\", \"off\"):\n return super(CmdArxAllCom, self).func()\n if self.args == \"on\":\n # get names of all channels available to listen to\n # and activate them all\n channels = [\n chan\n for chan in ChannelDB.objects.get_all_channels()\n if chan.access(caller, \"listen\")\n ]\n for channel in channels:\n unmuted = channel.unmute(caller)\n if unmuted:\n self.msg(\"You unmute channel %s.\" % channel)\n else:\n caller.execute_cmd(\"addcom %s\" % channel.key)\n return\n channels = ChannelDB.objects.get_subscriptions(caller)\n for channel in channels:\n if channel.mute(caller):\n self.msg(\"You mute channel %s.\" % channel)", "def developer_can_push(self) -> bool:\n return pulumi.get(self, \"developer_can_push\")", "def joined(self):\n return str(self) in holder.bot.conn.channels.keys()", "def on_channel_change(self, new_channel):\n pass", "async def _msgvote_on(self, ctx):\n\n channel = ctx.message.channel\n if channel.id in self.settings[\"channels_enabled\"]:\n await self.bot.say(\"Msgvote mode is already on in this channel.\")\n else:\n self.settings[\"channels_enabled\"].append(channel.id)\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Msgvote mode is now on in this channel.\")", "def user_in_channel(self, server_id, user):\n srv = self.get_server_dict(server_id)\n return user.voice.voice_channel and srv['voice'] and user.voice.voice_channel == srv['voice'].channel", "def in_voice(self, server_id):\n srv = self.get_server_dict(server_id)\n return srv['voice'] and srv['voice'].channel", "def on(self) -> bool:\n on_cmd = HomeAssistantPlugin.service_map[self.domain.lower()][\"on\"]\n return self.send(on_cmd)", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def isconnected(self) -> bool:", "def available(self) -> bool:\n return self.thermostat[\"runtime\"][\"connected\"]", "def onConnect(self, connection:MQTTConnection) -> bool:\n\t\treturn True", "def have_channel_open(channels, user):\n for x in channels:\n chan = 
channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)", "async def _check_channel(\n self, starboard: StarboardEntry, channel: discord.TextChannel\n ) -> bool:\n if starboard.whitelist_channel:\n return channel.id in starboard.whitelist_channel\n else:\n return channel.id not in starboard.blacklist_channel", "async def on_ready():\n # channel = client.get_channel(695669957891194952)\n # await channel.send(\"Who wants to play The Game of 99?\")\n print(\"Who wants to play The Game of 99?\")", "def is_connected(self) -> bool:", "async def test_polling_platform_init(hass: HomeAssistant, polling_platform) -> None:\n assert hass.services.has_service(DOMAIN, SERVICE_SEND_MESSAGE) is True", "async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')", "def is_connected(self) -> bool:\n return self.arduino is not None", "def is_open(self, channel=None):\n return self.get_state(channel) == 2", "def isconnected(self) -> bool:\n ...", "def available(self) -> bool:\n return self._ctrl.connected()", "def is_ready(cls):\n\n return SUB_NOTIFY_READY", "def check_yang_subscribe(device, step, result=None):\n if step.result.value == 'failed':\n # subscribe or change for ON_CHANGE subscribe failed\n return\n subscribe_thread = None\n hostname = None\n if hasattr(device, 'name'):\n hostname = device.name\n elif hasattr(device, 'device') and hasattr(device.device, 'name'):\n hostname = device.device.name\n if hostname is None:\n # how did we get this far?\n log.error('YANG Subscribe check, cannot find hostname')\n return\n\n if isinstance(result, Thread):\n if result.sub_mode == 'ON_CHANGE':\n active_subscriptions[hostname] = result\n return\n subscribe_thread = result\n elif hostname in active_subscriptions:\n # ON_CHANGE thread waiting for change\n on_change = active_subscriptions[hostname]\n if not on_change.stopped():\n if not on_change.result:\n step.failed('ON_CHANGE subscription failed.')\n else:\n log.info('ON_CHANGE subscribe terminated...')\n active_subscriptions[hostname].stop()\n del active_subscriptions[hostname]\n return\n\n if subscribe_thread is not None:\n # Wait for subscribe thread to finish and return result.\n while not subscribe_thread.stopped():\n log.info('Waiting for notification...')\n time.sleep(1)\n # set subscribe result\n if not subscribe_thread.result:\n step.failed('subscription failed')", "def check_channel_shell_request(self, channel):\n return False", "def push_events(self) -> bool:\n return pulumi.get(self, \"push_events\")", "def is_dialing(self) -> bool:", "def telescopeDispatch(self):\n\n if self.ui.telescopeDevice.currentText().startswith('INDI'):\n self.app.telescope.name = self.ui.telescopeDeviceName.currentText()\n self.app.message.emit('Telescope enabled', 0)\n self.deviceStat['telescope'] = False\n else:\n self.app.telescope.name = ''\n self.app.message.emit('Telescope disabled', 0)\n self.deviceStat['telescope'] = None\n\n return True", "async def aoc_subscribe(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n role = 
ctx.guild.get_role(settings.aoc.role_id)\n unsubscribe_command = f\"{ctx.prefix}{ctx.command.root_parent} unsubscribe\"\n\n if role not in ctx.author.roles:\n await ctx.author.add_roles(role)\n await ctx.send(\n \"Okay! You have been __subscribed__ to notifications about new Advent of Code tasks. \"\n f\"You can run `{unsubscribe_command}` to disable them again for you.\"\n )\n else:\n await ctx.send(\n \"Hey, you already are receiving notifications about new Advent of Code tasks. \"\n f\"If you don't want them any more, run `{unsubscribe_command}` instead.\"\n )", "async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output", "def is_connected(self):\n return self.hw_connected", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "def trysay(self, msg):\n if self.channel:\n try:\n self.say(self.channel, msg)\n return True\n except: pass", "def is_registered(self, name):\r\n\r\n return name in self.__events", "async def check_if_is_ticket(ctx):\n channel : TextChannel = ctx.channel\n return 'ticket-' in channel.name", "def channel(self):\n raise NotImplementedError", "def condition(self, device, log):\n return True", "def is_added(self, channel=None):\n return self.get_state(channel) == 1", "def is_on(self) -> bool:\n return self._device.fan_on", "def is_connected(self):\n return self.hub.is_connected and self.client.is_running", "def on_connect(client, userdata, flags, rcdata):\n client.subscribe(\"diy/system/fire\", 1)\n client.subscribe(\"diy/system/panic\", 1)\n client.subscribe(\"diy/system/who\", 1)", "def change_channel():\n global interface\n\n print(\"Change channels for interface {}\".format(interface))\n channel_number = 1\n\n while True:\n system(f\"iwconfig {interface} channel {channel_number}\")\n channel_number = channel_number % 14 + 1\n sleep(0.5)", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def __hasHubs(self):\n for c in self.__comps:\n if c.isHub():\n return True\n return False", "def is_subscriber(self) -> bool:\n return self.subscriber", "def set_channel_verified(m_card, channel):\n\n # set the channel\n pyw.chset(m_card, channel)\n\n # verify\n ch_freq = pych.ch2rf(channel)\n device_freq = pyw.devinfo(m_card).get('CF', None)\n\n if device_freq != ch_freq:\n # the channel frequency of the device is not the selected one\n return False\n\n # the channel frequency of the device is the selected one\n return True", "async def ping(ctx):\n if ctx.channel.name.lower() in channels:\n await ctx.send(\"pong\")", "def on_dedicated(self):\n\n return self.is_valid_platform() and self['MODE'] == 'enterprise'", "async def connected_callback(self):\n channels = []\n for ch in self._channels:\n if ch == \"orderbook\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"orderBook10\")\n channels.append(channel)\n if ch == \"trade\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"trade\")\n channels.append(channel)\n if ch == \"kline\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"tradeBin1m\")\n channels.append(channel)\n while channels:\n data = {\n \"op\": \"subscribe\",\n \"args\": channels[:10]\n }\n await self._ws.send(data)\n 
channels = channels[10:]\n logger.info(\"subscribe orderbook/trade/kline successfully.\", caller=self)", "def is_incall_playing_dialtone(self) -> bool:", "def send_notification(self) -> bool:\n return pulumi.get(self, \"send_notification\")", "def subscribe(self, client, channel_id):\n logger.access('-- SlimPatternSubscriberManager subscribe, channel_id: %s', channel_id)\n\n need_subscribe = False\n channel_id = str(channel_id)\n\n clients = self.clients.get(channel_id, None)\n if not clients:\n clients = set()\n self.clients[channel_id] = clients\n\n need_subscribe = True\n\n clients.add(client)\n\n if need_subscribe:\n # this function return None\n self.subscriber.psubscribe(channel_id)\n logger.debug('SlimSubscriberManger need subscribe')\n return False\n\n elif channel_id in self._subscribed_channels:\n # the channel has been subscribed\n logger.debug('SlimSubscriberManager have subscribed')\n return True\n else:\n logger.debug('SlimSubscriberManager return NONE!!!!!!!')", "def is_connected(self) -> bool:\n\n return self.send(self.cmd.GET_SYSTEMLINE) == self.cmd.DEFAULT_SYSTEM_LINE", "def subscribe(self, channel, **kwargs):\n pass", "def _detect(self):\n if monasca_setup.detection.find_process_cmdline('rabbitmq-server') is not None:\n self.available = True", "def _want_subscription() -> bool:\n prompt = (\n 'Would you be willing, once your first certificate is successfully issued, '\n 'to share your email address with the Electronic Frontier Foundation, a '\n \"founding partner of the Let's Encrypt project and the non-profit organization \"\n \"that develops Certbot? We'd like to send you email about our work encrypting \"\n \"the web, EFF news, campaigns, and ways to support digital freedom. \")\n return display_util.yesno(prompt, default=False)", "def is_playing(self):\n return self.connected_channel is not None and self.current is not None", "def is_connected(self):\n return self._proxy.get(\"is_connected\", \"filterwheel\")", "def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites", "def has_platform(self, platform_name):\n return platform_name in self.platform_list", "def isConnected():", "def channel(self):\n return os.getenv(\"CONAN_CHANNEL\", \"testing\")", "def get_do_sample_clock_supported( channel ):\n supported = bool32(0)\n CALL('GetPhysicalChanDOSampClkSupported', channel, byref(supported))\n return bool( supported.value )", "def signedOn(self):\n self.join(self.factory.channel)\n self.identify()", "async def event_ready():\n print(f\"{BOT_NICK} is online!\")", "def EnableBroadcast(self) -> bool:", "def EnableBroadcast(self) -> bool:", "def on_publish(unused_client, unused_userdata, unused_mid):\n print('on_publish')\n status_light.on()" ]
[ "0.6758969", "0.65061855", "0.6170213", "0.60398626", "0.59377897", "0.58887047", "0.58596504", "0.5826945", "0.57663727", "0.574901", "0.57350725", "0.57127315", "0.5697036", "0.5667392", "0.5624958", "0.5623939", "0.5619179", "0.56188387", "0.55817115", "0.55813885", "0.5570246", "0.55436397", "0.55420333", "0.55302066", "0.5526674", "0.5525865", "0.5518668", "0.5514526", "0.5511954", "0.5510967", "0.5492885", "0.5481387", "0.5480411", "0.54699177", "0.5451882", "0.54507196", "0.5450029", "0.5447691", "0.5442268", "0.54396063", "0.54353493", "0.54344857", "0.5433999", "0.54145205", "0.5411094", "0.54018444", "0.5395028", "0.5392526", "0.5386567", "0.5366471", "0.5363291", "0.53576005", "0.53412616", "0.5336286", "0.53355414", "0.5322493", "0.53102285", "0.5283135", "0.5281782", "0.52793443", "0.5275933", "0.52751815", "0.5271106", "0.52686286", "0.52646947", "0.5259841", "0.5259229", "0.52576876", "0.52564573", "0.5240845", "0.52354074", "0.5229644", "0.5229575", "0.5222123", "0.52212954", "0.52185065", "0.52146775", "0.52141047", "0.5206857", "0.51981956", "0.5187583", "0.5187196", "0.5184766", "0.5184739", "0.5184576", "0.51834565", "0.5182138", "0.5179294", "0.51783526", "0.5175161", "0.5174738", "0.5173062", "0.51698947", "0.51698834", "0.51698124", "0.5162674", "0.5159123", "0.51582044", "0.51582044", "0.5157096" ]
0.62435955
2
Subscribes specified platform to channel
def subscribe(self, inst, channel):\r\n    if channel not in self._channels:\r\n        self._channels[channel] = TalkChannel(channel, print_messages=self.verbose, timeref=self._timeref)\r\n    self._channels[channel].subscribe(inst)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(self, channel, **kwargs):\n pass", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "async def _subscribe_to_channels(self, ws: WSAssistant):\n try:\n # BitMart WebSocket API currently offers only spot/user/order private channel.\n for trading_pair in self._trading_pairs:\n ws_message: WSRequest = WSRequest({\n \"op\": \"subscribe\",\n \"args\": [f\"spot/user/order:{bitmart_utils.convert_to_exchange_trading_pair(trading_pair)}\"]\n })\n await ws.send(ws_message)\n\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Error occured during subscribing to Bitmart private channels.\", exc_info=True)\n raise", "def subscribe(self, subject):\n pass", "async def connected_callback(self):\n channels = []\n for ch in self._channels:\n if ch == \"orderbook\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"orderBook10\")\n channels.append(channel)\n if ch == \"trade\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"trade\")\n channels.append(channel)\n if ch == \"kline\":\n for symbol in self._symbols:\n channel = self._symbol_to_channel(symbol, \"tradeBin1m\")\n channels.append(channel)\n while channels:\n data = {\n \"op\": \"subscribe\",\n \"args\": channels[:10]\n }\n await self._ws.send(data)\n channels = channels[10:]\n logger.info(\"subscribe orderbook/trade/kline successfully.\", caller=self)", "def subscribe(self, channels: typing.Iterable, listener: types.MethodType):\n raise TypeError(\"{} - subscribe not implemented!\")", "def subscribe(self, client, api_key, channel):\n if channel not in self.clients:\n return False\n pair = (client, api_key)\n if pair in self.clients[channel]:\n return False\n\n self.clients[channel].append(pair)\n return True", "def on_open(self) -> None:\n\n channel = [{\"name\": \"level2\", \"product_ids\": list(self.products.keys())}]\n msg_subscribe = {\"type\": \"subscribe\", \"channels\": channel}\n\n subscribe_payload = json.dumps(msg_subscribe)\n self.ws.send(subscribe_payload)", "def subscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/subscribe\", post_args=args)", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def subscribe(self, chanel_name):\n name = 'subscribe'\n\n self._send_websocket_request(name, chanel_name)", "def on_connect(client, interface, flags, rc):\n logger.info(\"Connected with result code \" + str(rc))\n for i in Const.sub_topics:\n client.subscribe(i)\n logger.info(\"Successfully subscribed to \" + i)", "def cbMqtt_on_subscribe(client, userdata, mid, granted_qos):\n # logger.debug('Subscribed to MQTT topic with message id %d', mid)\n pass", "def subscribe_to_quorum_channel(self):\n from dallinger.experiment_server.sockets import chat_backend\n\n self.log(\"Bot subscribing to quorum channel.\")\n chat_backend.subscribe(self, \"quorum\")", "def subscribe(self, channel: str) -> None:\n print(f'{self._name} starts subscribing to channel-[{channel}]')\n self._server.register(self, channel)", "def _subscribe(self, channel, callback):\n local_subs = self._sub_cbs.get(channel, None)\n if local_subs is None:\n local_subs = {callback}\n self._sub_cbs[channel]= local_subs\n self._rc.publish(self._sub_cmd_q, 'subscribe:' + channel)\n else:\n local_subs.add(callback)", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n 
\"margin\",\n ],\n }\n self.send_packet(req)", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "def subscribe(receiver, catchup):", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def on_connect(client, userdata, flags, rc):\n client.subscribe(mqtt_Light_topic)", "async def aoc_subscribe(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n role = ctx.guild.get_role(settings.aoc.role_id)\n unsubscribe_command = f\"{ctx.prefix}{ctx.command.root_parent} unsubscribe\"\n\n if role not in ctx.author.roles:\n await ctx.author.add_roles(role)\n await ctx.send(\n \"Okay! You have been __subscribed__ to notifications about new Advent of Code tasks. \"\n f\"You can run `{unsubscribe_command}` to disable them again for you.\"\n )\n else:\n await ctx.send(\n \"Hey, you already are receiving notifications about new Advent of Code tasks. \"\n f\"If you don't want them any more, run `{unsubscribe_command}` instead.\"\n )", "def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")", "def subscribeConsumer(consumer):", "def subscribe(receiver, updateInterval=10):", "def subscribe(receiver, updateInterval=None):", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def channel_subscribe(self, download_market_data=True):\r\n\r\n symb = \"%s%s\" % (self.curr_base, self.curr_quote)\r\n if not FORCE_NO_DEPTH:\r\n self.send(json.dumps({\"op\":\"mtgox.subscribe\", \"channel\":\"depth.%s\" % symb}))\r\n self.send(json.dumps({\"op\":\"mtgox.subscribe\", \"channel\":\"ticker.%s\" % symb}))\r\n\r\n # trades and lag are the same 
channels for all currencies\r\n self.send(json.dumps({\"op\":\"mtgox.subscribe\", \"type\":\"trades\"}))\r\n if not FORCE_NO_LAG:\r\n self.send(json.dumps({\"op\":\"mtgox.subscribe\", \"type\":\"lag\"}))\r\n\r\n self.request_idkey()\r\n self.request_orders()\r\n self.request_info()\r\n\r\n if download_market_data:\r\n if self.config.get_bool(\"gox\", \"load_fulldepth\"):\r\n if not FORCE_NO_FULLDEPTH:\r\n self.request_fulldepth()\r\n\r\n if self.config.get_bool(\"gox\", \"load_history\"):\r\n if not FORCE_NO_HISTORY:\r\n self.request_history()\r\n\r\n self._time_last_subscribed = time.time()", "def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )", "def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC, qos=1)", "def on_connect(mqttc, mosq, obj, rc):\n print(\"Connected with result code:\"+str(rc))\n # subscribe for all devices of user\n mqttc.subscribe('+/devices/+/up')\n mqttc.subscribe('+/devices/+/events/down/sent')\n if rc != 0:\n sys.exit('Could not connect to server. \\n Result code: ' + str(rc))", "def on_connect(client, userdata, flags, rc):\n if rc == 0:\n client.subscribe(topic_subscribe)\n print(\"connected OK with returned code=\", rc)\n else:\n print(\"Bad connection with returned code=\", rc)", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def on_connect(client, userdata, flags, rc):\n print(f\"Re/Suscribing to TOPIC: {TOPIC}\")\n client.subscribe(TOPIC)\n if rc == 0:\n print(f'Connected OK Returned code={rc}')\n else:\n print('Bad connection Returned code={rc}')", "async def channel(self, ctx):\n pass", "def on_channel_change(self, new_channel):\n pass", "def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC)", "def on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe(MQTT_TOPIC)", "def channel(self):\n raise NotImplementedError", "async def managechannels(self, ctx:commands.Context):", "def subscribe(self, broker):\n if self.subscribed == False:\n for attr in self.parm_list:\n if attr.direction == attr.SUB:\n self.logging.debug(\"Subscribing: \"+attr.label)\n self.mqtt_client.subscribe(attr.topic)\n self.subscribed = True\n else:\n self.logging.debug(\"Already subscribed ... 
ignoring\")", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def subscribe(client, mqtt_topic):\n\n print()\n print(\"Subscribe\")\n print(\"================================================\")\n print()\n\n # Subscribe to the config topic.\n print(\"Subscribing\")\n print(mqtt_topic)\n print()\n client.subscribe(mqtt_topic, qos=1)", "def subscribe(self) -> None:\n events = [\n HathorEvents.NETWORK_NEW_TX_ACCEPTED,\n HathorEvents.NETWORK_PEER_CONNECTING,\n HathorEvents.NETWORK_PEER_READY,\n HathorEvents.NETWORK_PEER_CONNECTED,\n HathorEvents.NETWORK_PEER_DISCONNECTED,\n HathorEvents.NETWORK_PEER_CONNECTION_FAILED\n ]\n\n for event in events:\n self.pubsub.subscribe(event, self.handle_publish)", "async def connect(self, channel=\"btc_confirmed_exchange_flows\"):\n uri = \"wss://ws.tokenanalyst.io\"\n id = \"token_analyst_stream\"\n payload = {\"event\":\"subscribe\",\"channel\":channel,\"id\":id,\"key\":self._key}\n\n async with websockets.connect(uri, ping_timeout=None) as websocket:\n self._ws = websocket\n await websocket.send(json.dumps(payload))\n async for msg in websocket: \n data = await self.interpret(json.loads(msg), id)\n yield data", "def change_channel():\n global interface\n\n print(\"Change channels for interface {}\".format(interface))\n channel_number = 1\n\n while True:\n system(f\"iwconfig {interface} channel {channel_number}\")\n channel_number = channel_number % 14 + 1\n sleep(0.5)", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def on_subscribe(\n client: mqtt.Client,\n userdata: Any,\n mid: int,\n granted_qos: int,\n properties: Properties = None,\n ) -> None:\n logging.info(\n f\"Successfully subscribed to topic: mid={mid}, granted qos={granted_qos}, properties={properties}\"\n )", "async def subscribe(self, topic: str, callback: aiowamp.SubscriptionHandler, *,\n match_policy: aiowamp.MatchPolicy = None,\n node_key: str = None,\n options: aiowamp.WAMPDict = None) -> int:\n ...", "def subscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = user\n self.invited.pop(user)\n self.message_queue.append('_%s has joined the channel_' % user)\n self.log.info('%s subscribed to the broadcast.' % user)\n self.save_state()\n return 'You are now subscribed.'", "def subscribe(self, sub, chan, auth=\"\", cipher=\"\", use_ssl=False):\r\n self.sub = sub\r\n self.chan = chan\r\n self.auth = auth\r\n self.cipher = cipher\r\n self.use_ssl = use_ssl\r\n\r\n # force disconnect of currently active longpoll.\r\n self.hup()", "def _subscribe(self, callback_path: str, topic_url: str, mode: str = \"subscribe\"):\n data = {'hub.callback': self.callback_url + callback_path,\n 'hub.mode': mode,\n 'hub.topic': topic_url,\n 'hub.lease_seconds': self.subscribe_least_seconds}\n if self.secret is not None:\n data['hub.secret'] = self.secret\n result = self.__api_post_request(TWITCH_API_BASE_URL + \"webhooks/hub\", data=data)\n if result.status_code != 202:\n logging.error(f'Subscription failed! 
status code: {result.status_code}, body: {result.text}')\n return result.status_code == 202", "def on_connect(client, userdata, flags, rc):\n print(\"MQTT Connected with result code \" + str(rc))\n if rc == 0:\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n topic = \"building/dgm/command\"\n client.subscribe(topic, qos=2)\n print(\"MQTT Subscribed to \" + topic)", "def subscribe(self, transport, data):\r\n\r\n self.add(transport, address=data.get('hx_subscribe'))\r\n\r\n self.send(\r\n data.get('hx_subscribe'),\r\n {'message': \"%r is listening\" % transport}\r\n )", "def web_channels_subscriptions():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n update = flask.request.args.get('update', None)\n\n if update is not None:\n update_data = json.loads(urllib.parse.unquote(update))\n\n if update_data['subscribe']:\n yt_create_subscription(update_data['id'])\n time.sleep(10)\n else:\n yt_remove_subscription(update_data['id'])\n time.sleep(10)\n\n return flask.redirect(update_data['redirect'])\n\n return flask.redirect('channels')", "def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()", "def _on_platforms_messsage(self, peer, sender, bus, topic, headers,\n message):\n topicsplit = topic.split('/')\n if len(topicsplit) < 2:\n _log.error('Invalid topic length published to volttron central')\n return\n\n # Topic is platforms/<platform_uuid>/otherdata\n topicsplit = topic.split('/')\n\n if len(topicsplit) < 3:\n _log.warn(\"Invalid topic length no operation or datatype.\")\n return\n\n _, platform_uuid, op_or_datatype, other = topicsplit[0], \\\n topicsplit[1], \\\n topicsplit[2], topicsplit[3:]\n\n if len(platform_uuid) != 36:\n _log.error('Invalid platform id detected {}'\n .format(platform_uuid))\n return\n\n platform = self._registered_platforms.get(platform_uuid)\n if platform is None:\n _log.warn('Platform {} is not registered but sent message {}'\n .format(platform_uuid, message))\n return\n\n _log.debug('Doing operation: {}'.format(op_or_datatype))\n _log.debug('Topic was: {}'.format(topic))\n _log.debug('Message was: {}'.format(message))\n\n if op_or_datatype == 'devices':\n md5hash = message.get('md5hash')\n if md5hash is None:\n _log.error('Invalid topic for devices datatype. 
Must contain '\n 'md5hash in message.')\n if message['md5hash'] not in self._hash_to_topic:\n devices = platform.get(\"devices\", {})\n lookup_topic = '/'.join(other)\n _log.debug(\"Lookup topic is: {}\".format(lookup_topic))\n vcp = self._get_connection(platform_uuid)\n device_node = vcp.call(\"get_device\", lookup_topic)\n if device_node is not None:\n devices[lookup_topic] = device_node\n self._hash_to_topic[md5hash] = lookup_topic\n else:\n _log.error(\"Couldn't retrive device topic {} from platform \"\n \"{}\".format(lookup_topic, platform_uuid))", "def subscribe(self):\n if hasattr(self.bus, \"signal_handler\"):\n self.bus.signal_handler.subscribe()\n if hasattr(self.bus, \"console_control_handler\"):\n self.bus.console_control_handler.subscribe()", "async def __call__(self, payload):\n self.sdk.log(\"/metrika_subscribe handler fired with payload {}\".format(payload))\n\n buttons = []\n time = 19\n\n for rows in range(0,3):\n row = []\n\n for cols in range(0,2):\n row.append({\n 'text': '{}:00'.format(str(time % 24).zfill(2)),\n 'callback_data': 'subscribe|{}'.format(time % 24)\n })\n time += 1\n\n buttons.append(row)\n\n buttons.append([\n {\n 'text': 'Отписаться',\n 'callback_data': 'unsubscribe'\n }\n ])\n\n await self.sdk.send_inline_keyboard_to_chat(payload['chat'], 'Выберете время', buttons)", "def linkTrackToChannel(*args, **kwargs):\n pass", "def mqtt_on_connect(client, userdata, flags, rc):\n logging.debug('successfully connected to mqtt broker')\n client.subscribe(config['mqtt']['subscribe_topic'])", "def on_connect(client, userdata, flags, rc):\n print(\"Connected with with mqtt server: \" + str(rc))\n client.subscribe(\"clients/#\")", "async def _async_setup_platform(\n opp: OpenPeerPower,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict],\n) -> None:\n if integration_platform not in opp.data:\n await async_setup_component(\n opp, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component = opp.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "async def _register_hardware_platform(\n hass: HomeAssistant, integration_domain: str, platform: HardwareProtocol\n) -> None:\n if integration_domain == DOMAIN:\n return\n if not hasattr(platform, \"async_info\"):\n raise HomeAssistantError(f\"Invalid hardware platform {platform}\")\n hass.data[DOMAIN][\"hardware_platform\"][integration_domain] = platform", "async def aoc_subscribe(self, ctx: commands.Context) -> None:\n role = ctx.guild.get_role(AocConfig.role_id)\n unsubscribe_command = f\"{ctx.prefix}{ctx.command.root_parent} unsubscribe\"\n\n if role not in ctx.author.roles:\n await ctx.author.add_roles(role)\n await ctx.send(\"Okay! You have been __subscribed__ to notifications about new Advent of Code tasks. \"\n f\"You can run `{unsubscribe_command}` to disable them again for you.\")\n else:\n await ctx.send(\"Hey, you already are receiving notifications about new Advent of Code tasks. 
\"\n f\"If you don't want them any more, run `{unsubscribe_command}` instead.\")", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def vol_push_callback(channel):\n \n global volume, mute\n \n if mute:\n subprocess.run([\"mpc\", \"volume\", str(volume)],stdout=subprocess.DEVNULL)\n else:\n print(\"mute\")\n subprocess.run([\"mpc\", \"volume\", \"0\"],stdout=subprocess.DEVNULL)\n mute = not mute", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def on_subscribe(self, client, userdata, mid, granted_qos):\n\t\tprint (\"[{}] Client subscribed to {}\".format(\n\t\t\tint(time.time()),\n\t\t\tself.topic\n\t\t))\n\t\t#the following lines are here and not in on_connect() only for printing purpose\n\t\tif not self.printed_sub:\n\t\t\tself.printed_sub = True\n\t\t\tself.subscribe(\"measure/people\")", "def on_connect(client, userdata, flags, rcdata):\n client.subscribe(\"diy/system/fire\", 1)\n client.subscribe(\"diy/system/panic\", 1)\n client.subscribe(\"diy/system/who\", 1)", "def test_publish(self):\n lcm = LCM(\"memq://\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", DeprecationWarning)\n lcm.publish(\"TEST_CHANNEL\", b\"\")", "def subscribe(observer):", "def subscribe(observer):", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def onSubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount += 1\n\t\treturn True", "def subscribe(self, _type, symbol):\n self._assert_types_are_correct([_type])\n self.types.add(_type)\n self.symbols.add(symbol)\n\n if _type == 'tickers':\n self.wss.subscribe_to_ticker(symbol)\n elif _type == 'trades':\n self.wss.subscribe_to_trades(symbol)\n elif isinstance(_type, tuple):\n self.wss.subscribe_to_candles(symbol, timeframe=_type[1])", "def on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(MQTT_PATH)", "def subscribe(self, client, channel_id):\n logger.access('-- SlimPatternSubscriberManager subscribe, channel_id: %s', channel_id)\n\n need_subscribe = False\n channel_id = str(channel_id)\n\n clients = self.clients.get(channel_id, None)\n if not clients:\n clients = set()\n self.clients[channel_id] = clients\n\n need_subscribe = True\n\n clients.add(client)\n\n if need_subscribe:\n # this function return None\n self.subscriber.psubscribe(channel_id)\n logger.debug('SlimSubscriberManger need subscribe')\n return False\n\n elif channel_id in self._subscribed_channels:\n # the channel has been subscribed\n logger.debug('SlimSubscriberManager have subscribed')\n return True\n else:\n logger.debug('SlimSubscriberManager return NONE!!!!!!!')", "def add_platform(self, platform: KetraPlatformBase):\n self.platforms.append(platform)", "def subscribeToEvent(eventName,subscriber,msgInterface):", "async def subscribe(topics, on_close=None, on_error=None):\n loop = asyncio.get_event_loop()\n session = aiohttp.ClientSession()\n async with 
session.ws_connect('wss://api.huobi.pro/ws') as ws:\n keys = {\n topic: uuid.uuid4().hex\n for topic in topics\n }\n keyed_channels = {\n v: topics[k]\n for k, v in keys.items()\n }\n subscribed_chanels = {}\n for topic, config in topics.items():\n payload = {\n 'sub': topic,\n 'id': keys[topic]\n }\n await ws.send_str(encode_ws_payload(payload))\n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.BINARY:\n data = decode_ws_payload(msg.data)\n\n ping = data.get('ping')\n if ping:\n reply = encode_ws_payload({'pong': ping})\n await ws.send_str(\n reply\n )\n\n subbed = data.get('subbed')\n if subbed:\n if data.get('status') == 'ok':\n subscribed_chanels[subbed] = keyed_channels[data['id']]\n\n ch = data.get('ch')\n if ch:\n cb = subscribed_chanels[ch].get('callback', lambda _: None)\n if asyncio.iscoroutinefunction(cb):\n await cb(data)\n else:\n loop.run_in_executor(None, partial(cb, data))\n elif msg.type == aiohttp.WSMsgType.CLOSED:\n if on_close:\n return on_close()\n return\n elif msg.type == aiohttp.WSMsgType.ERROR:\n if on_error:\n return on_error()\n return", "def mqtt_pub():\n global args\n args = parse_args()\n init(args)\n mqtt_connection = setup_connection(args)\n\n connect_future = mqtt_connection.connect()\n # Future.result() waits until a result is available\n connect_future.result()\n print(\"Connected!\")\n\n topic_data = read_config()\n print(f\"platform_type: {topic_data['platform_type']}\")\n print(f\"random platform_type: {random.choice(topic_data['platform_type'])}\")\n\n # Publish message to server desired number of times\n # This step loops forever if count was set to 0\n if args.count == 0:\n print(\"Sending messages until program killed\")\n else:\n print(f\"Sending {args.count} message(s)\")\n\n publish_count = 1\n while (publish_count <= args.count) or (args.count == 0):\n\n # topic definition: generate a random topic to publish to, based on the established hierarchy:\n # ex: IOOS/<platform_type>/<ra>/<platform>/<sensor>/<variable>\n platform_type = random.choice(topic_data[\"platform_type\"])\n ra = random.choice(topic_data[\"ra\"])\n platform = random.choice(topic_data[\"platform\"])\n sensor = random.choice(topic_data[\"sensor\"])\n variable = random.choice(topic_data[\"variable\"])\n\n topic = f\"IOOS/{platform_type}/{ra}/{platform}/{sensor}/{variable}\"\n obs_data = random.uniform(1, 100)\n # msg_json = \"\"\"\n # { \"metadata\": {\n # \"platform_type\": \"{platform_type}\",\n # \"ra\": \"{ra}\",\n # \"platform\": \"{platform}\",\n # \"sensor\": \"{sensor}\",\n # \"variable\": \"{variable}\"\n # },\n # \"data\": {\n # \"value\": \"{data}\"\n # }\n # }\n # \"\"\"\n msg_dict = dict()\n msg_dict[\"metadata\"] = {\n \"platform_type\": platform_type,\n \"ra\": ra,\n \"platform\": platform,\n \"sensor\": sensor,\n \"variable\": variable,\n }\n msg_dict[\"data\"] = {\"value\": obs_data}\n # print(msg_dict)\n\n print(f\"Topic: {topic}\")\n print(f\"Message: {msg_dict}\")\n mqtt_connection.publish(\n topic=topic,\n # payload=str(msg_dict),\n payload=json.dumps(msg_dict),\n qos=mqtt.QoS.AT_LEAST_ONCE,\n )\n time.sleep(1)\n publish_count += 1\n\n # Disconnect\n print(\"Disconnecting...\")\n disconnect_future = mqtt_connection.disconnect()\n disconnect_future.result()\n print(\"Disconnected!\")", "def add_subscription(self, device, cb, event_type=None):\n device.subscribe(cb, event_type=event_type, run=False)\n self.subs[cb] = device", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def join(self, channel):\n raise 
NotImplementedError", "def platform_subscribe_subscription(\n user_id: str,\n body: Optional[PlatformSubscribeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PlatformSubscribeSubscription.create(\n user_id=user_id,\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def publish(self, topic: Hashable, *args, **kwargs):\n for sub in self.subscribers[topic]:\n sub(*args, **kwargs)", "async def on_ready():\n # channel = client.get_channel(695669957891194952)\n # await channel.send(\"Who wants to play The Game of 99?\")\n print(\"Who wants to play The Game of 99?\")", "def on_connected(connection):\n # Open a channel\n connection.channel(on_channel_open)", "def on_connect(self, client, userdata, flags, rc):\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n sleep(5) # quick delay\n self.client.subscribe(self.subControls)\n self.client.subscribe(self.subSettings)", "def subscribe(self, req: SubscribeRequest) -> None:\n if self.login_status:\n exchange = EXCHANGE_VT2TORA[req.exchange]\n self.api.SubscribeMarketData([str.encode(req.symbol)], exchange)", "def subscribe(self, req: SubscribeRequest) -> None:\n if self.login_status:\n exchange = EXCHANGE_VT2TORA[req.exchange]\n self.api.SubscribeMarketData([str.encode(req.symbol)], exchange)", "async def subscribe(self, payload):\n\n time = payload['inline_params']\n\n if not time:\n await self.__call__(payload)\n return\n\n result = self.sdk.scheduler.find(payload['chat'])\n if result and result['hour'] == time:\n await self.sdk.send_text_to_chat(\n payload[\"chat\"],\n \"Вы уже подписаны на ежедневный дайджест в {}:00\".format(time)\n )\n else:\n payload['command'] = 'today'\n self.sdk.scheduler.remove(payload['chat'])\n self.sdk.scheduler.add(\n CommandStatistics(self.sdk).stats,\n chat_id=str(payload['chat']),\n hour=time,\n args=[payload]\n )\n await self.sdk.send_text_to_chat(\n payload[\"chat\"],\n \"Вы успешно подписались на ежедневный дайджест в {}:00\".format(time)\n )", "def create_subscription(self, device_type):\n url = '{}/v2/subscriptions'.format(self.url)\n device_type = device_type.split('.')[0]\n device_pattern = \"urn:ngsi-ld:{}:*\".format(device_type)\n description = \"Notify QuantumLeap with {}\".format(device_type)\n data = {\n \"description\": description,\n \"subject\": {\n \"entities\": [\n {\n \"idPattern\": device_pattern\n }\n ]\n },\n \"notification\": {\n \"http\": {\n \"url\": \"http://quantumleap:8668/v2/notify\"\n },\n \"metadata\": [\"dateCreated\", \"dateModified\"]\n },\n \"throttling\": 1\n }\n return self.post(url, data=json.dumps(data), headers=self.headers_json)", "def get_subscribers():\n # TODO: make calls to each device asynchronous so we don't block\n # trying to read from an unresponsive device on start up.\n PortList = serial.tools.list_ports.comports()\n manager.executor = ThreadPoolExecutor(max_workers=len(PortList))\n for port in PortList:\n if port.device in IGNORE_LIST:\n continue\n log.info('Found {}'.format(port.device))\n ser = serial.Serial(port.device, baudrate=115200)\n looping = True\n while looping:\n b = pyvesc.encode(pyvesc.ReqSubscription('t'))\n ser.reset_input_buffer()\n ser.write(b)\n l = ser.read()\n if l == b'\\x02' or l == b'\\x03':\n # It's a VESC 
message!\n looping = not init_vesc_driver(port, ser, l)\n else:\n # It's a JSON message!\n looping = not init_json_driver(port, ser, l)", "def connect(self, mach) -> channel.Channel:\n self.console_uart = self.servo_get_tty()\n return mach.open_channel(\"picocom\", \"-q\", \"-b\", \"115200\",\n self.console_uart)" ]
[ "0.66808635", "0.5979494", "0.5979494", "0.5979494", "0.5952215", "0.5772565", "0.5757093", "0.57420313", "0.567808", "0.5661305", "0.56558084", "0.5654329", "0.56333697", "0.56230813", "0.5613967", "0.55897367", "0.558452", "0.55815876", "0.55511", "0.54930943", "0.54869413", "0.54393387", "0.54197574", "0.541609", "0.5412909", "0.53921413", "0.5359343", "0.5330766", "0.53283286", "0.5309524", "0.5282591", "0.5278514", "0.52694005", "0.5261311", "0.5258332", "0.52429813", "0.52403927", "0.52370894", "0.5223805", "0.5222079", "0.52187526", "0.52187526", "0.5218613", "0.5206746", "0.52011627", "0.5200377", "0.5194708", "0.5190058", "0.5174037", "0.5154677", "0.5152725", "0.5151279", "0.51453996", "0.5109464", "0.5091309", "0.50805587", "0.5079541", "0.507797", "0.50773597", "0.5076682", "0.507488", "0.50694776", "0.5062443", "0.50598705", "0.5041928", "0.50190806", "0.5016143", "0.50155616", "0.4996038", "0.49947757", "0.49945578", "0.49849606", "0.49838418", "0.4976356", "0.4976343", "0.49728575", "0.49728575", "0.49498817", "0.4949454", "0.49447635", "0.49423963", "0.49386862", "0.493342", "0.49289745", "0.49269083", "0.49172634", "0.49150294", "0.49116844", "0.4904037", "0.4901673", "0.48892444", "0.48866487", "0.4882238", "0.4877175", "0.48685575", "0.48685575", "0.48638913", "0.48579508", "0.48566052", "0.48525763" ]
0.5703723
8
Unsubscribes specified platform from channel
def unsubscribe(self, inst, channel):
        if channel not in self._channels:
            raise ValueError("Channel {} not exists!".format(channel))
        self._channels[channel].unsubscribe(inst)
        return
        # TODO: ?delete channels if there is no subscribers
        # if len(self._channels[channel].subscribers) == 0:
        #     del self._channels[channel]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unsubscribe(self, channel, update_handler=None):\n pass", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unregister_publisher(self, hostname):", "def unsubscribe(self, subject):\n pass", "def unsubscribe(self, destination, *args, **kwargs):", "def part(self, channel):\n\n self._pubsub.unsubscribe('cluster:%s' % channel)", "def unregister_platform(self, platform_uuid):\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)", "def _unsubscribe(self, signal):\n while signal in self._downstream:\n self._downstream.remove(signal)\n while signal in self._downstream_reconnect:\n self._downstream_reconnect.remove(signal)", "def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def unsubscribe(observer):", "def unsubscribe(observer):", "def unsubscribe(self):\n pass # pragma: no cover", "def unsubscribe(self, chanel_name):\n name = 'unsubscribe'\n\n self._send_websocket_request(name, chanel_name)", "def unsubscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/unsubscribe\", post_args=args)", "def _onUnsubscribe(self, client:mqtt.Client, userdata:Any, mid:int) -> None:\n\t\t# TODO doc, error check when not connected, not subscribed\n\t\tfor t in self.subscribedTopics.values():\n\t\t\tif t.mid == mid:\n\t\t\t\tdel self.subscribedTopics[t.topic]\n\t\t\t\tself.messageHandler and self.messageHandler.onUnsubscribed(self, t.topic)\n\t\t\t\tbreak", "def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)", "def onUnsubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount -= 1\n\t\treturn True", "def unsubscribeFromEvent(eventName,subscriber):", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def cb_stop(self, update, context):\n\n print(f\"Unsubscribing chat_id '{update.message.chat_id}'\")\n try:\n self.clientChatIds.remove(update.message.chat_id)\n answer = \"You sucessfully unsubscribed.\"\n self.saveToFile(self.configFile)\n except KeyError:\n answer = \"You are not subscribed.\"\n\n update.message.reply_text(answer)", "async def unsubscribe(self, topic: str, subscription_id: int = None) -> None:\n ...", "def unsubscribe(self, client, channel):\n clients = self.clients.get(channel)\n if clients is None:\n return False\n index = None\n for i, pair in enumerate(clients):\n if pair[0] != client:\n continue\n index = i\n break\n if index is not None:\n del self.clients[channel][index]\n return True", "def unregister_platform(self, name, recursive=False):\r\n if name in dict(self._platforms):\r\n self.unregister_platform_instance(self._platforms[name], recursive)", "def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' 
% user)\n self.save_state()\n return 'You are now unsubscribed.'", "def unsubscribe(callback):\n if callback in _subscribers:\n del _subscribers[callback]", "def unregisterProducer():", "def dropchan(channel):", "def unsubscribe(cls,sender,receiver):\n cls._unsubscribe(id(sender),receiver)", "async def deregister(self, ctx:commands.Context):\r\n\r\n if await self.IsSpecialized(ctx.guild, ctx.channel.id):\r\n channels = await self.config.guild(ctx.guild).channels()\r\n t = channels.pop(str(ctx.channel.id))\r\n await self.config.guild(ctx.guild).channels.set(channels)\r\n await ctx.send(f'<#{ctx.channel.id}> is no longer a {t}')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> was never specialized!')", "def cleanup(self, channel=None):\n # falls `channel` angegeben wurden, werden nur diese bereinigt,\n # ansonsten wird alles bereinigt\n if channel:\n # ueberpruefe, ob `channel` eine Zahl ist und erstelle eventuell eine Liste nur mit dieser Zahl\n # dies ist wichtig, weil eine For-Schleife nicht ueber eine Zahl,\n # sondern in meinem Fall nur ueber eine Liste, iterieren kann\n if type(channel) == int:\n channel = [channel]\n for c in channel:\n # loesche den channel `c` aus dem dictionary `self.channels`\n del self.channels[c]\n print(f\"cleanup von channel {c}\")\n else:\n print(\"cleanup\")\n self.channels = {}", "def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()", "def unsubscribe(self):\r\n self._unregister()", "async def aoc_unsubscribe(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n role = ctx.guild.get_role(settings.aoc.role_id)\n\n if role in ctx.author.roles:\n await ctx.author.remove_roles(role)\n await ctx.send(\"Okay! You have been __unsubscribed__ from notifications about new Advent of Code tasks.\")\n else:\n await ctx.send(\"Hey, you don't even get any notifications about new Advent of Code tasks currently anyway.\")", "async def unsubscribe(connection, message):\n from high_templar.hub import NotSubscribedException\n\n if 'requestId' not in message:\n return await connection.send({\n 'code': 'error',\n 'message': 'no-request-id'\n })\n\n for subscription in connection.app.hub.subscriptions[connection]:\n if subscription.request_id == message['requestId']:\n try:\n connection.app.hub.unsubscribe(subscription)\n await connection.send({ 'code': 'success' })\n except NotSubscribedException:\n await connection.send({\n 'code': 'error',\n 'message': 'not-subscribed'\n })\n break\n else:\n return await connection.send({\n 'code': 'error',\n 'message': 'not-subscribed'\n })", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. 
Hub proxy not connected.\"\n )", "def unsubscribe_symbol(self, symbol):\n try:\n self.symbol.pop(symbol, None)\n self.symbol_data.pop(symbol, None)\n except KeyError:\n print(\"Could not unsubscribe symbol {} as it was never subscribed.\".format(str(symbol)))", "def unsubscribe(self, instrument_ids, exchange_id=b''):\n pass", "def unsubscribe(self, meta_type, callback):\n try:\n self.subscribers.get(meta_type, []).remove(callback)\n except ValueError:\n pass\n try:\n self.nackables.get(meta_type, []).remove(callback)\n except ValueError:\n pass", "def unregisterEvent(eventName, publisher):", "def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")", "def test_unsubscribe(self):\n dest = '/topic/dest'\n\n self.tm.subscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)\n subscription = self.conn.frames[0].headers.pop(\"subscription\", None)\n self.assertEqual(subscription, 0)\n self.assertEqual(self.conn.frames[0], f)\n\n self.tm.unsubscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)", "def ws_disconnect(message):\n language = message.channel_session['knocker']\n grLangUser = Group('knocker-{0}-{1}'.format(language, \n message.user.id))\n grLangUser.discard(message.reply_channel)", "def unsubscribe_all_known(self):\n for key, value in self.__callbacks.items():\n self.__logger.debug(f'unsubscribe from event {key}')\n succ = self.__twitch.delete_eventsub_subscription(key)\n if not succ:\n self.__logger.warning(f'failed to unsubscribe from event {key}')\n self.__callbacks.clear()", "def test_remove_channel_stops_loop(self):\n with mock.patch.object(self.notifier, \"_silenced_channels\", __bool__=lambda _: False):\n self.notifier.remove_channel(Mock())\n self.notifier_stop_mock.assert_called_once()", "def stop_subscription(event):\n _LOGGER.info(\"Shutting down subscriptions\")\n hass.data[vera.VERA_CONTROLLER].stop()", "def unsubscribe_values(self, req):\n \n rospy.loginfo(\"Unsubscribing values for \" + str(req.component) +\" \" + str(req.field))\n \n resp = UnsubscribeValuesResponse()\n resp.success = False\n \n if (req.component, req.field, req.datatype) in self.publishers.keys():\n rospy.loginfo(\"Removing publisher thread for \" + str((req.component, req.field)) + \"...\")\n t = self.publishers[(req.component, req.field, req.datatype)]\n if t.running:\n t.stop()\n \n timeout = 0\n while t.running and timeout <=5:\n time.sleep(1) #waiting\n timeout += 1\n if not t.running:\n t.join()\n with self.lock: \n del self.publishers[req.component, req.field, req.datatype]\n resp.success = True\n self.set_max_rate()\n rospy.loginfo(\"..done!\")\n else:\n rospy.logerr(\"Something went wrong, publisher not removed\")\n else:\n rospy.loginfo(\"publisher does not exist, nothing to delete...\")\n return resp", "def unsubscribe(self, update, context):\n # remove or update to the sqlite table.\n chat = update.message.chat\n self.db_manager.remove_user(chat.id)\n self.logger.info(\n 'Username: %s and chat_id: %s unsubscribed to the list.' 
% (chat.username, chat.id)\n )\n update.message.reply_text('You have successfully unsubscribed the notifications forever.')", "def _async_untrack_subscription(self, subscription: Subscription) -> None:\n topic = subscription.topic\n try:\n if _is_simple_match(topic):\n simple_subscriptions = self._simple_subscriptions\n simple_subscriptions[topic].remove(subscription)\n if not simple_subscriptions[topic]:\n del simple_subscriptions[topic]\n else:\n self._wildcard_subscriptions.remove(subscription)\n except (KeyError, ValueError) as ex:\n raise HomeAssistantError(\"Can't remove subscription twice\") from ex", "def _drop_channels_func(cfg, raw, subject, session) -> None:\n if cfg.drop_channels:\n msg = f'Dropping channels: {\", \".join(cfg.drop_channels)}'\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n raw.drop_channels(cfg.drop_channels)", "def unsubscribe(self):\n\n # Unsubscribe\n self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id) \n\n # Remove message queue\n self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))", "def remove(hub: pop.hub.Hub, subname: str):\n if hasattr(hub, subname):\n sub = getattr(hub, subname)\n if hasattr(sub, \"init\"):\n mod = getattr(sub, \"init\")\n if hasattr(mod, \"shutdown\"):\n mod.shutdown()\n hub._remove_subsystem(subname)", "def unsubscribe_callback(self, callback, sensor):\n if sensor in self._callbacks:\n self._callbacks[sensor].remove(callback)", "async def test_removed_notifier(self):\n await self.cog._unsilence(self.text_channel)\n self.cog.notifier.remove_channel.assert_called_once_with(self.text_channel)", "def test_remove_channel_removes_channel(self):\n channel = Mock()\n with mock.patch.object(self.notifier, \"_silenced_channels\") as silenced_channels:\n self.notifier.remove_channel(channel)\n silenced_channels.__delitem__.assert_called_with(channel)", "async def unsubscribe(self):\n LOGGER.info('Subscription removed')\n await self._ros.send(self._unsubscribe_msg)", "def unsubscribe(self, item_name):\n self.subscribed = None", "def unsubscribe(\n self, *, other_subreddits: list[praw.models.Subreddit] | None = None\n ):\n data = {\n \"action\": \"unsub\",\n \"sr_name\": self._subreddit_list(\n other_subreddits=other_subreddits, subreddit=self\n ),\n }\n self._reddit.post(API_PATH[\"subscribe\"], data=data)", "def unsubscribe(self, event_type: typing.Type[typing.Any], callback: CallbackT[typing.Any]) -> None:", "def disconnect_trade_stream(message):\r\n Group('trade_stream').discard(message.reply_channel)", "async def aoc_unsubscribe(self, ctx: commands.Context) -> None:\n role = ctx.guild.get_role(AocConfig.role_id)\n\n if role in ctx.author.roles:\n await ctx.author.remove_roles(role)\n await ctx.send(\"Okay! 
You have been __unsubscribed__ from notifications about new Advent of Code tasks.\")\n else:\n await ctx.send(\"Hey, you don't even get any notifications about new Advent of Code tasks currently anyway.\")", "def untag():\n form = TagSubscriptionForm(hidden_mode=True)\n if not form.validate_on_submit():\n abort(403)\n\n subscription = current_user.subscriptions.filter_by(\n channel_id=form.channel_id.data\n ).first_or_404()\n tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()\n\n results = subscription.untag(tag.id)\n response = {\"success\": results}\n return jsonify(response)", "def get_unsubscription_channel(self):\n unsubscription_channels = dict(settings.UNSUBSCRIPTION_CHANNEL_CHOICES)\n return unsubscription_channels.get(self.unsubscription_channel, \"N/A\")", "def handle_mic_unmute(_):\n loop.unmute()", "def unsubscribe_quote(self, instrument_ids, exchange_id=b''):\n pass", "def unregisterWebsocketProtocol(connection, protocol): #@NoSelf", "async def unwatch(self, ctx, channel: discord.TextChannel):\r\n channel_list = await self.config.guild(ctx.guild).watching()\r\n if channel.id in channel_list:\r\n channel_list.remove(channel.id)\r\n else:\r\n return await ctx.send(\"Channel is not being watched.\")\r\n await self.config.guild(ctx.guild).watching.set(channel_list)\r\n await ctx.send(f\"{self.bot.get_channel(channel.id).mention} will not have bad gifs removed.\")", "def disconnect_subscriber(reply_channel):\n try:\n send_disc_message_to_channel(reply_channel)\n except Exception as exc:\n print(str(exc))\n\n disconnect_in_subscribers(reply_channel)", "async def vote_unsetup(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is None:\n await ctx.send('This channel was never setup for votes.')\n return\n session.delete(old_channel)\n session.commit()\n await vote_clear(ctx)\n await ctx.send(f'{ctx.channel} no longer open for voting.')", "def unsubscribe(self, event, callback, args = None):\n if {\"event\": event, \"callback\": callback, \"args\": args, }\\\n in self.events:\n self.events.remove({\"event\": event, \"callback\": callback,\\\n \"args\": args, })\n\n return True", "async def async_will_remove_from_hass(self) -> None:\n async_unsubscribe_topics(self.hass, self._sub_state)\n self._sub_state = None", "def unsubscribe(self, topic):\n request = protos.RequestUnsubscribe(topic=topic)\n return self.stub.unsubscribe(request)", "def ws_disconnect(message):\n Group('clients').discard(message.reply_channel)", "def msg_unregister(channel, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(UNREGISTER, channel, \"\", version, order)", "def remove_platforms(project, env_spec_name, platforms):\n return _modify_platforms(project, env_spec_name, additions=[], removals=platforms)", "def unregister_platform_instance(self, instance, recursive=False):\r\n platform_to_remove = None\r\n for k, v in self._platforms.items():\r\n if v == instance:\r\n platform_to_remove = k\r\n break\r\n if platform_to_remove is None:\r\n raise ValueError(\"No platform instance have been found to unregister\")\r\n if len(instance.subplatforms) > 0:\r\n if recursive:\r\n for sp in list(instance.subplatforms):\r\n self.unregister_platform_instance(sp, recursive)\r\n else:\r\n raise ValueError(\"Can't unregister platform with subplatforms. 
Set recursive to True\")\r\n if instance.parent is not None:\r\n if instance in instance.parent.subplatforms:\r\n instance.parent.subplatforms.remove(instance)\r\n if instance in instance.parent.subplatforms:\r\n raise IndexError(\"Instance were registered multiple times in parent's subplatforms list\")\r\n else:\r\n raise IndexError(\"Instance is not found in parent's subplatforms list\")\r\n del self._platforms[platform_to_remove]", "def unsubscribe_from_host_statistics(self):\n\t\treturn Job(SDK.PrlSrv_UnsubscribeFromHostStatistics(self.handle)[0])", "async def unregister(websocket):\n app['websockets'].discard(websocket)\n await notify_users()", "def on_close_handler(self, instmt, ws):\n Logger.info(self.__class__.__name__, \"Instrument %s is unsubscribed in channel %s\" % \\\n (instmt.get_instmt_name(), instmt.get_exchange_name()))\n instmt.set_subscribed(False)", "def remove_channel(self, channel):\n self._channels.pop(channel.fileno, None)\n\n try:\n self._poller.remove(channel.fileno, channel._events)\n except (IOError, OSError):\n log.exception(\"Error while removing %r.\" % channel)", "def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()", "def remove_event_detect(self, channel):\n self._check_mode()\n # entferne den channel aus der Liste `self.events`\n self.events.remove(channel)\n print(f\"event detect fuer channel {channel} entfernt\")", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self, inst):\r\n if inst in self._subscribers:\r\n self._subscribers.remove(inst)\r\n vprint(\"{} is unsubscribed from {}\".format(inst.name, self.name))", "def unsubscribe(id, userId):\n db = core.connect()\n theUser = db[userId]\n if id in theUser[\"streams\"]:\n theUser[\"streams\"].remove(id)\n db[userId] = theUser", "def unregister(self):\n self._executor.unregister_publisher(self)", "def test_remove_channel_skips_stop_with_channels(self):\n self.notifier.remove_channel(Mock())\n self.notifier_stop_mock.assert_not_called()", "async def _async_perform_unsubscribes(self) -> None:\n if not self._pending_unsubscribes:\n return\n\n topics = list(self._pending_unsubscribes)\n self._pending_unsubscribes = set()\n\n async with self._paho_lock:\n result, mid = await self.hass.async_add_executor_job(\n self._mqttc.unsubscribe, topics\n )\n _raise_on_error(result)\n for topic in topics:\n _LOGGER.debug(\"Unsubscribing from %s, mid: %s\", topic, mid)\n\n await self._wait_for_mid(mid)", "def remove_notification_for_user(\n self, login, type, channel=\"EmailNotificationChannel\", project=None\n ):", "def unsubscribe(self, subscription):\n request = Request(\n method='delete',\n endpoint='/streams/subcription/{}'.format(subscription)\n )\n\n def response_handler(resp):\n code = resp.status_code\n if resp.is_success:\n return 'OK'\n elif code == 403:\n raise ex.StreamPermissionError(resp, request)\n raise ex.StreamConnectionError(resp, request)\n\n return self._execute(request, response_handler)", "async def unlistened(self, value=None):\n pass", "def unsubscribe(self, observer, name=None):\n if name is None:\n name = 'default'\n if observer in self._observers:\n del self._observers[observer][name]", "def on_unsubscribe(self, mqtt_client, userdata, mid ):\n logging.debug(\"DEBUG - unsubscribe ack received\")", "def test_unsubscribe(self):\n self.service.clientConnected()\n\n unsubscribers = []\n self.service.subscribe(u'url', 1\n ).addCallback(lambda fn: 
unsubscribers.append(fn))\n self.service.subscribe(u'url', 2\n ).addCallback(lambda fn: unsubscribers.append(fn))\n\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)\n\n unsubscribers.pop()()\n self.service.unsubscribe(u'url')\n self.assertIn(u'url', pubsubClient.subscriptions)\n\n unsubscribers.pop()()\n self.service.unsubscribe(u'url')\n self.assertNotIn(u'url', pubsubClient.subscriptions)", "def _unregisterConnect(self, function):\n self._sig_connect.unsubscribe(function)", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )" ]
[ "0.6753091", "0.67168313", "0.67168313", "0.67168313", "0.67168313", "0.67168313", "0.61841947", "0.614403", "0.61410797", "0.60061914", "0.5981644", "0.59452146", "0.59342414", "0.59306574", "0.59306574", "0.59159696", "0.58706665", "0.58182955", "0.5811057", "0.58029413", "0.5781126", "0.5723125", "0.5714389", "0.5705456", "0.5678534", "0.5672124", "0.56649786", "0.5657966", "0.56433386", "0.5638787", "0.5632441", "0.5610003", "0.5608445", "0.56047434", "0.55909246", "0.559061", "0.5576609", "0.55625063", "0.5544989", "0.5520745", "0.55063283", "0.5492636", "0.5484889", "0.54803836", "0.5478384", "0.5475182", "0.5465867", "0.5439633", "0.5412033", "0.54093856", "0.5408857", "0.5400948", "0.53993964", "0.5397914", "0.5394992", "0.5380745", "0.53709173", "0.53628147", "0.53441733", "0.5332819", "0.53027666", "0.5301185", "0.5300026", "0.52954364", "0.5286209", "0.52757543", "0.5270808", "0.52665377", "0.5259262", "0.52588177", "0.5245174", "0.524056", "0.5232277", "0.52313817", "0.52229935", "0.5207172", "0.52053034", "0.5200532", "0.5192569", "0.5192382", "0.5191667", "0.51867235", "0.51595014", "0.5152524", "0.5145616", "0.514053", "0.514053", "0.5135786", "0.51303375", "0.5127073", "0.51165026", "0.51126397", "0.51119524", "0.5110001", "0.51095814", "0.51029205", "0.5101808", "0.50940204", "0.50887847", "0.50876087" ]
0.60036814
10
Starts new thread on specified channel
def start_thread(self, topic_caster, channel, interface, reply_to_tc=None):
        if channel not in self._channels:
            raise ValueError("Channel {} not exists!".format(channel))
        return TalkContext(channel, self._channels[channel].start_thread(topic_caster, reply_to_tc), interface)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, channel, name, server):\n threading.Thread.__init__(self, target=self._run)\n self.__channel = channel\n self.__transport = channel.get_transport()\n self.__name = name\n self.__server = server", "def join(self, channel):\n raise NotImplementedError", "def run_channels():\n\n # Run channel encoders\n for c in channels:\n threads[c] = threading.Thread()\n threads[c].name = c\n threads[c].run = channels[c].run\n threads[c].start()\n \n time.sleep(0.5)\n print()", "def open_channel(self):\n # LOGGER.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_task_open)\n self._connection.channel(on_open_callback=self.on_channel_ctrl_open)", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def stasis_start_cb(channel, ev):\n await channel.answer()\n await bridge.addChannel(channel=channel.id)", "def channel_open(self):\n self._chan = self._session.invoke_shell()", "def on_channel_open(self, channel):\n self.logger.debug(\"Channel opened: %s\", channel)\n\n self._channel = channel\n self._channel.add_on_close_callback(self.on_channel_closed)\n\n self.start_consuming()", "def join_channel(self, channel):\r\n self._send('JOIN #%s\\r\\n' % channel)", "def open_channel(self):\n self.logger.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)", "def open_channel(self):\n self.logger.info('creating channel')\n self._connection.channel(on_open_callback=self.on_channel_opened)", "def open_channel(self):\n logger.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)", "def on_channel_open(self, new_channel):\n\t\tself.channel = new_channel\n\t\tself.declare_queue(self.on_queue_declared)", "async def join(self, channel : str):\n # todo: check if # is required. 
If it is, append it at the start if DNE.\n await self._connection.join(channel)", "def on_channel_open(self, channel):\n logger.info('Channel opened')\n self._channel = channel\n self._channel.basic_qos(prefetch_count=\n self.DEFAULT_PREFETCH_COUNT)\n self.add_on_channel_close_callback()\n self.setup_queues_and_bindings()", "def start_subsystem(self, name, transport, channel):\n pass", "def channel(self, channel: int, /) -> \"TimerChannel\" | None:", "def on_channel_task_open(self, channel):\n # LOGGER.info('Channel opened')\n self._channel_task = channel\n self._channel_task.add_on_close_callback(self.on_channel_closed)\n channel.queue_declare(\n queue=self.queue_task,\n durable=False,\n exclusive=False\n )\n channel.basic_qos(prefetch_count=self._prefetch_count)\n self._init_ok_task = True", "def on_channel_ctrl_open(self, channel):\n # LOGGER.info('Channel opened')\n self._channel_ctrl = channel\n self._channel_ctrl.add_on_close_callback(self.on_channel_closed)\n self._channel_ctrl.basic_qos(prefetch_count=1)\n self.setup_exchange()", "def join(self, channel):\n self.channels[channel.name.lower()] = channel\n channel.protocol = self.protocol\n self.protocol.join(channel.name)", "async def part(self, channel : str):\n await self._connection.part(channel)", "def test_add_channel_starts_loop(self):\n self.notifier.add_channel(Mock())\n self.notifier_start_mock.assert_called_once()", "def channel(self):\n\n self._channel = self._connection.channel()\n print(\"Channel opened...\")", "def _set_channel_(self, channel):\n self._channel = channel", "def start_background_thread(self):\n self.runner = Runner(queue=queue, app_id=self.app_id)\n self.runner.start()\n # TODO: stop the thread at some point?", "def __init__(self, channel, sys_impl=sys, fake_clock=None):\n StoppableThread.__init__(self, fake_clock=fake_clock)\n self.__channel = channel\n self.__stdout = sys_impl.stdout\n self.__stderr = sys_impl.stderr\n self.__fake_clock = fake_clock", "async def request_channel_thread_channels(client, guild_id, channel_id, request_function):\n thread_channels = []\n \n data = None\n \n while True:\n data = await request_function(client.http, channel_id, data)\n thread_channel_datas = data['threads']\n \n for thread_channel_data in thread_channel_datas:\n thread_channel = Channel.from_data(thread_channel_data, client, guild_id)\n thread_channels.append(thread_channel)\n \n thread_user_datas = data['members']\n for thread_user_data in thread_user_datas:\n thread_channel_id = int(thread_user_data['id'])\n try:\n thread_channel = CHANNELS[thread_channel_id]\n except KeyError:\n continue\n \n user_id = int(thread_user_data['user_id'])\n user = create_partial_user_from_id(user_id)\n \n thread_user_create(thread_channel, user, thread_user_data)\n \n if not data.get('has_more', True):\n break\n \n if thread_channels:\n before = thread_channels[-1].created_at\n else:\n before = datetime.utcnow()\n \n data = {'before': before}\n \n return thread_channels", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. 
\"\"\"\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)\n\n # subscribe to personal channel and fire up the message handler\n self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})\n self._proc = Thread(target=pubsub_thread)\n self._proc.daemon = True\n self._proc.start()", "def _make_thread(self):\r\n pass", "def addchan(channel):", "async def on_channel_create(self, channel):\n if channel.is_private:\n return\n\n role = await self.get_role(channel.server)\n if not role:\n return\n\n await self.setup_channel(channel, role)", "def connectChannel(sock, chan):\n sock.send(\"JOIN {}\\r\\n\".format(chan).encode(\"utf-8\"))\n\n console.info(\"Successfully connected to {}\".format(chan))", "def start_cmd(wrapper: MessageDispatcher, message: str):\n if wrapper.target is channels.Main:\n start(wrapper)", "def test_start_already_listening(testloop, testchannel):\n with testchannel.open():\n testchannel.start()\n with pytest.raises(ChannelListeningError):\n testchannel.start()", "def set_channel(cls, channel):\n cls.channel = channel", "def subscribe(self, channel: str) -> None:\n print(f'{self._name} starts subscribing to channel-[{channel}]')\n self._server.register(self, channel)", "def on_channel_open(self, channel):\n self.logger.info('Channel opened')\n self._channel = channel\n self.add_on_channel_close_callback()", "def subscribe(actor_id, worker_ch):\n actor_ch = ActorMsgChannel(actor_id)\n t = threading.Thread(target=process_worker_ch, args=(worker_ch, actor_id, actor_ch))\n t.start()\n print(\"Worker subscribing to actor channel...\")\n while keep_running:\n try:\n msg = actor_ch.get(timeout=2)\n except channelpy.ChannelTimeoutException:\n continue\n print(\"Received message {}. Starting actor container...\".format(str(msg)))\n try:\n stats, logs = execute_actor(image, msg['msg'])\n except DockerStartContainerError as e:\n print(\"Got DockerStartContainerError: {}\".format(str(e)))\n Actor.set_status(actor_id, ERROR)\n continue\n # add the execution to the actor store\n print(\"Actor container finished successfully. 
Got stats object:{}\".format(str(stats)))\n exc_id = Execution.add_execution(actor_id, stats)\n Execution.set_logs(exc_id, logs)", "def new_channel(self, *args, **kwargs):\n logger.debug('creating channel -> connection.channel(%r, %r)' % (args, kwargs))\n if self.enabled:\n channel = self.connection.channel(*args, **kwargs)\n self._channels.append(channel)\n return channel\n else:\n return None", "def start_new_thread(self, conn, addr):\n thread = ClientThread(conn, addr, self.msg_queue)\n thread.start()\n return thread", "def open_channel(self, timeout=120):\n if self.get_channel_count() < self.MAX_CHANNELS and self.is_connected():\n try:\n channel = self.get_client().invoke_shell()\n channel.settimeout(timeout)\n self.add_channel(channel)\n return channel\n except error as e:\n print(\"opening channel error\")\n self._error = e\n # return None", "def joined(self, channel):\n self.logger.info(\"Joined %s\" % channel)", "def make_iter(capture, channel):\n\n def cycle():\n threading.Timer(INTERVAL, cycle).start()\n publish_frame(capture, channel)\n\n return cycle", "def _on_channel_open(self, channel_id: str) -> None:\n self._send_alive(channel_id)", "def open_channel(config_file):\n server_ip, port, channel_name, content_type = get_channel_config(config_file)\n channel = PresenterChannel(server_ip, port, channel_name, content_type)\n ret = channel.startup()\n if ret:\n log_error(\"ERROR:Open channel failed\")\n return None\n return channel", "async def channel(self, ctx):\n pass", "def test_open_via_channel(testchannel, callit):\n\n channel = testchannel.channel() if callit else testchannel.channel\n\n with channel as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed", "def connect_thread(self, *args, **kwargs):\r\n thread = threading.Thread(target=self.connect_stream, args=args, kwargs=kwargs)\r\n thread.daemon = True\r\n thread.start()\r\n return thread", "def start(self):\n self.thread.start()", "def fstart(wrapper: MessageDispatcher, message: str):\n channels.Main.send(messages[\"fstart_success\"].format(wrapper.source))\n wrapper.target = channels.Main\n start(wrapper, forced=True)", "def on_open_channel(new_channel):\n # assign new channel to the global channel variable\n global channel\n channel = new_channel\n\n # channel is assigned and declare a queue named scripbox.\n # queue Properties - durable is True so that the queue withstands rabbitmq reboot\n # Pass a callback on_queue_declared which fires when a queue declaration\n # is successful\n channel.queue_declare(queue='scripbox', durable=True,\n auto_delete=False, callback=on_queue_declared)", "def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread", "def build_channel(channel, client=None, topic=None, mode=None, num_procs=1, group=None, shard_id=u'shardId-000000000000', shard_it_type='LATEST'):\n channel_lc = channel.lower()\n if channel_lc == \"file\" or channel_lc == \"geowatchchannelfile\":\n from geowatchutil.channel.geowatch_channel_file import GeoWatchChannelFile\n return GeoWatchChannelFile(client, mode)\n elif channel_lc == \"kafka\" or channel_lc == \"geowatchchannelkafka\":\n from geowatchutil.channel.geowatch_channel_kafka import GeoWatchChannelKafka\n return GeoWatchChannelKafka(client, topic, mode, num_procs=num_procs, group=group)\n elif channel_lc == \"kinesis\" or channel_lc == \"geowatchchannelkinesis\":\n from geowatchutil.channel.geowatch_channel_kinesis import GeoWatchChannelKinesis\n return GeoWatchChannelKinesis(client, topic, mode, 
num_procs=num_procs, shard_id=shard_id, shard_it_type=shard_it_type)\n elif channel_lc == \"sns\" or channel_lc == \"geowatchchannelsns\":\n from geowatchutil.channel.geowatch_channel_sns import GeoWatchChannelSNS\n return GeoWatchChannelSNS(client, topic, mode)\n elif channel_lc == \"sqs\" or channel_lc == \"geowatchchannelsqs\":\n from geowatchutil.channel.geowatch_channel_sqs import GeoWatchChannelSQS\n return GeoWatchChannelSQS(client, topic, mode)\n elif channel_lc == \"slack\" or channel_lc == \"geowatchchannelslack\":\n from geowatchutil.channel.geowatch_channel_slack import GeoWatchChannelSlack\n return GeoWatchChannelSlack(client, topic, mode)\n elif channel_lc == \"wfs\" or channel_lc == \"geowatchchannelwfs\":\n from geowatchutil.channel.geowatch_channel_wfs import GeoWatchChannelWFS\n return GeoWatchChannelWFS(client, topic, mode)", "def start_thread(self) -> threading.Thread:\n assert self._thread is None, \"Thread has already been created.\"\n\n self._thread = threading.Thread(target=self.start)\n self._thread.start()\n return self._thread", "def start(self):\n self._setup_thread()\n self.thread.start()", "def subscribe(self, channel, **kwargs):\n pass", "async def startchannel(self, ctx, vc: discord.VoiceChannel):\n await self.config.guild(ctx.guild).pstart.set(vc.id)\n await self.config.guild(ctx.guild).pcat.set(vc.category_id)\n await ctx.send(\n _(\n \"Private starting channel set. Users can join this channel to use all features of private rooms.\\nI recommend not allowing members to speak in this channel.\"\n )\n )", "def part(self, channel):\n raise NotImplementedError", "def open(self):\n if self._is_open():\n return\n\n channel = api.Api.ssh_channel_new(self._session)\n if channel is None:\n raise exceptions.ChannelException(\"Channel cannot be created: {}\".format(self.get_error_message()))\n\n ret = api.Api.ssh_channel_open_session(channel)\n if ret != api.SSH_OK:\n raise exceptions.ChannelException(\"Channel cannot be opened: {}\".format(self.get_error_message()))\n\n self._channel = channel", "def start_thread(self):\n self.stop_thread()\n self.running = True\n self.run_thread = threading.Thread(target=self.run, daemon=True)\n self.run_thread.start()", "async def _cmdf_setchannel(self, substr, msg, privilege_level):\n ch_obj = None\n if len(substr) == 0:\n ch_obj = msg.channel\n else:\n ch_obj = self._client.search_for_channel(substr, enablenamesearch=True, serverrestriction=self._server)\n\n if ch_obj is None:\n buf = \"**Error:** Channel not found. 
No changes were made.\"\n else:\n self._ch_msg_channelid = ch_obj.id\n self._save_settings()\n buf = \"In-channel greeting messages will now be sent in \" + utils.ch_to_mention(ch_obj) + \".\"\n await self._client.send_msg(msg, buf)\n return", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def import_channel(self, request):\n self.sync_channel(request)\n\n return Importer()", "def joined(self, channel):\n log.msg(\"GbRobot joined \" + self.factory.channel)", "def run_in_thread(self, fn, *args, **kwargs):\r\n thread = threading.Thread(target=fn, args=args, kwargs=kwargs)\r\n thread.start()\r\n \r\n return thread", "def start(self):\n if self._chan is not None:\n try:\n self._chan.start_consume()\n except ChannelError:\n log.info('Subscriber is already started')\n\n else:\n self.gl = spawn(self.listen)", "def start_thread(thread_func, name=None):\r\n thread = threading.Thread(None, thread_func)\r\n thread.daemon = True\r\n thread.start()\r\n if name:\r\n thread.name = name\r\n return thread", "def add_channel(self, channel):\n self._channels[channel.fileno] = channel\n self._poller.add(channel.fileno, channel._events)", "def active_channel(self, channel):\n old_timeout = self.resource.timeout\n self.resource.timeout = 500\n if channel in self.channel_list:\n self.scpi.set_active_channel(channel)\n else:\n print('Channel %i not in list of channels. Create channel first'\n % channel)\n set_channel = self.scpi.query_active_channel()\n self.resource.timeout = old_timeout\n return set_channel", "def joined(self, channel):\n self.logger.log(\"[I have joined %s]\" % channel)\n #IRC Keepalive\n self.startHeartbeat()\n #Custom heartbeat\n self.loopcall.start(5.0)", "def launch_channels(self) -> None:\n live_run = self.get_live_run()\n\n channels = live_run.architect.get_channels(\n self._on_channel_open,\n self._on_catastrophic_disconnect,\n self._on_channel_message,\n )\n for channel in channels:\n self._register_channel(channel)\n\n async def launch_status_task():\n self._status_task = asyncio.create_task(self._ping_statuses_while_alive())\n\n live_run.loop_wrap.execute_coro(launch_status_task())", "def __init__(self, ch, nreps):\n threading.Thread.__init__(self)\n self.ch = ch\n self.nreps = nreps", "def send_factory(slack_client, channel: str):\n return partial(send, slack_client, channel)", "def __create_channel_run(self, channel, username, token):\n data = {\n 'channel_id': channel.get_node_id().hex,\n 'chef_name': self.__get_chef_name(),\n 'ricecooker_version': __version__,\n 'started_by_user': username,\n 'started_by_user_token': token,\n 'content_server': config.DOMAIN,\n }\n try:\n response = requests.post(\n config.sushi_bar_channel_runs_url(),\n data=data,\n auth=AUTH)\n response.raise_for_status()\n return response.json()['run_id']\n except Exception as e:\n config.LOGGER.debug('Sushibar error when creating run: %s' % e)\n raise ConnectionError('failed to register run on Sushibar.')", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "async def channel(self, ctx: commands.Context, channel: discord.TextChannel):\n self.channel = str(channel.id)\n await self._update_db()\n\n await ctx.send(f\"Done! 
{channel.mention} is the Starboard Channel now!\")", "def _ensure_thread(self) -> None:\n\n if not self._thread:\n thread = self._thread_factory(self.run)\n self._thread = thread\n thread.start()", "def __init__(self, channel, name):\n self._channel = channel\n self.name = name", "def set_channel(self, channel_name, value):\n try:\n cm = self.__core.get_service(\"channel_manager\")\n cdb = cm.channel_database_get()\n channel = cdb.channel_get(channel_name)\n try:\n typing_value = channel.type()(value)\n except Exception:\n traceback.print_exc()\n return\n channel.consumer_set(Sample(time.time(), typing_value))\n except Exception:\n traceback.print_exc()", "def start_thread():\n global gIt, gOt, gRunning\n gRunning = True\n gIt = Thread(target = input_thread)\n gIt.start()\n gOt = Thread(target = output_thread)\n gOt.start()", "def join(self, channel, func=None):\n\n self._pubsub.subscribe(**{'cluster:%s' % channel: func\n if func is not None\n else self._handler})", "def test_start_closed(testchannel):\n with pytest.raises(ChannelClosedError):\n testchannel.start()", "def start_thread(self):\n self.thread = Thread(target=self.put_lines_into_queue)\n self.thread.daemon = True\n self.thread.start()", "def start(self):\n self._thread.start()", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "def send_part(self, channel) -> None:\n\n self.send_line('PART {}'.format(channel))", "def join_channel(self, server, username, channel):\n for sock in self.socks:\n if sock.server == server and username == sock.username:\n if sock.channel == channel:\n return sock\n sock.send(\"JOIN {}\\r\\n\".format(channel))\n print (\"[!] channel {} joined on {} with username {}\".format(channel, server, username))\n sock = IRC.Socket(self.dispatcher, sock.sock, username, server, channel)\n self.replyer.add_sock(sock)\n return sock\n return self.add_sock(server=server, username=username, channel=channel)", "def test_no_listeners(testloop, testchannel):\n\n async def run():\n \"\"\"run\"\"\"\n async for i in aiter(range(10)):\n await testchannel.send(i)\n await asyncio.sleep(0)\n\n with testchannel.open():\n testchannel.start(asyncfunc=False)\n testloop.run_until_complete(run())", "def channel(self):\n if not hasattr(self, '_channel'):\n self._channel = self.new_channel()\n return self._channel", "def joined(self, channel):\n # find or make a session. 
\n ss = self.findSessions(channel)[0]\n if ss.isDefaultSession: # i.e., not found\n channel = channel.decode(self.serverEncoding)\n ss = self.store.find(d20session.D20Session,\n d20session.D20Session.name == channel).one()\n\n if ss is None:\n ss = d20session.D20Session()\n ss.name = channel.decode(ss.encoding)\n self.store.add(ss)\n Store.of(ss).commit()\n\n self.sessions.append(ss)\n\n self.responding = 1", "def connect_channel(channel, service=VoidService, config={}):\n return service._connect(channel, config)", "def launch_thread(self, daemon=1):\r\n assert self._thread is None\r\n t = threading.Thread(target=self._loop, name=\"TorLoop\")\r\n if daemon:\r\n t.setDaemon(daemon)\r\n t.start()\r\n self._thread = t\r\n t = threading.Thread(target=self._eventLoop, name=\"EventLoop\")\r\n if daemon:\r\n t.setDaemon(daemon)\r\n t.start()\r\n self._eventThread = t\r\n # eventThread provides a more reliable indication of when we are done.\r\n # The _loop thread won't always die when self.close() is called.\r\n return self._eventThread", "def spawn_thread(func, *args, **kwargs):\n thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n thread.daemon = True\n thread.start()\n return thread", "def __init__(__self__, *,\n channel: Optional[pulumi.Input['ReleaseChannelChannel']] = None):\n if channel is not None:\n pulumi.set(__self__, \"channel\", channel)", "def _start_in_thread(self):\n return spawn_waitready(self._listen, self.start)[0]", "def handle_channel_assigned(self, channel):\n logger.debug(\"AIRepository was assigned channel {}\".format(channel))\n self.channel = channel", "async def set_channel(self, ctx, channel):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n self.stream_channel = channel\n await self.bot.say(\"Channel sucessfully assigned.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "def select_channel(self, channel):\n if not channel in self._channel_frames:\n self.get_channel_frame(channel)\n self._cur_channel = channel\n for name in self._channel_frames:\n f = self._channel_frames[name]\n if name == channel:\n f.pack(side=Tix.LEFT, fill=Tix.BOTH, expand=True)\n else:\n f.forget()\n self.set_channel_event(channel, _k.IDLE_EV, True)\n for i in range(self._chanlist.size()):\n name = self._chanlist.get(i)\n if kaeirc.util.strlower(name) == channel:\n self._chanlist.selection_anchor(i)\n self._chatbox.focus()", "def on_channel_open(new_channel):\n global channel\n channel = new_channel\n channel.queue_declare(queue=\"test\", durable=True, exclusive=False, auto_delete=False, callback=on_queue_declared)" ]
[ "0.6507928", "0.6503015", "0.6434967", "0.6381614", "0.6208421", "0.6115024", "0.61078566", "0.6104068", "0.6061114", "0.60220224", "0.60070866", "0.6001835", "0.59062725", "0.58968794", "0.58294946", "0.5827837", "0.57995164", "0.578793", "0.57634115", "0.57456595", "0.5743649", "0.57019484", "0.568596", "0.5613022", "0.56037253", "0.5602157", "0.55855775", "0.5574828", "0.5560538", "0.5550253", "0.55486214", "0.55435306", "0.5539613", "0.5536114", "0.5515331", "0.55070585", "0.5494971", "0.5492537", "0.5476858", "0.54524416", "0.5447729", "0.54295546", "0.5414244", "0.54103625", "0.5407968", "0.5406493", "0.540383", "0.53918046", "0.53877294", "0.53796697", "0.5373981", "0.53714144", "0.5363265", "0.53543675", "0.53390443", "0.53289217", "0.53231096", "0.5303428", "0.53004104", "0.52994484", "0.52936774", "0.5288037", "0.52730423", "0.5270513", "0.52674556", "0.5259736", "0.5256938", "0.5249821", "0.5245558", "0.52422607", "0.5241684", "0.5229052", "0.52269775", "0.522408", "0.52124405", "0.52124405", "0.5211801", "0.5203993", "0.52028006", "0.51922596", "0.5190532", "0.5190101", "0.51802725", "0.5177653", "0.5167943", "0.5165325", "0.51586944", "0.5149408", "0.5140442", "0.5132662", "0.5125862", "0.5125797", "0.51233685", "0.51133657", "0.51120603", "0.511078", "0.51084584", "0.51076496", "0.51056445", "0.5104477" ]
0.5886065
14
Sends message to specified channel. Also updates Message with Source's Name (puts it as first component of message)
def send_message(self, context, message, processing=None): # TODO: chained context to trace consequent requests # TODO: option to build UML out of conversation if processing is None: if message.is_reply: processing = 0 else: processing = 2 if processing == 2 and message.is_reply: raise ValueError("Processing level 2 can be set only for initial (non-reply) messages") if context.channel not in self._channels: raise ValueError("Channel {} not exists!".format(context.channel)) print_level_change = pref.send_message_print_level_change if processing == 2: self._send_message_level += 1 # Increase level if processing == 2 if print_level_change: vprint("send message level changed to: {}".format(self._send_message_level)) try: # Send message into channel self._channels[context.channel].send_message(context, message) # Register replies if message.is_reply: self._replies[context.channel][context.thread][message.sender] = message else: if context.channel not in self._replies: self._replies[context.channel] = {} assert context.thread not in self._replies[context.channel], \ "PlatformsFarm:send_message Unexpectedly received second initial (non-reply) message " \ "for {}:{}".format(context.channel, context.thread) self._replies[context.channel][context.thread] = {} except Exception as e: if processing == 2: self._send_message_level -= 1 if print_level_change: vprint("send message level changed to: {}".format(self._send_message_level)) raise e # Just exit here if processing is 0 if processing == 0: return None stay_in_loop = True while stay_in_loop: stay_in_loop = False # Process messages if any platform received messages if any(p.received_messages > 0 for p in self._platforms.values()): self.process_messages() if processing == 2: # If processing is 2, then stay in loop as there could be responses stay_in_loop = True if processing == 2 and not stay_in_loop: # If no more processing expected # But it's multithreadning case and not all final replies were received yet if pref.multithreading and not all(m.is_failure or m.is_success for m in self._replies[context.channel][context.thread].values()): # And some platforms are still waiting for reply - just wait a bit if any(p.waiting_reply for p in self._platforms.values()): time.sleep(0.001) stay_in_loop = True if processing == 2: self._send_message_level -= 1 if print_level_change: vprint("send message level changed to: {}".format(self._send_message_level)) result = self._replies[context.channel].pop(context.thread) return result else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send_message(self, channel : str, message : str):\n await self._connection.send_message(channel, message)", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def send(self, msg):\n return self._channel_action(msg, 1)", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "def send_message(self, channel, text):\n if not channel:\n return\n self.post('chat.postMessage', data={\"channel\": channel, \"text\": text})", "async def send_initial_message(self, ctx: Context, channel: discord.TextChannel) -> discord.Message:\n\n return await channel.send(embed=self.embed)", "def send(self, messages, channel):\n # Process strings as well as iterables\n if isinstance(messages, str):\n message = messages\n else:\n message = '\\n'.join(messages)\n # Post message to output stream\n self.outputs.append([channel, message])", "def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def msg_chan_send(channel, value, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CHAN_SEND, channel, value, version, order)", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "async def test_send_to_channel(self):\n message = \"Test basic message.\"\n await self.cog.send_message(message, *self.text_channels, alert_target=False)\n\n self.text_channels[0].send.assert_awaited_once_with(message)\n self.text_channels[1].send.assert_not_called()", "def send(msg, dest=None):", "def send_message(self, message, channel=None):\n if channel is None:\n channel = self.default_channel\n\n self._slack_client.api_call(\n \"chat.postMessage\", channel=channel, text=message)", "def send_channel(self, text, channel_id, **kwargs):\n\n data = {\"content\": text}\n data.update(kwargs)\n self._post_json(f\"channels/{channel_id}/messages\", data)", "def send_message(self, message: str):\n self.client.chat_postMessage(\n channel=f\"@{self.username}\", text=message,\n )", "def send_message(channel, message):\n slack_client = get_client()\n slack_client.chat_postMessage(channel=channel, text=message, as_user=True)", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "def send_channel_message(self, status, data1=None, data2=None, ch=None):\n msg = [(status & 0xF0) | ((ch if ch else self.channel) - 1 & 0xF)]\n\n if data1 is not None:\n msg.append(data1 & 0x7F)\n\n if data2 is not None:\n msg.append(data2 & 0x7F)\n\n self._midi.send_message(msg)", "def send_message(channel, data):\n try:\n socketio.emit(channel, data)\n logging.info('Message was sent.')\n logging.debug(data)\n except Exception as e:\n logging.error(e)\n logging.error(\"Can't send message. 
Exeption occured\")", "def send_message(self, message):\n source_guid = str(uuid.uuid1())\n date = time.strftime(\"%H:%M:%S\")\n self.api.send_message(\n self.conversation_type,\n self.cid,\n source_guid,\n message[:1000]\n )\n if self.api.send_message(self.conversation_type, self.cid, source_guid, message):\n self.append_message(source_guid, 'me', date, message[:1000])\n if len(message) > 1000:\n self.send_message(message[1000:])", "async def send(self, channel=None, **kwargs):\n\n if \"user\" in kwargs:\n api_call = self.client.chat_postEphemeral\n\n else:\n api_call = self.client.chat_postMessage\n\n return await api_call(\n channel=channel or self.channel,\n # contents of messenger[UserDict]\n **self,\n # any other API fields provided by Caller\n **kwargs,\n )", "async def send_shortlived_message(self, message, channel, duration=5):\n pass", "def send_message(self, message):\n pass", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def send_message(self, msg: dict):\n txrx_debug('{} sending {} msg to {}'.format(msg['src'], msg['type'], msg['dst']))\n self.sock.send(dumps(msg).encode('utf-8'))", "def join(self, source, channel):\n\n self.channel_map[channel].add(source[0])\n self.nick_map[source[0]].add(channel)\n\n self.log(\"*** {0:s} has joined {1:s}\".format(source[0], channel))", "def send(self, message):\n pass", "def msg(self, chan, msg):\n self._msg(chan, msg)", "def publish(self, channel: str, message):\n raise TypeError(\"{} - publish not implemented!\")", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "def sendMessage(self, message):\n for component in CraftChatMessage.fromString(message):\n self.block.sendMessage(component)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send_user_message(self, channel_id, message):\n self.slack_client.api_call('chat.postMessage', as_user='true', channel=channel_id, text=message)", "async def channel(self, ctx: commands.Context, channel: discord.TextChannel):\n self.channel = str(channel.id)\n await self._update_db()\n\n await ctx.send(f\"Done! 
{channel.mention} is the Starboard Channel now!\")", "def send(self, msg: Message, **kwargs):\n\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send_irc_message(self, event):\n\n self.log('Transmitting IRC message', lvl=debug)\n\n self.fireEvent(PRIVMSG(event.username, \"[%s] %s : %s\" % (event.msg_type, event.subject, event.body)))", "def send(self, msg):\n self.message('Me', msg)", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "def send_message(self, message):\n self.send_message_queue.put(message)", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def Send(self, message, message_type_name, source):\n type_enum = message_type_helper.Value(message_type_name)\n address = (self._mcast_groups[type_enum], network_config.UDP_PORT_AIO)\n self._SendInternal(message, message_type_name, source, address)", "def send(self,header,dest,msg):\n message = self.create_message(header,dest,msg)\n\n if message == None:\n print(\"Not a valid Message\")\n else:\n message = json.dumps(message) # turns message dictionary into json string\n message = message.encode(FORMAT) # encodes message w/ UTF-8\n msg_len = len(message) # gets message length\n send_length = str(msg_len).encode(FORMAT) #encodes message length w/ UTF-8\n send_length += b' ' * (PREFIX-len(send_length)) #pads send length up to 64 bits\n\n conn = self.connections[dest][\"CONN\"]\n conn.send(send_length)\n sleep(0.1)\n conn.send(message)", "def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)", "async def send_discord(msg, cnl):\n await bot.wait_until_ready()\n await bot.send_message(bot.get_channel(cnl), msg)", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def message_send(token, channel_id, message):\n verify_token(token)\n if not message:\n raise InputError(\"Message can't be empty\")\n if len(message) > 1000:\n raise InputError(\"Message is over 1000 characters\")\n user = get_user_from_token(token)\n selected_channel = select_channel(channel_id)\n # Raise error if the user is not a part of the channel they are trying to message in\n if not is_user_member(user, selected_channel):\n raise AccessError(description=\"User is not a member of channel with channel_id\")\n\n sent_message = Message(\n channel=selected_channel,\n user=user,\n message=message\n )\n db.session.add(sent_message)\n db.session.commit()\n return {\n \"message_id\": sent_message.id\n }", "def send(self, msg, flags=0, copy=True, track=False, **kwargs):\n kwargs['flags'] = flags\n kwargs['copy'] = copy\n kwargs['track'] = track\n kwargs.update(dict(flags=flags, copy=copy, track=track))\n return self._add_send_event('send', msg=msg, kwargs=kwargs)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def dispatch_messages(sock, queue, channel):\n while run:\n try:\n message = queue.get()\n except Queue.Empty:\n pass\n else:\n if message.recipient is None:\n message.recipient = channel\n sock.send(\"{0}\\r\\n\".format(message.msg()))\n logging.debug(\"{0}\".format(message.msg()))\n queue.task_done()", "def sendMessage(self, name, message):\n time.sleep(int(self.getOwnName()) * 0.05)\n self.getSocket(name).send_pyobj(message)", "async def admin_msg(self, message):\n for 
channel in self.admin_channels.values():\n if channel:\n await channel.send(message)", "def receive_message(self, channel: str, message: str) -> None:\n print(f\"{self._name} receives message {message} from \"\n f\"channel-[{channel}]\")", "def send_message(message, destination):\n\n #Your code here\n pass", "def send(self, event, message):\n pass", "def send_message(\n self, text=None, blocks=None, attachments=None,\n timestamp=None, channel_name=None, channel_id=None):\n if not channel_id:\n channel_id = self.find_channel_id(channel_name)\n\n response = self.client.api_call(\n ''.join(\n [\n f'chat.postMessage?as_user={cfg.POST[\"as_user\"]}&channel={channel_id}&',\n f'thread_ts={timestamp}&' if timestamp else '',\n f'text={text}&' if text else '',\n f'blocks={blocks}&' if blocks else '',\n f'attachments={attachments}' if attachments else ''\n ]\n )\n )\n assert response['ok']\n return response", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "async def send_sys_message(app, channel_id: int, m_type: MessageType,\n *args, **kwargs):\n handler = {\n MessageType.CHANNEL_PINNED_MESSAGE: _handle_pin_msg,\n }[m_type]\n\n message_id = await handler(app, channel_id, *args, **kwargs)\n\n message = await app.storage.get_message(message_id)\n\n await app.dispatcher.dispatch(\n 'channel', channel_id, 'MESSAGE_CREATE', message\n )", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "async def send_message(self, message: dict) -> None:\n await self.client.chat_postMessage(channel=self.channel_id, **message)", "def send_message(self, context, message):\r\n if context.channel == \"__void__\":\r\n return\r\n if self._busy:\r\n self._queue.append((context, message))\r\n return\r\n thread = context.thread\r\n _msg = message\r\n message = message.serialize()\r\n self._busy = True\r\n if self._topics[thread] is None:\r\n assert not _msg.is_reply, \"First message shouldn't be reply!\\n\" \\\r\n \" were told to send into {}:{} message {}\".format(self.name, thread, message)\r\n self._topics[thread] = ' '.join(str(m) for m in message)\r\n first_message = True\r\n else:\r\n assert _msg.is_reply, \"Messages besides first should be replies!\\n\" \\\r\n \" were told to send into {}:{} messaage {}\".format(self.name, thread, message)\r\n first_message = False\r\n assert isinstance(thread, int) and 0 <= thread < len(self._threads), \"Thread {} don't exists at channel {}!\".\\\r\n format(thread, self.name)\r\n if self.print_messages:\r\n if first_message:\r\n vprint(\"{}: Sending message {} to {}::{}\".format(time.time() - self._timeref,\r\n message, self.name, thread))\r\n else:\r\n vprint(\"{}: Sending reply {} to {}::{}({})\".format(time.time() - self._timeref,\r\n message, self.name, thread,\r\n self._topics[thread]))\r\n fail_idx = next(_mc)\r\n received_by = 0\r\n if self.gather_conversation:\r\n conv = [_msg.sender, \"-->\", None, message[2:], 0, 0]\r\n if not _msg.is_reply or self._threads[thread][\"reply_to_tc\"] is not True:\r\n for s in self._subscribers:\r\n if s.name == _msg.sender: # Don't send message back to it's source\r\n continue\r\n if s.name == self._threads[thread][\"tc\"].name \\\r\n and self._threads[thread][\"reply_to_tc\"] is not False:\r\n # If s is topic caster and it would get reply - send it later (to avoid double sends)\r\n continue\r\n if self.gather_conversation:\r\n conv[-2] = time.time()\r\n idx = next(_mc)\r\n r = s.receive_message(context, message)\r\n 
if self.gather_conversation:\r\n conv[-1] = time.time()\r\n if r not in (False, True):\r\n self._busy = False\r\n assert r in (False, True), \\\r\n \"{}: Reply from {} contains no result or value({}) not in (False, True)\".format(\r\n time.time() - self._timeref, s.name, r)\r\n if r:\r\n received_by += 1\r\n if self.gather_conversation and (r or self.gather_all):\r\n if r:\r\n conv[1] = \"-->\"\r\n else:\r\n conv[1] = \"-->x\"\r\n conv[2] = s.name\r\n self._log_conv(thread, conv, idx)\r\n\r\n if self._threads[thread][\"reply_to_tc\"] is not False:\r\n idx = next(_mc)\r\n r = self._threads[thread][\"tc\"].receive_message(context, message)\r\n if self.gather_conversation:\r\n conv[-1] = time.time()\r\n if r not in (False, True):\r\n self._busy = False\r\n assert r in (False, True), \\\r\n \"{}: Reply from {} contains no result or value({}) not in (False, True)\".format(\r\n time.time() - self._timeref, self._threads[thread][\"tc\"].name, r)\r\n if r:\r\n received_by += 1\r\n if self.gather_conversation and (r or self.gather_all):\r\n if r:\r\n conv[1] = \"-->\"\r\n else:\r\n conv[1] = \"-->x\"\r\n conv[2] = self._threads[thread][\"tc\"].name\r\n self._log_conv(thread, conv, idx)\r\n\r\n if received_by < 1:\r\n if self.gather_conversation:\r\n conv[-1] = time.time()\r\n if self.print_messages:\r\n vprint(\"{}: Message {} to {}::{} had no effect\".format(time.time() - self._timeref,\r\n message, self.name, thread))\r\n if self.gather_conversation:\r\n conv[1] = \"-->x\"\r\n conv[2] = None\r\n self._log_conv(thread, conv, fail_idx)\r\n self._busy = False\r\n if len(self._queue) > 0:\r\n queued = self._queue.pop(0)\r\n self.send_message(*queued)", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send(self, message, sender):\n chatclient.receive_chat_message(message, sender)\n return {}", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def send_message(self, peer_name, message):\n try:\n writer = self.peers[peer_name][\"writer\"]\n writer.write(message.get_message(self.network_type))\n except KeyError:\n print(f\"Error: Connection to {peer_name} doesn't exist.\")", "def joined(self, channel):\n self.logger.info(\"Joined %s\" % channel)", "def join_channel(self, channel):\r\n self._send('JOIN #%s\\r\\n' % channel)", "async def send(self, message):", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "async def say(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, message=None):\n if not text_channel:\n return await ctx.send(f\"> **{ctx.author.display_name}, Please specify a message to send.**\")\n if isinstance(text_channel, str):\n if message:\n message = f\"{text_channel} {message}\"\n else:\n message = text_channel\n text_channel = ctx\n await text_channel.send(message)", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def sendMessage(self, Message_, Content_):\r\n self.messagesToSend[Message_] = Content_", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "async def send(self, channel, content=MISSING, *, tts=False, embed=MISSING, embeds=MISSING, file=MISSING, \n files=MISSING, delete_after=MISSING, nonce=MISSING, 
allowed_mentions=MISSING, reference=MISSING, \n mention_author=MISSING, components=MISSING) -> Message:\n\n if type(channel) not in [discord.TextChannel, int, str]:\n raise discord.InvalidArgument(\"Channel must be of type discord.TextChannel\")\n\n channel_id = channel.id if type(channel) is discord.TextChannel else channel\n payload = jsonifyMessage(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, reference=reference, mention_author=mention_author, components=components)\n\n route = BetterRoute(\"POST\", f\"/channels/{channel_id}/messages\")\n\n r = None\n if file is MISSING and files is MISSING:\n r = await self._discord.http.request(route, json=payload)\n else:\n r = await send_files(route, files=files or [file], payload=payload, http=self._discord.http)\n\n msg = Message(state=self._discord._get_state(), channel=channel, data=r)\n \n if delete_after is not None:\n await msg.delete(delay=delete_after)\n \n return msg", "def send_whisper_message(self, channel, user, message):\r\n self._send(\"PRIVMSG #{0} :/w {1} {2}\".format(channel, user, message))", "def send(self, msg):\n self._mailbox.put(msg)", "def send(self, message):\n self.sock.send(message)", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1", "def _send_msg(self, msg):\n self._kernel.comm.send(msg)", "def send_msg (chan, msg):\n\n if not os.path.isdir(tq_dir):\n os.mkdir(tq_dir)\n if not os.path.isdir(rq_dir):\n os.mkdir(rq_dir)\n\n t = datetime.datetime.now()\n fname = tq_dir + '/' + t.strftime(\"%y%m%d.%H%M%S.%f\")[:17]\n\n try:\n f = open(fname, 'w')\n except:\n print (\"Failed to open \" + fname + \" for write\")\n else:\n if chan > 0:\n f.write('[' + str(chan) + '] ' + msg + '\\n')\n else:\n f.write(msg + '\\n')\n f.close()\n time.sleep (0.005)\t# Ensure unique names", "def send(self, msg):\n self.__sock.send(msg)", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def publish(self, channel: str, content: str) -> None:\n print(f\"{self._name} publishes message '{content}' to \"\n f\"channel-[{channel}]\")\n self._server.route(channel, content)", "def send(event):\n\n\tid = get_hostname()\n\n\tmessage = str(id) + \"|\" + str(event)\n\n\tif mq is None: # if no mq exists\n\t\tprint \"mq is None\"\n\n\telse: # if mq exists\n\t\ttry:\n\n\t\t\tmq.send(message)\n\t\t\tprint 'completed sending message'\n\n\t\texcept Exception as e:\n\n\t\t\tprint 'failed to send message: {}'.format(e)", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "async def test_send_to_multiple_channels(self):\n message = \"Test basic message.\"\n await self.cog.send_message(message, *self.text_channels, alert_target=True)\n\n self.text_channels[0].send.assert_awaited_once_with(message)\n self.text_channels[1].send.assert_awaited_once_with(message)", "def send(self):\r\n return self.sendRaw(self.message)", "def fsend(var, wrapper, message):\n wrapper.source.client.send(message)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, msg: str):\n 
message = msg.encode(HttpClient.FORMAT)\n self.client.send(message)\n print(\"[MESSAGE] message sent:\", msg)", "def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)", "def send_msg():\n\tmessage = \"%s %s %d\\n\" % (metric, activeDAHDIChannels, int(time.time()))\n\t# print 'sending message:\\n%s' % message\n\tcarbonSocket = socket.socket()\n\tcarbonSocket.connect((CARBON_HOST, CARBON_PORT))\n\tcarbonSocket.sendall(message)\n\tcarbonSocket.close()\n\tlast_send = int(time.time())", "def send_message(self, message):\n self.client.queue.put(message)", "def send(self, msg):\n msg = stc.pack('>I', len(msg)) + msg\n self.sendall(msg)", "async def channel(self, ctx, channel: discord.TextChannel):\r\n server = ctx.guild\r\n self._logs[str(server.id)][\"channel\"] = str(channel.id)\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"<#{str(channel.id)}> has been set as the modlog channel {self.bot.get_emoji(470063310386233344)}\")", "def send_message(self, **kwargs):\n sending_kwargs = {}\n if self.__handled_message is not None:\n self.__has_sent_response = True\n sending_kwargs['connection'] = kwargs.pop('connection',\n self.__handled_message_source)\n kwargs.setdefault('references', self.__handled_message.id)\n kwargs.setdefault('workflow', self.__handled_message.workflow)\n kwargs.setdefault('to', self.__handled_message.from_)\n return self._client.send_message(self.create_message(**kwargs),\n **sending_kwargs)" ]
[ "0.6885398", "0.675431", "0.65821993", "0.6575302", "0.65634346", "0.63894933", "0.6296932", "0.62330484", "0.6228483", "0.6189294", "0.61043847", "0.61043113", "0.60943514", "0.6092948", "0.6089668", "0.60888904", "0.60853773", "0.6045753", "0.60221964", "0.59493864", "0.59470767", "0.5930451", "0.5930284", "0.59294003", "0.5921757", "0.5919235", "0.5910527", "0.5909746", "0.5906424", "0.58632535", "0.58610797", "0.5857014", "0.58446187", "0.583652", "0.5820297", "0.58111984", "0.58111984", "0.58111984", "0.57997507", "0.5798553", "0.579007", "0.57803077", "0.57609206", "0.5757034", "0.5752696", "0.5748294", "0.5738136", "0.5732475", "0.57203424", "0.5718024", "0.5707674", "0.5707522", "0.57063514", "0.57059115", "0.5690962", "0.5688982", "0.5688762", "0.56852734", "0.56846434", "0.5676307", "0.5668368", "0.56679577", "0.5667704", "0.5660552", "0.5656617", "0.56561023", "0.56534654", "0.56510127", "0.5650573", "0.5648204", "0.56479007", "0.56422323", "0.563405", "0.5631554", "0.5620764", "0.56060743", "0.55999625", "0.55993205", "0.559134", "0.55831254", "0.5581472", "0.55801076", "0.55714893", "0.55600464", "0.55558604", "0.554964", "0.554955", "0.554391", "0.55413336", "0.55396837", "0.5538374", "0.55363727", "0.55361086", "0.55300134", "0.552837", "0.5525339", "0.55206573", "0.5510674", "0.55059505", "0.5501147", "0.5498505" ]
0.0
-1
Invokes processing of received messages by platforms. Usually called automatically from the send_message method
def process_messages(self): for p in self._platforms.values(): if p.received_messages > 0: p.queue_received_messages() for p in self._platforms.values(): if p.queued_messages > 0: p.process_queued_messages()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receive_message(self, message):", "def handleMessage(msg):", "def execute_message_received(self, message_received):\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "def receive(self, message):", "def handle_message(self, message):", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def handle(self, message):", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def on_message(data):\n pass", "def process_messages(self):\n pass", "def receive_message(self, message):\r\n return", "def process(self, message: Message, **kwargs: Any) -> None:", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "def receive(self, msg):\n pass", "def handle_message(self, msg):\n pass", "def client(self,message):\n self.message = message\n self.run()", "def _handle_message(self, msg):\n self.event('message', msg)", "def receive_message(self, context, message):\r\n pass", "def run_local(self, message):", "def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result", "def onMessage(self, message):\n raise NotImplementedError", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "async def on_message(self, message: \"steam.Message\") -> None:", "def test_sendimmessages(self):\n pass", "def send(self, msg):\n self.message('Me', msg)", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == 
\"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == 
\"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == 
\"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == 
\"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "def messageHandler(self):\n\n while len(self.ReceiveMessageBuffer) > 0: # if message handler is called all received messages will be processed\n #print 'entered message handler of ID {0}'.format(self.CommID)\n msg = self.ReceiveMessageBuffer.popleft()\n self.MsgReceiveCount += 1\n self.MsgReceiveCount_interval += 1\n type = msg.getType()\n # for communication test:\n if type == 0: #System message\n print 'ID {0} has received msg {1} from ID {2}'.format(self.CommID, msg.getData(), msg.getIDSender())\n # send reply\n data = msg.getData()\n if data == 'ping':\n retval = self.sendMessage(msg.getIDSender(), 0, 'pong')\n return retval\n elif data == 'pong':\n retval = self.sendMessage(msg.getIDSender(), 0, 'ping')\n return retval\n # elif data[0] == 'system':\n # if(data[1] == 'startRONOPT'):\n # #save fluctuation curve of cluster\n # self.EFluctuationCurve = data[4]\n # #begin with local optimization (data[2] = fromTime, data[3]=toTime)\n # self.stateRONOPT = 0\n # for n in range(len(self.Neighbors)):\n # self.NeighborMessageRec[n] = 0\n # self.RemainderOfNeighborsOpt(data[2],data[3],1)\n #########################################################################################################\n\n elif type == 20: # pseudo tree generation message\n ret = self.messageHandler_PseudoTree(msg)\n if ret == -1:\n break\n\n elif type == 40: # load propagation message\n self.messageHandler_LoadProp(msg)\n\n elif type == 70:\n self.messageHandler_RemainderMulticast(msg) #remainder multicast optimization\n\n return 0", "def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)", "def onMessage(self, payload, isBinary):", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "def process_message(self, msg, src):", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def on_receive(self, msg):\n raise NotImplementedError", "def process(self, msg):\n raise NotImplemented", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n 
protocol.send_news(msg)", "def send_message(self, message):\n pass", "def _send_message(self, *args, **kwargs):\n with self.comm_lock:\n return super(FrontendComm, self)._send_message(*args, **kwargs)", "def processMessage(self, msg):\r\n LOG(\"Received message: \" + msg.getId())\r\n \r\n # Process messages incoming from child executor, if any\r\n procId = msg[FIELD_PROC_ID]\r\n if procId != self.procId:\r\n if self.childManager.hasChild():\r\n self.childManager.processChildMessage(msg)\r\n else:\r\n LOG(\"Unexpected child message: \" + msg.getId(), LOG_ERROR)\r\n elif msg.getType() == MSG_TYPE_COMMAND:\r\n if msg.getId() == Messages.MSG_ADD_CLIENT:\r\n self.addClient(msg)\r\n elif msg.getId() == Messages.MSG_REMOVE_CLIENT:\r\n self.removeClient(msg)\r\n elif msg.getId() == Messages.CMD_CLOSE:\r\n self.cleanup()\r\n elif msg.getId() == Messages.CMD_RELOAD:\r\n REGISTRY['CIF'].clearAsRun()\r\n self.cleanup( executionOnly = True )\r\n self.setupResources()\r\n self.prepareExecution()\r\n else:\r\n cmdId = msg[\"Id\"]\r\n if cmdId in [ Messages.CMD_ABORT, Messages.CMD_PAUSE ]:\r\n self.mailbox.push( msg, high_priority = True )\r\n else:\r\n self.mailbox.push( msg )\r\n else:\r\n LOG(\"Unexpected message: \" + msg.getId() + \"/\" + msg.getType(), LOG_ERROR)", "def received_message(self, msg):\n command = int(msg[:8], base=16)\n msg = msg[8:]\n self.log.debug(\"CONTROLLER - RECEIVED COMMAND: \" + str(command))\n self.log.debug(\"CONTROLLER - MSG: \" + str([int(msg[i:i+8], base=16) for i in range(0, len(msg), 8)]))\n if command == 0:\n # 0 - opponent start the game\n self.master.add_log(\"Opponent starts the game.\")\n elif command == 1:\n # 1 - you start the game\n self.master.add_log(\"You start the game! Your turn.\")\n self.master.first = True\n self.master.new_round(False)\n elif command == 2:\n # 2 - start of your turn\n self.master.add_log(\"Your turn.\")\n self.master.new_round()\n elif command == 3:\n # 3 - opponent draws a card\n self.master.opp_hand.add_placeholder()\n self.master.add_log(\"Opponent draw a card.\")\n elif command == 4:\n # 4,x,y - opponent plays a card with x id on y spot on gameboard\n c_id = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n card = self.master.database.get_card(c_id)\n if card.card_type == \"Spell\":\n self.master.opp_sfield.set_card(card)\n else:\n self.master.opp_bfield.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent played a card {card.name}.\")\n elif command == 5:\n # 5,v,x,y - player v picks up card from x space from y spot to his hand\n # v - 0/1 - you/opponent\n # x - 0/1 - mana/battlefield\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n card = self.master.mana.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from mana zone to your hand.\")\n elif c_space == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from battle zone to your hand.\")\n elif c_player == 1:\n if c_space == 0:\n card = self.master.opp_mana.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from mana to his hand.\")\n elif c_space == 1:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n 
self.master.add_log(f\"Opponent picks up {card.name} from battle zone to his hand.\")\n elif command == 6:\n # 6,v,x,y - player v puts card from x space from y spot to his graveyard\n # v - 0/1 - you/opponent\n # x - 0/1/2 - mana/battlefield/hand\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"yu_mn\", c_pos)\n elif c_space == 1:\n self.master.a_move_to_graveyard(\"yu_bf\", c_pos)\n elif c_space == 2:\n card = self.master.hand[c_pos]\n self.master.a_move_to_graveyard(\"yu_hd\", c_pos)\n self.master.send_message(15, card.id) # Sent back which card was discarded\n elif c_player == 1:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"op_mn\", c_pos, False)\n elif c_space == 1:\n # Do not change to a_move_to_graveyard\n if c_pos == 5:\n card = self.master.opp_sfield.remove_card()\n else:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent's card {card.name} from battle zone was moved to his graveyard.\")\n elif command == 7:\n # 7,x,y - opponent puts y card from x space to manazone\n # x - 0/1/2/3 - hand/deck/graveyard\n c_space = int(msg[:8], base=16)\n c_id = int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card {card.name} from his hand to the mana zone\")\n elif c_space == 1:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his deck to the mana zone\")\n elif c_space == 2:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_graveyard.remove_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to the mana zone\")\n elif command == 8:\n # 8,x - opponent adds card from his hand to y shield (face down)\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.add_placeholder(c_pos)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card from his hand to shields\")\n elif command == 9:\n # 9,x,y - Opponent tap/untap card on y spot in mana zone\n # x - 0/1 - tap/untap\n c_tap = bool(int(msg[:8]))\n c_pos = int(msg[8:16], base=16)\n if c_tap:\n self.master.opp_mana.untap_card(c_pos)\n else:\n self.master.opp_mana.tap_card(c_pos)\n elif command == 10:\n # 10,x - (info) opponent looks under his shield on x spot\n c_pos = int(msg[:8], base=16)\n self.master.add_log(f\"Opponent is peeking his {c_pos} shield\")\n elif command == 11:\n # 11,x,y - opponent looks under my shield/card on hand on y spot\n # x - 0/1 - hand/shield\n c_space = int(msg[:8])\n c_pos = int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.hand[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} card in hand\")\n elif c_space == 1:\n card = self.master.shields[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} shield\")\n self.master.send_message(111, card.id)\n elif command == 111:\n # 111,x - \n c_id = int(msg[:8], base=16)\n # TODO: split command to separate hand and shield\n # TODO: show in the UI what the card actually is\n self.master.add_log(f\"The choosen card is {c_id}\")\n elif command == 12:\n # 12,x,y - opponent attacks your x card with his y card on the battlefield\n c_opp_pos = int(msg[:8], base=16)\n c_my_pos = 
int(msg[8:16], base=16)\n opp_card = self.master.opp_bfield[c_opp_pos]\n my_card = self.master.bfield[c_my_pos]\n self.master.add_log(f\"Opponent is attacking your card {my_card.name} with card {opp_card.name}.\")\n self.master.creature_attacked(c_opp_pos, c_my_pos)\n elif command == 112:\n # 112,x - returned which card you will attack\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 13:\n # 13,x,y1,y2,... - opponent attacks your shields with y card\n # x - position of creature on the board\n # ya - a-th shield attacked by this creature\n creature_pos = int(msg[:8], base=16)\n msg = msg[8:]\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n shields_string = \", \".join([str(pos) for pos in shields_pos])\n self.master.add_log(f\"Your shields at pos {shields_string} are being attacked by {self.master.opp_bfield[creature_pos].name}.\")\n self.master.shields_attacked(creature_pos, shields_pos)\n elif command == 113:\n # 113,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block shield attack, continue\n self.master.attack_shield()\n else:\n # Oppponent blocked with creature\n self.master.selected_shields = []\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 14:\n # 14,y1,y2,... - opponent destroys your shields\n # ya - a-th shield\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n self.master.shield_destroyed(shields_pos)\n elif command == 114:\n # 114,x - opponent picked up x shield to his hand\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up {c_pos} shield to his hand.\")\n self.master.refresh_screen()\n elif command == 214:\n # 214,x - opponent played x shield to spell/battle zone\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.add_log(f\"Opponent played a card from {c_pos} shield trigger.\")\n self.master.refresh_screen()\n elif command == 314:\n # 314 - opponent ended handling shield attack\n self.master.selected_card = []\n self.master.your_turn = 1\n elif command == 15:\n # 15 - id of the discarded card\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent discarded {card.name}\")\n self.master.refresh_screen()\n elif command == 16:\n # 16,v,x,y - x player taps/untaps a y creature\n # v - 0/1 - tap/untap\n # x - 0/1 - you/opponent\n # y - pos\n c_tap = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_tap == 0:\n # Tap\n if c_player == 0:\n # You\n self.master.bfield.set_tapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now tapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_tapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now tapped.\")\n if c_tap == 1:\n # Untap\n if c_player == 0:\n # You\n self.master.bfield.set_untapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now untapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_untapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now untapped.\")\n self.master.refresh_screen()\n elif command == 17:\n # 17,c,s1,p1,s2,p2... 
- opponent chooses which cards to destroy from the list\n # c - how many creatures to destoy\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_destoyed(count, target_list)\n elif command == 117:\n # 117 - opponent choosed cards and his actions ended\n self.master.post_destroy_creatures()\n elif command == 18:\n # 18,x - opponent adds card x from his deck to hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his deck to his hand\")\n elif command == 19:\n # 19,x - opponent adds card x from his graveyard to his hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.remove_card(card)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to his hand\")\n elif command == 20:\n # 20,c,s1,p1,s2,p2... - opponent chooses which cards to move to manazone from the list\n # c - how many creatures to sacrafice\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_put_to_mana(count, target_list)\n elif command == 120:\n # 120 - opponent choosed cards and his actions ended\n self.master.post_sacrafice_creatures()\n elif command == 21:\n # 21,y,x - player x puts card from y pos on battlefield zone to manazone\n # x - 0/1 - opponent/you\n # y - position\n c_player = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n if c_player == 0:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent moved card {card.name} from his battlezone to the mana zone\")\n elif c_player == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.mana.add_card(card)\n self.master.add_log(f\"Opponent moved your card {card.name} from battlezone to your mana zone\")\n elif command == 22:\n # 22,x - player x puts card from y pos on battlefield zone to hand\n # x - position\n c_pos = int(msg[:8], base=16)\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up card {card.name} from his battlezone to his hand\")\n elif command == 23:\n # 23 - opponent added an z effect to x card on y battefield\n c_pos = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_effect_name = int(msg[16:24], base=16)\n effect_name = EffectName(c_effect_name).name\n if c_player == 0:\n # to the opponent\n card = self.master.opp_bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to his card {card.name}\")\n elif c_player == 1:\n # to the player\n card = self.master.bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to your card {card.name}\")\n elif command == 24:\n # 24,x - opponent attacks you directly with x card\n # x - position of creature on the board\n creature_pos = int(msg[:8], base=16)\n self.master.add_log(f\"You are being directly attacked by {self.master.opp_bfield[creature_pos].name}.\")\n 
self.master.directly_attacked(creature_pos)\n elif command == 124:\n # 124,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block, you win\n self.master.win()\n else:\n # Oppponent blocked with creature\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 25:\n # 25 - opponent won the game\n self.master.lose(True)\n elif command == 26:\n # 26 - opponent lost the game\n self.master.win(True)\n elif command == 27:\n # 27 - start of the next turn\n self.master.turn_count += 1\n self.master.add_turn_info()", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def message_callback(self, message):\n pass", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def receiveMessage(self, user, message):\n pass", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def run_message_loop(self):\n raise NotImplementedError", "def _process_messages(self):\r\n \r\n self._print(\"%s: Starting _process messages, looking out for special messages:\" \\\r\n % (self._clientnr))\r\n \r\n # Set some expected messages.\r\n expected = {}\r\n expected['clientconfirm'] = cb.CLIENTCONFIRM[:cb.CLIENTCONFIRM.find('_')]\r\n expected['waitwhat'] = cb.WAITWHATCLIENT[:cb.WAITWHATCLIENT.find('_')]\r\n \r\n for key in expected.keys():\r\n self._print(\"%s: Special message '%s': '%s'\" % \\\r\n (self._clientnr, key, expected[key]))\r\n \r\n # Run idefinitively\r\n while True:\r\n \r\n # Get new incoming commands.\r\n cmds = self.udp.getCommands()\r\n self._print(\"%s: Found %d new UDP commands.\" % \\\r\n (self._clientnr, len(cmds)))\r\n # Add new commands to the queue.\r\n for c in cmds:\r\n # Parse the message.\r\n target, message, clienttime = c.text.split('|')\r\n self._print(\"%s: Found message (%s to %s, t=%s) '%s'\" % \\\r\n (self._clientnr, c.ip, target, clienttime, message))\r\n # Only process messages from the server.\r\n if c.ip == self._servernr:\r\n # Check if this is a client confirmation message.\r\n if expected['clientconfirm'] in message:\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Only process the messages that were directed at this client.\r\n elif target in ['None', str(self._clientnr)]:\r\n # Check if this is a confused message to find out what\r\n # the client is waiting for.\r\n if expected['waitwhat'] in message:\r\n self._print(\"%s: Received '%s' from server\" % \\\r\n (self._clientnr, message))\r\n # Parse the waitwhat message, which looks like this:\r\n # 'waitwhatclient_expected=%s'\r\n msg, xpctd = message.split('_')\r\n xpctd = xpctd[xpctd.find('=')+1:]\r\n # Re-send the last version of the expected message.\r\n if xpctd in 
self._lastmessage.keys():\r\n self._outgoing.append(self._lastmessage[xpctd])\r\n self._print(\"%s: Resending the last version of expected message '%s': '%s'\" % \\\r\n (self._clientnr, xpctd, self._lastmessage[xpctd]))\r\n else:\r\n self._print(\"%s: Do not have a last version of expected message '%s'\" % \\\r\n (self._clientnr, xpctd))\r\n else:\r\n # Add the message to the queue.\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Chuck a message out if the queue is getting too long.\r\n if len(self._incoming) > self._maxincominglen:\r\n self._incominglock.acquire()\r\n delmsg = self._incoming.pop(0)\r\n self._incominglock.release()\r\n self._print(\"%s: Removed message '%s' from the incoming queue\" \\\r\n % (self._clientnr, delmsg))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't for me (%s)\" \\\r\n % (self._clientnr, message, self._clientnr))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't from the server (%s)\" \\\r\n % (self._clientnr, message, self._servernr))\r\n \r\n # Process outgoing commands.\r\n while len(self._outgoing) > 0:\r\n # Send a message to the server.\r\n self._outgoinglock.acquire()\r\n message = self._outgoing.pop(0)\r\n self._outgoinglock.release()\r\n self._print(\"%s: Sending '%s' to %s\" % \\\r\n (self._clientnr, message, self._servernr))\r\n msg = 'cmd,%s|%s' % (self._servernr, message)\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n for i in range(self._message_reps):\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n # Store the message in the 'last sent' dict.\r\n if '_' in message:\r\n m = message[:message.find('_')]\r\n else:\r\n m = message\r\n self._lastmessage[m] = message", "def Start(self):\n for unused_i in range(0, self.args.message_count):\n self.CallClient(\n standard.ReadBuffer, offset=0, length=100, next_state=\"Process\")", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def receive():\n pass", "def send_msg(self, my_queue, my_msg):", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "async def send(self, message):", "def communicate():\n pass", "def onMessageFrame(self, payload):", "def test_sendMessage(self):\n self.p.sendMessage(\"CMD\", \"param1\", \"param2\")\n self.check(\"CMD param1 param2\\r\\n\")", "def on_message(client, userdata, msg):\n TOPIC_DISPATCH_DICTIONARY[msg.topic][\"method\"](msg)", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n 
threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "def onMessageBegin(self, isBinary):", "def application_message(self, bus, msg):\n\t\tmsgtype = msg.structure.get_name()\n\t\tif msgtype == 'partial_result':\n\t\t\tself.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\telif msgtype == 'result':\n\t\t\tself.final_result(msg.structure['hyp'], msg.structure['uttid'])\n\t\t\tself.pipeline.set_state(gst.STATE_PAUSED)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def send(self, message):\n pass", "def recv(self, *messages):\n for message in messages:\n self.input.put(message)", "def cmd(self, message):\n pass", "def handle_msg(self, msg):\n self.log.debug(\"handle_msg[%s](%s)\", self.comm_id, msg)\n if self._msg_callback:\n shell = self.kernel.shell\n if shell:\n shell.events.trigger('pre_execute')\n self._msg_callback(msg)\n if shell:\n shell.events.trigger('post_execute')", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def handle_message(self, data, 
channel):\n pass", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)", "def _process_message(self, obj):\n pass", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def received_message(self, m):\n self.receiver.handle_message(m)", "def SendMessage(self, event):\n pass", "def receive(self):\n pass", "def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )", "def main(self):\n\n dp = self.dispatcher\n\n dp.add_handler(MessageHandler(Filters.text, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.command, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.photo, self.__photo_handler))\n dp.add_handler(MessageHandler(\n Filters.location, self.__location_handler))\n dp.add_handler(CallbackQueryHandler(callback=self.__clb_handler))\n\n self.updater.start_polling()\n self.updater.idle()", "def __processMsg(self, sock, msgData):\n\n pass", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n 
self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n if msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n if msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])", "def on_message(self, msg):\n\n msg = json.loads(msg)\n\n psession = self.funcserver.pysessions.get(self.pysession_id, None)\n if psession is None:\n interpreter = PyInterpreter(self.funcserver.define_python_namespace())\n psession = dict(interpreter=interpreter, socks=set([self.id]))\n self.funcserver.pysessions[self.pysession_id] = psession\n else:\n interpreter = psession[\"interpreter\"]\n psession[\"socks\"].add(self.id)\n\n code = msg[\"code\"]\n msg_id = msg[\"id\"]\n\n stdout = sys.stdout\n try:\n sys.stdout = cStringIO.StringIO()\n interpreter.runsource(code)\n output = sys.stdout.getvalue() or interpreter.output\n if isinstance(output, list):\n output = \"\".join(output)\n interpreter.output = []\n finally:\n sys.stdout = stdout\n\n msg = {\"type\": MSG_TYPE_CONSOLE, \"id\": msg_id, \"data\": output}\n self.send_message(msg)", "def process_message(self, context, message):\r\n r = self._process_message_general(context, message)\r\n if r is True:\r\n return\r\n elif r is not False:\r\n self._interface.incoming(context, message, r)\r\n else:\r\n self._interface.incoming(context, message, None)", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def handle_msg(self, state_id, msg):\n pass", "def message_callback(self, message):\n message_data = json.loads(message)\n\n if message_data.get('command') == 'error':\n return self.command_error(message_data)\n\n if 'device_type' in message_data and not message_data['device_type'].startswith(self.device_filter):\n return\n\n # Try to find a matching command and execute it\n command_name = message_data['command']\n command_data = message_data.get('data', {})\n device_name = message_data.get('name')\n\n command_handler_name = 'command_{}'.format(command_name)\n if not hasattr(self, command_handler_name):\n logging.info(\"{} does not support command {}\".format(\n self,\n command_name\n ))\n return\n\n command_handler = getattr(self, command_handler_name)\n return command_handler(device_name, command_data)", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info 
appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def cmd_handler():\n context = zmq.Context()\n\n # socket to receive commands (a subscription to ELECTION_CODE channel)\n cmd_socket = context.socket(zmq.SUB)\n cmd_socket.connect (\"tcp://%s:5556\" % SERVER_HOST)\n topicfilter = \"politiche2013\"\n cmd_socket.setsockopt(zmq.SUBSCRIBE, topicfilter)\n\n # socket to send replies\n reply_sender = context.socket(zmq.PUSH)\n reply_sender.connect(\"tcp://%s:5557\" % SERVER_HOST)\n\n # main loop\n while True:\n print \"Aye sir, unit {0} ready for your commands ...\".format(computer_id)\n # wait for a command\n string = cmd_socket.recv()\n\n # action\n print \"Message received: '%s'\" % (string,)\n\n # send reply to server\n print \"Sending reply to server\"\n reply = { 'unit' : computer_id, 'status' : 'configured'}\n reply_sender.send_json(reply)", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass" ]
[ "0.7143642", "0.71015793", "0.7093893", "0.702307", "0.6902395", "0.6859738", "0.68166494", "0.6779305", "0.6749306", "0.6745342", "0.6691393", "0.6681336", "0.6598424", "0.6589076", "0.65837705", "0.6558769", "0.6531817", "0.65218943", "0.6481661", "0.64773434", "0.646337", "0.6409125", "0.64086604", "0.64086604", "0.64086604", "0.63864195", "0.63847905", "0.6377551", "0.63727456", "0.63685495", "0.63541293", "0.63386947", "0.6322964", "0.6316526", "0.63012207", "0.63", "0.6281436", "0.62728345", "0.62691927", "0.6262433", "0.6260114", "0.62592995", "0.62559307", "0.62547076", "0.6253312", "0.62390035", "0.62283033", "0.6216926", "0.6216856", "0.6200243", "0.6191942", "0.6191942", "0.6190126", "0.61896193", "0.6173588", "0.6172791", "0.6171647", "0.61549354", "0.6140956", "0.6135626", "0.612969", "0.61265504", "0.61263794", "0.6123262", "0.6122211", "0.61151487", "0.6108278", "0.6101574", "0.6097669", "0.6097669", "0.60831416", "0.6078737", "0.607301", "0.6070955", "0.6066815", "0.60656774", "0.60482657", "0.60464495", "0.6034302", "0.6027114", "0.60258895", "0.6020485", "0.6016778", "0.6009794", "0.60086155", "0.60081244", "0.60051847", "0.59947866", "0.5984667", "0.5984667", "0.59823716", "0.59813666", "0.5978927", "0.59686065", "0.5964842", "0.59646976", "0.5964149", "0.5955471", "0.5955471", "0.5955471" ]
0.6632475
12
Creates Platform's instance. Should be called when the conditions for creating this very instance are met; platforms that this instance depends on should already be registered
def finish_registration(self): base_platform = self._args.get("base_platform", None) lcls = {} try: exec("from platforms.{}.main import RootClass as rc; cl = rc".format(base_platform), globals(), lcls) except ModuleNotFoundError as e: eprint("Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!".format( base_platform, self.name)) raise e lcls["name"] = self.name lcls["farm"] = self._farm lcls["args"] = self._args try: exec("inst = cl(name=name, farm=farm, **args)", globals(), lcls) inst = lcls["inst"] except Exception as e: eprint("Exception occurred when creating platform {} of {} kind!\nException: {}".format( self.name, base_platform, e)) raise e # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception return inst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_platform(self, id):\n p = Platform(self, id, [])\n self.platforms[id] = p\n return p", "def platform_init(self):\n if isinstance(self.imu, MockImuController) or isinstance(self.pwm_controller, MockPWMController):\n print(\"Mock components detected, creating mock antenna controller\")\n platform = MockPlatformController(self.azimuth_servo, self.elevation_servo, self.imu)\n else:\n print(\"Initializing PIDAntennaController class\")\n platform = PIDPlatformController(\n self.azimuth_servo,\n self.elevation_servo,\n self.imu,\n pid_output_limits=self.pid_config.get(\"output_limits\"),\n pid_frequency=self.pid_config.get(\"period\"),\n p=self.pid_config.get(\"p\"),\n i=self.pid_config.get(\"i\"),\n d=self.pid_config.get(\"d\")\n )\n \n self.platform = platform\n\n if not isinstance(self.gps, MockGPSController):\n self.gps_update_loop = GPSLocationController(self.gps)\n self.gps_update_loop.start()\n else:\n self.gps_update_loop = None\n \n return platform", "def register_platform(self, factory, kind, parent=None, wait=None):\r\n self._try_register_platform(factory, kind, parent, wait)", "def platform(self) -> Platform:\n _args: list[Arg] = []\n _ctx = self._select(\"platform\", _args)\n return _ctx.execute_sync(Platform)", "def platform_start(self):\n self.platform.start()", "def _try_register_platform(self, factory, kind, parent, wait, awaiting=False):\r\n name = factory.name\r\n assert kind is not None, \"instance kind can't be None (instance name is {})\".format(name)\r\n\r\n if factory.name is None:\r\n factory.name = name = \"random_name\" # TODO: use GUID\r\n\r\n assert name not in self._platforms and (awaiting or name not in self._awaiting),\\\r\n \"encountered second platform with name {}\".format(name)\r\n\r\n # TODO: analyze args and update wait if there are references to other platforms\r\n assert wait is None or name not in wait, \"platform {} can't wait for self!\".format(name)\r\n\r\n # If all necessary parent and co-platforms are already created - finish registration of this one\r\n if (parent is None or parent in self._platforms) \\\r\n and (wait is None or all(w in self._platforms for w in wait)):\r\n np = factory.finish_registration()\r\n self._platforms[name] = np\r\n if parent is not None:\r\n assert np not in self._platforms[parent].subplatforms, \"Subplatform {} is already within \" \\\r\n \"parent's ({}) subplatforms list, \" \\\r\n \"but shouldn't be\".format(name, parent)\r\n np.parent = self._platforms[parent]\r\n self._platforms[parent].subplatforms.append(np)\r\n if wait is not None:\r\n for w in wait:\r\n assert np not in self._platforms[w].depended, \"Subplatform {} is already within \" \\\r\n \"depended's list of {}, \" \\\r\n \"but shouldn't be\".format(name, w)\r\n self._platforms[w].depended.append(np)\r\n if awaiting:\r\n del self._awaiting[name]\r\n self._check_awaiting()\r\n # Otherwise put it into waiting list\r\n else:\r\n self._awaiting[name] = {\r\n \"instance\": factory,\r\n \"kind\": kind,\r\n \"parent\": parent,\r\n \"wait\": wait}", "def getPlatform(self, name):\r\n if self.platforms.has_key(name):\r\n return self.platforms[name]\r\n else:\r\n self.platforms[name] = Platform(name)\r\n return self.platforms[name]", "def add_platform(self, platform: KetraPlatformBase):\n self.platforms.append(platform)", "def create_platform():\n if config.P_LIST == []:\n pitem = obstacle.Platform(\n randint(config.M.x_pos+2, common.COLS-5), randint(common.R1_R, common.MIDS_R-5))\n config.P_LIST.append(pitem)\n elif len(config.P_LIST) < 
int(common.COLS/20):\n if randint(0, 5) == 1:\n pos = config.P_LIST[-1].x_pos + randint(7, 15)\n if pos < (common.COLS - 3):\n pitem = obstacle.Platform(pos, randint(\n common.R1_R, common.MIDS_R-5))\n config.P_LIST.append(pitem)\n\n for i in config.P_LIST:\n xitem = randint(-3, 3)+i.x_pos\n i.move(xitem)", "async def async_create_platform_type(\n hass: HomeAssistant, config: ConfigType, p_type: str, p_config: dict\n) -> DeviceTrackerPlatform | None:\n platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)\n\n if platform is None:\n return None\n\n return DeviceTrackerPlatform(p_type, platform, p_config)", "def Platforms():\n return platforms", "def get_platforms():\n # Get all CL platforms\n platforms = cl.get_platforms()\n for p in platforms:\n # Not a safe way\n p.__class__ = Platform\n p.devices = p.get_devices()\n for d in p.devices:\n d.__class__ = Device\n return platforms", "def setup_platform(opp, config, add_entities, discovery_info=None):\n\n for scene in pywink.get_scenes():\n _id = scene.object_id() + scene.name()\n if _id not in opp.data[DOMAIN][\"unique_ids\"]:\n add_entities([WinkScene(scene, opp)])", "def get_platform(self, name):\n if name in self.platforms:\n return name\n else:\n try:\n p = self.platforms['name'] = Platform.load(self, name)\n return p\n except IOError as e:\n print('Failed loading platform: {0}'.format(str(e)))\n return None", "def port_maker(self, platform):\n raise NotImplementedError()", "async def _register_hardware_platform(\n hass: HomeAssistant, integration_domain: str, platform: HardwareProtocol\n) -> None:\n if integration_domain == DOMAIN:\n return\n if not hasattr(platform, \"async_info\"):\n raise HomeAssistantError(f\"Invalid hardware platform {platform}\")\n hass.data[DOMAIN][\"hardware_platform\"][integration_domain] = platform", "def create_platforms(plat_map):\n platform_group = set()\n for plat in plat_map:\n platform_group.add(Platform([(plat[1] + 0.5) * TILE_DIM, (plat[0] + 0.5)\n * TILE_DIM, plat[2], plat[3]]))\n return platform_group", "def setPlatform(self):\n\t\treturn None", "async def async_process_hardware_platforms(hass: HomeAssistant) -> None:\n hass.data[DOMAIN][\"hardware_platform\"] = {}\n\n await async_process_integration_platforms(hass, DOMAIN, _register_hardware_platform)", "def initialize(self, platform=None):\n\n if self._simulation is None:\n if type(platform) is str:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platform=openmm.Platform.getPlatformByName(platform),\n platformProperties=self.openmm_properties\n )\n elif platform is None:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platformProperties=self.openmm_properties\n )\n else:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platform=platform,\n platformProperties=self.openmm_properties\n )\n\n logger.info(\n 'Initialized OpenMM engine using platform `%s`' %\n self.platform)", "def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, 
ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")", "def default_platform(self) -> Platform:\n _args: list[Arg] = []\n _ctx = self._select(\"defaultPlatform\", _args)\n return _ctx.execute_sync(Platform)", "def platform(cls, name):\n\n for platform in cls.all_platforms:\n if platform.name == name:\n return platform\n\n raise UserException(\"'{0}' is not a supported platform\".format(name))", "def getPlatform(self):\n\t\treturn None", "def configure(self):\n\n self.platform.configure()", "def create_instance_by_os(self):\n print '# Start a new instance based on the OS'\n\n # Choose between linux or windows\n is_linux = True\n while True:\n\n os = raw_input('Enter the OS (windows/linux or empty to cancel): ')\n\n # Cancel\n if not os:\n print 'Operation cancelled'\n return\n\n # Check if linux\n if os.lower() == 'linux':\n is_linux = True\n break\n\n # Check windows\n if os.lower() == 'windows':\n is_linux = False\n break\n\n # Error\n print 'Invalid input!'\n\n # Create the instance\n if self.compute.create_instance_by_os(is_linux):\n print 'Instance started!'\n else:\n print 'It was not possible to create an instance with the given OS'", "def register_platform(self, address, serverkey=None, display_name=None):\n _log.info('Attempting registration of vcp at address: '\n '{} display_name: {}, serverkey: {}'.format(address,\n display_name,\n serverkey))\n parsed = urlparse(address)\n if parsed.scheme not in ('tcp', 'ipc'):\n raise ValueError(\n 'Only ipc and tpc addresses can be used in the '\n 'register_platform method.')\n try:\n connection = self._build_connection(address, serverkey)\n except gevent.Timeout:\n _log.error(\"Initial building of connection not found\")\n raise\n\n try:\n if connection is None:\n raise ValueError(\"Connection was not able to be found\")\n manager_key = connection.call('get_manager_key')\n except gevent.Timeout:\n _log.error(\"Couldn't retrieve managment key from platform\")\n raise\n\n try:\n if manager_key is not None:\n if manager_key == self.core.publickey:\n _log.debug('Platform is already managed and connected.')\n return\n else:\n _log.warn(\n 'Platform is registered with a different vc key.'\n 'This could be expected.')\n\n if parsed.scheme == 'tcp':\n self.core.publickey\n _log.debug(\n 'TCP calling manage. my serverkey: {}, my publickey: {}'.format(\n self._serverkey, self.core.publickey))\n pk = connection.call(\n 'manage', self._external_addresses[0], self._serverkey,\n self.core.publickey)\n else:\n pk = connection.call('manage', self.core.address)\n except gevent.Timeout:\n _log.error('RPC call to manage did not return in a timely manner.')\n raise\n # If we were successful in calling manage then we can add it to\n # our list of managed platforms.\n if pk is not None and len(pk) == 43:\n try:\n address_uuid = self._address_to_uuid.get(address)\n time_now = format_timestamp(get_aware_utc_now())\n\n if address_uuid is not None:\n _log.debug('Attempting to get instance id to reconfigure '\n 'the agent on the remote instance.')\n current_uuid = connection.call('get_instance_uuid')\n\n if current_uuid != address_uuid:\n _log.debug('Reconfiguring with new uuid. 
{}'.format(\n address_uuid\n ))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n if self._registered_platforms.get(address_uuid) is None:\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n else:\n address_uuid = str(uuid.uuid4())\n _log.debug(\"New platform with uuid: {}\".format(\n address_uuid))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n self._address_to_uuid[address] = address_uuid\n if display_name is None:\n display_name = address\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n self._platform_connections[address_uuid] = connection\n self._registered_platforms.sync()\n except gevent.Timeout:\n _log.error(\n 'Call to reconfigure did not return in a timely manner.')\n raise", "def add_platforms(project, env_spec_name, platforms):\n return _modify_platforms(project, env_spec_name, additions=platforms, removals=[])", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def create_knx_device(\n hass: HomeAssistant,\n platform: SupportedPlatforms,\n knx_module: XKNX,\n config: ConfigType,\n) -> XknxDevice:\n if platform is SupportedPlatforms.light:\n return _create_light(knx_module, config)\n\n if platform is SupportedPlatforms.cover:\n return _create_cover(knx_module, config)\n\n if platform is SupportedPlatforms.climate:\n return _create_climate(knx_module, config)\n\n if platform is SupportedPlatforms.switch:\n return _create_switch(knx_module, config)\n\n if platform is SupportedPlatforms.sensor:\n return _create_sensor(knx_module, config)\n\n if platform is SupportedPlatforms.notify:\n return _create_notify(knx_module, config)\n\n if platform is SupportedPlatforms.scene:\n return _create_scene(knx_module, config)\n\n if platform is SupportedPlatforms.binary_sensor:\n return _create_binary_sensor(hass, knx_module, config)\n\n if platform is SupportedPlatforms.weather:\n return _create_weather(knx_module, config)", "def test_new_platform_appear_after_create(self):\n\n #\n response = self.client.get(reverse('webapp:platform_list'))\n check_response_is_ok(self, response)\n platform_count = len(response.context['object_list'])\n\n platform = create_platform(\"testplatform\", \"https://siteurl.com\", self.user)\n platform.save()\n\n response = self.client.get(reverse('webapp:platform_list'))\n check_response_is_ok(self, response)\n self.assertIn(platform, response.context['object_list'])\n\n expected_count = platform_count + 1\n self.assertEqual(expected_count, len(response.context['object_list']))", "async def setup_platform(hass, platform: str, *,\n devices=None, scenes=None):\n hass.config.components.add(DOMAIN)\n config_entry = ConfigEntry(2, DOMAIN, \"Test\",\n {CONF_INSTALLED_APP_ID: str(uuid4())},\n SOURCE_USER, CONN_CLASS_CLOUD_PUSH)\n broker = DeviceBroker(hass, 
config_entry, Mock(), Mock(),\n devices or [], scenes or [])\n\n hass.data[DOMAIN] = {\n DATA_BROKERS: {\n config_entry.entry_id: broker\n }\n }\n await hass.config_entries.async_forward_entry_setup(\n config_entry, platform)\n await hass.async_block_till_done()\n return config_entry", "def test_setup_platform_name(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.test_local\")", "def _init_hardware(self):\n return", "def setup_platform(hass, config, add_devices, discovery_info=None):\n token = load_token(hass)\n \n if not token:\n request_app_setup(hass, config, add_devices, discovery_info)\n else:\n continue_setup_platform(hass, config, token, add_devices, discovery_info)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n binary_sensors = []\n for name in hass.data[DOMAIN]:\n if name in BINARY_SENSORS:\n binary_sensors.append(NextcloudBinarySensor(name))\n add_entities(binary_sensors, True)", "def get_platforms(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-platformsMixin0\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"get_platformsMixin0\",\n keywords=kwargs,\n params=parameters\n )", "def get_matched_platforms(self, platform):\n raise NotImplemented", "def setup_platform(hass, config, add_devices, discovery_info=None):\n thread1 = QQ(config[QQ_NUMBER])\n thread1.start()\n object_qq = Qqsensor(hass, QQ_NUMBER, thread1)\n add_devices([object_qq])", "def test_add_sensor(self):\n #empty platform\n p2 = Platform(\"platform 2\", \"p2\", [], [], [])\n\n # procedure object\n proc1 = Procedure(\"procedure 1\", \"proc1\")\n proc2 = Procedure(\"procedure 2\", \"proc2\")\n # list of procedures\n proList = [proc1, proc2]\n # observable property object\n obs1 = ObservableProperty(\"obs-property1\", \"obs-property\")\n obs2 = ObservableProperty(\"obs-property2\", \"obs-property2\")\n obs3 = ObservableProperty(\"obs-property3\", \"obs-property3\")\n # list of observable properties\n obsList = [obs1, obs2]\n\n # sensor object\n s1 = Sensor(\"Sensor 1\", \"first sensor\", obsList, proList)\n\n p2.add_sensor(s1)", "async def reload_platform(self) -> None:", "def platforms(self) -> Any:\n\n return search_api(\"platforms\")", "def set_platform(identifier):\n global _PLATFORM\n _PLATFORM = identifier", "def add_to_platforms(\n self, data_store, platform_name, platform_type, nationality, privacy, change_id\n ):\n print(\"Ok, adding new platform.\")\n\n platform_name = prompt(\"Please enter a name: \", default=platform_name)\n trigraph = prompt(\n \"Please enter trigraph (optional): \", default=platform_name[:3]\n )\n quadgraph = prompt(\n \"Please enter quadgraph (optional): \", default=platform_name[:4]\n )\n pennant_number = prompt(\"Please enter pennant number (optional): \", default=\"\")\n\n # Choose Nationality\n if nationality:\n chosen_nationality = 
data_store.add_to_nationalities(nationality, change_id)\n else:\n chosen_nationality = self.resolve_nationality(\n data_store, platform_name, change_id\n )\n\n if chosen_nationality is None:\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )\n\n # Choose Platform Type\n if platform_type:\n chosen_platform_type = data_store.add_to_platform_types(\n platform_type, change_id\n )\n else:\n chosen_platform_type = self.resolve_platform_type(\n data_store, platform_name, change_id\n )\n\n if chosen_platform_type is None:\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )\n\n # Choose Privacy\n if privacy:\n chosen_privacy = data_store.add_to_privacies(privacy, change_id)\n else:\n chosen_privacy = self.resolve_privacy(data_store, change_id)\n\n if chosen_privacy is None:\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )\n\n print(\"-\" * 61)\n print(\"Input complete. About to create this platform:\")\n print(f\"Name: {platform_name}\")\n print(f\"Trigraph: {trigraph}\")\n print(f\"Quadgraph: {quadgraph}\")\n print(f\"Pennant Number: {pennant_number}\")\n print(f\"Nationality: {chosen_nationality.name}\")\n print(f\"Class: {chosen_platform_type.name}\")\n print(f\"Classification: {chosen_privacy.name}\")\n\n choice = create_menu(\n \"Create this platform?: \",\n [\"Yes\", \"No, make further edits\"],\n validate_method=is_valid,\n )\n\n if choice == str(1):\n return (\n platform_name,\n trigraph,\n quadgraph,\n pennant_number,\n chosen_platform_type,\n chosen_nationality,\n chosen_privacy,\n )\n elif choice == str(2):\n return self.add_to_platforms(\n data_store, platform_name, None, None, None, change_id\n )\n elif choice == \".\":\n print(\"-\" * 61, \"\\nReturning to the previous menu\\n\")\n return self.resolve_platform(\n data_store, platform_name, None, None, None, change_id\n )", "def post(self):\n args = platform_group_arguments.parse_args()\n\n platform_group = PlatformGroup(**args)\n self.session.add(platform_group)\n self.session.commit()\n\n return platform_group", "def setup_platform(hass, config, add_entities, discovery_info=None):\n pass", "def get_platform(platform_info=None):\n platform_info = _sanitize_platform_info(platform_info)\n plat = _get_os_platform(platform_info)\n return plat(*platform_info)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n entity = OppleLight(name, host)\n\n add_entities([entity])\n\n _LOGGER.debug(\"Init light %s %s\", host, entity.unique_id)", "def CreateInstance(self):\n\n # Create host instances for cuttlefish device. Currently one host instance\n # has one cuttlefish device. 
In the future, these logics should be modified\n # to support multiple cuttlefish devices per host instance.\n instance = self._compute_client.GenerateInstanceName(\n build_id=self.build_info.build_id, build_target=self._build_target)\n\n if self._cfg.enable_multi_stage:\n remote_build_id = self.build_info.build_id\n else:\n remote_build_id = self._GetGcsBucketBuildId(\n self.build_info.build_id, self.build_info.release_build_id)\n\n if self._cfg.enable_multi_stage:\n remote_system_build_id = self.system_build_info.build_id\n else:\n remote_system_build_id = self._GetGcsBucketBuildId(\n self.system_build_info.build_id, self.system_build_info.release_build_id)\n\n host_image_name = self._compute_client.GetHostImageName(\n self._cfg.stable_host_image_name,\n self._cfg.stable_host_image_family,\n self._cfg.stable_host_image_project)\n # Create an instance from Stable Host Image\n self._compute_client.CreateInstance(\n instance=instance,\n image_name=host_image_name,\n image_project=self._cfg.stable_host_image_project,\n build_target=self.build_info.build_target,\n branch=self.build_info.branch,\n build_id=remote_build_id,\n kernel_branch=self.kernel_build_info.branch,\n kernel_build_id=self.kernel_build_info.build_id,\n kernel_build_target=self.kernel_build_info.build_target,\n blank_data_disk_size_gb=self._blank_data_disk_size_gb,\n extra_scopes=self._extra_scopes,\n system_build_target=self.system_build_info.build_target,\n system_branch=self.system_build_info.branch,\n system_build_id=remote_system_build_id,\n bootloader_build_target=self.bootloader_build_info.build_target,\n bootloader_branch=self.bootloader_build_info.branch,\n bootloader_build_id=self.bootloader_build_info.build_id)\n\n return instance", "async def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n\n if discovery_info is None:\n _LOGGER.warning(\n \"To use this you need to configure the 'docker_monitor' component\")\n return\n\n host_name = discovery_info[CONF_NAME]\n api = hass.data[DOMAIN][host_name]\n\n switches = [ContainerSwitch(host_name, api, name)\n for name in discovery_info[CONF_CONTAINERS].keys()\n if discovery_info[CONF_CONTAINERS][name][CONF_CONTAINER_SWITCH]]\n\n if switches:\n async_add_entities(switches)\n else:\n _LOGGER.info(\"No containers setup\")", "def __init__(self, player, screen):\n\n # Call the parent constructor\n Level.__init__(self, player, screen)\n\n self.level_x_limit = -1380\n self.level_y_limit = 270\n\n\n # Array with type of platform, and x, y location of the platform.\n level = [\n\n\n ]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.hubSandBits(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.player = self.player\n self.platform_list.add(block)\n\n\n\n\n choosePort =[\n ]\n\n for port in choosePort:\n wego = platforms.ChooseLev(port[0], port[3])\n wego.rect.x = port[1]\n wego.rect.y = port[2]\n wego.player = self.player\n self.platform_choose.add(wego)\n\n\n\n background = platforms.backgroundSandHub()\n background.rect.x = 0\n background.rect.y = 0\n self.decor.add(background)", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def platforms(self):\n return self.rpc.call(MsfRpcMethod.ModulePlatforms)", "def get_platform(build_rules_list, verbose=False, platform=None):\n\n # Make sure the input is PlatformTypes\n if platform:\n 
platform = PlatformTypes.lookup(platform)\n\n # If it's not, search the build_rules for it\n if not isinstance(platform, PlatformTypes):\n\n # Check build_rules.py\n platform = getattr_build_rules_list(\n build_rules_list, \"PROJECT_PLATFORM\", None)\n\n # Is it not a PlatformTypes?\n if not isinstance(platform, PlatformTypes):\n item = PlatformTypes.lookup(platform)\n if not isinstance(item, PlatformTypes):\n print(\n \"Platform Type \\\"{}\\\" is not supported, using a default.\".format(platform))\n platform = PlatformTypes.default()\n else:\n platform = item\n\n # Print if needed.\n if verbose:\n print(\"Platform name {}\".format(platform))\n return platform", "def __init__(self, machine):\n self.machine = machine # type: MachineController\n self.features = {}\n super().__init__()\n self.debug = False\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_dmds'] = False\n self.features['has_rgb_dmds'] = False\n self.features['has_accelerometers'] = False\n self.features['has_i2c'] = False\n self.features['has_servos'] = False\n self.features['has_lights'] = False\n self.features['has_switches'] = False\n self.features['has_drivers'] = False\n self.features['tickless'] = False\n self.features['has_segment_displays'] = False\n self.features['has_hardware_sound_systems'] = False\n self.features['has_steppers'] = False\n self.features['allow_empty_numbers'] = False\n self.features['hardware_eos_repulse'] = False", "def create(self):\n\n if self.__created:\n return self.process\n\n # Change process name if needed\n if self.processName is not None and self.processName != 'PAT':\n if self.verbose:\n print(\"\")\n print(\"Changing process name from %r to %r...\" % ('PAT', self.processName))\n change_process_name(self.process.framework, 'PAT', self.processName)\n\n if len(self.__systematics) > 0:\n if self.verbose:\n print(\"\")\n\n default_systematics_options = {\n 'jec': {'jetCollection': self.__miniaod_jet_collection,\n 'metCollection': self.__miniaod_met_collection,\n 'uncertaintiesFile': None},\n 'jer': {'jetCollection': self.__miniaod_jet_collection,\n 'metCollection': self.__miniaod_met_collection,\n 'genJetCollection': self.__miniaod_gen_jet_collection,\n 'resolutionFile': self.__jer_resolution_file,\n 'scaleFactorFile': self.__jer_scale_factor_file}\n }\n\n systematics = {}\n for syst in self.__systematics:\n user_systematics_options = self.__systematicsOptions[syst] if syst in self.__systematicsOptions else {}\n systematics[syst] = copy.deepcopy(default_systematics_options[syst])\n systematics[syst].update(user_systematics_options)\n\n print(\"\")\n Systematics.doSystematics(self, systematics)\n\n\n # Add the framework to the path as the last element\n self.path += cms.Sequence(self.process.framework)\n self.process.p = self.path\n\n if self.verbose:\n print(\"\")\n print(\"Framework configuration done.\")\n print(\" Producers: %s\" % ', '.join(self.producers))\n print(\" Analyzers: %s\" % ', '.join(self.analyzers))\n print(\"\")\n\n # Specify scheduling of analyzers and producers\n self.process.framework.analyzers_scheduling = cms.untracked.vstring(self.analyzers)\n self.process.framework.producers_scheduling = cms.untracked.vstring(self.producers)\n\n self.__created = True\n return self.process", "def get_platform(init):\n\n platform = {}\n tmp = re.sub(r'([a-zA-Z])([0-9.])', r'\\1 \\2', re.sub(r'([,_;:-])', '.', init))\n\n split = tmp.split(\" \")\n if len(split) > 1 and 
split[0].lower().startswith(\"win\"):\n platform['os'] = \"Windows\"\n platform['version'] = split[1]\n elif tmp == \"VISTA\":\n platform['os'] = \"Windows\"\n platform['version'] = \"Vista\"\n else:\n platform['os'] = tmp.title()\n platform['version'] = None\n return platform", "def register_platform_services(platform: entity_platform.EntityPlatform) -> None:\n platform.async_register_entity_service(\n SERVICE_ENABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_DISABLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TOGGLE, ENABLE_DISABLE_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_CANCEL, ENTITY_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_TIME_ADJUST, TIME_ADJUST_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_MANUAL_RUN, MANUAL_RUN_SCHEMA, async_entity_service_handler\n )\n platform.async_register_entity_service(\n SERVICE_SUSPEND, SUSPEND_SCHEMA, async_entity_service_handler\n )", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)", "def build_platform_step(self):\n return [ShellCommand(command=[\"../cordova-cli/bin/cordova\", \"build\", self.platform], workdir='build/mobilespec', timeout=CONFIG.build_timeout, description='Build', name='Build')]", "def get_platform():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/platform\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def platform(aliased=0, terse=0):\n result = _platform_cache.get((aliased, terse), None)\n if result is not None:\n return result\n\n # Get uname information and then apply platform specific cosmetics\n # to it...\n system, node, release, version, machine, processor = uname()\n if machine == processor:\n processor = ''\n if aliased:\n system, release, version = system_alias(system, release, version)\n\n if True: ###\n # Generic handler\n if terse:\n platform = _platform(system, release)\n else:\n bits, linkage = architecture() ###\n platform = _platform(system, release, machine,\n processor, bits, linkage)\n\n _platform_cache[(aliased, terse)] = platform\n return platform", "async def async_setup_platform(\n hass: HomeAssistantType,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n):\n\n component_config = hass.data[DOMAIN][DOMAIN_CONFIG]\n\n source_entity = config[CONF_ENTITY_ID]\n source_entity_domain, source_object_id = split_entity_id(source_entity)\n\n entity_registry = await er.async_get_registry(hass)\n entity_entry = entity_registry.async_get(source_entity)\n\n unique_id = None\n if entity_entry:\n source_entity_name = entity_entry.name or entity_entry.original_name\n source_entity_domain = entity_entry.domain\n unique_id = entity_entry.unique_id\n else:\n source_entity_name = 
source_object_id.replace(\"_\", \" \")\n\n entity_state = hass.states.get(source_entity)\n if entity_state:\n source_entity_name = entity_state.name\n\n capabilities = entity_entry.capabilities if entity_entry else []\n source_entity = SourceEntity(\n unique_id,\n source_object_id,\n source_entity,\n source_entity_name,\n source_entity_domain,\n capabilities,\n )\n\n try:\n power_sensor = await create_power_sensor(\n hass, entity_entry, config, component_config, source_entity\n )\n except (ModelNotSupported, StrategyConfigurationError) as err:\n pass\n\n entities_to_add = [power_sensor]\n\n should_create_energy_sensor = component_config.get(CONF_CREATE_ENERGY_SENSORS)\n if CONF_CREATE_ENERGY_SENSOR in config:\n should_create_energy_sensor = config.get(CONF_CREATE_ENERGY_SENSOR)\n\n if should_create_energy_sensor:\n energy_sensor = await create_energy_sensor(\n hass, component_config, config, power_sensor, source_entity\n )\n entities_to_add.append(energy_sensor)\n\n if component_config.get(CONF_CREATE_UTILITY_METERS):\n meter_types = component_config.get(CONF_UTILITY_METER_TYPES)\n for meter_type in meter_types:\n entities_to_add.append(\n create_utility_meter_sensor(energy_sensor, meter_type)\n )\n\n async_add_entities(entities_to_add)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n now = date.today()\n icon = config[CONF_ICON]\n poubelle_paire = config[CONF_JOUR_PAIRE]\n poubelle_impaire = config[CONF_JOUR_IMPAIRE]\n jour = config[CONF_JOUR]\n devices = [PoubelleSensor('Poubelle de la semaine', poubelle_paire,poubelle_impaire,jour,icon)]\n add_entities(devices, True)", "def deploy_me(self, type, platform, host_list):\n\n self.tmpl_dir = self.base_dir + '/templates'\n if not os.path.isfile(self.tmpl_dir + '/.initialized'):\n print \"\\tTemplates have not yet been initialized. Please first\"\n print \"\\tmake proper changes to the swift-setup.conf file and than\"\n print \"\\trun swift-setup init with sudo or as root user\\n\\n\"\n return False\n\n execute(self._common_setup, hosts=host_list)\n\n if type == 'admin':\n execute(self._admin_setup, hosts=host_list)\n elif type == 'generic':\n execute(self._swift_generic_setup, hosts=host_list)\n elif type == 'proxy':\n execute(self._swift_proxy_setup, hosts=host_list)\n elif type == 'storage':\n execute(self._swift_storage_setup, hosts=host_list)\n elif type == 'saio':\n execute(self._swift_saio_setup, hosts=host_list)\n\n disconnect_all()\n return True", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()", "def __init__(self):\n super(UpnpEmbeddedDevice, self).__init__()\n return", "def __init__(self, machine):\n self.machine = machine\n self.features = {}\n self.log = None\n self.debug = False\n\n # Set default platform features. 
Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_dmd'] = False\n self.features['has_rgb_dmd'] = False\n self.features['has_accelerometers'] = False\n self.features['has_i2c'] = False\n self.features['has_servos'] = False\n self.features['has_matrix_lights'] = False\n self.features['has_gis'] = False\n self.features['has_leds'] = False\n self.features['has_switches'] = False\n self.features['has_drivers'] = False\n self.features['tickless'] = False", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n token = config.get(CONF_ACCESS_TOKEN)\n\n client = ClementineRemote(host, port, token, reconnect=True)\n\n add_entities([ClementineDevice(client, config[CONF_NAME])])", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # Only act if loaded via mysensors by discovery event.\n # Otherwise gateway is not setup.\n if discovery_info is None:\n return\n\n for gateway in mysensors.GATEWAYS.values():\n # Define the S_TYPES and V_TYPES that the platform should handle as\n # states. Map them in a dict of lists.\n pres = gateway.const.Presentation\n set_req = gateway.const.SetReq\n map_sv_types = {\n pres.S_TEMP: [set_req.V_TEMP],\n pres.S_HUM: [set_req.V_HUM],\n pres.S_BARO: [set_req.V_PRESSURE, set_req.V_FORECAST],\n pres.S_WIND: [set_req.V_WIND, set_req.V_GUST],\n pres.S_RAIN: [set_req.V_RAIN, set_req.V_RAINRATE],\n pres.S_UV: [set_req.V_UV],\n pres.S_WEIGHT: [set_req.V_WEIGHT, set_req.V_IMPEDANCE],\n pres.S_POWER: [set_req.V_WATT, set_req.V_KWH],\n pres.S_DISTANCE: [set_req.V_DISTANCE],\n pres.S_LIGHT_LEVEL: [set_req.V_LIGHT_LEVEL],\n pres.S_IR: [set_req.V_IR_RECEIVE],\n pres.S_WATER: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_CUSTOM: [set_req.V_VAR1,\n set_req.V_VAR2,\n set_req.V_VAR3,\n set_req.V_VAR4,\n set_req.V_VAR5],\n pres.S_SCENE_CONTROLLER: [set_req.V_SCENE_ON,\n set_req.V_SCENE_OFF],\n }\n if float(gateway.protocol_version) < 1.5:\n map_sv_types.update({\n pres.S_AIR_QUALITY: [set_req.V_DUST_LEVEL],\n pres.S_DUST: [set_req.V_DUST_LEVEL],\n })\n if float(gateway.protocol_version) >= 1.5:\n map_sv_types.update({\n pres.S_COLOR_SENSOR: [set_req.V_RGB],\n pres.S_MULTIMETER: [set_req.V_VOLTAGE,\n set_req.V_CURRENT,\n set_req.V_IMPEDANCE],\n pres.S_SOUND: [set_req.V_LEVEL],\n pres.S_VIBRATION: [set_req.V_LEVEL],\n pres.S_MOISTURE: [set_req.V_LEVEL],\n pres.S_AIR_QUALITY: [set_req.V_LEVEL],\n pres.S_DUST: [set_req.V_LEVEL],\n })\n map_sv_types[pres.S_LIGHT_LEVEL].append(set_req.V_LEVEL)\n\n if float(gateway.protocol_version) >= 2.0:\n map_sv_types.update({\n pres.S_INFO: [set_req.V_TEXT],\n pres.S_GAS: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_GPS: [set_req.V_POSITION],\n pres.S_WATER_QUALITY: [set_req.V_TEMP, set_req.V_PH,\n set_req.V_ORP, set_req.V_EC]\n })\n map_sv_types[pres.S_CUSTOM].append(set_req.V_CUSTOM)\n map_sv_types[pres.S_POWER].extend(\n [set_req.V_VAR, set_req.V_VA, set_req.V_POWER_FACTOR])\n\n devices = {}\n gateway.platform_callbacks.append(mysensors.pf_callback_factory(\n map_sv_types, devices, add_devices, MySensorsSensor))", "def set_platform(self, platform_dict):\n if not os.path.exists(self.file_path):\n print(\"netCDF file does not exist, exiting without saving Platform group...\")\n elif self.format == '.nc':\n with netCDF4.Dataset(self.file_path, 'a', format='NETCDF4') as ncfile:\n plat = 
ncfile.createGroup('Platform')\n [plat.setncattr(k, v) for k, v in platform_dict.items()]\n elif self.format == '.zarr' and not self.append_zarr: # Do not save platform if appending\n zarrfile = zarr.open(self.file_path, mode='a')\n plat = zarrfile.create_group('Platform')\n for k, v in platform_dict.items():\n plat.attrs[k] = v", "async def async_setup_platform(hass, config, async_add_devices, _discovery_info=None):\n pass", "def __new__(cls):\n game_engine = get_gameengine()\n if game_engine is not None:\n return game_engine\n else:\n return super(GameEngine, cls).__new__(cls)\n # end if", "def test_create_platfrom(self):\n # procedure object\n proc1 = Procedure(\"procedure 1\", \"proc1\")\n proc2 = Procedure(\"procedure 2\", \"proc2\")\n # list of procedures\n proList = [proc1, proc2]\n # observable property object\n obs1 = ObservableProperty(\"obs-property1\", \"obs-property\")\n obs2 = ObservableProperty(\"obs-property2\", \"obs-property2\")\n obs3 = ObservableProperty(\"obs-property3\", \"obs-property3\")\n # list of observable properties\n obsList = [obs1, obs2]\n obsList2 =[obs1,obs2]\n # sensor object\n s1 = Sensor(\"Sensor 1\", \"first sensor\", obsList, proList)\n s2 = Sensor(\"Sensor 2\", \"second sensor\", obsList2, proList)\n s3 = Sensor(\"Sensor 3\", \"second sensor\", obsList2, proList)\n act1 = Actuator(\"Actuator 1\", \"first actuator\",[],[])\n act2 = Actuator(\"Actuator 2\", \"second actuator\",[],[])\n act3 = Actuator(\"Actuator 3\", \"third actuator\",[],[])\n #list of actuators\n actList =[act1,act2,act3]\n #list of sensors\n senList = [s1,s2]\n # platform object\n p1 = Platform(\"platform 1\", \"p1\", senList, actList,[])\n p1.add_sensor(s3)\n\n this_graph = cfg.get_graph()\n #print(this_graph.serialize(format='turtle'))\n print(this_graph.serialize(format=\"ttl\").decode('utf-8'))", "def create(cls, world, **kwargs):\n position = (50, 50)\n display_shape = (700, 500)\n text_matrix_shape = (24, 80)\n resource_manager = kwargs.pop(\"resource_manager\")\n db_path = resource_manager.get_path(\"local_computer.db\")\n args = {k: kwargs.pop(k) for k in (\"depth\", \"renderer\", \"pixel_format\", \"bpp\", \"masks\") if\n k in kwargs}\n\n inst = super(LocalComputer, cls).create(\n ctx=world,\n machine_state=MachineState(),\n network_state=NetworkState(),\n file_system=FileSystem(db_path),\n sprite=Sprite.create(position, display_shape, access=sdl2.render.SDL_TEXTUREACCESS_TARGET, **args),\n terminal_display_buffer=DisplayBuffer.create(text_matrix_shape),\n input_output_stream=InputOutputStream(),\n shell_state=ShellState(),\n **kwargs\n )\n\n # Register the components\n world.add_component(inst.machine_state)\n world.add_component(inst.network_state)\n world.add_component(inst.file_system)\n world.add_component(inst.sprite)\n world.add_component(inst.terminal_display_buffer)\n world.add_component(inst.input_output_stream)\n world.add_component(inst.shell_state)\n\n return inst", "def setup(self, create_scale_pods_and_pvcs_using_kube_job_on_ms_consumers):\n if is_managed_service_cluster():\n self.sanity_helpers = SanityManagedService(\n create_scale_pods_and_pvcs_using_kube_job_on_ms_consumers\n )\n else:\n self.sanity_helpers = Sanity()", "def get_platforms(self):\n _log.debug(\"Passing platforms back: {}\".format(\n self._registered_platforms.keys()))\n return self._registered_platforms.values()", "def getPlatform(self):\n self.platform=util.get_platform()\n if not(self.platform.find('linux')==-1): self.platform='Unix' # i suppose, that in all unix systems are paths 
similiar\n if self.platform=='win32': self.platform='Win32' # this should be done automatically", "def setup_platform(hass, config, add_entities, discovery_info=None):\n hass.data.setdefault(DOMAIN, {})\n\n def service_set_override(call):\n \"\"\"Handle the service call.\"\"\"\n entity_id = call.data.get(ATTR_ENTITY_ID)\n temperature = call.data.get(ATTR_TEMPERATURE)\n until = call.data.get(\n ATTR_UNTIL, (datetime.now() + timedelta(hours=1)).strftime(\"%H:%M\")\n )\n target_devices = [\n dev for dev in hass.data[DOMAIN][\"entities\"] if dev.entity_id in entity_id\n ]\n target_device: WarmupThermostat\n for target_device in target_devices:\n target_device.set_override(temperature, until)\n target_device.schedule_update_ha_state(True)\n\n _LOGGER.info(\"Setting up platform for Warmup component\")\n user = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n warmup = Warmup4IE(user, password)\n\n if warmup is None or not warmup.setup_finished:\n raise PlatformNotReady\n warmup_client = WarmupClient(warmup)\n to_add = []\n for device in warmup.get_all_devices().values():\n to_add.append(WarmupThermostat(hass, device, warmup_client))\n add_entities(to_add)\n hass.data[DOMAIN][\"entities\"] = to_add\n hass.services.register(DOMAIN, \"set_override\", service_set_override)\n return True", "def init_device(platform=\"Android\", uuid=None, **kwargs):\n cls = import_device_cls(platform)\n dev = cls(uuid, **kwargs)\n # Add device instance in G and set as current device.\n G.add_device(dev)\n return dev", "def add_to_platform_start(\n self,\n hass: HomeAssistant,\n platform: EntityPlatform,\n parallel_updates: asyncio.Semaphore | None,\n ) -> None:\n super().add_to_platform_start(hass, platform, parallel_updates)\n\n # Bail out if the sensor doesn't have a unique_id or a device class\n if self.unique_id is None or self.device_class is None:\n return\n registry = er.async_get(self.hass)\n\n # Bail out if the entity is not yet registered\n if not (\n entity_id := registry.async_get_entity_id(\n platform.domain, platform.platform_name, self.unique_id\n )\n ):\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._sensor_option_unit_of_measurement = self._get_initial_suggested_unit()\n return\n\n registry_entry = registry.async_get(entity_id)\n assert registry_entry\n\n # Prime _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self.registry_entry = registry_entry\n self._async_read_entity_options()\n\n # If the sensor has 'unit_of_measurement' in its sensor options, the user has\n # overridden the unit.\n # If the sensor has 'sensor.private' in its entity options, it already has a\n # suggested_unit.\n registry_unit = registry_entry.unit_of_measurement\n if (\n (\n (sensor_options := registry_entry.options.get(DOMAIN))\n and CONF_UNIT_OF_MEASUREMENT in sensor_options\n )\n or f\"{DOMAIN}.private\" in registry_entry.options\n or self.unit_of_measurement == registry_unit\n ):\n return\n\n # Make sure we can convert the units\n if (\n (unit_converter := UNIT_CONVERTERS.get(self.device_class)) is None\n or registry_unit not in unit_converter.VALID_UNITS\n or self.unit_of_measurement not in unit_converter.VALID_UNITS\n ):\n return\n\n # Set suggested_unit_of_measurement to the old unit to enable automatic\n # conversion\n self.registry_entry = registry.async_update_entity_options(\n entity_id,\n f\"{DOMAIN}.private\",\n {\"suggested_unit_of_measurement\": registry_unit},\n )\n # 
Update _sensor_option_unit_of_measurement to ensure the correct unit\n # is stored in the entity registry.\n self._async_read_entity_options()", "def evaluate_for_platform(self, **kwargs):\n macro = Macro(self.identifier, self.args, self.value)\n kwargs['platform'].define(self.identifier.as_str(), macro)\n return False", "def __init__(self):\n thisType = type(self)\n if not thisType._initialized:\n thisType._initialized = True\n self._embedded_device_registry = {}\n self._root_device_registry = {}\n self._service_registry = {}\n self._scan_for_device_extensions_under_code_container(dynamic_extensions)\n self._scan_for_device_extensions_under_code_container(standard_extensions)\n self._scan_for_service_extensions_under_code_container(dynamic_extensions)\n self._scan_for_service_extensions_under_code_container(standard_extensions)\n return", "def __init__(self, player, screen):\n\n # Call the parent constructor\n Level.__init__(self, player, screen)\n\n self.level_x_limit = -1380\n self.level_y_limit = 270\n\n\n # Array with type of platform, and x, y location of the platform.\n level = [[platforms.SAND_LONG_GROUND, 0, 500],\n [platforms.SAND_LONG_GROUND, 1431, 500],\n\n [platforms.SAND_PYRAMID_LONG, 900, 386],\n [platforms.SAND_PYRAMID_LONG, 1100, 273],\n [platforms.SAND_PYRAMID_LONG, 2200, 160],\n [platforms.SAND_PYRAMID_LONG, 2200, 57],\n [platforms.SAND_PYRAMID_LONG, 1400, -55],\n [platforms.SAND_PYRAMID_LONG, 1850, -168],\n [platforms.SAND_PYRAMID_LONG, 1850, -281],\n\n #be sure to place this in nonwalljump group\n [platforms.SAND_PYRAMID_LONG, 2178, 386],\n [platforms.SAND_PYRAMID_LONG, 2378, 273],\n [platforms.SAND_PYRAMID_LONG, 1500, -394]\n\n\n ]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.hubSandBits(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.player = self.player\n self.platform_list.add(block)\n\n\n\n\n choosePort =[[platforms.PORTAL, -30, 350, 0],\n [platforms.PORTAL, 1556, -120, 2]\n ]\n\n for port in choosePort:\n wego = platforms.ChooseLev(port[0], port[3])\n wego.rect.x = port[1]\n wego.rect.y = port[2]\n wego.player = self.player\n self.platform_choose.add(wego)\n\n\n\n background = platforms.backgroundSandHub()\n background.rect.x = 0\n background.rect.y = 0\n self.decor.add(background)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n\n data = hass.data[LUPUSEC_DOMAIN]\n\n device_types = [CONST.TYPE_OPENING]\n\n devices = []\n for device in data.lupusec.get_devices(generic_type=device_types):\n devices.append(LupusecBinarySensor(data, device))\n\n add_entities(devices)", "def __init__(self, properties):\n super(Engine, self).__init__(self, constants.SYSTEM_PLUGIN_NAME)\n self.context._plugin_id = 0\n self._logger = self.context.get_logger()\n\n # Engine properties\n if not isinstance(properties, dict):\n self._properties = {}\n else:\n self._properties = properties.copy()\n\n #Init engine plugin UUID\n self._properties[constants.PROP_UID] = str(self.uid)\n\n # Next plugin Id (start at 1, as 0 is reserved for the engine itself)\n self._next_plugin_id = 1\n\n #Plugins dict pluginId->plugin\n self._plugins = {}\n self._plugins_lock = threading.RLock()\n\n self._event_dispatcher = EventDispatcher(self)\n self._start_level = 0\n self._state = states.STARTING\n self._logger.info(\"Engine successfully created\")", "def setup_platform(\n 
hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n username = config[CONF_USERNAME]\n password = config[CONF_PASSWORD]\n\n customize = config[CONF_CUSTOMIZE]\n\n protocol = \"https\" if config[CONF_SSL] else \"http\"\n\n url = f\"{protocol}://{host}\"\n\n data = HikvisionData(hass, url, port, name, username, password)\n\n if data.sensors is None:\n _LOGGER.error(\"Hikvision event stream has no data, unable to set up\")\n return\n\n entities = []\n\n for sensor, channel_list in data.sensors.items():\n for channel in channel_list:\n # Build sensor name, then parse customize config.\n if data.type == \"NVR\":\n sensor_name = f\"{sensor.replace(' ', '_')}_{channel[1]}\"\n else:\n sensor_name = sensor.replace(\" \", \"_\")\n\n custom = customize.get(sensor_name.lower(), {})\n ignore = custom.get(CONF_IGNORED)\n delay = custom.get(CONF_DELAY)\n\n _LOGGER.debug(\n \"Entity: %s - %s, Options - Ignore: %s, Delay: %s\",\n data.name,\n sensor_name,\n ignore,\n delay,\n )\n if not ignore:\n entities.append(\n HikvisionBinarySensor(hass, sensor, channel[1], data, delay)\n )\n\n add_entities(entities)", "def platform(self, platform):\n # type: (string_types) -> None\n\n if platform is not None:\n if not isinstance(platform, string_types):\n raise TypeError(\"Invalid type for `platform`, type has to be `string_types`\")\n\n self._platform = platform", "def setup_platform(hass, config, add_entities, discovery_info=None):\n\n try:\n pushbullet = PushBullet(config.get(CONF_API_KEY))\n except InvalidKeyError:\n _LOGGER.error(\"Wrong API key for Pushbullet supplied\")\n return False\n\n pbprovider = PushBulletNotificationProvider(pushbullet)\n\n devices = []\n for sensor_type in config[CONF_MONITORED_CONDITIONS]:\n devices.append(PushBulletNotificationSensor(pbprovider, sensor_type))\n add_entities(devices)", "async def async_setup_platform(hass, config, async_add_devices,\n discovery_info=None):\n return True", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n _type: str = config[CONF_TYPE]\n name: str = config[CONF_NAME]\n\n if hass.config.latitude < 0:\n hemisphere = SOUTHERN\n elif hass.config.latitude > 0:\n hemisphere = NORTHERN\n else:\n hemisphere = EQUATOR\n\n _LOGGER.debug(_type)\n add_entities([Season(hemisphere, _type, name)], True)", "async def _async_setup_platform(\n opp: OpenPeerPower,\n integration_name: str,\n integration_platform: str,\n platform_configs: list[dict],\n) -> None:\n if integration_platform not in opp.data:\n await async_setup_component(\n opp, integration_platform, {integration_platform: platform_configs}\n )\n return\n\n entity_component = opp.data[integration_platform]\n tasks = [\n entity_component.async_setup_platform(integration_name, p_config)\n for p_config in platform_configs\n ]\n await asyncio.gather(*tasks)", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def setup_platform(hass, config, add_entities, discovery_info=None):\n \n entity = LiberationRecap()\n add_entities([entity], True)\n \n return True", "def runtime_platform(self):\n return 
JvmPlatform.global_instance().get_runtime_platform_for_target(self)", "def BuildAndroidPlatform():\n\n platform = emulated_device.AndroidPlatform()\n sdk_root = GetAndroidSdkPath()\n platform.adb = os.path.join(root_dir,\n ''\n 'tools/android/emulator/support/adb.turbo')\n platform.emulator_x86 = os.path.join(sdk_root, 'tools/emulator64-x86')\n platform.emulator_arm = os.path.join(sdk_root, 'tools/emulator64-arm')\n platform.emulator_wrapper_launcher = os.path.join(sdk_root, 'tools/emulator')\n platform.real_adb = os.path.join(sdk_root, 'platform-tools/adb')\n platform.mksdcard = os.path.join(sdk_root, 'tools/mksdcard')\n platform.empty_snapshot_fs = os.path.join(\n sdk_root, 'tools/lib/emulator/snapshots.img')\n platform.emulator_support_lib_path = os.path.join(\n root_dir, 'third_party/browser_automation/lib/')\n platform.base_emulator_path = os.path.join(sdk_root, 'tools')\n return platform", "def get_platform(self):\n return self._platform", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n monitored_conditions = config[CONF_MONITORED_CONDITIONS]\n\n api = GoogleWifiAPI(host, monitored_conditions)\n entities = [\n GoogleWifiSensor(api, name, description)\n for description in SENSOR_TYPES\n if description.key in monitored_conditions\n ]\n add_entities(entities, True)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n description = config.get(CONF_DESCRIPTION)\n product_id = config.get(CONF_PRODUCT_ID)\n domain = config.get(CONF_LOCALE)\n\n add_entities([Geizwatch(name, description, product_id, domain)], True)" ]
[ "0.6996833", "0.6901165", "0.6643392", "0.66429996", "0.6620305", "0.66013455", "0.65052664", "0.64866644", "0.6392875", "0.6308356", "0.6216505", "0.6207374", "0.61393857", "0.6117521", "0.60896856", "0.60258096", "0.6019131", "0.6019086", "0.60079926", "0.5970026", "0.59591275", "0.5955838", "0.589697", "0.5819957", "0.57879514", "0.57720435", "0.5768521", "0.57310385", "0.57151407", "0.57151407", "0.57144135", "0.5708847", "0.5646128", "0.5623428", "0.561759", "0.5601186", "0.5567609", "0.55616015", "0.5556259", "0.5547992", "0.5460174", "0.5455787", "0.54525673", "0.54519135", "0.54334724", "0.54111654", "0.539304", "0.53895766", "0.5389413", "0.53824675", "0.53796273", "0.5363353", "0.53607047", "0.53524536", "0.5351768", "0.53459734", "0.5339577", "0.53244174", "0.5323405", "0.532067", "0.5316376", "0.531309", "0.5293938", "0.52920425", "0.52902406", "0.5279778", "0.5276405", "0.52758193", "0.52689785", "0.52687585", "0.5268443", "0.5259082", "0.5257765", "0.5249938", "0.5245248", "0.5244094", "0.52429944", "0.52363753", "0.5233783", "0.52337044", "0.5231267", "0.5222549", "0.5210857", "0.5208943", "0.52026767", "0.5200947", "0.5195209", "0.5192409", "0.51886195", "0.5183042", "0.51804334", "0.5176643", "0.517463", "0.5173633", "0.51698416", "0.5165116", "0.5164602", "0.515856", "0.51573575", "0.5154377" ]
0.706969
0
Rules specific dict with accumulated (summary) statistics
def stats(self): return {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary(self):\n\n stats = {\n 'invalid': self.num_invalid,\n 'tested': self.num_tested,\n }\n return {\n k: v for k, v in stats.items()\n if k == 'invalid' or v != 0\n }", "def compute_metrics(self, results: list) -> dict:", "def compute_statistics(self):", "def summary(self) -> Dict[str, Dict[str, float]]:\n vals: Dict[str, List[float]] = defaultdict(list)\n if not self.steps: # pragma: no cover\n return {}\n\n for timing_dict in self._timings:\n for step in self.steps:\n if step in timing_dict:\n vals[step].append(timing_dict[step])\n summary = {}\n for step in self.steps:\n if vals[step]:\n summary[step] = {\n \"cnt\": len(vals[step]),\n \"sum\": sum(vals[step]),\n \"min\": min(vals[step]),\n \"max\": max(vals[step]),\n \"avg\": sum(vals[step]) / len(vals[step]),\n }\n return summary", "def advancedStats():", "def total(self):\n gd_total = self._grand_total()\n counts = self._get_as_dict_count()\n for rule in self.rules:\n gd_total += rule(counts)\n return gd_total", "def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:\n tempDict = {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }\n\n\n\n return {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }", "def apply(self):\n counter = {}\n for act in self.activities:\n freq = []\n for trace in self.log:\n freq.append(len(self.project_trace(trace, [act])))\n if not len(freq) == 0:\n counter[act] = {'sum': sum(freq), 'min': min(freq),\n 'max': max(freq)}\n return counter", "def score_aggregate(results):\n scores = []\n truth_count = detected_count = segment_count = 0\n\n for res in results:\n scores.append(res[\"scores\"])\n truth_count += len(res[\"labels\"])\n detected_count += len(res[\"detected\"])\n segment_count += len(res[\"scores\"][\"segments\"])\n\n ret 
= dict()\n ret[\"scores\"] = sum_scores(scores)\n ret[\"stats\"] = dict(truth_count=truth_count, detected_count=detected_count, segment_count=segment_count)\n return ret", "def report(self):\n m = {}\n num_tok = self.metrics['num_tokens']\n if num_tok > 0:\n if self.metrics['correct_tokens'] > 0:\n m['token_acc'] = self.metrics['correct_tokens'] / num_tok\n m['loss'] = self.metrics['loss'] / num_tok\n if self.metrics['pred_count'] > 0:\n m['pred'] = self.metrics['correct_pred'] / self.metrics['pred_count']\n try:\n m['ppl'] = math.exp(m['loss'])\n except OverflowError:\n m['ppl'] = float('inf')\n if self.metrics['total_skipped_batches'] > 0:\n m['total_skipped_batches'] = self.metrics['total_skipped_batches']\n for k, v in m.items():\n # clean up: rounds to sigfigs and converts tensors to floats\n m[k] = round_sigfigs(v, 4)\n return m", "def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def compute_metrics(\n self,\n preds: Dict[str, torch.Tensor],\n targets: Dict[str, torch.Tensor],\n phase: str,\n ) -> Dict[str, torch.Tensor]:\n if phase == \"train\":\n metrics_dict = self.train_metrics\n elif phase == \"val\":\n metrics_dict = self.val_metrics\n elif phase == \"test\":\n metrics_dict = self.test_metrics\n\n ret = {}\n for metric_name, metric in metrics_dict.items():\n if metric is not None:\n branch = metric_name.split(\"_\")[0]\n ret[metric_name] = metric(preds[branch], targets[branch])\n\n return ret", "def conclusion_summary_map(self):\n pass", "def get_metrics(self, phase):\n dice = {}\n l = len(self.base_dice_scores[phase])\n for i, d in enumerate(self.base_dice_scores[phase]):\n for k in d:\n if k not in dice:\n dice[k] = 0\n dice[k] += d[k] / l\n \n dice_neg = np.mean(self.dice_neg_scores[phase])\n dice_pos = np.mean(self.dice_pos_scores[phase])\n dices = [dice, dice_neg, 
dice_pos]\n iou = np.nanmean(self.iou_scores[phase])\n return dices, iou", "def stats(self):\n return {attr: getattr(self, attr) for attr in ['cash', 'rawg_quantity', 'rawg_demand', 'rawg_price', 'rig_quantity', 'rig_supply', 'rig_price']}", "def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res", "def data(self):\n data = {}\n if self.base_rule:\n data.update(self.base_rule.data)\n for condition in list(chain.from_iterable(self._conditions.itervalues())):\n data.setdefault(condition.key, []).append(condition)\n for action in list(chain.from_iterable(self._actions.itervalues())):\n data[action.key] = [action] # you can only take a given action _once_\n return data", "def compute_propagation_summary_statistics(data):\n return {\n type: _compute_summary_stats(entries)\n for type, entries in data.items()\n }", "def get_summary_stats(self):\r\n n = len(self.results)\r\n\r\n if n == 0:\r\n mean = None\r\n stdev = None\r\n\r\n elif n == 1:\r\n mean = numpy.mean(self.results)\r\n stdev = None\r\n\r\n else:\r\n mean = numpy.mean(self.results)\r\n stdev = numpy.std(self.results)\r\n\r\n sum_stats = {'n': n, 'mean': mean, 'stdev': stdev}\r\n\r\n return sum_stats", "def statistics(self):\n stats = {'passed': 0, 'failed': 0, 'passed_baseline': 0,\n 'failed_baseline': 0, 'skipped': 0}\n for test in self.cards:\n if test.status == 'passed':\n stats['passed'] += 1\n if test.image_status != 'match':\n stats['failed_baseline'] += 1\n elif test.status == 'failed':\n stats['failed'] += 1\n if test.image_status == 'match':\n stats['passed_baseline'] += 1\n elif test.status == 'skipped':\n stats['skipped'] += 1\n return stats", "def _calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n 
metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics", "def calculate_stats(file_data: dict) -> dict:\n specifics = {\n 'assignments': 0,\n 'grade': 0,\n 'graded': 0,\n 'discussion': 0\n }\n for course in file_data['semester_no_dup_crn']:\n x = course.split(DELIMITER)\n if int(x[ASSIGNMENTS]) > 0:\n specifics['assignments'] += 1\n if int(x[GRADE]) > 2:\n specifics['grade'] += 1\n if int(x[GRADED]) > 0:\n specifics['graded'] += 1\n if int(x[DISCUSSION]) > 0:\n specifics['discussion'] += 1\n return {'semester': file_data['semester'],\n 'courses_with_usage': len(file_data['semester_no_dup_crn']),\n 'faculty_with_usage': len(file_data['semester_no_dup_r']),\n 'full_time': len(file_data['full_time']),\n 'total_full_time': file_data['len_full'],\n 'part_time': len(file_data['part_time']),\n 'total_part_time': file_data['len_part'],\n 'staff': len(file_data['staff']),\n 'specifics': specifics,\n 'total_courses': file_data['total_courses']}", "def compute_metrics(self):\n pass", "def summary(self):\n summary = defaultdict(int)\n\n for r in self.results:\n summary[r.result] += 1\n\n return summary", "def get_metrics(y_true, y_pred):\n return {'acc': np.mean(y_true == y_pred)}", "def stats(self):", "def _populate_rules(self,rules):\n rule2_index = {}\n for rule in rules:\n relation = rule.get_attribute('relation')[0] # vals are now lists\n ##: Time signals of IS_INCLUDED should not be used in relative time evaluation. They may cause confusion.\n ##: E.g., ... after 3 days in hospital.... \"3 days in\" is picked instead of \"after 3 days\" \n if relation=='IS_INCLUDED':\n continue\n \n signal = rule.get_attribute('signal')[0]\n confidence = float(rule.get_attribute('confidence')[0])\n rule2_index[signal] = (relation, confidence)\n return rule2_index", "def calc_metrics(data, sampled_data_list, dataset_type):\n result={}\n for sampled_data in sampled_data_list:\n c2st_roc_auc_metric = c2st_roc_auc(data, sampled_data)\n if \"c2st_roc_auc\" in result:\n result[\"c2st_roc_auc\"].append(c2st_roc_auc_metric)\n else:\n result[\"c2st_roc_auc\"] = [c2st_roc_auc_metric]\n mmd_p_val, mmd_stat = rbf_mmd_test(data.values, sampled_data.values)\n if \"mmd_p_val\" in result:\n result[\"mmd_p_val\"].append(mmd_p_val)\n result[\"mmd_stat\"].append(mmd_stat)\n else:\n result[\"mmd_p_val\"] = [mmd_p_val]\n result[\"mmd_stat\"] = [mmd_stat]\n ks_p_val, ks_stat, ks_n, ks_p_val_list, ks_stat_list = ks_test(data, sampled_data)\n if dataset_type != \"norm_dataset\":\n ks_p_val = ks_permutation(ks_stat_list, data, sampled_data)\n if \"ks_p_val\" in result:\n result[\"ks_p_val\"].append(ks_p_val)\n result[\"ks_stat\"].append(ks_stat)\n else:\n result[\"ks_p_val\"] = [ks_p_val]\n result[\"ks_stat\"] = [ks_stat]\n acc_r, acc_g = c2st_accuracy(data, sampled_data)\n if \"c2st_acc_r\" in result:\n result[\"c2st_acc_r\"].append(acc_r)\n result[\"c2st_acc_g\"].append(acc_g)\n else:\n result[\"c2st_acc_r\"] = [acc_r]\n result[\"c2st_acc_g\"] = [acc_g]\n return result", "def summarize(allowances):\n total_allowances = 0\n if isinstance(allowances, dict):\n for key, value in allowances.items():\n total_allowances = total_allowances + int(value)\n #end for\n else:\n total_allowances = allowances\n return total_allowances", "def init_metrics():\n metrics = defaultdict(list)\n metrics['best_acc'] = 0.0\n metrics['best_loss'] = float('inf')\n 
metrics['best_epoch'] = 0\n return metrics", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def metrics_group():", "def get_analysis(self) -> Dict[str, Any]:\n return {\n \"report\": self.report.to_text(),\n \"errors\": sorted(self.errors, key=lambda k: k[\"start\"]),\n \"has_errors\": self.has_errors(),\n }", "def sum_stats(stats_data):\n t_bounces = 0\n t_complaints = 0\n t_delivery_attempts = 0\n t_rejects = 0\n for dp in stats_data:\n t_bounces += int(dp['Bounces'])\n t_complaints += int(dp['Complaints'])\n t_delivery_attempts += int(dp['DeliveryAttempts'])\n t_rejects += int(dp['Rejects'])\n\n return {\n 'Bounces': t_bounces,\n 'Complaints': t_complaints,\n 'DeliveryAttempts': t_delivery_attempts,\n 'Rejects': t_rejects,\n }", "def _create_summaries(self):\n self._loss_summary = tf.summary.scalar('loss', self._loss)\n self._perplexity_summary = tf.summary.scalar('average_perplexity_per_sentence', self._average_perplexity)", "def summarize_metrics(metrics):\n summarized = {}\n for k in metrics:\n if k.endswith('mse'):\n summarized[k[:-3] + 'rmse'] = np.sqrt(np.mean(metrics[k]))\n elif k.startswith('err'):\n summarized[k + '_mean'] = np.mean(metrics[k])\n summarized[k + '_rmse'] = np.sqrt(np.mean(metrics[k]**2))\n elif k.endswith('nomean'):\n summarized[k] = metrics[k]\n else:\n summarized[k] = np.mean(metrics[k])\n\n return summarized", "def solution(self):\n return {\n \"count\": 0.,\n \"mean\": 0.,\n \"stdev\": 0.,\n \"5%\": 0.,\n \"25%\": 0.,\n \"median\": 0.,\n \"75%\": 0.,\n \"95%\": 0.,\n }", "def solution(self):\n return {\n \"count\": 0.,\n \"mean\": 0.,\n \"stdev\": 0.,\n \"5%\": 0.,\n \"25%\": 0.,\n \"median\": 0.,\n \"75%\": 0.,\n \"95%\": 0.,\n }", "def summary_data(self):\n data = {\n \"total\": self.total,\n \"card_one_value\": self.cards[0].value,\n \"card_two_value\": self.cards[1].value,\n \"card_one_rank\": self.cards[0].rank,\n \"card_two_rank\": self.cards[1].rank,\n \"cards\": \" \".join([str(card) for card in self.cards]),\n \"soft\": int(self.soft),\n \"from_split\": int(self.from_split),\n \"blackjack\": int(self.blackjack),\n \"num_cards\": len(self.cards),\n \"start_total\": self.cards[0] + self.cards[1],\n \"wager\": int(self.wager),\n \"insurance\": int(self.insurance),\n \"surrender\": int(self.surrender),\n \"double_down\": int(self.double_down),\n \"num_aces\": self.num_aces,\n \"num_hard_aces\": self.num_hard_aces\n }\n return data", "def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def dictionary_of_metrics(items):\n total = 0\n count = 0\n for value in items:\n total = total + value\n count = count + 1\n the_mean = round(total / count, 2)\n sorted_items = sorted(items)\n if count % 2 == 1:\n the_median = sorted_items[int(round(count+1)/2-1)]\n else:\n lower_median = sorted_items[int(round(count/2-1))]\n upper_median = sorted_items[int(round(count/2))]\n the_median = (lower_median + upper_median) / 2\n sum_of_sqz = 0 # Calculate Sum of squares for Varience\n for j in items:\n 
sqrz_calc = (j - the_mean)**2\n sum_of_sqz = sum_of_sqz + sqrz_calc\n the_varience = round(sum_of_sqz / (count - 1), 2)\n the_standard_dev = round((the_varience)**(1/2), 2)\n the_min = sorted_items[0]\n the_max = sorted_items[count - 1]\n dict = {\n 'mean': the_mean,\n 'median': the_median,\n 'var': the_varience,\n 'std': the_standard_dev,\n 'min': the_min,\n 'max': the_max\n }\n return dict", "def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret", "def metrics(self):\n return {**self.prepend_name_dict(self._prefixes[0], self._train_metrics),\n **self.prepend_name_dict(self._prefixes[1], self.validator.metrics)}", "def dictionary_of_metrics(items):\n \n n = len(items)\n average = round(np.mean(items), 2)\n median = round(np.median(items), 2)\n variance = round((sum((items-np.mean(items))**2))/(n-1), 2)\n standard_dev = round(((sum((items-np.mean(items))**2))/(n-1))**(1/2), 2)\n minimum = round(min(items), 2)\n maximum = round(max(items), 2)\n \n return {'mean':average,'median':median,'var':variance,'std':standard_dev,'min':minimum,'max':maximum}\n pass", "def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def summarize_metrics(self, metrics, n_timesteps):\n summary = dict()\n for key, val in list(metrics.items()):\n if type(val) is list:\n # Compute mean and std dev. 
of metric over sequences\n summary[key] = np.mean(val)\n summary[key + '_std'] = np.std(val)\n else:\n # Average over all timesteps\n summary[key] = val / n_timesteps\n print(('Evaluation\\tKLD: {:7.1f}\\tRecon: {:7.1f}\\t' +\n 'MSE: {:6.3f} +-{:2.3f}')\\\n .format(summary['kld_loss'], summary['rec_loss'],\n summary['mse'], summary['mse_std']))\n return summary", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def calculate_metrics(self):\n sensitivity = TP + FN\n sensitivity = TP / sensitivity\n\n specificity = TN + FP\n specificity = TN / specificity\n\n accuracy = TP + FP + TN + FN\n divisor = TP + TN\n accuracy = divisor / accuracy\n\n positive_predictive = TP + FP\n positive_predictive = TP / positive_predictive\n\n negative_predictive = TN + FN\n negative_predictive = TN / negative_predictive\n\n # This is for format decimal in metrics\n sensitivity = float(\"{0:.4f}\".format(sensitivity))\n specificity = float(\"{0:.4f}\".format(specificity))\n accuracy = float(\"{0:.4f}\".format(accuracy))\n positive_predictive = float(\"{0:.4f}\".format(positive_predictive))\n negative_predictive = float(\"{0:.4f}\".format(negative_predictive))\n\n average = (sensitivity + specificity + accuracy + positive_predictive + negative_predictive) / 5\n\n average = float(\"{0:.4f}\".format(average))\n\n metrics = [sensitivity, specificity, accuracy,positive_predictive,negative_predictive, average]\n\n return metrics", "def statistics(self):\n stats = {}\n fields = {\n 'Hit count:': ('hit_count', Value.from_number),\n 'Miss count:': ('miss_count', Value.from_number),\n 'Hit ratio:': ('hit_ratio', Value.from_percent),\n 'Item count:': ('item_count', Value.from_number),\n 'Total cache size:': ('total_cache_size', Value.from_number),\n 'Oldest item age:': ('oldest_item_age', Value.from_time_ago),\n }\n selector = '#ae-stats-table tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 2, [text(child) for child in children]\n if text(children[0]).strip() in fields:\n # skip rows with invalid or empty cells\n field_name, value_fn = fields[text(children[0]).strip()]\n stats[field_name] = value_fn(text(children[1]))\n # Ensure all fields were filled.\n assert len(stats) == len(fields), (fields.keys(), stats.keys())\n return stats", "def _process_stats_data(self, graph_rule, trans_stats_data):\n formatted_data = {}\n translation_data = {}\n if graph_rule:\n formatted_data['graph_rule'] = graph_rule\n if trans_stats_data.get('branch'):\n formatted_data['branch'] = trans_stats_data['branch']\n ticks = trans_stats_data.get('ticks')\n labels = {}\n [labels.update({index: val}) for index, val in ticks]\n graph_data = trans_stats_data.get('graph_data', {})\n for unit in graph_data:\n temp_stat = {}\n stat = unit.get('data', [])\n for index, val in stat:\n temp_stat.update({labels.get(index): val})\n translation_data[unit.get('label', 'label')] = temp_stat\n formatted_data['translation_stats'] = translation_data\n return formatted_data", "def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1", "def 
_summarize_results(df, metrics):\n def return_cm(x):\n if isinstance(x, int):\n return (0, 0, 0)\n\n elif len(x) > 3:\n return x[1:]\n\n return x\n\n def get_status(x):\n return {\n \"OK\": 0,\n \"ERROR\": 1\n }[x]\n\n df['status'] = df['status'].apply(get_status)\n df['confusion_matrix'] = df['confusion_matrix'].apply(ast.literal_eval)\n df['confusion_matrix'] = df['confusion_matrix'].apply(return_cm)\n df[['fp', 'fn', 'tp']] = pd.DataFrame(df['confusion_matrix'].tolist(), index=df.index)\n\n # calculate f1 score\n df_ = df.groupby(['dataset', 'pipeline'])[['fp', 'fn', 'tp']].sum().reset_index()\n\n precision = df_['tp'] / (df_['tp'] + df_['fp'])\n recall = df_['tp'] / (df_['tp'] + df_['fn'])\n df_['f1'] = 2 * (precision * recall) / (precision + recall)\n\n result = dict()\n\n # number of wins over ARIMA\n arima_pipeline = 'arima'\n intermediate = df_.set_index(['pipeline', 'dataset'])['f1'].unstack().T\n arima = intermediate.pop(arima_pipeline)\n\n result['# Wins'] = (intermediate.T > arima).sum(axis=1)\n result['# Wins'][arima_pipeline] = None\n\n # number of anomalies detected\n result['# Anomalies'] = df_.groupby('pipeline')[['tp', 'fp']].sum().sum(axis=1).to_dict()\n\n # average f1 score\n result['Average F1 Score'] = df_.groupby('pipeline')['f1'].mean().to_dict()\n\n # failure rate\n result['Failure Rate'] = df.groupby(\n ['dataset', 'pipeline'])['status'].mean().unstack('pipeline').T.mean(axis=1)\n\n result = pd.DataFrame(result)\n result.index.name = 'pipeline'\n result.reset_index(inplace=True)\n\n rank = 'Average F1 Score'\n result = _sort_leaderboard(result, rank, metrics)\n result = result.drop('rank', axis=1).set_index('pipeline')\n\n return result", "def calc_overall_evaluation(count_by_type_dict: dict):\n assert len(count_by_type_dict) > 0, \"count by domain class should not be empty!\"\n for domain_name, domain_cnt in count_by_type_dict.items():\n print('domain_cnt', domain_cnt)\n for mode, res in OverallEval[domain_name].items():\n OverallEval[domain_name][mode]['precision'] = res['precision'] / domain_cnt\n OverallEval[domain_name][mode]['recall'] = res['recall'] / domain_cnt\n OverallEval[domain_name][mode]['f1_score'] = res['f1_score'] / domain_cnt\n log = \"Domain:{}, mode:{}, P:{:.3f}, R:{:.3f}, f1:{:.3f}\".format(\n domain_name, mode, OverallEval[domain_name][mode]['precision'],\n OverallEval[domain_name][mode]['recall'],\n OverallEval[domain_name][mode]['f1_score'])\n print(log)\n #logger.info(log)\n return OverallEval", "def result(\n metrics: Dict[metric_types.MetricKey, float]\n ) -> Dict[metric_types.MetricKey, float]:\n output = {}\n for threshold in thresholds:\n ptn = flip_count_metric_key_by_name_by_threshold[threshold][\n 'positive_to_negative']\n ntp = flip_count_metric_key_by_name_by_threshold[threshold][\n 'negative_to_positive']\n pos_examples = flip_count_metric_key_by_name_by_threshold[threshold][\n 'positive_to_negative_examples_ids']\n neg_examples = flip_count_metric_key_by_name_by_threshold[threshold][\n 'negative_to_positive_examples_ids']\n pos = flip_count_metric_key_by_name_by_threshold[threshold][\n 'positive_examples_count']\n neg = flip_count_metric_key_by_name_by_threshold[threshold][\n 'negative_examples_count']\n output[metric_key_by_name_by_threshold[threshold]\n ['overall']] = (metrics[ntp] + metrics[ptn]) / (\n metrics[pos] + metrics[neg])\n output[metric_key_by_name_by_threshold[threshold]\n ['positive_to_negative']] = metrics[ptn] / metrics[pos]\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_to_positive']] = 
metrics[ntp] / metrics[neg]\n output[metric_key_by_name_by_threshold[threshold]\n ['positive_to_negative_examples_ids']] = metrics[pos_examples]\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_to_positive_examples_ids']] = metrics[neg_examples]\n\n return output", "def get_errors_summary(statistics):\n all_errors = (report['errors'] for report in statistics) \n \n errors_summary = defaultdict(int)\n for doc in all_errors:\n for key, value in doc.items():\n errors_summary[key] += value\n\n return errors_summary", "def generateStats(self):\n\t\tn = float(self.n)\n\t\tm = float(self.m)\n\t\tk = float(self.k)\n\t\tp_fp = math.pow(1.0 - math.exp(-(k*n)/m), k)\n\t\tprint \"Probability of false positives: \", p_fp\n\t\tprint \"Predicted false positive rate: \", p_fp * 100.0\n\t\tprint \"Number of elements entered in filter: \", n\n\t\tprint \"Number of bits in filter: \", m\n\t\tprint \"Number of hashes in filter: \", k", "def _calculate_stats(values, factor=1):\n result = {'min': min(values) * factor,\n 'max': max(values) * factor,\n 'sum': sum(values) * factor,\n 'mean': 0,\n 'stddev': 0}\n\n if values:\n mean = sum(values) / float(len(values))\n result['mean'] = factor * mean\n result['stddev'] = (\n factor * math.sqrt((1.0 / (len(values) - 1))\n * sum((x - mean) ** 2 for x in values)))\n\n return result", "def calculate_statistics(self, trace, **input_test):\n inputs = self._clean_inputs(input_test)\n mc_logp = self._logp(trace, **inputs)\n mean_mse = self._mse(trace, **inputs)\n mse2 = self._mse2(trace, **inputs)\n mu, sd, zscore = self._alpha_stats(trace)\n return {'logp': mc_logp,\n 'mse': mean_mse,\n 'mse2': mse2,\n 'mu': mu,\n 'sd': sd,\n 'zscore': zscore}", "def total_present_value_rule(_m):\r\n\r\n return sum(m.DELTA[y] * (m.INV[y] + m.FOM[y] + m.OP[y]) for y in m.Y) + m.EOH", "def get_metrics(self, objs_metrics):\n d = {}\n _S = DiffStatus\n\n for status in _S.iter():\n d[status] = [dict(obj_m) for obj_m in objs_metrics if obj_m['status'] == status]\n\n count_a_only = len(d.get(_S.deleted, []))\n count_b_only = len(d.get(_S.added, []))\n count_modified = len(d.get(_S.modified, []))\n count_unchanged = len(d.get(_S.unchanged, []))\n count_common = count_modified + count_unchanged\n\n count_a = count_common + count_a_only\n count_b = count_common + count_b_only\n\n d['count'] = {\n 'a': count_a,\n 'b': count_b,\n 'a_only': count_a_only,\n 'b_only': count_b_only,\n 'modified': count_modified,\n 'unchanged': count_unchanged,\n }\n\n d['summary'] = {\n _S.added: {\n 'count': count_b_only,\n 'relative_to': {\n 'a': count_b_only / count_a,\n 'b': count_b_only / count_b,\n }\n },\n _S.deleted: {\n 'count': count_a_only,\n 'relative_to': {\n 'a': count_a_only / count_a,\n 'b': count_a_only / count_b,\n },\n },\n _S.modified: {\n 'count': count_modified,\n 'relative_to': {\n 'a': count_modified / count_a,\n 'b': count_modified / count_b,\n }\n },\n _S.unchanged: {\n 'count': count_unchanged,\n 'relative_to': {\n 'a': count_unchanged / count_a,\n 'b': count_unchanged / count_b,\n }\n },\n }\n\n return d", "def summarize_rec_data(data):\n\n # Warning: not all collectible data has a summary stats implemented below!\n # See get_rec_stats() above!\n\n stats = {}\n\n if 'hc_ro' in data:\n # Entropy across HC units average over samples.\n hc_ro_arr = np.array(list(data['hc_ro'].values()))\n stats['H HC ro'] = utils.entropy(hc_ro_arr.T).mean()\n\n if 'vs_state' in data:\n # Sum of vS reward estimates change (from first to last sample).\n vs_state = data['vs_state']\n stats['d vS'] = 
sum(vs_state[max(vs_state.keys())] - vs_state[0])\n\n if 'co_occs' in data:\n # Mean entropy of real location and HC state co-occurance frequencies.\n co_occs = data['co_occs'][max(data['co_occs'].keys())]\n stats['H HC co'] = np.nanmean(get_hc_co_occ_entropy(co_occs))\n stats['H loc co'] = np.nanmean(get_loc_co_occ_entropy(co_occs))\n\n return stats", "def evaluate(self) -> Dict[str, Any]:\n kwargs = {\"ids\": self._ids}\n return {\n metric.value: self._metric_funcs[metric](\n self._targets, self._preds, **kwargs\n )\n for metric in self._metrics\n }", "def dictionary_of_metrics(items):\n \n np_list = np.array(items) #create an array of list to use numpy functions on list\n metric_dict = {'mean': np.mean(np_list).round(2),\n 'median': np.median(np_list).round(2),\n 'var': np.var(np_list, ddof=1).round(2),\n 'std': np.std(np_list, ddof=1).round(2),\n 'min': np.min(np_list).round(2),\n 'max': np.max(np_list).round(2),} #create a dictionary that calculates the five metrics\n \n return metric_dict #return result as a dictionary", "def get_summary_stats(items, attr):\n data = {}\n for item in items:\n stats = models.Stats.query.filter_by(**{attr: item.id})\n data[item.id] = {\n 'ok': sum([int(stat.ok) for stat in stats]),\n 'changed': sum([int(stat.changed) for stat in stats]),\n 'failed': sum([int(stat.failed) for stat in stats]),\n 'skipped': sum([int(stat.skipped) for stat in stats]),\n 'unreachable': sum([int(stat.unreachable) for stat in stats])\n }\n\n # If we're aggregating stats for a playbook, also infer status\n if attr is \"playbook_id\":\n data[item.id]['status'] = _infer_status(item, data[item.id])\n\n return data", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }", "def summary(self):\n if len(self.pfm) == 1:\n return self.pfm[0]\n perf_dict = {}\n perf_dict[self.name] = self.pfm[0]\n perf_dict.update({'{}_top{}'.format(self.name, self.topk[idx]): value for idx, value in enumerate(self.pfm)})\n return perf_dict", "def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics", "def sum_dstats(self, stats, smetrics):\n avg = {}\n\n for disk, metrics in stats.iteritems():\n for mname, metric in metrics.iteritems():\n if mname not in smetrics:\n continue\n if mname in avg:\n avg[mname] += metric\n else:\n avg[mname] = metric\n\n return avg", "def event_details_score_all(self, g, w):\n functions = {\n 'malicious-email': self.event_details_score_mal_email,\n 'malicious-destination': self.event_details_score_mal_dest,\n 'endpoint-malware': self.event_details_score_moe\n }\n\n return functions[w.event_type](g, w)", "def compute_diagnostics(self) -> Dict[str, Any]:\n return {}", "def task_summary_dict(request, tasks, fieldlist=None):\n sumd = {}\n numeric_fields_task = ['reqid', 'corecount', 'taskpriority', 'workqueue_id']\n\n if fieldlist:\n flist = fieldlist\n else:\n flist = copy.deepcopy(const.TASK_FIELDS_STANDARD)\n\n for task in tasks:\n for f in flist:\n if 'tasktype' in request.session['requestParams'] and 
request.session['requestParams']['tasktype'].startswith('analy'):\n # Remove the noisy useless parameters in analysis listings\n if flist in ('reqid', 'stream', 'tag'):\n continue\n\n if 'taskname' in task and len(task['taskname'].split('.')) == 5:\n if f == 'project':\n try:\n if not f in sumd:\n sumd[f] = {}\n project = task['taskname'].split('.')[0]\n if not project in sumd[f]:\n sumd[f][project] = 0\n sumd[f][project] += 1\n except:\n pass\n if f == 'stream':\n try:\n if not f in sumd:\n sumd[f] = {}\n stream = task['taskname'].split('.')[2]\n if not re.match('[0-9]+', stream):\n if not stream in sumd[f]:\n sumd[f][stream] = 0\n sumd[f][stream] += 1\n except:\n pass\n if f == 'tag':\n try:\n if not f in sumd:\n sumd[f] = {}\n tags = task['taskname'].split('.')[4]\n if not tags.startswith('job_'):\n tagl = tags.split('_')\n tag = tagl[-1]\n if not tag in sumd[f]:\n sumd[f][tag] = 0\n sumd[f][tag] += 1\n except:\n pass\n if f in task:\n val = task[f]\n if val is None or val == '':\n val = 'Not specified'\n if val == 'anal':\n val = 'analy'\n if f not in sumd:\n sumd[f] = {}\n if val not in sumd[f]:\n sumd[f][val] = 0\n sumd[f][val] += 1\n\n # convert to ordered lists\n suml = []\n for f in sumd:\n itemd = {}\n itemd['field'] = f\n iteml = []\n kys = sumd[f].keys()\n if f != 'ramcount':\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n else:\n newvalues = {}\n for ky in kys:\n if ky != 'Not specified':\n roundedval = int(ky / 1000)\n else:\n roundedval = -1\n if roundedval in newvalues:\n newvalues[roundedval] += sumd[f][ky]\n else:\n newvalues[roundedval] = sumd[f][ky]\n for ky in newvalues:\n if ky >= 0:\n iteml.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})\n else:\n iteml.append({'kname': 'Not specified', 'kvalue': newvalues[ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml", "def stats_preprocessing(self):\n output = {'before_tot':[],\n 'before_unique':[],\n 'after_tot':[],\n 'after_unique':[]}\n for i in range(len(self.table)):\n description_raw = self.table.description.iloc[i].split(' ')\n clean_txt = self.table.clean_text.iloc[i].split(' ')\n\n output['before_tot'].append(len(description_raw))\n output['before_unique'].append(len(set(description_raw)))\n output['after_tot'].append(len(clean_txt))\n output['after_unique'].append(len(set(clean_txt)))\n \n print(\"\"\"Before preprocessing a description had on average {0} words with standard deviation {1}. \\n\nMoreover, the average of unique words was {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['before_tot']), 2), round(stdev(output['before_tot']), 2), \n round(mean(output['before_unique']), 2), round(stdev(output['before_unique'])), 2))\n \n print(\"\"\"\\nAfter preprocessing a description has on average {0} words with standard deviation {1}. 
\\n \nThe average of unique words is now {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['after_tot']), 2), round(stdev(output['after_tot']), 2), \n round(mean(output['after_unique']),2), round(stdev(output['after_unique']), 2)))\n\n return output", "def calc_shape_statistics(self, stat_names):\n stats = {}\n try:\n all_props = [regionprops(m) for m in self.masks]\n except TypeError:\n raise TypeError(\"masks not the right type\")\n for stat in stat_names:\n stats[stat] = np.mean([p[0][stat] for p in all_props])\n return stats", "def build_rule_count_dict(counts_iterator):\n rule_count_dict = {}\n for l in counts_iterator:\n if l[1] != 'NONTERMINAL':\n x = l[2]\n y = l[1] == 'UNARYRULE' and l[3] or l[3] + ' ' + l[4]\n # if l[1] == 'UNARYRULE':\n # y = l[3]\n # else: # l[1] == 'BINARYRULE'\n # y = l[3] + ' ' + l[4]\n if x not in rule_count_dict:\n rule_count_dict[x] = {}\n rule_count_dict[x][y] = int(l[0])\n return rule_count_dict", "def _compute_summary_stats(entries):\n result = {\n \"average\": None,\n \"median\": None,\n \"min\": None,\n \"max\": None,\n \"per66\": None,\n \"per75\": None,\n \"per90\": None,\n \"per95\": None,\n \"per99\": None,\n \"success_count\": len(entries['success']),\n \"error_count\": len(entries['error']),\n }\n # sort by the duration\n success = sorted(entries['success'], key=lambda x: x[1])\n if len(success) == 0:\n return result\n\n result['average'] = _compute_average(success)\n result['min'] = success[0][1]\n result['max'] = success[-1][1]\n result['median'] = _compute_percentile(success, 50)\n result['per66'] = _compute_percentile(success, 66)\n result['per75'] = _compute_percentile(success, 75)\n result['per90'] = _compute_percentile(success, 90)\n result['per95'] = _compute_percentile(success, 95)\n result['per99'] = _compute_percentile(success, 99)\n assert result['min'] <= result['median']\n assert result['median'] <= result['per66']\n assert result['per66'] <= result['per75']\n assert result['per75'] <= result['per90']\n assert result['per90'] <= result['per95']\n assert result['per95'] <= result['per99']\n assert result['per99'] <= result['max']\n return result", "def stats(self):\n pass", "def _cashflow_rule(self, meta, m):\n activity = m.Activity # dict((comp, getattr(m, f\"{comp.name}_production\")) for comp in m.Components)\n total = self._compute_cashflows(m.Components, activity, m.Times, meta)\n return total", "def get_summary_stats(self, simulation, user) -> Dict[str, Dict]:\n # I'm just going to assume that all subfolders are for time-steps but I'll skip them\n # if they don't have a time field set. 
(or, horrors, if it is negative)\n stats = dict()\n\n self._skipNLIFilter = True\n # comments in the girder internals indicate that eager evaluation is better here,\n # as there can be time outs\n subfolders = list(\n super(Simulation, self).childFolders(simulation, parentType='folder', user=user)\n )\n self._skipNLIFilter = False\n for folder in subfolders:\n time = folder['meta'].get('time', -1)\n if time < 0:\n continue\n stats[time] = folder['meta'].get('nli', {})\n\n return stats", "def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)", "def calculate_metrics(self):\n \n for cv in self.cat_vals:\n cat_inds = np.where(self.category_values == cv)[0]\n weighted_difference = (self.z[cat_inds]-self.mz[cat_inds])/self.weight_values[cat_inds]\n resid = np.sqrt(np.sum(np.square(weighted_difference))/(cat_inds.size))\n self.metric[str(cv)] = resid\n \n return self.metric", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n 
nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)", "def calculate_hydro_metrics(self):\n metrics = {}\n\n for metric in self._hydro_metrics():\n metrics[metric] = getattr(self, metric)()\n\n return metrics", "def getStats(cm):\n\n measures = {}\n\n measures['recall'] = cm['TP'] / (cm['TP'] + cm['FN'])\n measures['specificity'] = cm['TN'] / (cm['TN'] + cm['FP'])\n measures['FPR'] = cm['FP'] / (cm['FP'] + cm['TN'])\n measures['FNR'] = cm['FN'] / (cm['TP'] + cm['FN'])\n measures['PBC'] = 100.0 * (cm['FN'] + cm['FP']) / (cm['TP'] + cm['FP'] + cm['FN'] + cm['TN'])\n measures['precision'] = cm['TP'] / (cm['TP'] + cm['FP'])\n measures['f-measure'] = 2.0 * (measures['recall'] * measures['precision']) / (measures['recall'] + measures['precision'])\n \n return measures", "def __get_metrics_adapted(self, policies):\n percent_min = 1 - policies['percent']\n percent_max = 1 + policies['percent']\n metrics = {'cpu_min':percent_min*policies['cpu'], 'cpu_max':percent_max*policies['cpu'],\n 'memory_min':percent_min*policies['ram'], 'memory_max':percent_max*policies['ram'],\n 'disk_min':percent_min*policies['disk'], 'disk_max':percent_max*policies['disk']}\n return metrics", "def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()", "def make_rules(self, old_rules):\n rules = defaultdict(set)\n\n def recurse_disc_rule(attr, rule):\n \"\"\"\n Recursively partition multivalued discrete attributes if\n its worth it\n \"\"\"\n\n\n ro = RuleObj(rule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n\n if not self.prune_rule(ro):\n return set([ro])\n \n c = rule.filter.conditions[0]\n var_type = rule.data.domain[c.position].var_type\n\n if (var_type == Orange.feature.Type.Discrete):\n if len(c.values) == 1:\n return [ro]\n \n refiner = BeamRefiner(attrs=[attr], fanout=10)\n ret = set()\n for _, newrule in refiner(rule):\n ret.update(recurse_disc_rule(attr, newrule))\n return ret\n else:\n if len(rule.data) < self.min_pts:\n return [ro]\n return [ro]\n\n # XXX: figure out this logic!\n\n refiner = BeamRefiner(attrs=[attr], fanout=2)\n ret = set()\n for _, newrule in refiner(rule):\n newro = RuleObj(newrule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n ret.update(recurse_disc_rule(attr, newrule))\n\n \n if old_rules is None:\n base_rule = SDRule(self.full_table, None) \n refiner = BeamRefiner(attrs=self.cols, fanout=10)\n #refiner = BeamRefiner(attrs=['recipient_nm'], fanout=30) \n\n \n for attr, rule in refiner(base_rule):\n ros = recurse_disc_rule(attr, rule)\n #self.top_k({None:ros})\n ros = filter(self.prune_rule, ros)\n rules[(attr,)].update(ros)\n\n else:\n attrs = old_rules.keys()\n for a_idx, attr1 in enumerate(attrs):\n for attr2 in attrs[a_idx+1:]:\n merged_attrs = set(attr1).union(attr2)\n max_attrs_len = max(len(attr1), len(attr2))\n if len(merged_attrs) == max_attrs_len:\n continue\n \n \n a1rules, a2rules = old_rules[attr1], old_rules[attr2]\n\n for ro in self.merge_dims(a1rules, a2rules):\n key = ro.rule.attributes\n\n #self.top_k({None:(ro,)})\n if self.prune_rule(ro):\n rules[key].add(ro)\n \n return rules", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 
'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def calc_stat_values(self):", "def hrules(self):\n ...", "def get_stats(self) -> Dict[str, Any]:\r\n stats = {}\r\n for attr in [attr for attr in self.__dict__ if attr not in Stats.PRINT_IGNORES]:\r\n stats[attr] = self.get_stat(attr)\r\n stats[\"level\"] = self.level\r\n return stats", "def compute_metrics(self, episodes):\n metrics = {}\n _, visit_epoch = self.compute_visit_freq_table(episodes)\n metrics['visited_states_in_epoch'] = visit_epoch\n metrics['visited_states_in_history'] = \\\n len(self._visited_states_in_history) / 3 ** (self.n_disks)\n return metrics", "def __init__(self):\n self.stats = {}\n self.stats['hits'] = 0\n self.stats['operations'] = {}\n self.stats['operations']['GetCapabilities'] = {}\n self.stats['operations']['GetCapabilities']['hits'] = 0\n self.stats['operations']['POST'] = {}\n self.stats['operations']['POST']['hits'] = 0" ]
[ "0.6340423", "0.62687165", "0.6262667", "0.62574226", "0.62306124", "0.61828154", "0.6159125", "0.61552644", "0.61206615", "0.6068211", "0.6058657", "0.6044734", "0.60149944", "0.601353", "0.5946292", "0.593793", "0.5930342", "0.5924686", "0.5853451", "0.5822528", "0.5802484", "0.5802227", "0.5788911", "0.57857513", "0.57804376", "0.57786286", "0.57769144", "0.57728845", "0.57425433", "0.57357913", "0.57255185", "0.5704344", "0.57021946", "0.5701771", "0.5689388", "0.5682724", "0.5672401", "0.5670937", "0.5670453", "0.56691414", "0.5657832", "0.5657832", "0.56472117", "0.56446755", "0.5639402", "0.56343454", "0.56296986", "0.5628173", "0.5616454", "0.5615998", "0.56084454", "0.5604812", "0.56032217", "0.56023854", "0.55985975", "0.5594544", "0.5593085", "0.5583116", "0.5580835", "0.5578783", "0.5575684", "0.556788", "0.55663973", "0.5563644", "0.5555946", "0.55501354", "0.55347496", "0.55286247", "0.5519484", "0.5518259", "0.5511333", "0.55032766", "0.5500909", "0.55005324", "0.54990697", "0.54904246", "0.548934", "0.5487803", "0.548564", "0.5485151", "0.5479222", "0.547688", "0.5474183", "0.5466094", "0.5462359", "0.5457632", "0.54560244", "0.54448444", "0.5441279", "0.543952", "0.5417326", "0.5416648", "0.5416525", "0.54133445", "0.5412587", "0.5406641", "0.540407", "0.5397987", "0.5395538", "0.53941685" ]
0.5944312
15
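As a hedged illustration only, and not part of the dataset itself: given one record of this shape, already parsed into a Python dict exposing the fields seen in this dump (query, document, metadata, negatives, negative_scores, document_score, document_rank), the sketch below builds (query, positive, negative) triplets for the triplet objective named in the metadata. The policy of keeping the highest-scored negatives and the max_negatives parameter are assumptions introduced for illustration, not something the dataset specifies.

# Minimal sketch, assuming record is an already-parsed dict with the fields listed above.
from typing import Dict, List, Tuple


def triplets_from_record(record: Dict, max_negatives: int = 4) -> List[Tuple[str, str, str]]:
    query = record["query"]          # anchor text
    positive = record["document"]    # the matching document
    negatives = record["negatives"]  # non-matching documents
    # negative_scores appear as quoted strings in this dump, so cast before sorting.
    scores = [float(s) for s in record["negative_scores"]]
    # Assumption: prefer the highest-scored (hardest) negatives first.
    ranked = sorted(zip(negatives, scores), key=lambda pair: pair[1], reverse=True)
    return [(query, positive, neg) for neg, _ in ranked[:max_negatives]]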
Scoreboard handler for incoming commands
def cmd(self, context, message): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def received_message(self, msg):\n command = int(msg[:8], base=16)\n msg = msg[8:]\n self.log.debug(\"CONTROLLER - RECEIVED COMMAND: \" + str(command))\n self.log.debug(\"CONTROLLER - MSG: \" + str([int(msg[i:i+8], base=16) for i in range(0, len(msg), 8)]))\n if command == 0:\n # 0 - opponent start the game\n self.master.add_log(\"Opponent starts the game.\")\n elif command == 1:\n # 1 - you start the game\n self.master.add_log(\"You start the game! Your turn.\")\n self.master.first = True\n self.master.new_round(False)\n elif command == 2:\n # 2 - start of your turn\n self.master.add_log(\"Your turn.\")\n self.master.new_round()\n elif command == 3:\n # 3 - opponent draws a card\n self.master.opp_hand.add_placeholder()\n self.master.add_log(\"Opponent draw a card.\")\n elif command == 4:\n # 4,x,y - opponent plays a card with x id on y spot on gameboard\n c_id = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n card = self.master.database.get_card(c_id)\n if card.card_type == \"Spell\":\n self.master.opp_sfield.set_card(card)\n else:\n self.master.opp_bfield.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent played a card {card.name}.\")\n elif command == 5:\n # 5,v,x,y - player v picks up card from x space from y spot to his hand\n # v - 0/1 - you/opponent\n # x - 0/1 - mana/battlefield\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n card = self.master.mana.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from mana zone to your hand.\")\n elif c_space == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from battle zone to your hand.\")\n elif c_player == 1:\n if c_space == 0:\n card = self.master.opp_mana.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from mana to his hand.\")\n elif c_space == 1:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from battle zone to his hand.\")\n elif command == 6:\n # 6,v,x,y - player v puts card from x space from y spot to his graveyard\n # v - 0/1 - you/opponent\n # x - 0/1/2 - mana/battlefield/hand\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"yu_mn\", c_pos)\n elif c_space == 1:\n self.master.a_move_to_graveyard(\"yu_bf\", c_pos)\n elif c_space == 2:\n card = self.master.hand[c_pos]\n self.master.a_move_to_graveyard(\"yu_hd\", c_pos)\n self.master.send_message(15, card.id) # Sent back which card was discarded\n elif c_player == 1:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"op_mn\", c_pos, False)\n elif c_space == 1:\n # Do not change to a_move_to_graveyard\n if c_pos == 5:\n card = self.master.opp_sfield.remove_card()\n else:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent's card {card.name} from battle zone was moved to his graveyard.\")\n elif command == 7:\n # 7,x,y - opponent puts y card from x space to manazone\n # x - 0/1/2/3 - hand/deck/graveyard\n c_space = int(msg[:8], base=16)\n c_id 
= int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card {card.name} from his hand to the mana zone\")\n elif c_space == 1:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his deck to the mana zone\")\n elif c_space == 2:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_graveyard.remove_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to the mana zone\")\n elif command == 8:\n # 8,x - opponent adds card from his hand to y shield (face down)\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.add_placeholder(c_pos)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card from his hand to shields\")\n elif command == 9:\n # 9,x,y - Opponent tap/untap card on y spot in mana zone\n # x - 0/1 - tap/untap\n c_tap = bool(int(msg[:8]))\n c_pos = int(msg[8:16], base=16)\n if c_tap:\n self.master.opp_mana.untap_card(c_pos)\n else:\n self.master.opp_mana.tap_card(c_pos)\n elif command == 10:\n # 10,x - (info) opponent looks under his shield on x spot\n c_pos = int(msg[:8], base=16)\n self.master.add_log(f\"Opponent is peeking his {c_pos} shield\")\n elif command == 11:\n # 11,x,y - opponent looks under my shield/card on hand on y spot\n # x - 0/1 - hand/shield\n c_space = int(msg[:8])\n c_pos = int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.hand[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} card in hand\")\n elif c_space == 1:\n card = self.master.shields[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} shield\")\n self.master.send_message(111, card.id)\n elif command == 111:\n # 111,x - \n c_id = int(msg[:8], base=16)\n # TODO: split command to separate hand and shield\n # TODO: show in the UI what the card actually is\n self.master.add_log(f\"The choosen card is {c_id}\")\n elif command == 12:\n # 12,x,y - opponent attacks your x card with his y card on the battlefield\n c_opp_pos = int(msg[:8], base=16)\n c_my_pos = int(msg[8:16], base=16)\n opp_card = self.master.opp_bfield[c_opp_pos]\n my_card = self.master.bfield[c_my_pos]\n self.master.add_log(f\"Opponent is attacking your card {my_card.name} with card {opp_card.name}.\")\n self.master.creature_attacked(c_opp_pos, c_my_pos)\n elif command == 112:\n # 112,x - returned which card you will attack\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 13:\n # 13,x,y1,y2,... 
- opponent attacks your shields with y card\n # x - position of creature on the board\n # ya - a-th shield attacked by this creature\n creature_pos = int(msg[:8], base=16)\n msg = msg[8:]\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n shields_string = \", \".join([str(pos) for pos in shields_pos])\n self.master.add_log(f\"Your shields at pos {shields_string} are being attacked by {self.master.opp_bfield[creature_pos].name}.\")\n self.master.shields_attacked(creature_pos, shields_pos)\n elif command == 113:\n # 113,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block shield attack, continue\n self.master.attack_shield()\n else:\n # Oppponent blocked with creature\n self.master.selected_shields = []\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 14:\n # 14,y1,y2,... - opponent destroys your shields\n # ya - a-th shield\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n self.master.shield_destroyed(shields_pos)\n elif command == 114:\n # 114,x - opponent picked up x shield to his hand\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up {c_pos} shield to his hand.\")\n self.master.refresh_screen()\n elif command == 214:\n # 214,x - opponent played x shield to spell/battle zone\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.add_log(f\"Opponent played a card from {c_pos} shield trigger.\")\n self.master.refresh_screen()\n elif command == 314:\n # 314 - opponent ended handling shield attack\n self.master.selected_card = []\n self.master.your_turn = 1\n elif command == 15:\n # 15 - id of the discarded card\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent discarded {card.name}\")\n self.master.refresh_screen()\n elif command == 16:\n # 16,v,x,y - x player taps/untaps a y creature\n # v - 0/1 - tap/untap\n # x - 0/1 - you/opponent\n # y - pos\n c_tap = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_tap == 0:\n # Tap\n if c_player == 0:\n # You\n self.master.bfield.set_tapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now tapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_tapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now tapped.\")\n if c_tap == 1:\n # Untap\n if c_player == 0:\n # You\n self.master.bfield.set_untapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now untapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_untapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now untapped.\")\n self.master.refresh_screen()\n elif command == 17:\n # 17,c,s1,p1,s2,p2... 
- opponent chooses which cards to destroy from the list\n # c - how many creatures to destoy\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_destoyed(count, target_list)\n elif command == 117:\n # 117 - opponent choosed cards and his actions ended\n self.master.post_destroy_creatures()\n elif command == 18:\n # 18,x - opponent adds card x from his deck to hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his deck to his hand\")\n elif command == 19:\n # 19,x - opponent adds card x from his graveyard to his hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.remove_card(card)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to his hand\")\n elif command == 20:\n # 20,c,s1,p1,s2,p2... - opponent chooses which cards to move to manazone from the list\n # c - how many creatures to sacrafice\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_put_to_mana(count, target_list)\n elif command == 120:\n # 120 - opponent choosed cards and his actions ended\n self.master.post_sacrafice_creatures()\n elif command == 21:\n # 21,y,x - player x puts card from y pos on battlefield zone to manazone\n # x - 0/1 - opponent/you\n # y - position\n c_player = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n if c_player == 0:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent moved card {card.name} from his battlezone to the mana zone\")\n elif c_player == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.mana.add_card(card)\n self.master.add_log(f\"Opponent moved your card {card.name} from battlezone to your mana zone\")\n elif command == 22:\n # 22,x - player x puts card from y pos on battlefield zone to hand\n # x - position\n c_pos = int(msg[:8], base=16)\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up card {card.name} from his battlezone to his hand\")\n elif command == 23:\n # 23 - opponent added an z effect to x card on y battefield\n c_pos = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_effect_name = int(msg[16:24], base=16)\n effect_name = EffectName(c_effect_name).name\n if c_player == 0:\n # to the opponent\n card = self.master.opp_bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to his card {card.name}\")\n elif c_player == 1:\n # to the player\n card = self.master.bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to your card {card.name}\")\n elif command == 24:\n # 24,x - opponent attacks you directly with x card\n # x - position of creature on the board\n creature_pos = int(msg[:8], base=16)\n self.master.add_log(f\"You are being directly attacked by {self.master.opp_bfield[creature_pos].name}.\")\n 
self.master.directly_attacked(creature_pos)\n elif command == 124:\n # 124,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block, you win\n self.master.win()\n else:\n # Oppponent blocked with creature\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 25:\n # 25 - opponent won the game\n self.master.lose(True)\n elif command == 26:\n # 26 - opponent lost the game\n self.master.win(True)\n elif command == 27:\n # 27 - start of the next turn\n self.master.turn_count += 1\n self.master.add_turn_info()", "def handle_command(ARGS, CLIENT, command, channel):\n message = '''Commands I know:\n list teams\n scores <optional week number>\n does Brandon suck\n '''\n message = \"\"\n attachments = \"\"\n if command == \"list teams\":\n message = '\\n'.join(map(lambda x: x.team_name, ARGS.league.teams))\n elif command == \"does brandon suck\":\n message = 'yes'\n elif 'scores' in command:\n pieces = command.split(' ')\n if len(pieces) == 1:\n message = 'Current Scoreboard'\n matchups = ARGS.league.scoreboard(projections=True)\n else:\n message = 'Scoreboard for week ' + pieces[1]\n matchups = ARGS.league.scoreboard(pieces[1], projections=True)\n\n attachments = [{\n 'fallback': 'A textual representation of your table data',\n 'fields': [\n {\n 'title': 'Home',\n 'value': '\\n'.join(map(lambda x: x.home_team.team_abbrev + \" \" + str(x.home_score) + \" (\" + str(x.home_projection) + \")\", matchups)),\n 'short': True\n },\n {\n 'title': 'Away',\n 'value': '\\n'.join(map(lambda x: x.away_team.team_abbrev + \" \" + str(x.away_score) + \" (\" + str(x.away_projection) + \")\", matchups)),\n 'short': True\n }\n ]\n }]\n CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, attachments=attachments, as_user=True)\n\n # CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, as_user=True)", "def handle(self, msg):\n\n if msg.command == \"PING\":\n self._sendmsg(\"PONG :{}\".format(msg.args[0]))\n\n elif msg.command == \"JOIN\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has joined {}\".format(name, channel))\n\n elif msg.command == \"PART\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has left {}\".format(name, channel))\n\n elif msg.command == \"KICK\":\n name = msg.sendername\n channel = msg.args[0]\n victim = msg.args[1]\n print(\"{} has kicked {} from {}\".format(name, victim, channel))\n\n elif msg.command == \"QUIT\":\n name = msg.sendername\n print(\"{} has quit IRC\".format(name))\n\n elif msg.command == \"KILL\":\n name = msg.sendername\n victim = msg.args[0]\n print(\"{} has killed {}\".format(name, victim))\n\n elif msg.command == \"NICK\":\n name = msg.sendername\n newname = msg.args[0]\n print(\"{} is now known as {}\".format(name, newname))\n\n elif msg.command == \"MODE\":\n name = msg.sendername\n target = msg.args[0]\n mode = msg.args[1]\n print(\"{} has set the mode of {} to {}\".format(name, target, mode))\n\n elif msg.command == \"NOTICE\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}]! 
{}\".format(name, target, message))\n\n elif msg.command == \"PRIVMSG\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}] {}\".format(name, target, message))\n\n elif msg.command.isdigit():\n print(msg.args[-1])\n\n else:\n print(str(msg))\n\n hooks.handle(self, msg)", "def handle(self, m):\n\n\t\tline = m.split(\" \")\n\n\t\tif line[0] == \"PING\":\n\t\t\tself(\"PONG\", line[1])\n\t\telif len(line) > 1 and line[1] == \"001\":\n\t\t\tself.callbacks[\"loggedin\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"JOIN\":\n\t\t\tself.callbacks[\"joined\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"PRIVMSG\":\n\t\t\tself.callbacks[\"messaged\"](self, *line)", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def handle_command(self, command, user, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean\"\n\n # Finds and executes the given command, filling in response\n response = None\n print('From Usr= '+str(user))\n if len(self.game.players)>=1 : \n print('Expected Usr= ' + str(self.game.players[self.cur_player].user_id))\n\n # STATE INIT\n if self.game_state == GAME_STATE_INIT :\n\n # Join game\n if command.startswith(\"join\"):\n res = self.game.add_player(user)\n user_name = self.get_users_displayname(user)\n if res == 1 :\n response = \"Welcome Aboard \" + str(user_name) + \"! :partyparrot:\"\n response_type = \"public\"\n else : \n response = \"You are already playing...\"\n response_type = \"public\"\n\n # Leave Game\n if command.startswith(\"leave\"):\n self.game.leave_game(user)\n response = \"User has left the game\"\n\n # Start Round\n if command.startswith(\"start\"):\n if len(self.game.players) > 1 :\n self.game.start_game()\n response = \"Let's begin\"\n response_type = \"public\"\n self.game_state = GAME_STATE_SET_BLINDS\n # Notify each player of their hand\n for player in self.game.players: \n private_response = \"Your hand: \"\n private_response += player.hand.print_hand()\n self.slack_client.api_call(\n \"chat.postEphemeral\",\n channel=channel,\n text=private_response,\n user=player.user_id \n )\n self.slack_client.api_call(\n \"channels.setTopic\",\n channel=channel,\n topic=\"A game is in progress! :congaparrot::congaparrot::congaparrot::congaparrot:\"\n ) \n else : \n response = \"Not enough players have joined yet.\"\n \n # State Betting\n if self.game_state == GAME_STATE_BETTING :\n responce_type = \"public\"\n # Check if user can actually play...\n if self.game.players[self.cur_player].active and \\\n not self.game.players[self.cur_player].all_in and \\\n self.game.players[self.cur_player].user_id == user:\n # Raising\n valid_command = False\n if command.startswith(\"raise \") :\n raise_str = command[6:].strip()\n if raise_str.isdigit() : \n res = self.game.raise_bet(self.game.players[self.cur_player].user_id,int(raise_str))\n if res == 2 :\n response = \"Player is all in!\"\n valid_command = True\n elif res == 1 :\n response = \"Current bet is set to \" + str(self.game.max_bet)\n valid_command = True\n else : \n response = \"... 
You can't raise '\" + raise_str +\"'\"\n \n # Calling\n if command.startswith(\"call\"):\n res = self.game.call(self.game.players[self.cur_player].user_id)\n response = \"Player calls.\"\n valid_command = True\n # All In\n if command.startswith(\"all\"):\n self.game.go_all_in(self.game.players[self.cur_player].user_id)\n response = \"Player is all in!\"\n valid_command = True\n # Fold\n if command.startswith(\"fold\"):\n self.game.fold(self.game.players[self.cur_player].user_id)\n response = \"Player folds\"\n valid_command = True\n # Check\n if command.startswith(\"check\"):\n res = self.game.check(user)\n response = \"Player Checks\"\n if res == 1 : \n valid_command = True\n\n # Move onto next player after the current player makes a move\n if valid_command :\n self.cur_player = ((self.cur_player+1)%len(self.game.players))\n while not self.game.players[self.cur_player].active :\n self.cur_player = ((self.cur_player+1)%len(self.game.players))\n print(self.cur_player)\n\n \n \n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def execute(self, irc_c, msg, cmd):", "def handle_command(self, command, players, user, channel):\r\n response = self.help()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n elif command[0] == self.NEW_GAME_COMMAND:\r\n return self.new_game(players, channel)\r\n \r\n elif command[0] == self.TARGET_COMMAND:\r\n return self.target(user)\r\n \r\n elif command[0] == self.SURVIVORS_COMMAND:\r\n return self.survivors()\r\n \r\n elif command[0] == self.EXPIRE_COMMAND:\r\n return self.expire(channel)\r\n \r\n elif command[0] == self.REMOVE_COMMAND:\r\n return self.remove(command, channel)\r\n \r\n elif command[0] == self.KILL_COMMAND:\r\n (success, response) = self.kill(user, command)\r\n if success and self.game.get_active_channel() != \"\" and channel != self.game.get_active_channel():\r\n post_to_channel(self.game.get_active_channel(), response)\r\n return \"\"\r\n \r\n elif command[0] == self.LOAD_LAST_GAME_COMMAND:\r\n return self.load_last_game(channel)\r\n \r\n return response", "def handleMessage(msg):", "def _handle_commands(self, event, session):\n message = event['body']\n\n for regex, func, help in self._COMMANDS:\n match = regex.match(message)\n if match is not None:\n func(self, event, session=session, **match.groupdict())\n return True\n\n return False", "def handle(self, message):", "def on_command(self, game) -> None:\n pass", "def handle_hive_commands(data):\n print data\n\n incomingCommand = str(data)\n command = incomingCommand[6:]\n\n #Check for a start command\n if command == \"start\":\n\tR.state = \"active\"\n\tR.hiveCommand = command\n\tStateMachine.state_start()\n\n #Check for a stop command\n elif command == \"stop\":\n\tR.state = \"dormant\"\n\tR.hiveCommand = command\n\tStateMachine.state_stop()\n\n #Check for a pause command\n elif command == \"pause\":\n\tR.hiveCommand = command\n\tStateMachine.state_wait_for_start()\n\n #Check for formation commands\n elif command == \"line\":\n\tR.hiveCommand = command\n elif command == \"square\":\n\tR.hiveCommand = command\n\n #Check for incorrect commands\n else:\n\tprint \"Invalid command.\"", "def handleCommand(self, command, prefix, params):\n irc.IRCClient.handleCommand(self, command, prefix, params)\n if len(params) < 2:\n return\n plugins = plugin_manager.filter(\n channel=self.channel, action=command.lower())\n for plugin in plugins:\n plugin.handle_action(protocol=self, 
action=command.lower(),\n user=prefix, message=params[1])", "def handle_command(command, channel):\n response = \"Not sure what you mean. \" + \\\n \"Try the following commands: \\n\" +\\\n \"@netsilbot alert list\\n\" +\\\n \"@netsilbot alert details <alertID>\\n\" +\\\n \"@netsilbot service list\\n\" +\\\n \"@netsilbot service details <serviceID>\\n\"+\\\n \"(You can add 'text' or 'raw' options for formatting the output)\"\n\n\n if command.startswith(COMMANDS[0]):\n #print command\n subcommand = command.split(' ')[1]\n if(subcommand=='list'):\n if(len(command.split(' '))>2):\n formatOutput = command.split(' ')[2]\n else:\n formatOutput=''\n\n response = GetAlertList(formatOutput)\n\n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='details'):\n response = GetAlertDetails([],command.split(' ')[2])\n sendSlackMessage(response, channel)\n\n elif(subcommand=='rule'):\n subsubcommand = command.split(' ')[2]\n if(subsubcommand=='list'):\n if(len(command.split(' '))>3):\n formatOutput = command.split(' ')[3]\n else:\n formatOutput=''\n\n response = GetAlertRuleList(formatOutput)\n \n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subsubcommand=='details'):\n response = GetAlertRuleDetails([],command.split(' ')[3])\n sendSlackMessage(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='template'):\n subsubcommand = command.split(' ')[2]\n if(subsubcommand=='list'):\n if(len(command.split(' '))>3):\n formatOutput = command.split(' ')[3]\n else:\n formatOutput=''\n\n response = GetAlertTemplateList(formatOutput)\n \n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subsubcommand=='details'):\n response = GetAlertTemplateDetails([],command.split(' ')[3])\n sendSlackMessage(response, channel)\n\n else:\n sendSlackMessage(response, channel)\n\n elif command.startswith(COMMANDS[1]):\n subcommand = command.split(' ')[1]\n if(subcommand=='list'):\n if(len(command.split(' '))>2):\n formatOutput = command.split(' ')[2]\n else:\n formatOutput=''\n\n response = GetServiceList(formatOutput)\n\n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='details'):\n response = GetServiceDetails([],command.split(' ')[2])\n sendSlackMessage(response, channel)\n \n else:\n sendSlackMessage(response, channel)\n\n elif command.startswith(COMMANDS[2]):\n subcommand = command.split(' ')[1]\n if(subcommand=='run'):\n if(len(command.split(' '))>2):\n queryText = command.split('run')[1].strip()\n else:\n queryText=''\n\n print queryText\n\n response=''\n response = RunQuery(query=queryText)\n #print response\n\n sendSlackMessageWithAttactment(response, channel)\n \n else:\n sendSlackMessage(response, channel)\n\n else:\n sendSlackMessage(response, channel)", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n 
response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "async def on_message(message):\n\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n # intializes a scores object\n hiscores = Scores(message)\n\n if message.content.startswith('!hello'):\n msg = 'Hello {0.author.mention}'.format(message)\n await message.channel.send(msg)\n\n # get the command without !\n command = message.content.split()[0][1:]\n\n # retrieve the score of a player\n if message.content.startswith('!') and command in SKILLS:\n\n # retrieve the username that comes after the !level command and set underscores\n username = message.content.split()[1:]\n username = '_'.join(username)\n\n # get scores\n await hiscores.show_score(username, command)\n\n if message.content.startswith('!compare'):\n\n # get skill\n skill = message.content.split()[1]\n\n # check if the skill is valid, if not we compare based on total level and experience\n if not skill in SKILLS:\n\n # get the players\n players = ' '.join(message.content.split()[1:])\n players = players.split(' - ')\n\n for i, player in enumerate(players):\n players[i] = player.replace(' ', '_')\n\n # compare the players on total level if nothing is given\n await hiscores.compare(players, 'total')\n\n else:\n\n # get the players after the skill\n players = ' '.join(message.content.split()[2:])\n players = players.split(' - ')\n\n for i, player in enumerate(players):\n players[i] = player.replace(' ', '_')\n\n print(players)\n print(skill)\n # compare the players on total level if nothing is given\n await hiscores.compare(players, skill)\n\n\n if message.content.startswith('!pok'):\n msg = 'Heb je m al Marc?'.format(message)\n await message.channel.send(msg)", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Person not found. Last name may need to be capitalized. Try *{}*.\".format(\"@Rog Smog find [Last name]\")\n\n # Finds and executes the given command, filling in response\n response = None\n\n #Score lookup\n if command.startswith(\"score\"):\n print(\"Score request recieved. 
Processing...\")\n wb = openpyxl.load_workbook('Feedback_Form.xlsx')\n sheet = wb.active\n for row in range(2, sheet.max_row +1):\n if sheet['B' + str(row)].value is not None:\n open(\"FeedbackEmails.txt\", \"a\").write(sheet['B' + str(row)].value)\n open(\"PriorTrainJUST.txt\", \"a\").close()\n if sheet['B' + str(row)].value is not None and eval('command').replace('score ','') in sheet['B' + str(row)].value:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text= [\"FIX\", sheet['BX' + str(row)].value, \"VT\", sheet['AZ' + str(row)].value, \"Live\", sheet['AB' + str(row)].value]\n )\n print(\"Score request processed.\")\n if eval('command').replace('score ','') not in open(\"FeedbackEmails.txt\").read():\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text= \"Email not found.\"\n )\n open('FeedbackEmails.txt', 'w').close()\n \n #Person Lookup\n if command.startswith(\"find\"):\n print(\"Info request recieved. Processing...\")\n wb = openpyxl.load_workbook('Database.xlsx')\n sheet = wb.active\n for row in range(2, sheet.max_row +1):\n if sheet['D' + str(row)].value is not None and eval('command').replace('find ','') in sheet['D' + str(row)].value:\n response = [sheet['B' + str(row)].value, sheet['D' + str(row)].value, sheet['G' + str(row)].value, sheet['L' + str(row)].value, sheet['H' + str(row)].value, sheet['R' + str(row)].value]\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )\n print(\"request processed.\")\n print(eval('command').replace('find ',''))", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n 
pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n 
pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif 
event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == 
\"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif 
command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "def handle_command(message, slack_config):\n\n message.react(\"+1\")\n\n handler = {\n \"schedule_job\": handle_schedule_job,\n \"cancel_job\": handle_cancel_job,\n \"schedule_suppression\": handle_schedule_suppression,\n \"cancel_suppression\": handle_cancel_suppression,\n }[slack_config[\"type\"]]\n\n handler(message, slack_config)", "def handle_command(command, channel):\r\n # Default response is help text for the user\r\n default_response = \"Not sure what you mean\"\r\n default_food_response = \"I didn't quite catch that, but I see that you mentioned something about food. If you want me to order some food, try: @Starter Bot Order <<food>>\"\r\n\r\n # Finds and executes the given command, filling in response\r\n # This is where you start to implement more commands!\r\n response = None\r\n\r\n verb_list=['order','place','make']\r\n food_list = [line.rstrip('\\n') for line in open('food.txt')]\r\n\r\n print(\"Made the lists\")\r\n\r\n predictor = Predictor.from_path(\"srl-model-2018.05.25.tar.gz\")\r\n result=predictor.predict(command)\r\n print(result)\r\n\r\n for dictionary in result['verbs']:\r\n verb = dictionary['verb']\r\n if verb in verb_list:\r\n if verb=='order':\r\n try:\r\n response = dictionary['description']\r\n response=response.split('ARG1: ')[1].replace(']','')\r\n except:\r\n print(\"We did an oopsie here\")\r\n\r\n print(\"Went through the dictionaries\")\r\n\r\n if response == None:\r\n for word in command:\r\n if word in food_list:\r\n response=default_food_response\r\n break\r\n\r\n # Sends the response back to the channel\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=response or default_response\r\n )", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def handle_command(command, channel):\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\n\n response = None\n\n if command.startswith(EXAMPLE_COMMAND):\n response = \"Sure...write some code then I can do that!\"\n elif command.startswith(\"date\"):\n response = currentDate()\n elif command.startswith(\"time\"):\n response = currentTime()\n elif command.startswith(\"your my best friend\") or command.startswith(\"you are my best friend\"):\n response = \"Thanks so much, buddy!!! 
\\n Your the best!!\"\n elif command.startswith(\"hello\") or command.startswith(\"hi\") or command.startswith(\"hey\"):\n response = \"Hello, My name is BackTalker\"\n elif command.startswith(\"thanks\") or command.startswith(\"thank you\"):\n response = \"Your Welcome\"\n elif command.startswith(\"math\"):\n problem = command[4:]\n response = \"The answer for {} is {}\".format(problem, str(eval(problem)))\n elif command.startswith(\"say something\"):\n response = compliments() \n elif command.startswith(\"weather\"):\n response = currentWeather()\n elif command.startswith(\"cmpt371\"):\n word = command[8:]\n response = cmpt371(word)\n\n\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. 
A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: '+response['status'])", "def handle_message(self, msg):\n Logger.debug(\"Slave: Trying to parse\")\n if MessageKeys.command_key in msg.fields:\n Logger.info(\"Slave: Message command: %s\", str(msg.get_command()))\n return self.messagehandler[msg.get_command()](self, msg)\n return self.handle_invalid_command(msg)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def handle_command(command, channel, user):\n\n\tif command.startswith('start game'):\n\t\tprint \"{} started a game\".format(user)\n\t\tsend_message(\"Generating map...\", channel)\n\t\tgames[user] = Game()\n\t\tsend_message(\"Game started! Type `end game` to stop. Don't forget to use the `help` command!\", channel)\n\t\tsend_message(\"You wake up in a stone dungeon. It seems like you were \"\n\t\t\t\t\t + \"chained to the wall but something or someone broke you\"\n\t\t\t\t\t + \" break from it. 
Although you don't remember much about\"\n\t\t\t\t\t + \" how you got hear, you do remember one thing: \"\n\t\t\t\t\t + \"You need to escape the *Coveo Lab*\", channel)\n\telif command.startswith('end game'):\n\t\tgames.pop(user, None)\n\t\tsend_message(\"Game stopped. You can now start a new one.\", channel)\n\telif user in games:\n\t\tsend_message(games[user].update(command), channel)\n\telse:\n\t\tsend_message(\"Please type the command `start game` to play\", channel)", "def _command(self, *cmd, handler=None):", "async def on_message(message):\n #Before doing anything\n #Check to see if the message started with the command character\n if not message.content.startswith(commandCharacter):\n #If it didn't, return\n return\n \n #Ensure the bot wasn't the one who sent the message\n if message.author == client.user:\n #If it was, return\n return\n \n #Kill is checked by default (cannot be disabled)\n if message.content.startswith(commandCharacter+'kill'):\n await client.send_message(message.channel, 'Goodbye Forever...')\n await client.logout()\n os.system('stty sane')\n exit(0)\n \n #Parse through the list of all enabled commands\n for command in enabledCommands:\n #We want to ignore case when comparing the message content\n messageContent = message.content.lower()\n #If the message matches one of our commands, we will handle it\n #Requires whitespace after command name\n if messageContent.startswith(commandCharacter+command):\n await handleCommand(client, message, voicePlayerList)", "def __serve_command_route(self, update: tg.Update, context: tg_ext.CallbackContext) -> None:\n message = update.message\n\n text = message.text\n command = text.split()[0].strip('/')\n\n user = User(message.chat)\n try: # try-except for user start initialization when he is not in db\n user.state = self.__state_manager.get_state(user.id)\n user.roles = self.__role_auth.get_user_roles(user.id)\n except (StateError, RoleError):\n user.state = 'free'\n user.roles = ['user']\n\n found_routes = self.__find_command_routes(command, user.state, user.roles)\n for route in found_routes:\n route.callback(user=user,\n message=text)", "def handle_command(game, command, user, channel):\n\n prefix = command.strip().split(\" \")[0]\n suffix = command.strip().split(\" \")[1:]\n\n if prefix == START_GAME:\n if len(suffix):\n for i in suffix:\n if not check_handle(i):\n return None, \"I'm sorry, one of the users you selected: \" \\\n \"'\" + i + \"' is not properly formatted. Please \" \\\n \"try again using their @user Slack handle.\"\n if game.new_game(user, suffix, channel):\n return None, \"Game Started with users \" + \\\n \" \".join(suffix).upper()\n\n else:\n return None, \"A game with \" + \\\n \" \".join(game.players.keys()).upper() + \" is already in \" \\\n \"progress. 
Please contact them to end the game or use \" \\\n \"the '@rps quit' command if you're not afraid to burn \" \\\n \"it all to the ground.\"\n else:\n return None, DEFAULT_RESPONSE\n\n elif prefix == CHOOSE:\n code = game.add(user, suffix[0])\n if code == 1:\n if 0 in game.players.values():\n return None, \"Choice was successfully added\"\n\n else:\n players = game.d.copy()\n to_channel = game.channel\n winners = game.play()\n\n if len(winners):\n return to_channel, \" \".join(winners).upper() + \\\n \" are a force to be reckoned with!\\nResults: \\n\" \\\n \"Rock - \" + \" \".join(players[\"rock\"]).upper() + \"\\n\" \\\n \"Paper - \" + \" \".join(players[\"paper\"]).upper() + \\\n \"\\nScissors - \" + \" \".join(players[\"scissors\"]).upper()\n\n else:\n return to_channel, \"It's a draw!\\nResults: \\n\" \\\n \"Rock - \" + \" \".join(players[\"rock\"]).upper() + \"\\n\" \\\n \"Paper - \" + \" \".join(players[\"paper\"]).upper() + \\\n \"\\nScissors - \" + \" \".join(players[\"scissors\"]).upper()\n\n elif code == 2:\n return None, \"Choice was not successfully added, \" \\\n \"it appears you're not in the current game, \" \\\n + user.upper() + \".\"\n\n elif code == 3:\n return None, \"Choice was not successfully added, \" \\\n \"the input '\" + suffix[0] + \"' is invalid.\"\n\n elif prefix == END_GAME:\n game.clear()\n return None, \"Game ended.\"\n\n elif prefix == STATUS:\n if 0 in game.players.values():\n return None, \"There is currently a game in progress, \" \\\n \"waiting for : \" + \\\n \" \".join(key if not value else \"\"\n \"\" for key, value in game.players.items())\n elif game.players:\n return None, \"There is currently a game in progress, but \" \\\n \"all players have played.\"\n else:\n return None, \"There is currently no game in progress.\"\n\n elif prefix == HELP:\n return None, HELP_TEXT\n\n return None, DEFAULT_RESPONSE", "def on_win(data):\n print(str(data))\n update_score_db(data['winner'], data['loser'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def handle_command(command, players_dict, all_properties):\n\n if command == '':\n return 'roll'\n\n words = command.split()\n cmd = words[0]\n args = words[1:]\n\n if cmd == 'end':\n return 'end'\n elif cmd == 'pay':\n amount = int(args[2])\n if args[0] != 'bank':\n player_pay = players_dict[args[0].lower()]\n player_pay.pay(amount)\n player_pay_name = player_pay.name\n else:\n player_pay_name = 'BANK'\n\n if args[1] != 'bank':\n player_receive = players_dict[args[1].lower()]\n player_receive.receive(amount)\n player_receive_name = player_receive.name\n else:\n player_receive_name = 'BANK'\n \n print('\\n *** {0} paid {1} ${2} ***\\n'.format(\n player_pay_name, player_receive_name, amount))\n\n return 'paid'\n elif cmd == 'bought':\n player_name = args[0]\n player = players_dict[args[0].lower()]\n\n short_name = args[1]\n the_property = all_properties.get(short_name)\n\n player.receive_property(the_property)\n\n print('\\n *** {0} bought {1} ***\\n'.format(\n player.name, the_property.name))\n elif cmd == 'price':\n short_name = args[0]\n the_property = all_properties.get(short_name)\n\n print 'Price of {0} is ${1}.'.format(the_property.name, the_property.price)\n elif cmd == 'leave':\n player_name = args[1]\n player = players_dict[args[1].lower()]\n dist = args[2]\n player.position = 10 + int(dist)\n player.leave_jail()", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n 
self.writeresponse(msg_queue.get())", "def command(data):\n LOG.debug(f\"Received text from {data['user']['name']}: {data['command']}\")\n\n room_id = data[\"room\"]\n user_id = data[\"user\"][\"id\"]\n\n if user_id != self.user:\n timer = self.timers_per_room.get(room_id)\n timer.reset()\n\n message = data[\"command\"]\n for user in self.players_per_room[room_id]:\n if user[\"id\"] == user_id:\n user[\"msg_n\"] += 1\n # Let's do some message mangling, but only to every second message\n if user[\"msg_n\"] % 2 == 0:\n message = message[::-1]\n message = message.upper()\n\n # emit the message to all other users\n # (the user who sent will see the original; has already seen it)\n for user in self.players_per_room[room_id]:\n if user[\"id\"] != user_id:\n self.sio.emit(\n \"text\",\n {\n \"room\": data[\"room\"],\n \"receiver_id\": user[\"id\"],\n \"message\": message,\n \"impersonate\": user_id,\n },\n callback=self.message_callback,\n )", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def handle_message(self, message):", "def _handle_bot_command(self, bot_command: BotCommand) -> str:\n try:\n player = self.ping_pong_service.get_player(bot_command.sender_id)\n except pingpong_service.PlayerDoesNotExist:\n self.ping_pong_service.add_new_player(bot_command.sender_id)\n return responses.new_player()\n\n if bot_command.command_type is None:\n return responses.unknown_command()\n elif bot_command.command_type == CommandType.HELP:\n return responses.help()\n elif bot_command.command_type == CommandType.NAME:\n if bot_command.command_value:\n success = self.ping_pong_service.update_display_name(player, bot_command.command_value.lower())\n if success:\n return responses.name_updated(bot_command.command_value.lower())\n else:\n return responses.name_taken()\n else:\n return responses.name(player.name)\n elif bot_command.command_type == CommandType.MATCH:\n return self._handle_match_command(bot_command.command_value)\n elif bot_command.command_type == CommandType.STATS:\n name = bot_command.command_value\n if name:\n try:\n rating, wins, losses, ratio = self.ping_pong_service.get_player_stats(name)\n return responses.player_stats(name, rating, ratio, wins, losses)\n except pingpong_service.PlayerDoesNotExist:\n return responses.player_does_not_exist()\n else:\n return responses.stats(\n self.ping_pong_service.get_total_matches(), self.ping_pong_service.get_leaderboard()\n )\n elif bot_command.command_type == CommandType.UNDO:\n return responses.unknown_command()\n # w_name, w_rating, l_name, l_rating = pingpong_service.undo_last_match()\n # return responses.match_undone(w_name, w_rating, l_name, l_rating)\n return responses.unknown_command()", "def main(self):\n\n dp = self.dispatcher\n\n dp.add_handler(MessageHandler(Filters.text, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.command, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.photo, self.__photo_handler))\n dp.add_handler(MessageHandler(\n Filters.location, self.__location_handler))\n dp.add_handler(CallbackQueryHandler(callback=self.__clb_handler))\n\n self.updater.start_polling()\n self.updater.idle()", "def handle_message(self, msg):\n pass", "def execute(UserMessage,player):\n if UserMessage['Action'] == \"Kill\": #This list represents the commands file I had before, neater that way?\n SendKill(UserMessage,player)\n if UserMessage['Action'] == 'Vote1':\n Vote1(UserMessage)\n if UserMessage['Action'] == 'Vote2':\n Vote2(UserMessage)\n if UserMessage['Action'] == \"DataPLZ\":\n 
DataPLZ(UserMessage)\n pass", "def cmd_handler():\n context = zmq.Context()\n\n # socket to receive commands (a subscription to ELECTION_CODE channel)\n cmd_socket = context.socket(zmq.SUB)\n cmd_socket.connect (\"tcp://%s:5556\" % SERVER_HOST)\n topicfilter = \"politiche2013\"\n cmd_socket.setsockopt(zmq.SUBSCRIBE, topicfilter)\n\n # socket to send replies\n reply_sender = context.socket(zmq.PUSH)\n reply_sender.connect(\"tcp://%s:5557\" % SERVER_HOST)\n\n # main loop\n while True:\n print \"Aye sir, unit {0} ready for your commands ...\".format(computer_id)\n # wait for a command\n string = cmd_socket.recv()\n\n # action\n print \"Message received: '%s'\" % (string,)\n\n # send reply to server\n print \"Sending reply to server\"\n reply = { 'unit' : computer_id, 'status' : 'configured'}\n reply_sender.send_json(reply)", "def on_command(server, user, command, args):", "def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)", "def accept_command():\n # TODO", "def action_handler(self):\n if self.state == data.DEAD:\n return\n\n x = 0\n for check in self.state_chart[self.state]:\n if not check:\n x += 1\n continue\n elif check():\n self.state = x\n\n # Some messages when state changes\n if self.state == data.CHASE:\n self.handler.message_box.add_msg(\"{} sees you!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n elif self.state == data.RUN:\n self.handler.message_box.add_msg(\"{} runs away!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n\n x += 1\n\n if self.state == data.HOLD:\n return\n elif self.state == data.CHASE:\n self.chase(self.handler.player)\n elif self.state == data.RUN:\n self.run(self.handler.player)", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "def main(self):\n\n updater = Updater(self.token)\n dp = updater.dispatcher\n\n dp.add_handler(MessageHandler(Filters.text, self.__msg_handler))\n dp.add_handler(MessageHandler(Filters.command, self.__msg_handler))\n dp.add_handler(CallbackQueryHandler(callback=self.__clb_handler))\n\n updater.start_polling()\n updater.idle()", "def handle(self, rsm_ctx):\n pass", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n 
self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def handle_message(self, msg, status):\n\n body = ensure_unicode(msg.Body)\n chat_id = get_chat_id(msg.Chat)\n\n if len(body) == 0:\n return False\n\n for name, cmd in self.commands.items():\n if body == name:\n cmd(msg, chat_id)\n return True\n\n\n if self.troller_is_running.get(chat_id):\n response = self.alice.respond(body)\n if response:\n msg.Chat.SendMessage(response)\n return True\n else:\n return False\n else:\n return False", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def handle_command(self, command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(\"HI\")\n\n # Finds and executes the given command, filling in response\n handler = self.dispatch_config.get_handler_by_command(command.split(None, 1)[0])\n if handler is None:\n print(\"unrecognized command detected: \" + command.split(None, 1)[0])\n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response\n )\n else:\n print(\"using: \" + handler[\"fullpath\"] + \" to handle the request\")\n if handler[\"class\"] in self.handlers:\n self.handlers[handler[\"class\"]].handle_command(command, channel)\n else:\n cls = locate(handler[\"fullpath\"])\n print(cls)\n self.handlers[handler[\"class\"]] = cls(self.slack_client, self.config)\n self.handlers[handler[\"class\"]].handle_command(command, channel)", "def commands():", "def handle_input(self):\n\n\t\tline = sys.stdin.readline().strip()\n\n\t\tif line == '':\n\t\t\t# print('')\n\t\t\tself.print_prompt()\n\t\t\treturn\n\n\t\tcommand_name, *parts = line.split()\n\n\t\tif command_name in self.commands:\n\t\t\t# Call given command and unpack parts into args\n\t\t\tself.commands[command_name]['callback'](*parts)\n\t\telse:\n\t\t\tprint(command_name + ' : command not found')\n\t\t\tself.print_available_commands()\n\n\n\t\tself.print_prompt()", "def exec_commands(com):\n reply = ''\n if com is not None:\n if com == commands[0]:\n tables = db.create_tables(houses, from_)\n if tables == True:\n for j in range(len(c_responses[0]) - 1):\n# can use join and split functions to create softer code?? 
at least in future instances\n bot.send_message(c_responses[0][j], from_)\n else:\n reply = c_responses[0][(len(c_responses[0])-1)]\n elif com == commands[1]:\n house_info = db.house_info(from_)\n # Add feautures to find highest scoring house and return number of members\n reply = \"Houses:\\n\"\n for house in house_info:\n reply += house[1] + \"\\n\"\n if house[2] != None:\n reply += f\"Score: {house[2]}pts\\n\\n\"\n else:\n reply += f\"Score: 0pts\\n\\n\"\n elif com.startswith(commands[2]):\n instructions = com.split()\n id = 0\n info = user_query()\n user_id = info['user']['id']\n check = db.check_admin(from_, user_id)\n if check and check != 'not sorted':\n for house in houses:\n id += 1\n if house == instructions[1]:\n score = db.update_house_score(id, instructions[2], from_)\n reply = f\"{instructions[1]} new score is {score}\"\n else:\n reply = \"You have no power over me! PS:(if you are an admin use the /appoint me command to be recognised as such)\"\n\n\n elif com == commands[3]:\n username = item['message']['from']['username']\n user_id = item['message']['from']['id']\n num = db.add_member_info(username, from_, user_id)\n if num[1]:\n reply = f\"Better be... {houses[num[0]-1]}\"\n else:\n print(num[0][0])\n reply = f\"I stand by my decision, {houses[num[0][0]-1]} will help you on the way to greatness!\"\n elif com == commands[4]:\n m_list = db.member_info(from_)\n reply = str(m_list)\n elif com == commands[5]:\n info = user_query()\n username = info['user']['username']\n m_info = db.member_info(from_, username)\n reply = f\"\"\"\n Username: {m_info[2]}\\nHouse: {houses[m_info[3]]}\\nStatus: {m_info[4]}\\nScore: {m_info[5]}\\n\n \"\"\"\n elif com == commands[6]:\n info = user_query()\n username = info['user']['username']\n user_id = info['user']['id']\n status_info = info['status']\n if status_info == 'creator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Headmaster')\n reply = f\"Rise Headmaster {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Headmaster\"\n elif status_info == 'administrator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Professor')\n reply = f\"Hence forth you shall be known as Professor {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Professor\"\n else:\n reply = 'Desist pretender! 
Only the entitled may command me so!'\n elif com == commands[7]:\n for command in commands:\n reply += f'{command}\\n'\n print(reply)\n \n return reply", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def cmd(self, message):\n pass", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def handle_lobby_command(self, lobby_command, client_socket):\n lobby_command = lobby_command.decode('utf-8')\n words = lobby_command.split()\n first = words[0]\n\n # Catch initial errors\n if len(words) == 1:\n msg = ''\n if first == \"$$create\" or first == \"$$delete\":\n msg = \"Must specify a room name argument to execute $$create or $$delete! [E.g. $$create pokemon]\"\n print(\"User did not specify roomname to create or delete\")\n elif first == \"$$send\":\n msg = \"Must specify a room to send your message for $$send [E.g. $$send pokemon]!\"\n print(\"User did not specify a room to send a message to\")\n elif first == \"$$join\" or first == \"$$leave\":\n msg = \"Must specify a room name argument to execute $$join or $$leave! [E.g. $$join pokemon]\"\n print(\"User did not specify roomname to join or leave\")\n elif first == \"$$enter\":\n msg = \"Must specify a room name argument to execute $$enter! [E.g. $$enter pokemon]\"\n print(\"User did not specify roomname to enter\")\n elif msg != '':\n print(\"Error catching failed ...\")\n return\n\n if first == \"$$create\":\n self.handle_create_room(lobby_command, client_socket)\n elif first == \"$$delete\":\n self.handle_delete_room(lobby_command, client_socket)\n elif first == \"$$join\":\n self.handle_join_room(lobby_command, client_socket)\n elif first == \"$$leave\":\n self.handle_leave_room(lobby_command, client_socket)\n elif first == \"$$list\":\n self.handle_list_room(lobby_command, client_socket)\n elif first == \"$$send\":\n self.handle_send_to_room(lobby_command, client_socket)\n elif first == \"$$enter\":\n self.handle_enter_room_session(lobby_command, client_socket)\n elif first == \"$$exit\":\n self.handle_exit_room_session(lobby_command, client_socket)\n elif first == \"$$whoami\":\n self.handle_whoami(client_socket)\n else:\n print(\"Not sure how this lobby command got to server. Should have been filtered by client filter\")", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(QUESTION_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.endswith(\"?\") or command.endswith(\"!\") or command.endswith(\".\"):\n command = command[:-1]\n # print(f\"command is {command}\")\n parsed_command = command.lower().split(\" \")\n print(f\"parsed_command: {parsed_command}\")\n # Extract the question number\n\n question_number = parsed_command[-1]\n\n print(f\"The question number is {question_number}\")\n if \"quiz\" or \"ask\" in parsed_command:\n # Call function to return question from a database\n q_or_a = \"q\"\n if \"answer\" in parsed_command:\n # print(\"answer\")\n q_or_a = \"a\"\n\n response = return_response(question_number, q_or_a)\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "async def data_received(self, data):\n prefix, command, args = parse_raw_irc_command(data)\n await self.command_handler.run(command, prefix, *args)", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def handle_command(channel, command):\n print(\"Channel = \", channel)\n print(\"Command = \", command)\n \n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command == \"help\":\n response = \"Sure...write some more code then I can do that!\"\n #help command lists all possible commands\n # if command == \"help\":\n # \tresponse = \"\"\n #report command \n elif command == \"report\":\n response = \"Here I will report on stuff...\"\n else:\n response = \"Try typing help to see valid commands\"\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(EXAMPLE_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.startswith(EXAMPLE_COMMAND):\n #response = \"Sure...write some more code then I can do that!\"\n command1, command2, food, floor = command.split()\n \n find_value(food+floor, call_response)\n response = food + \" is\" + find_value + \" there\"\n \n #response = food + \" is available on floor \" + floor\n \n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def process_context(command):\n global MOVEMENT\n global ACTION\n global FIGHT\n #global CURRENT_OPTIONS\n\n enemy = ENEMY_LIST[ZERO_BASE_PLYR_POS] # Check for enemies at the new location\n \n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"\n\n if enemy != 4:\n MOVEMENT = False\n ACTION = False\n FIGHT = True\n show_action(False, 10)\n show_movement(False, 10)\n show_special(False, 10)\n show_fight(True, 10)\n fight(enemy, command)\n\n elif MOVEMENT:\n MOVEMENT = False\n ACTION = True\n FIGHT = False\n show_fight(False, 10)\n show_movement(False, 10)\n show_special(False, 10)\n show_action(True, 10)\n\n else:\n MOVEMENT = True\n ACTION = False\n FIGHT = False\n show_fight(False, 10)\n show_action(False, 10)\n show_movement(True, 10)\n show_special(True, 10)\n\n #clear_messages(0)\n update_player_on_map()", "def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. 
This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. \"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. 
Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def handle_command(command, channel):\n\n # Default response is help text for the user\n default_response = \"Hmm, I don't understand.\"\n\n ABOUT_COMMAND = 'about'\n HELP_COMMAND = 'help'\n\n # Finds and executes the given command, filling in response\n response = None\n\n # This is where you start to implement more commands!\n if command.startswith(ABOUT_COMMAND):\n response = about_course(command)\n elif command.startswith(HELP_COMMAND):\n response = help_text(command)\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n 
)", "def handle_message(self, data, channel):\n pass", "def handle(self, msg, options):\n raise NotImplementedError()", "def handle(message):\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def _board(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.send_line('STATUS BOARD %s' % repr(self.game))", "def _handle_message(self, msg):\n self.event('message', msg)", "def _on_inbound_message(self, message):\n if message.channel.startswith(\"actuators/commands/\"):\n actuation = self.inbound_message_deserializer.deserialize_actuator_command(message)\n if actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_SET:\n self.actuation_handler.handle_actuation(actuation.reference, actuation.value)\n\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_STATUS:\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_UNKNOWN:\n print(\"Received unsupported actuation command\")\n\n else:\n print(\"Received unsupported message: \\n\" +\n message.channel + \"\\n\" + message.payload)", "def command():\n pass", "def command_processor(self, command_dict):\n current_command = command_dict.get('CMD')\n\n if current_command == 'PING':\n return {'CMD': 'PONG'}\n\n elif current_command == 'NEW_PEER_JOIN':\n peer_id = command_dict.get('peer_id')\n peer_host = command_dict.get('peer_host')\n port = command_dict.get('peer_port')\n protocol_logger('NEW_PEER_JOIN recieved from {} at {}:{}'.format(peer_id, peer_host, port))\n\n protocol_logger('Adding new peer with id {} at {}:{}'.format(peer_id, peer_host, port))\n self.add_new_peer(peer_id, peer_host, port)\n\n protocol_logger('Peer added successfully. 
Initiating upload node data to remote peer')\n self.data_upload(peer_id)\n\n elif current_command == 'NEW_JOIN_DATA_UPLOAD':\n protocol_logger('NEW_PEER_JOIN_DATA_UPLOAD recieved')\n\n self.handle_data_upload(command_dict.get('peer_list'), command_dict.get('data_list'))\n\n elif current_command == 'ADD_PEER':\n\n peer_id = command_dict.get('peer_id')\n peer_host = command_dict.get('peer_host')\n port = command_dict.get('peer_port')\n protocol_logger('ADD_PEER recieved')\n\n self.add_new_peer(peer_id, peer_host, port)\n protocol_logger('New peer {} at {}:{} added to peer list successfully'.format(peer_id, peer_host, port))\n\n elif current_command == 'VALIDATE_BLOCK':\n protocol_logger('VALIDATE_BLOCK recieved')\n block_data = command_dict.get('block_data')\n\n protocol_logger('Initiaiting block validation')\n if not self.chain_instance.validate_block(block_data):\n protocol_logger('Block validation failed')\n return 'KCA'\n\n elif current_command == 'ADD_BLOCK':\n protocol_logger('ADD_BLOCK recieved')\n block_data = command_dict.get('data')\n protocol_logger('Adding data block')\n self.chain_instance.add_block(block_data)\n protocol_logger('Block added successfully')\n\n return 'ACK'", "def Handle(interface,command,args,messagetype):\n ballfile = open(\"data/8ball.txt\")\n lines = ballfile.readlines()\n if not args.endswith(\"?\"):\n args=args+\"?\"\n interface.Reply(args+\" \"+lines[random.randint(0,len(lines)-1)])", "def enter_game_scores():\n pass", "def __msg_handler(self, update, bot):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def handle_text_messages(self, update, context):\n\n # Split user input into single words\n words = set(update.message.text.lower().split())\n logging.debug(f'Received message: {update.message.text}')\n\n # For debugging: Log users that received something from bot\n chat_user_client = update.message.from_user.username\n if chat_user_client == None:\n chat_user_client = update.message.chat_id\n\n\n # Possibility: received command from menu_trigger\n for Trigger in self.menu_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.show_menu(update, context)\n logging.info(f'{chat_user_client} checked out the menu!')\n\n return\n\n\n # Possibility: received command from loan_stats_trigger\n for Trigger in self.loan_stats_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n #self.send_textfile('under_construction.txt', update, context)\n self.show_loan_stats(update, context)\n self.send_signature(update, context)\n logging.info(f'{chat_user_client} got loan stats!')\n\n return\n\n # Possibility: received command from il_trigger\n for Trigger in self.il_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.show_il(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get IL info!')\n\n return\n\n # Possibility: received command from assets_trigger\n for Trigger in self.assets_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.self.show_assets(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get asset info!')\n\n return", "def on_message(data):\n pass", "def handle_msg(self, state_id, msg):\n pass", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif 
message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def handle_command(self, command, channel, user):\r\n response = \"Hello. Type \\\"@hexbot help\\\" for more information\"\r\n command = command.split()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n if command[0] == self.HELP_COMMAND:\r\n response = self.help()\r\n elif command[0] == self.DEBUG_COMMAND:\r\n response = self.debug(command, channel);\r\n elif command[0] == self.ASSASSIN_COMMAND:\r\n command.pop(0)\r\n response = self.assassin(command, channel, user);\r\n \r\n return response", "def update_handler(self, update):\n \n m = update.get('message',{})\n\n sender = self.get_sender(m)\n if sender == self.user_id: return\n \n # code that'll execute upon receiving any message\n if self.greeting and m:\n self.greeting(m)\n\n # parse bot commands\n command, params = self._parse_commands(m)\n \n if command: \n self._apply_command_filter(m, command, params)\n else:\n self._apply_msg_filter(m)", "def run(self):\n # To add a command to the command dispatch table, append here.\n self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, 2]})\n self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, 1]})\n self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, 2]})\n self.command_dispatch.update({self.DIGITAL_MESSAGE: [self.digital_message, 2]})\n self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, 3]})\n self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, 3]})\n self.command_dispatch.update({self.STRING_DATA: [self._string_data, 2]})\n self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, 2]})\n self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, 2]})\n self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, 2]})\n self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, 2]})\n self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, 2]})\n\n while not self.is_stopped():\n if len(self.pymata.command_deque):\n # get next byte from the deque and process it\n data = self.pymata.command_deque.popleft()\n\n # this list will be populated with the received data for the command\n command_data = []\n\n # process sysex commands\n if data == self.START_SYSEX:\n # next char is the actual sysex command\n # wait until we can get data from the deque\n while len(self.pymata.command_deque) == 0:\n pass\n sysex_command = self.pymata.command_deque.popleft()\n # retrieve the associated command_dispatch entry for this command\n dispatch_entry = self.command_dispatch.get(sysex_command)\n\n # get a \"pointer\" to the method that will process this command\n method = dispatch_entry[0]\n\n # now get the rest of the data excluding the END_SYSEX byte\n end_of_sysex = False\n while not end_of_sysex:\n # wait for more data to arrive\n while len(self.pymata.command_deque) == 0:\n pass\n data = self.pymata.command_deque.popleft()\n if data != self.END_SYSEX:\n command_data.append(data)\n else:\n end_of_sysex = True\n\n # invoke the method to process the command\n method(command_data)\n # go to the beginning of the loop to process the 
next command\n continue\n\n #is this a command byte in the range of 0x80-0xff - these are the non-sysex messages\n\n elif 0x80 <= data <= 0xff:\n # look up the method for the command in the command dispatch table\n # for the digital reporting the command value is modified with port number\n # the handler needs the port to properly process, so decode that from the command and\n # place in command_data\n if 0x90 <= data <= 0x9f:\n port = data & 0xf\n command_data.append(port)\n data = 0x90\n # the pin number for analog data is embedded in the command so, decode it\n elif 0xe0 <= data <= 0xef:\n pin = data & 0xf\n command_data.append(pin)\n data = 0xe0\n else:\n pass\n\n dispatch_entry = self.command_dispatch.get(data)\n\n # this calls the method retrieved from the dispatch table\n method = dispatch_entry[0]\n\n # get the number of parameters that this command provides\n num_args = dispatch_entry[1]\n\n #look at the number of args that the selected method requires\n # now get that number of bytes to pass to the called method\n for i in range(num_args):\n while len(self.pymata.command_deque) == 0:\n pass\n data = self.pymata.command_deque.popleft()\n command_data.append(data)\n #go execute the command with the argument list\n method(command_data)\n\n # go to the beginning of the loop to process the next command\n continue", "def _callback_main(self, data):\n alpha = data.data\n self.command_synergy(alpha)", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):" ]
[ "0.6840156", "0.6722847", "0.65974706", "0.64543533", "0.64348984", "0.6269596", "0.6221818", "0.6210287", "0.62007767", "0.6155704", "0.61550456", "0.6142969", "0.61363643", "0.6136049", "0.60979337", "0.6096791", "0.60831475", "0.6043705", "0.60334855", "0.60296637", "0.5975915", "0.5969062", "0.5966546", "0.59524125", "0.5952321", "0.5929195", "0.5922977", "0.5914357", "0.59078497", "0.5887404", "0.586177", "0.58527637", "0.58517635", "0.584856", "0.58419937", "0.58355707", "0.5825082", "0.5821656", "0.58094347", "0.5805278", "0.5796556", "0.5790298", "0.5785491", "0.5785041", "0.5776689", "0.577319", "0.5756841", "0.57463294", "0.5739822", "0.57375276", "0.57317007", "0.5729227", "0.5725812", "0.5718862", "0.57106584", "0.5707952", "0.57042634", "0.5701425", "0.56972116", "0.56961054", "0.5695805", "0.5685021", "0.5679226", "0.5676315", "0.56735325", "0.5672902", "0.5671238", "0.56658703", "0.5665045", "0.56634766", "0.5649785", "0.56464905", "0.5633643", "0.5631851", "0.5631851", "0.5631851", "0.5631851", "0.5624847", "0.5624844", "0.56190485", "0.5615207", "0.5614026", "0.56089264", "0.5603611", "0.5598764", "0.5597487", "0.5597405", "0.55929685", "0.558869", "0.55858815", "0.5580623", "0.55664927", "0.5558591", "0.55558485", "0.5550233", "0.5548317", "0.5546797", "0.5543995", "0.5543995", "0.5543995", "0.5543995" ]
0.0
-1
Scoreboard handler for incoming responses
def response(self, context, message): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif 
request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n 
pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n 
#Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "async def _response_handler(self):", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass", "def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == \"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. 
TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. 
\"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"", "def on_win(data):\n print(str(data))\n update_score_db(data['winner'], data['loser'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def _transit_to_scores(self, **kwargs):\n logging.debug(\"in _transit_to_scores\")\n handler = kwargs['handler']\n\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if not game:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Game for hangout %s not found\" % (self.hangout_id,)})\n return False\n if game.state != self.state_name:\n return False # not in 'voting' state\n game.state = 'scores'\n participants = self._calculate_scores(game)\n game.put()\n # send out the score 
info on the channels.\n # TODO: currently, the scores for this round are only recorded briefly,\n # as the code below will reset them as part of the setup for the\n # next round/game. Might want to change this.\n # TODO: should the broadcasting part be part of the handler logic or\n # the state transition logic?\n self._broadcast_scores(participants, game.key.id(), game.current_round)\n\n # We can now start a new round. This resets the card selection and vote\n # fields. If we've had N rounds, this is a new game instead. \n if game.current_round >= (config.ROUNDS_PER_GAME - 1):\n # if have reached the limit of rounds for a game,\n # then start new game using the participants of the current game\n self.start_new_game(participants)\n return True\n else:\n # otherwise, start new round in the current game\n logging.info(\"starting new round.\")\n game.start_new_round(participants)\n return True", "def test_parse_score_msg_of_responder(self):\r\n valid_grader_msgs = [\r\n u'<span>MESSAGE</span>', # Valid XML\r\n textwrap.dedent(\"\"\"\r\n <div class='matlabResponse'><div id='mwAudioPlaceHolder'>\r\n <audio controls autobuffer autoplay src='data:audio/wav;base64='>Audio is not supported on this browser.</audio>\r\n <div>Right click <a href=https://endpoint.mss-mathworks.com/media/filename.wav>here</a> and click \\\"Save As\\\" to download the file</div></div>\r\n <div style='white-space:pre' class='commandWindowOutput'></div><ul></ul></div>\r\n \"\"\").replace('\\n', ''), # Valid HTML5 real case Matlab response, invalid XML\r\n '<aaa></bbb>' # Invalid XML, but will be parsed by html5lib to <aaa/>\r\n ]\r\n\r\n invalid_grader_msgs = [\r\n '<audio', # invalid XML and HTML5\r\n ]\r\n\r\n answer_ids = sorted(self.problem.get_question_answers())\r\n\r\n # CodeResponse requires internal CorrectMap state. Build it now in the queued state\r\n old_cmap = CorrectMap()\r\n for i, answer_id in enumerate(answer_ids):\r\n queuekey = 1000 + i\r\n queuestate = CodeResponseTest.make_queuestate(queuekey, datetime.now(UTC))\r\n old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))\r\n\r\n for grader_msg in valid_grader_msgs:\r\n correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})\r\n incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})\r\n xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }\r\n\r\n for i, answer_id in enumerate(answer_ids):\r\n self.problem.correct_map = CorrectMap()\r\n self.problem.correct_map.update(old_cmap)\r\n output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)\r\n self.assertEquals(output[answer_id]['msg'], grader_msg)\r\n\r\n for grader_msg in invalid_grader_msgs:\r\n correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})\r\n incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})\r\n xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }\r\n\r\n for i, answer_id in enumerate(answer_ids):\r\n self.problem.correct_map = CorrectMap()\r\n self.problem.correct_map.update(old_cmap)\r\n\r\n output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)\r\n self.assertEquals(output[answer_id]['msg'], u'Invalid grader reply. 
Please contact the course staff.')", "def event_handler(self, response):\n pass", "def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: '+response['status'])", 
"def handle(self, message):", "def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()", "def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"", "def _handle( self, state, msg ):\n\t\tstate.requests[ msg.id ] = msg\n\t\tstatistics.requests.new()\n\t\tCORE.info( 'Incoming request of type %s' % msg.command )\n\t\tif not state.authenticated and msg.command != 'AUTH':\n\t\t\tres = Response( msg )\n\t\t\tres.status = BAD_REQUEST_UNAUTH\n\t\t\tself._response( res, state )\n\t\telif msg.command == 'AUTH':\n\t\t\tstate.authResponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tstate.authenticate( msg.body[ 'username' ], msg.body[ 'password' ] )\n\t\t\texcept ( TypeError, KeyError ), e:\n\t\t\t\tstate.authResponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\tstate.authResponse.message = 'insufficient authentification information'\n\t\telif msg.command == 'GET' and ( 'ucr' in msg.arguments or 'info' in msg.arguments ):\n\t\t\tresponse = Response( msg )\n\t\t\tresponse.result = {}\n\t\t\tresponse.status = SUCCESS\n\t\t\tif 'ucr' in msg.arguments:\n\t\t\t\tif not isinstance(msg.options, (list, tuple)):\n\t\t\t\t\traise InvalidOptionsError\n\t\t\t\tfor value in msg.options:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif not value:\n\t\t\t\t\t\t\t# make sure that 'value' is non-empty\n\t\t\t\t\t\t\tCORE.warn('Empty UCR variable requested. Ignoring value...')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif value.endswith('*'):\n\t\t\t\t\t\t\tvalue = value[ : -1 ]\n\t\t\t\t\t\t\tfor var in filter( lambda x: x.startswith( value ), ucr.keys() ):\n\t\t\t\t\t\t\t\tresponse.result[ var ] = ucr.get( var )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresponse.result[ value ] = ucr.get( value )\n\t\t\t\t\texcept ( TypeError, IndexError, AttributeError ), e:\n\t\t\t\t\t\tCORE.warn('Invalid UCR variable requested: %s' % (value,))\n\t\t\t\t\t\tresponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\t\t\tresponse.message = _('Invalid UCR variable requested: %s') % (value,)\n\n\t\t\telif 'info' in msg.arguments:\n\t\t\t\ttry:\n\t\t\t\t\tfd = gzip.open( '/usr/share/doc/univention-management-console-server/changelog.Debian.gz' )\n\t\t\t\t\tline = fd.readline()\n\t\t\t\t\tfd.close()\n\t\t\t\t\tmatch = MagicBucket.CHANGELOG_VERSION.match( line )\n\t\t\t\t\tif not match:\n\t\t\t\t\t\traise IOError\n\t\t\t\t\tresponse.result[ 'umc_version' ] = match.groups()[ 0 ]\n\t\t\t\t\tresponse.result[ 'ucs_version' ] = '{0}-{1} errata{2} ({3})'.format( ucr.get( 'version/version', '' ), ucr.get( 'version/patchlevel', '' ), ucr.get( 'version/erratalevel', '0' ), ucr.get( 'version/releasename', '' ) )\n\t\t\t\t\tresponse.result[ 'server' ] = '{0}.{1}'.format( ucr.get( 'hostname', '' ), ucr.get( 'domainname', '' ) )\n\t\t\t\t\tresponse.result[ 'ssl_validity_host' ] = int( ucr.get( 'ssl/validity/host', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\t\tresponse.result[ 'ssl_validity_root' ] = int( ucr.get( 'ssl/validity/root', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\texcept IOError:\n\t\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\t\t\tpass\n\n\t\t\tself._response( response, state )\n\t\telif msg.command == 'STATISTICS':\n\t\t\tresponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tpwent = pwd.getpwnam( state.username )\n\t\t\t\tif not pwent.pw_uid in ( 0, ):\n\t\t\t\t\traise KeyError\n\t\t\t\tCORE.info( 
'Sending statistic data to client' )\n\t\t\t\tresponse.status = SUCCESS\n\t\t\t\tresponse.result = statistics.json()\n\t\t\texcept KeyError:\n\t\t\t\tCORE.info( 'User not allowed to retrieve statistics' )\n\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\tself._response( response, state )\n\t\telse:\n\t\t\t# inform processor\n\t\t\tif not state.processor:\n\t\t\t\tstate.processor = Processor( *state.credentials() )\n\t\t\t\tcb = notifier.Callback( self._response, state )\n\t\t\t\tstate.processor.signal_connect( 'response', cb )\n\t\t\tstate.processor.request( msg )", "def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def handleMessage(msg):", "def responder():\n pass", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def handle_response(message_dict):\n c.master_command(bot_token, user_token, discuss_bot_id, discussion_chat_id,\n message_dict)", "def hears(request):\n\n #Wit makes our responses timeout, so we ignore Slack retries\n if \"HTTP_X_SLACK_RETRY_NUM\" in request.META:\n return HttpResponse(\"OK\", 200)\n\n slack_event = json.loads(request.body)\n\n # ============= Slack URL Verification ============ #\n # In order to verify the url of our endpoint, Slack will send a challenge\n # token in a request and check for this token in the response our endpoint\n # sends back.\n # For more info: https://api.slack.com/events/url_verification\n if \"challenge\" in slack_event:\n return HttpResponse(slack_event[\"challenge\"], 200)\n #removed {\"content_type\":\"application/json\"} from flask response\n\n # ============ Slack Token Verification =========== #\n # We can verify the request is coming from Slack by checking that the\n # verification token in the request matches our app's settings\n if pyBot.verification != slack_event.get(\"token\"):\n print \"Invalid Slack verification token: %s \\npyBot has: \\\n %s\\n\\n\" % (slack_event[\"token\"], pyBot.verification)\n # By adding \"X-Slack-No-Retry\" : 1 to our response headers, we turn off\n # Slack's automatic retries during development.\n return HttpResponse(message, 403)\n\n # ====== Process Incoming Events from Slack ======= #\n # If the incoming request is an Event we've subcribed to\n if \"event\" in slack_event:\n event_type = slack_event[\"event\"][\"type\"]\n # Then handle the event by event_type and have your bot respond\n return _event_handler(event_type, slack_event)\n\n # If our bot hears things that are not events we've subscribed 
to,\n # send a quirky but helpful error response\n return HttpResponse(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404)", "def handleSayServ(self, req):\n rospy.loginfo(\"Request arrived to AnimatedSpeech\")\n val = self.speech.say(req.msg.data)\n return SpeechResponse(val)", "def obj_received(self, obj):\n\n # TODO do something like handler registry\n\n if isinstance(obj, pb.Ping):\n self.handle_ping(obj)\n\n elif isinstance(obj, pb.Pong):\n self.handle_pong(obj)\n\n elif isinstance(obj, pb.ACS):\n if self.factory.config.failure != 'omission':\n res = self.factory.acs.handle(obj, self.remote_vk)\n self.process_acs_res(res, obj)\n\n elif isinstance(obj, pb.TxReq):\n self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.TxResp):\n self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationReq):\n self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationResp):\n self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.SigWithRound):\n self.factory.tc_runner.handle_sig(obj, self.remote_vk)\n\n elif isinstance(obj, pb.CpBlock):\n self.factory.tc_runner.handle_cp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Cons):\n self.factory.tc_runner.handle_cons(obj, self.remote_vk)\n\n elif isinstance(obj, pb.AskCons):\n self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)\n\n # NOTE messages below are for testing, bracha/mo14 is normally handled by acs\n\n elif isinstance(obj, pb.Bracha):\n if self.factory.config.failure != 'omission':\n self.factory.bracha.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Mo14):\n if self.factory.config.failure != 'omission':\n self.factory.mo14.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Dummy):\n logging.info(\"NODE: got dummy message from {}\".format(b64encode(self.remote_vk)))\n\n else:\n raise AssertionError(\"invalid message type {}\".format(obj))\n\n self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()", "def process(self):\n # hello_message = HelloMessage(*self.message.value)\n # TODO: assert realm is in allowed list\n welcome_message = WelcomeMessage()\n self.answer_message = welcome_message", "def grade_handler(self, request, suffix): # pylint: disable=unused-argument\r\n response_xml_template = textwrap.dedent(\"\"\"\\\r\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n <imsx_POXEnvelopeResponse xmlns = \"http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0\">\r\n <imsx_POXHeader>\r\n <imsx_POXResponseHeaderInfo>\r\n <imsx_version>V1.0</imsx_version>\r\n <imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier>\r\n <imsx_statusInfo>\r\n <imsx_codeMajor>{imsx_codeMajor}</imsx_codeMajor>\r\n <imsx_severity>status</imsx_severity>\r\n <imsx_description>{imsx_description}</imsx_description>\r\n <imsx_messageRefIdentifier>\r\n </imsx_messageRefIdentifier>\r\n </imsx_statusInfo>\r\n </imsx_POXResponseHeaderInfo>\r\n </imsx_POXHeader>\r\n <imsx_POXBody>{response}</imsx_POXBody>\r\n </imsx_POXEnvelopeResponse>\r\n \"\"\")\r\n # Returns when `action` is unsupported.\r\n # Supported actions:\r\n # - replaceResultRequest.\r\n unsupported_values = {\r\n 'imsx_codeMajor': 'unsupported',\r\n 'imsx_description': 'Target does not support the requested operation.',\r\n 'imsx_messageIdentifier': 'unknown',\r\n 'response': ''\r\n }\r\n # Returns if:\r\n # - score is out of range;\r\n # - can't parse response from TP;\r\n # 
- can't verify OAuth signing or OAuth signing is incorrect.\r\n failure_values = {\r\n 'imsx_codeMajor': 'failure',\r\n 'imsx_description': 'The request has failed.',\r\n 'imsx_messageIdentifier': 'unknown',\r\n 'response': ''\r\n }\r\n\r\n try:\r\n imsx_messageIdentifier, sourcedId, score, action = self.parse_grade_xml_body(request.body)\r\n except Exception as e:\r\n error_message = \"Request body XML parsing error: \" + escape(e.message)\r\n log.debug(\"[LTI]: \" + error_message)\r\n failure_values['imsx_description'] = error_message\r\n return Response(response_xml_template.format(**failure_values), content_type=\"application/xml\")\r\n\r\n # Verify OAuth signing.\r\n try:\r\n self.verify_oauth_body_sign(request)\r\n except (ValueError, LTIError) as e:\r\n failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)\r\n error_message = \"OAuth verification error: \" + escape(e.message)\r\n failure_values['imsx_description'] = error_message\r\n log.debug(\"[LTI]: \" + error_message)\r\n return Response(response_xml_template.format(**failure_values), content_type=\"application/xml\")\r\n\r\n real_user = self.system.get_real_user(urllib.unquote(sourcedId.split(':')[-1]))\r\n if not real_user: # that means we can't save to database, as we do not have real user id.\r\n failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)\r\n failure_values['imsx_description'] = \"User not found.\"\r\n return Response(response_xml_template.format(**failure_values), content_type=\"application/xml\")\r\n\r\n if action == 'replaceResultRequest':\r\n self.set_user_module_score(real_user, score, self.max_score())\r\n\r\n values = {\r\n 'imsx_codeMajor': 'success',\r\n 'imsx_description': 'Score for {sourced_id} is now {score}'.format(sourced_id=sourcedId, score=score),\r\n 'imsx_messageIdentifier': escape(imsx_messageIdentifier),\r\n 'response': '<replaceResultResponse/>'\r\n }\r\n log.debug(\"[LTI]: Grade is saved.\")\r\n return Response(response_xml_template.format(**values), content_type=\"application/xml\")\r\n\r\n unsupported_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)\r\n log.debug(\"[LTI]: Incorrect action.\")\r\n return Response(response_xml_template.format(**unsupported_values), content_type='application/xml')", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def api_handin_report(uuid):\n obj = request.payload\n\n # check uuid, so that we can prevent replay attack\n if obj['uuid'] != uuid:\n return 'uuid mismatch, do not attack'\n\n # construct HwScore object from payload\n try:\n score = HwScore.from_plain(obj)\n except Exception:\n return 'not valid score object'\n\n # load the handin object, and report error if not exist\n handin = Handin.query.filter(Handin.uuid == uuid).first()\n if not handin:\n return 'requested handin not found'\n\n # if handin.state not in ['Running', 'Pending'], it must already have a\n # score. 
reject the API call.\n if handin.state != 'Running' and handin.state != 'Pending':\n return 'score already reported'\n\n # Special hack: unittest will catch all exceptions.\n #\n # Such submissions may result 0.0 base score but marked as 'Accepted'.\n # I decide to treat these submissions 'Rejected', because no one\n # would accept a totally bad submission.\n handin.score = score.get_score()\n if handin.score < 1e-5 and score.accepted:\n score.accepted = False\n score.result = lazy_gettext('No test passed, submission rejected.')\n\n # update result of handin\n handin.state = 'Accepted' if score.accepted else 'Rejected'\n if score.accepted:\n handin.result = lazy_gettext('Your submission is accepted.')\n elif unicode(score.result):\n handin.result = score.result\n else:\n handin.result = lazy_gettext('Your submission is rejected.')\n handin.compile_error = score.compile_error\n handin.partials = score.partials\n\n # update hwscore table and set the final score of this homework\n if handin.is_accepted():\n final_score = handin.score * handin.scale\n hwscore = (FinalScore.query.filter(FinalScore.hwid == handin.hwid).\n filter(FinalScore.user_id == handin.user_id)).first()\n if not hwscore:\n hwscore = FinalScore(user_id=handin.user_id, hwid=handin.hwid,\n score=final_score)\n db.session.add(hwscore)\n elif final_score > hwscore.score:\n hwscore.score = final_score\n\n try:\n db.session.commit()\n except Exception:\n app.logger.exception('Cannot update result of submission(%s).' % uuid)\n return 'update database failed'\n\n return 'OK'", "def handle_message(self, message):", "def handle(req):\n return logic(req)", "def main_response(self, data):", "def main_response(self, data):", "def on_response(self, response):\n pass", "def handle_tournament_over(self, score):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n #############################\n print(\"Tournament over. 
Your score was: \" + str(score))", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def response_handling(self) -> global___Snippet.StreamingResponseHandling:", "def _winble_response_callback(self, value):\n if self.data_received_handler is not None:\n if callable(self.data_received_handler):\n self.data_received_handler(value)\n else:\n raise ValueError('data_received_handler is not callable.')", "def respond_to_message(self):\n\n MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n data = Converter(self.state).get_messages(meta_data=self.meta_data, message_data=self.message_data)\n\n outgoing_messages = data.get(\"messages\", [])\n events_to_publish = data.get(\"publish_events\", [])\n\n agent_messages = [message[\"message\"] for message in outgoing_messages if message[\"sending_to\"] == \"AGENT\"]\n user_messages = [message[\"message\"] for message in outgoing_messages if message[\"sending_to\"] == \"USER\"]\n\n agent_response = Util.send_messages(messages=agent_messages, sending_to=\"AGENT\")\n user_response = Util.send_messages(messages=user_messages, sending_to=\"USER\")\n\n if agent_response or user_response:\n\n Util.update_state(meta_data=self.meta_data, state=self.state)\n Util.log_events(meta_data=self.meta_data, state=self.state, events=events_to_publish)\n\n return 1", "def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')", "def handle_continue_event(methods=[\"GET\", \"POST\"]):\n if game.state == \"between rounds\":\n game.between_rounds()\n if not game.state == \"game over\":\n socketio.emit(\"restart round\")\n elif game.state == \"game over\":\n socketio.emit(\"end game\", game.score_dict)\n elif game.state == \"between tricks\":\n game.between_tricks()\n socketio.emit(\"next trick\")", "def serve(self, rq):\n # Call callback by key directly from socket\n request = rq['request']\n\n if request in self.callbacks :\n self.callbacks[request](rq)\n else :\n print \"unrecognised request\"", "def serve(self, rq):\n # Call callback by key directly from socket\n request = rq['request']\n\n if request in self.callbacks :\n self.callbacks[request](rq)\n else :\n print \"unrecognised request\"", "async def event_handler(self, response):\n data = ujson.loads(response.data)\n if isinstance(data, dict):\n if data['event'] == 'subscribed':\n print('Subscribed to channel: {0}, for pair: {1}, on channel ID: {2}'.format(data['channel'], data['pair'], data['chanId']))\n self.channel_mapping[data['chanId']] = (data['channel'], data['pair'])\n elif data['event'] == 'info':\n print('Exchange: {0} Websocket version: {1}'.format(self.id, data['version']))\n elif isinstance(data, list):\n if isinstance(data[1], str):\n print('Heartbeat on channel {0}'.format(data[0]))\n else:\n # Published data, time stamp and send to appropriate queue\n timestamp = self.microseconds() / 1000\n datetime = self.iso8601(timestamp)\n if self.channel_mapping[data[0]][0] == 'book':\n pair_id = self.channel_mapping[data[0]][1]\n await self.queues['orderbooks'][pair_id].put((data, timestamp, datetime))", "def process_server_response(self, server_response):", "def _respond_message(self, msg):\n self.set_status(200)\n self.set_header(\"Content-Type\", \"application/x-mplane+json\")\n self.write(mplane.model.unparse_json(msg))\n self.finish()", "def log_response(handler_input, response):\n # type: (HandlerInput, Response) -> None\n print(\"Alexa Response: {}\\n\".format(response))", "def log_response(handler_input, response):\n # type: (HandlerInput, Response) -> None\n print(\"Alexa Response: {}\\n\".format(response))", "def log_response(handler_input, response):\n # type: (HandlerInput, Response) -> None\n 
print(\"Alexa Response: {}\\n\".format(response))", "def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)", "def lro_handling(self) -> global___Snippet.LroResponseHandling:", "def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n # Handle a custom msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)", "def __server_inbound(d):\n\n\t\tif d[\"verb\"] == \"/say\":\n\t\t\tprint(\"[{}]: {}\".format(d[\"usr\"], d[\"body\"]))\n\n\t\telse:\n\t\t\tif d[\"success\"] == \"true\":\n\n\t\t\t\tif d[\"verb\"] == \"/set_alias\":\n\t\t\t\t\tprint(\"[Server]: Your alias has been set to {}\".format(d[\"body\"]))\n\t\t\t\t\treturn d[\"body\"]\n\n\t\t\t\telif d[\"verb\"] == \"/create\":\n\t\t\t\t\tprint(\"[Server]: {} is created!\".format(d[\"body\"]))\n\n\t\t\t\telif d[\"verb\"] == \"/join\":\n\t\t\t\t\tprint(\"[Server]: You have joined room {}\".format(d[\"body\"]))\n\n\t\t\t\telif d[\"verb\"] == \"/block\":\n\t\t\t\t\tprint(\"[Server]: You have blocked {}\".format(d[\"body\"]))\n\n\t\t\t\telif d[\"verb\"] == \"/unblock\":\n\t\t\t\t\tprint(\"[Server]: You have unblocked {}\".format(d[\"body\"]))\n\n\t\t\t\telif d[\"verb\"] == \"/delete\":\n\t\t\t\t\tprint(\"[Server]: You have deleted room {}\".format(d[\"body\"]))\n\n\t\t\t\telif d[\"verb\"] == \"/lsroom\":\n\t\t\t\t\trooms = d[\"rooms\"]\n\t\t\t\t\tprint(\"\\n[Server]: Available rooms: {}\".format(len(rooms)))\n\t\t\t\t\tprint(tabulate.tabulate([[_] for _ in rooms],\n\t\t\t\t\t\t\t\t\t\t\theaders = ['Chatroom'],\n\t\t\t\t\t\t\t\t\t\t\ttablefmt = 'orgtbl'))\n\t\t\t\t\tprint()\n\n\t\t\t\telif d[\"verb\"] == \"/lsusr\":\n\t\t\t\t\tlu = d[\"live_users\"]\n\t\t\t\t\tprint(\"\\n[Server]: Alive users: {}\".format(len(lu)))\n\t\t\t\t\tprint(tabulate.tabulate([[_[0], _[1]] for _ in lu],\n\t\t\t\t\t\t\t\t\t\t\theaders = ['Alias', 'Chatroom'],\n\t\t\t\t\t\t\t\t\t\t\ttablefmt = 'orgtbl'))\n\t\t\t\t\tprint()\n\n\t\t\telse:\n\t\t\t\t## TODO: add failed reason?\n\t\t\t\tprint(\"[Server]: {} operation failed! Reason: {}\".format(d[\"verb\"], d[\"reason\"]))", "def update_score(self, data):\r\n queuekey = data['queuekey']\r\n score_msg = data['xqueue_body']\r\n self.lcp.update_score(score_msg, queuekey)\r\n self.set_state_from_lcp()\r\n self.publish_grade()\r\n\r\n return dict() # No AJAX return is needed\r", "def enter_game_scores():\n pass", "def process_hub_reply(self, hub_reply):\n\n # Typical response from hub is \"OK\" if there are no user or\n # automated librian requests. 
Almost all responses are just \"OK\"\n # therefore the default process_hub_reply is \"pass\"\n # TODO Respond to hub repies if they are other than 'OK'\n # for example, push \"send 10 frames\" request onto deque\n # and then add \"do requested extra frames\" to detectors loop\n # so that images get sent even though there is no routine reason\n pass", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def ball_result_received(self, data):\n #print(f\"Reward {self.reward_num} Received, val {data.reward}\")\n self.reward = data.reward\n self.reward_num += 1", "def on_response(self, response):\n log.debug(\"Received response: %s\", response)", "def response_received(self, ignored):\n self._received += 1", "def score():\n highscore = session.get(\"highscore\", 0)\n nplays = session.get(\"nplays\", 0)\n score = request.json[\"score\"]\n if score > highscore:\n highscore = score\n session['highscore'] = highscore\n session['nplays'] = nplays +1\n\n return jsonify(brokeRecord = highscore)", "def handle_response(self, response):\n self.__log(f'Received response from server. The code is: \"{response}\"')\n if not response.status_code == 200:\n self.handle_api_error(response)\n self.to_output_file(response.text)", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def process_incoming_message(self):\n\n # Get the webhook data\n post_data = request.json\n\n # Determine the Spark Room to send reply to\n room_id = post_data[\"data\"][\"roomId\"]\n\n # Get the details about the message that was sent.\n message_id = post_data[\"data\"][\"id\"]\n message = self.spark.messages.get(message_id)\n if self.DEBUG:\n sys.stderr.write(\"Message content:\" + \"\\n\")\n sys.stderr.write(str(message) + \"\\n\")\n\n # First make sure not processing a message from the bots\n # Needed to avoid the bot talking to itself\n # We check using IDs instead of emails since the email\n # of the bot could change while the bot is running\n # for example from [email protected] to [email protected]\n if message.personId in self.spark.people.me().id:\n if self.DEBUG:\n sys.stderr.write(\"Ignoring message from our self\" + \"\\n\")\n return \"\"\n\n # Log details on message\n sys.stderr.write(\"Message from: \" + message.personEmail + \"\\n\")\n\n # Find the command that was sent, if any\n command = 
\"\"\n for c in self.commands.items():\n if message.text.find(c[0]) != -1:\n command = c[0]\n sys.stderr.write(\"Found command: \" + command + \"\\n\")\n # If a command was found, stop looking for others\n break\n\n # Build the reply to the user\n reply = \"\"\n\n # Take action based on command\n # If no command found, send the default_action\n if command in [\"\"] and self.default_action:\n # noinspection PyCallingNonCallable\n reply = self.commands[self.default_action][\"callback\"](message)\n elif command in self.commands.keys():\n # noinspection PyCallingNonCallable\n reply = self.commands[command][\"callback\"](message)\n else:\n pass\n\n # allow command handlers to craft their own Spark message\n if reply and isinstance(reply, Response):\n reply.roomId = room_id\n reply = reply.as_dict()\n self.spark.messages.create(**reply)\n reply = \"ok\"\n elif reply:\n self.spark.messages.create(roomId=room_id, markdown=reply)\n return reply", "def responseHandler(data): \r\n if len(data) <= 0:\n print \"oh no!\\n\\n\"\n else:\n print \"\\n\\n%s\" % data", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def process_messages(self):\n pass", "async def post_leaderboard(\n self,\n ctx: commands.Context,\n leaderboard_type: Literal[\n \"season\",\n \"weekly\",\n \"worst\",\n \"playoffs\",\n \"playoffs_weekly\",\n \"pre-season\",\n \"pre-season_weekly\",\n ],\n ) -> None:\n leaderboard_type_str = leaderboard_type.replace(\"_\", \" \").title()\n leaderboard = await self.pickems_config.guild(ctx.guild).leaderboard()\n if leaderboard == {} or leaderboard is None:\n await ctx.send(_(\"There is no current leaderboard for this server!\"))\n return\n if leaderboard_type != \"worst\":\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][leaderboard_type], reverse=True\n )\n else:\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][\"total\"] - i[1][\"season\"], reverse=True\n )\n msg_list = []\n count = 1\n user_position = None\n total_str = {\n \"season\": \"total\",\n \"playoffs\": \"playoffs_total\",\n \"pre-season\": \"pre-season_total\",\n }.get(leaderboard_type, \"total\")\n\n for member_id in leaderboard:\n if str(member_id[0]) == str(ctx.author.id):\n user_position = leaderboard.index(member_id)\n member = ctx.guild.get_member(int(member_id[0]))\n if member is None:\n member_mention = _(\"User has left the server \") + member_id[0]\n else:\n member_mention = member.mention\n if leaderboard_type in [\"weekly\", \"playoffs_weekly\", \"pre-season_weekly\"]:\n points = member_id[1].get(leaderboard_type, 0)\n msg_list.append(\"#{}. {}: {}\\n\".format(count, member_mention, points))\n elif leaderboard_type in [\"season\", \"playoffs\", \"pre-season\"]:\n total = member_id[1].get(total_str, 0)\n wins = member_id[1].get(leaderboard_type, 0)\n try:\n percent = (wins / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. 
{member_mention}: {wins}/{total} correct ({percent:.4}%)\\n\"\n )\n else:\n total = member_id[1].get(total_str, 0)\n losses = member_id[1].get(total_str, 0) - member_id[1].get(leaderboard_type)\n try:\n percent = (losses / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. {member_mention}: {losses}/{total} incorrect ({percent:.4}%)\\n\"\n )\n count += 1\n leaderboard_list = [msg_list[i : i + 10] for i in range(0, len(msg_list), 10)]\n if user_position is not None:\n user = leaderboard[user_position][1]\n wins = user[\"season\"]\n total = user[total_str]\n losses = user[total_str] - user[\"season\"]\n position = _(\n \"{member}, you're #{number} on the {leaderboard_type} leaderboard!\\n\"\n ).format(\n member=ctx.author.display_name,\n number=user_position + 1,\n leaderboard_type=leaderboard_type_str,\n )\n if leaderboard_type == \"season\":\n percent = (wins / total) * 100\n position += _(\"You have {wins}/{total} correct ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n elif leaderboard_type == \"worst\":\n percent = (losses / total) * 100\n position += _(\"You have {wins}/{total} incorrect ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n await ctx.send(position)\n await BaseMenu(\n source=LeaderboardPages(pages=leaderboard_list, style=leaderboard_type_str),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def score():\n # Get decision score for our example that came with the request\n data = flask.request.json\n x = np.matrix(data[\"example\"])\n score = PREDICTOR.predict_proba(x)\n # Put the result in a nice dict so we can send it as json\n results = {\"score\": score[0,1]}\n return flask.jsonify(results)", "def handle_recv(self,stream,msgs):\n pass", "def _run(self):\n self.running = True\n\n while self.running:\n try:\n print \"Response monitor running...\"\n\n # Get the message count\n messageCount = self.scHandle.amazonSQSManager.getQueueCount(self.scHandle.amazonSQSManager.responsesQueue)\n\n print '%i messages in queue...' 
% messageCount\n\n # Read a response\n response = self.scHandle.responseManager.getResponseFromResponsesQueue()\n\n # TODO: Do something with the response\n if response:\n print(response)\n\n except Exception, responseMonitorException:\n print \"Response monitor failed with exception %s.\" % str(responseMonitorException)\n\n finally:\n # Wait for a bit\n sleep(self.PAUSE_TIME)", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n tweet = None\n includes = {}\n errors = []\n matching_rules = []\n\n if \"data\" in data:\n tweet = Tweet(data[\"data\"])\n self.on_tweet(tweet)\n if \"includes\" in data:\n includes = self._process_includes(data[\"includes\"])\n self.on_includes(includes)\n if \"errors\" in data:\n errors = data[\"errors\"]\n self.on_errors(errors)\n if \"matching_rules\" in data:\n matching_rules = [\n StreamRule(id=rule[\"id\"], tag=rule[\"tag\"])\n for rule in data[\"matching_rules\"]\n ]\n self.on_matching_rules(matching_rules)\n\n self.on_response(\n StreamResponse(tweet, includes, errors, matching_rules)\n )", "def handle_message(self, msg):\n pass", "def handle_event(request):\n\n payload = json.loads(request.body)\n if payload['type'] == \"url_verification\":\n return JsonResponse({\"challenge\": payload['challenge']})\n elif payload['type'] == \"event_callback\":\n event = payload['event']\n if event['type'] == \"team_join\":\n slack_post(event['user']['id'], text=\"Welcome to LNL!\", content=views.welcome_message())\n elif event['type'] == \"app_home_opened\":\n load_app_home(event['user'])\n elif event['type'] == \"channel_created\":\n if settings.SLACK_AUTO_JOIN:\n join_channel(event['channel']['id'])\n return HttpResponse()\n return HttpResponse(\"Not implemented\")", "def api_handin_proclog(uuid):\n obj = request.payload\n\n # check uuid, so that we can prevent replay attack\n if obj['uuid'] != uuid:\n return 'uuid mismatch, do not attack'\n\n # load the handin object, and report error if not exist\n handin = Handin.query.filter(Handin.uuid == uuid).first()\n if not handin:\n return 'requested submission not found'\n\n # if handin.state != 'Accepted' and handin.state != 'Rejected',\n # the process must have exited without report the score.\n # mark such handin as \"Rejected\"\n if handin.state != 'Accepted' and handin.state != 'Rejected':\n handin.state = 'Rejected'\n handin.result = lazy_gettext('Process exited before reporting score.')\n handin.partials = []\n\n try:\n handin.exitcode = obj['exitcode']\n handin.stdout = obj['stdout']\n handin.stderr = obj['stderr']\n db.session.commit()\n except Exception:\n app.logger.exception('Cannot log proccess of submission(%s).' 
% uuid)\n return 'update database failed'\n\n return 'OK'", "def score():\n # Get probability from our data\n data = flask.request.json\n x = np.matrix(data[\"example\"])\n x_add = scaler.transform(x[0, (0,4,5,6,7,8)])\n x_scaled = np.delete(x, [0,4,5,6,7,8], axis=1)\n x_scaled = np.insert(x_scaled, (0,3,3,3,3,3), x_add, axis=1)\n prob = model.predict_proba(x_scaled)\n # Put the results in a dict to send as json\n results = {\"prob\": prob[0,1]}\n return flask.jsonify(results)", "def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))", "def process_response(self, sender, response):\n\t\tif sender is None or (sender.did_quit() and not self._is_resume(response)):\n\t\t\treturn self.process_invalid_response()\n\n\t\t# Generic logic for responding to any type of message goes here\n\t\t# if self._is_quit(response):\n\t\t# \treturn self.process_quit_response(sender)\n\t\tif self._is_quit(response):\n\t\t\treturn self.process_pause_response(sender)\n\t\telif sender.did_quit() and self._is_resume(response):\n\t\t\treturn self.process_resume_response(sender)\n\n\t\tlast_sent_message = Message.objects.get_last_sent_message_requiring_response(to=sender)\n\t\tif not last_sent_message:\n\t\t\treturn self.process_no_recent_message_response(sender, response)\n\n\t\tresponse_generator = ResponseCenter.RESPONSE_MAP.get(\n\t\t\tlast_sent_message._type, self.process_unrequired_response)\n\n\t\treturn response_generator(self, sender, last_sent_message, response)", "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])", "def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def handle(self, rsm_ctx):\n pass", "async def _receive_updated_response(self, data):\n serialized_text_responses = await serialize_text_algo_api_response(data)\n await self.send_serialized_data(serialized_text_responses)", "def sample_handler(controller, msg, pkt):\n pass", "def _response_handler_callback(response):\n response_data = json.loads(response)\n if ('status' in response_data and response_data['status'] != 1) or ('status' not in response_data):\n Mixpanel.LOGGER.warning(\"Bad API response: \" + response)\n raise RuntimeError('Import or Update Failed')\n Mixpanel.LOGGER.debug(\"API Response: \" + response)", "def callback(ch, method, properties, body):\n requestParams = json.loads(body.decode('utf-8'))\n # print(\"inside the callback\")\n arg1 = int(requestParams[0])\n arg2 = int(requestParams[1])\n result = whaleClassifier.test(arg1, arg2)\n # what this does it publish the RESULT to the exchange (as producers of content \n # cannot send stuff directly to queues, they send to exchanges and then exchanges \n # send to queues. 
Note Exchange='' is default exchange which then sends to the\n # queue that is listed on the ROUTING_KEY argument.)\n ch.basic_publish(exchange='', \n routing_key=results_queue, \n body=json.dumps(result),\n properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n ))\n # ch.basic_ack(delivery_tag=method.delivery_tag) #need this line so that we don't resend this same message again the next time\n # we start up this script. Which eventually clogs up memory", "def serve(self, event: Dict) -> Union[MSG_RETURN, None]:\n raw_msg = event['content']['body']\n for k in self.routes.keys():\n m = re.search(k, raw_msg, re.IGNORECASE)\n\n if m:\n\n matches = m.groupdict()\n route = matches.get('route')\n msg = matches.get('msg')\n\n func = self.routes.get(k)\n\n if func:\n\n logger.info(\n (\n 'matched route %s '\n 'with msg %s '\n 'from %s '\n 'and triggered \"%s\"'\n ),\n route, msg, raw_msg, func.__name__\n )\n\n return func(route, msg, event)\n\n return None", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def response(self, command_code, data):\n name, request_func, response_func = afpcommands.commands[command_code]\n return response_func(data)", "def process_response(self,response):\n return self.action.process_response(response)", "def __data_handler__(self, msg):\n print(msg)", "def _ngl_handle_msg(self, widget, msg, buffers):\n self._ngl_msg = msg\n\n msg_type = self._ngl_msg.get('type')\n if msg_type == 'request_frame':\n self.frame += self.player.step\n if self.frame >= self.count:\n self.frame = 0\n elif self.frame < 0:\n self.frame = self.count - 1\n elif msg_type == 'repr_parameters':\n data_dict = self._ngl_msg.get('data')\n name = data_dict.pop('name') + '\\n'\n selection = data_dict.get('sele', '') + '\\n'\n # json change True to true\n data_dict_json = json.dumps(data_dict).replace(\n 'true', 'True').replace('false', 'False')\n data_dict_json = data_dict_json.replace('null', '\"null\"')\n\n if self.player.widget_repr is not None:\n # TODO: refactor\n repr_name_text = widget_utils.get_widget_by_name(\n self.player.widget_repr, 'repr_name_text')\n repr_selection = widget_utils.get_widget_by_name(\n self.player.widget_repr, 'repr_selection')\n repr_name_text.value = name\n repr_selection.value = selection\n elif msg_type == 'request_loaded':\n if not self.loaded:\n # trick to trigger observe loaded\n # so two viewers can have the same representations\n self.loaded = False\n self.loaded = msg.get('data')\n elif msg_type == 'request_repr_dict':\n # update _repr_dict will trigger other things\n # see _handle_repr_dict_changed\n self._ngl_repr_dict = self._ngl_msg.get('data')\n elif msg_type == 'stage_parameters':\n self._ngl_full_stage_parameters = msg.get('data')\n elif msg_type == 'async_message':\n if msg.get('data') == 'ok':\n self._event.set()", "def ping_pong_tally(user_name, msg_logs, scorecard_map):\n for row in msg_logs:\n msg = ujson.loads(row[0])\n if is_my_outgoing_msg(msg):\n # I sent a msg, that shows my interest, therefore bump my pondness value\n scorecard_map[user_name].my_pval += 1\n else: # this is an incoming message from my friend\n # Someone sent me a msg, that shows their interest, therefore bump their pondness value\n scorecard_map[user_name].their_pval += 1", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n 
INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def handle(self, data):\n pass", "def update_score(self, score_msg, queuekey):\r\n cmap = CorrectMap()\r\n cmap.update(self.correct_map)\r\n for responder in self.responders.values():\r\n if hasattr(responder, 'update_score'):\r\n # Each LoncapaResponse will update its specific entries in cmap\r\n # cmap is passed by reference\r\n responder.update_score(score_msg, cmap, queuekey)\r\n self.correct_map.set_dict(cmap.get_dict())\r\n return cmap" ]
[ "0.667267", "0.60985816", "0.60786134", "0.60786134", "0.60626084", "0.60359406", "0.59885734", "0.59627366", "0.59537965", "0.592757", "0.5912877", "0.5900186", "0.58672774", "0.5839805", "0.5789892", "0.5711895", "0.56955504", "0.56955504", "0.56919116", "0.56762016", "0.56595826", "0.5656125", "0.56362826", "0.56351966", "0.56220245", "0.5610472", "0.560969", "0.5607208", "0.55896926", "0.558956", "0.5582991", "0.55781454", "0.5569742", "0.5569742", "0.55547345", "0.5553598", "0.5551508", "0.5551508", "0.55378634", "0.55378634", "0.5532469", "0.55299985", "0.5529478", "0.5526674", "0.5521924", "0.5521924", "0.55208385", "0.55195254", "0.55190825", "0.5514174", "0.5514174", "0.5514174", "0.5509213", "0.5504942", "0.55020267", "0.54954046", "0.5477347", "0.54742575", "0.54663044", "0.5453391", "0.5451478", "0.5451142", "0.54445565", "0.5442562", "0.5428104", "0.54252434", "0.5423443", "0.5410284", "0.540886", "0.54062563", "0.53950286", "0.5388765", "0.53877175", "0.5385798", "0.5368132", "0.53617835", "0.5356134", "0.5349869", "0.534744", "0.5347201", "0.5345627", "0.53298604", "0.5328176", "0.53270715", "0.53251314", "0.5324388", "0.5320045", "0.53153116", "0.5311721", "0.5310302", "0.5309159", "0.53035426", "0.5299822", "0.52975774", "0.52950317", "0.5290858", "0.52906394", "0.5289384", "0.5284434", "0.5283969" ]
0.53896457
71
Use for commands that can't be handled
def _unhandled(self, context, message, reason):
    # TODO: call host's method instead
    self._host.unhandled.append((context.str, message.serialize(), reason))
    self._host.expected[context.str] = None
    eprint("{}: Command {} can't be handled due to {}".format(self._host.name, message.serialize(), reason))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _command(self, *cmd, handler=None):", "def check_commands(self):\n pass", "def accept_command():\n # TODO", "def commands():", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def command():\n pass", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. {e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "def unknown_command(self, cmd, *parms):\n print >>sys.stderr, \"Unknown command '%s'\" % (cmd)", "def cmd(self):", "def execute(arg):\n print('Invalid command!!!')\n return", "def process_commands(self, commands: List[str]):", "def _command_processor(self, cmd: str) -> None:\n\n if cmd == \"translate\":\n oracion = self.session.prompt(\n \"... Texto en español: \",\n validator=TbSETValidator(\"text_max_len\"),\n complete_while_typing=False)\n\n self.translate(oracion)\n elif cmd == \"train\":\n confirmation = self.session.prompt(\"... This will take at least 30' with a GPU. Are you sure? 
(y/n): \",\n validator=TbSETValidator(\"yes_no\"))\n\n if confirmation in \"yY\":\n self.train()\n else:\n print(\"Wrong command, please try again.\\n\")", "def process_command(self, cmd, config):\n return None, None", "def do_command(self, args):\n pass", "def cmd(self, message):\n pass", "def cmd(self, context, message):\r\n return True", "def _setup_command(self):\r\n raise NotImplementedError", "def run_command_check(self):\n pass", "def _commands(self) -> Dict[str, List[str]]:\r\n pass", "def command(self):\n raise NotImplementedError", "def unknown_command(data):\n command = data['command']\n return 'unknown_command: {}'.format(command)", "async def on_command_error(self, ctx, err):\n if type(err) is CommandNotFound:\n await self.send_message(ctx, 'I don\\'t know how to do that.')", "def execute_command(self):\n raise Exception(\"Not implemented\")", "def handle_commands_preset(self,cl,addr) :\n self.curDir = ['CTF','Challenges','tempUser'+str(random.randint(100,999))]\n try :\n client = cl\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(\"\"\"\nCustom Shell Server With Limited Functionality\n\nNew User Login from {} at {}\n \\n\"\"\".format(addr[0],time.ctime()).encode())\n shellin = \"\" \n while True:\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(self.userp)\n shellin = client.recv(2048).decode().strip('\\n')\n if shellin == \"exit\" or shellin == \"exit \" or shellin ==\"exit \" or shellin ==\"exit \" :\n break\n elif shellin == \"\" :\n continue\n elif shellin.split()[0] in self.denied :\n client.send(self.err.format(shellin.split()[0]).encode())\n else :\n self.handle_extended_commands(client,addr,shellin)\n continue\n client.close()\n except Exception as E:\n print(E)\n print(Log(\"Connection with {} Terminated\".format(addr)))", "def handle_command(cmd_text):\n global keylist\n\n cmd = \"\"\n key = \"\"\n if \";\" in cmd_text:\n #key, cmd = cmd_text.split(\";\")\n key = get_key(cmd_text)\n cmd = get_command(cmd_text)\n\n if key in keylist:\n ui.report_verbose('skipping duplicate command')\n return False\n\n keylist.append(key)\n keylist = keylist[-numkeys:]\n else:\n # no key, always issue command\n key = \"no key\"\n cmd = cmd_text\n watermark = 0\n\n host, time, regex = parse_key(key)\n\n ui.verbose_entry(\"host\", host)\n ui.verbose_entry(\"time\", time)\n ui.verbose_entry(\"regex\", regex)\n\n if regex:\n matcher = re.compile(regex)\n if not matcher.match(host_name):\n ui.report_verbose(\"skipping - host name: \" + host_name + \" does not match regex: \" + regex)\n return True\n\n if verbose_mode:\n ui.verbose_entry(\"command\", cmd)\n ui.verbose_entry(\"key\", key)\n else:\n ui.info_entry(\"command\", cmd)\n\n lc.command(\":::\")\n lc.command(cmd)\n lc.flush_output();\n return True", "def __no_command(*args):\n pass", "def cmd_noop(args):", "def _handle_commands(self, event, session):\n message = event['body']\n\n for regex, func, help in self._COMMANDS:\n match = regex.match(message)\n if match is not None:\n func(self, event, session=session, **match.groupdict())\n return True\n\n return False", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with 
local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n\n if hasattr(ctx.command, 'on_command_error'):\n return\n\n # This prevents any cogs with an overwritten cog_command_error being handled here.\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n return\n\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(f'Command pas trouvé')\n return\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{ctx.command} has been disabled.')\n return\n\n if isinstance(error,commands.errors.PrivateMessageOnly):\n await ctx.message.delete()\n channel = await ctx.message.author.create_dm()\n await channel.send(f'{ctx.command} ne peut être exécuté que en message privé !!')\n return\n # For this error example we check to see where it came from...\n if isinstance(error, commands.BadArgument):\n await ctx.send('Mauvais arguments passés')\n return\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('Il manque des arguments à la commande')\n return\n # All other Errors not returned come here. And we can just print the default TraceBack.\n logger.error(f'Ignoring exception in command {ctx.command} : {type(error)} {error} {error.__traceback__}')", "def _get_supported_commands(self):\n logger.info(\"Default unconfigured API, not adding any commands!\")\n pass", "def mainCommand(self, args):\r\n command = args.pop(0).lower() # calls exception if no arguments present\r\n if command in vars(CommandManager):\r\n vars(CommandManager)[command](self, *args) # calls exception if wrong amount of arguments\r", "def additional_command(self):\n pass", "def runCommand(self): \\\n # pylint: disable=no-self-use", "async def on_command_error(ctx: commands.Context, error: commands.CommandError):\n if isinstance(error, commands.CommandNotFound):\n message = f\"This command is not listed in {bot.user} dictionary. Please try again.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n # return # Return because we don't want to show an error for every command not found\n elif isinstance(error, commands.CommandOnCooldown):\n message = f\"This command is on cooldown. Please try again after {round(error.retry_after, 1)} seconds.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.MissingPermissions):\n message = \"You are missing the required permissions to run this command!\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.NoPrivateMessage):\n message = \"Private messages only. How cute.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.MissingRequiredArgument):\n message = \"Command is missing an argument. 
Try again.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.CheckFailure):\n message = \"You do not have the permissions to do this.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, (commands.MissingRole, commands.MissingAnyRole)):\n message = \"You don't have any role to run this command.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n else:\n message = \"Oh no! Something went wrong while running the command!\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)", "def cmd_not_understood(self, line):\n self.respond('500 Command \"%s\" not understood.' %line)", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def process_dead_command(self):\n command_terminal = adapter_serializers.CommandTerminal(data=self.command)\n if not command_terminal.is_valid():\n logger.error('Receive an invaid data : {}'.format(command_terminal.format_errors()))\n raise natrix_exceptions.TriggerBugException(\n message=u'command is invalid: {}'.format(command_terminal.format_errors())\n )\n\n if not command_terminal.process():\n # TODO:\n logger.error('failed')\n else:\n logger.info('success')", "def invalid_command(response): # string -> interaction\r\n print(\"Sorry; '\" + response + \"' isn't a valid command. Please try again.\")", "def _process_commands(self, pwd, cmds):\n if self.func_map.get(cmds[0]):\n func = self.func_map[cmds[0]]\n \n args, kwargs = self._get_args(cmds[1:]) \n err_msg = self._check_input(func, args, kwargs)\n if err_msg: return err_msg\n \n _, return_msg = func(*args, **kwargs)\n\n else:\n return_msg = '[error]: no cmd found.'\n return return_msg", "def handle_command(self, command, players, user, channel):\r\n response = self.help()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n elif command[0] == self.NEW_GAME_COMMAND:\r\n return self.new_game(players, channel)\r\n \r\n elif command[0] == self.TARGET_COMMAND:\r\n return self.target(user)\r\n \r\n elif command[0] == self.SURVIVORS_COMMAND:\r\n return self.survivors()\r\n \r\n elif command[0] == self.EXPIRE_COMMAND:\r\n return self.expire(channel)\r\n \r\n elif command[0] == self.REMOVE_COMMAND:\r\n return self.remove(command, channel)\r\n \r\n elif command[0] == self.KILL_COMMAND:\r\n (success, response) = self.kill(user, command)\r\n if success and self.game.get_active_channel() != \"\" and channel != self.game.get_active_channel():\r\n post_to_channel(self.game.get_active_channel(), response)\r\n return \"\"\r\n \r\n elif command[0] == self.LOAD_LAST_GAME_COMMAND:\r\n return self.load_last_game(channel)\r\n \r\n return response", "def handle_admincommands(bot, ievent):\n cmnds = getcmndtable()\n if not ievent.rest: ievent.reply(\"commands: \", cmnds)\n else:\n try: ievent.reply(\"%s command is found in %s \" % (ievent.rest, cmnds[ievent.rest]))\n except KeyError: ievent.reply(\"no such commands available\")", "def handle_invalid_command(self, msg):\n return self.create_response(Command.INVALID_COMMAND.value)", "async def on_command_error(\n self,\n ctx: commands.Context,\n error: 
commands.CommandError\n ):\n # Skips errors that were already handled locally.\n if getattr(ctx, 'handled', False):\n return\n\n if isinstance(error, commands.NoPrivateMessage):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Command Can Not Be Used In Direct Messages`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.TooManyArguments):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Passed In Too Many Arguments`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.NSFWChannelRequired):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`This Channel Is Not NSFW`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.CommandNotFound):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Not Found`', #Todo - Possibly remove this\n color=0xFF0000 #Because its kinda annoying ngl\n )\n await ctx.send(embed=embed)\n \n elif isinstance(error, discord.Forbidden):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Discord Is Restricting Command Execution`',\n color=0xFF0000\n )\n embed.add_field(\n name='Possiblities',\n value='`You Are Trying To Use This Command On Someone Who Is Higher Than Either The Bot Or You`',\n inline=True\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.MissingRequiredArgument):\n embed = discord.Embed(\n title='Oops!',\n description=f'Command Failed To Execute. Reason:\\n`Missing Required Argument:`\\n`{error.param.name}`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif (\n isinstance(error, commands.NotOwner)\n or isinstance(error, commands.MissingPermissions)\n ):\n embed = discord.Embed(\n title='Oops',\n description='Command Failed To Execute. Reason:\\n`Missing Permissions`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif (\n isinstance(error, commands.CommandOnCooldown)\n or isinstance(error, commands.CheckFailure)\n ):\n embed = discord.Embed(\n title='Oops',\n description='Command Failed To Execute. Reason\\n```{error}```',\n color=0xFF0000\n ) \n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.DisabledCommand): #SoonTM\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Command Is Disabled`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.BadArgument):\n embed = discord.Embed(\n title='Oops!',\n description=f'Command Failed To Execute. Reason:\\n`Bad Argument`\\n```{error}```',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.BotMissingPermissions):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Bot Is Missing Permissions`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n log.error(\n f'{ctx.command.qualified_name} cannot be executed because the '\n f'bot is missing the following permissions: '\n f'{\", \".join(error.list)}'\n )\n\n elif isinstance(error, commands.CommandInvokeError):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`INTERNAL ERROR`',\n color=0xFF0000 \n )\n embed.set_footer(text='Please Contact Tylerr#6979 For Help')\n await ctx.send(embed=embed)\n log.error(\n f'{ctx.command.qualified_name} failed to execute. 
',\n exc_info=error.original\n )", "def getCommands(self):", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, ignored):\n return\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{self.bot.settings.prefix}{ctx.command} has been disabled.')\n return\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.channel.send(f'{self.bot.settings.prefix}{ctx.command} can not be used in Private Messages.')\n except:\n pass\n return\n\n elif isinstance(error, commands.BadArgument):\n await ctx.send(f'Refer to.{self.bot.settings.prefix}help {ctx.command}')\n return\n\n elif isinstance(error, commands.BotMissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'I need the **{fmt}** permission(s) to run this command.')\n return\n\n if isinstance(error, commands.MissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'You need the **{fmt}** permission(s) to use this command.')\n return\n\n # All other Errors not returned come here... And we can just print the default TraceBack.\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "def _transform_command(self) -> None:\n self.command = None if self.command == [] else self.command", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. 
Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def test_unknown_command(self):\n\n self.assertRaises(commands.CommandNotFoundError,\n self.commands.run_command,\n '<unknown_command>', '')", "def at_pre_cmd(self):\n pass", "def default(self, line):\n print \"Command not found\\n\"", "def processCommand(self, command, args):\n\n commandMap = { \n \"new\" : self.createNewList,\n \"view\" : self.trelloView,\n \"add\" : self.trelloAddCard, \n \"remove\" : self.trelloDeleteCard, \n }\n\n if command not in commandMap: return \">> Command not found\" \n \n return commandMap[command](args)", "def state_COMMAND(self, command):\n\t\tif command.strip() == '':\n\t\t\tself.consecutiveErrors = self.consecutiveErrors + 1;\n\t\t\tif self.consecutiveErrors == 10:\n\t\t\t\tself.sendCode(221, 'Too Many Consectutive Protocol Errors (Your talking shit, Go Away)')\n\t\t\t\tself.do_QUIT()\n\t\t\treturn False;\n\t\tself.consecutiveErrors = 0\n\t\tsplits = command.split(None)\n\t\tmethod = getattr(self, 'do_' + splits[0].upper(), None)\n\t\tif method is not None:\n\t\t\tmethod(splits[1:])\n\t\telse:\n\t\t\tself.sendCode(500, 'Command Not Implemented')", "async def command_interpreter(self, command: str) -> None:\n for cls in GlobalCommandRegistry:\n if not asyncio.iscoroutinefunction(GlobalCommandRegistry[cls].main):\n continue\n if command.startswith(tuple(GlobalCommandRegistry[cls].helper['name'])):\n result = await asyncio.gather(GlobalCommandRegistry[cls](command, self.print_queue).main())\n if result is False:\n print(\"Result is false?!\")\n raise KeyboardInterrupt", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, 
bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def _fake_execute(self, *cmd, **kwargs):\n cmdlist = list(cmd)\n exe = cmdlist.pop(0)\n if exe == 'vgc-cluster':\n exe = cmdlist.pop(0)\n if exe == \"request-cancel\":\n self._request_cancel = True\n if self._return_blocked > 0:\n return 'Request cancelled', ''\n else:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif self._fail_vgc_cluster:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif exe == \"--version\":\n return \"HGST Solutions V2.5.0.0.x.x.x.x.x\", ''\n elif exe == \"space-list\":\n return self._parse_space_list(cmdlist)\n elif exe == \"space-create\":\n self._parse_space_create(cmdlist)\n if self._return_blocked > 0:\n self._return_blocked = self._return_blocked - 1\n out = \"VGC_CREATE_000002\\nBLOCKED\\n\"\n raise processutils.ProcessExecutionError(stdout=out,\n exit_code=1)\n return '', ''\n elif exe == \"space-delete\":\n return self._parse_space_delete(cmdlist)\n elif exe == \"space-extend\":\n return self._parse_space_extend(cmdlist)\n elif exe == \"host-storage\":\n if self._fail_host_storage:\n raise processutils.ProcessExecutionError(exit_code=1)\n return HGST_HOST_STORAGE, ''\n elif exe == \"domain-list\":\n return self._parse_domain_list()\n elif exe == \"network-list\":\n return self._parse_network_list()\n elif exe == \"space-set-apphosts\":\n if self._fail_set_apphosts:\n raise processutils.ProcessExecutionError(exit_code=1)\n return '', ''\n else:\n raise NotImplementedError\n elif exe == 'ip':\n if self._fail_ip:\n raise processutils.ProcessExecutionError(exit_code=1)\n else:\n return IP_OUTPUT, ''\n elif exe == 'dd':\n self.dd_count = -1\n for p in cmdlist:\n if 'count=' in p:\n self.dd_count = int(p[6:])\n elif 'bs=' in p:\n self.bs = p[3:]\n return DD_OUTPUT, ''\n else:\n return '', ''", "def command_processor():\n MOVEMENT = [\"up\",\"right\",\"down\",\"left\",\"north\",\"west\",\"east\",\"south\",\"u\",\"r\",\"l\",\"d\"]\n FISHING = [\"fish\",\"fishing\",\"f\"]\n HELP = [\"help\",\" h \"]\n EAT = [\"eat\",\"eating\",\"refuel\",\"nom\",\"e\"]\n \n validity = 0\n while validity == 0:\n command = []\n action = input(\"Enter a command: \").lower()\n action = ' ' + action + ' '\n for direction in MOVEMENT:\n direction = ' ' + direction + ' '\n if direction in action:\n command.append('movement')\n command.append(direction)\n validity += 1\n\n for fish in FISHING:\n fish = ' ' + fish + ' '\n if fish in action:\n command.append('fishing')\n command.append(fish)\n validity +=1\n\n for help in HELP: \n help = ' ' + help + ' '\n if help in action:\n command.append(\"help\")\n command.append(help)\n validity += 1\n\n for eat in EAT:\n eat = ' ' + eat + ' '\n if eat in action:\n command.append(\"eat\")\n command.append(eat)\n validity += 1\n \n if validity > 1:\n print(\"Please type less keywords\")\n validity = 0\n elif validity == 1:\n return command\n elif validity == 0:\n color.write(\"Please enter a keyword. Enter help for instructions.\\n\\n\",\"ERROR\")", "async def hockey_commands(self, ctx: commands.Context) -> None:\n pass", "def on_command(server, user, command, args):", "def unknown_command(update: Update, context: CallbackContext) -> None:\n logging.info(update.message.text)\n message = r\"Ой, а такой команды я не знаю\\.\\.\\. 
Попробуй /help\"\n\n update.message.reply_markdown_v2(message)", "async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def _handle_bot_command(self, bot_command: BotCommand) -> str:\n try:\n player = self.ping_pong_service.get_player(bot_command.sender_id)\n except pingpong_service.PlayerDoesNotExist:\n self.ping_pong_service.add_new_player(bot_command.sender_id)\n return responses.new_player()\n\n if bot_command.command_type is None:\n return responses.unknown_command()\n elif bot_command.command_type == CommandType.HELP:\n return responses.help()\n elif bot_command.command_type == CommandType.NAME:\n if bot_command.command_value:\n success = self.ping_pong_service.update_display_name(player, bot_command.command_value.lower())\n if success:\n return responses.name_updated(bot_command.command_value.lower())\n else:\n return responses.name_taken()\n else:\n return responses.name(player.name)\n elif bot_command.command_type == CommandType.MATCH:\n return self._handle_match_command(bot_command.command_value)\n elif bot_command.command_type == CommandType.STATS:\n name = bot_command.command_value\n if name:\n try:\n rating, wins, losses, ratio = self.ping_pong_service.get_player_stats(name)\n return responses.player_stats(name, rating, ratio, wins, losses)\n except pingpong_service.PlayerDoesNotExist:\n return responses.player_does_not_exist()\n else:\n return responses.stats(\n self.ping_pong_service.get_total_matches(), self.ping_pong_service.get_leaderboard()\n )\n elif bot_command.command_type == CommandType.UNDO:\n return responses.unknown_command()\n # w_name, w_rating, l_name, l_rating = pingpong_service.undo_last_match()\n # return responses.match_undone(w_name, w_rating, l_name, l_rating)\n return responses.unknown_command()", "def safe_known_command(self, command):\n return self._known_command(command, self.safe_do_command)", "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def test_unkown_command(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"something\")\n self.assertEqual(f.getvalue().strip(),\n \"*** Unknown syntax: something\")", "def not_found(client, server, command):\n server.logger.info(\"BLACKLIST {} : {}\".format(client.ip, command))\n client.send(\"sh: {}: command not found\\n\".format(command))\n client.exit_status = 127", "def process_command(self, command, discord_id):\n\n try:\n character = self.known_characters[discord_id]\n except KeyError:\n print(\"Process_command got message from unregistered player, this should not happen\")\n return\n\n character.clear_log()\n self.current_character = character # this is for directing log messages to the appropriate log\n # it is reset at the start of 
every turn obviously\n\n splitted = command.split(\" \", maxsplit=1) # just take off the first verb for use as command\n if len(splitted) == 1:\n cmd = splitted[0]\n words = \"\"\n else:\n cmd, words = splitted\n if cmd not in self.command_dict.keys():\n character.log(\"Unrecognised command: {}\", cmd)\n return character.print_log() # return early because couldn't do anything\n else:\n executable_command = self.command_dict[cmd]\n # the name of the command as it appears in the object's __dict__\n\n if executable_command == \"on_status\":\n # special command with no target object, just prints player stats and return early\n character.report_status()\n return character.print_log()\n\n resolution_order = [character.equipped, character.items, character.visible_things] # reset everytim\n if executable_command == \"on_take\":\n resolution_order.reverse() # player wants to take visible things, not equipped things.\n\n args = []\n target = None\n\n for ls in resolution_order:\n # the order of these lists is important: items equipped or held by the player\n # must take precedence, otherwise if a player tries to unequip a worn item in a\n # room that contains an item with the same name, the command dispatcher might pick up\n # the room's version of the item first and fail to unequip it. These cases should be rare.\n for k in ls:\n # first check for exact words\n if k.__doc__ in words:\n if target is None:\n target = k # target first, then args, to cope with \"use x on y\"\n else:\n args.append(k)\n\n if len(args) == 0 and len(words) > 0:\n for ls in resolution_order:\n # then check for partially-typed words if nothing was found\n for k in ls:\n if words in k.__doc__:\n if target is None:\n target = k\n else:\n args.append(k)\n\n if executable_command == \"on_go\":\n for direction in [\"north\", \"south\", \"east\", \"west\"]:\n # all directions are permitted because if it's not valid it will be caught by\n # the room's on_go function\n if direction in words:\n args.append(direction)\n target = character.location\n\n if target is None:\n\n if len(words) > 0:\n character.log(\"Unrecognised target: {}.\", words)\n return character.print_log()\n\n if executable_command == \"on_attack\":\n # player might have mistyped a name or just attack with no monster, consistently pick the\n # first monster for them to attack, if present. If not, pass it on to self.location\n # which will of course fail\n if character.check_if_monsters():\n target = character.monsters_in_play[0]\n\n else:\n # either the player typed (\"look\"), which is just to look at the room,\n # or they typed any other no-argument command which is handled by\n # the MyItem class e.g. status, quit\n target = character.location\n\n try:\n to_run = target.__getattribute__(executable_command)\n # look up the command in target's dictionary\n\n except AttributeError:\n character.log(\"Can't {} this.\", cmd)\n return character.print_log()\n\n # THE IMPORTANT PART #\n to_run(*args) # evaluate the command we looked up, passing the arguments the player typed\n\n if not (executable_command in [\"on_go\", \"on_look\", \"on_attack\"]):\n # monsters only attack if the player is still, otherwise they'd attack every time the\n # player ran and running would be pointless\n # not really fair to have the look command trigger attacks either, but anything else\n # is fair game e.g. 
interacting with objects\n for mon in character.monsters_in_play:\n mon.attack_player()\n\n if not executable_command == \"on_look\":\n # only process heartbeats if the player command actually did something\n for item in self.registered_countdowns:\n item.heartbeat()\n\n return character.print_log()", "def commands():\n # Check invalid command\n check50.run(run_command).stdin(\"cs50\").stdout(\"Invalid command.\")\n\n # Check for upper case abreviation\n try:\n check50.run(run_command).stdin(\"W\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")\n\n # Check for lower case abbreviation\n try:\n check50.run(run_command).stdin(\"w\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")", "def execute_command(command):\r\n if 0 == len(command):\r\n return\r\n\r\n if command[0] in verbs[\"move\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"go where?\")\r\n else:\r\n execute_go(command[1])\r\n\r\n elif command[0] in verbs[\"take\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Take what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_take(item_id)\r\n\r\n elif command[0] in verbs[\"drop\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Drop what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_drop(item_id)\r\n\r\n elif command[0] in verbs[\"use\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"use what?\")\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n execute_use(item_id)\r\n\r\n elif command[0] in verbs[\"look\"]:\r\n if len(command) == 1:\r\n print_room(current_room)\r\n elif command[1] in nouns[\"inventory\"]:\r\n print_inventory_items(inventory)\r\n elif command[1] in nouns[\"self\"]:\r\n print_condition()\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if item_id in inventory.keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif item_id in current_room[\"items\"].keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif entity_id in current_room[\"entities\"].keys():\r\n wrap_print(entities[entity_id][\"description\"])\r\n else:\r\n wrap_print(\"You can not view that.\")\r\n\r\n elif command[0] in verbs[\"attack\"]:\r\n if len(command) > 2:\r\n item_id = get_multi_word_string(command, items)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if len(command) <= 1:\r\n wrap_print(\"attack what?\")\r\n elif entity_id not in current_room[\"entities\"].keys():\r\n wrap_print(\"You cannot attack that.\")\r\n elif len(command) <= 2:\r\n wrap_print(\"What with?\")\r\n elif item_id not in inventory.keys():\r\n wrap_print(\"You do not have a that item.\")\r\n elif items[item_id][\"damage\"] == False:\r\n wrap_print(\"You cannot attack using that item.\")\r\n else:\r\n execute_attack(entity_id, item_id)\r\n\r\n elif command[0] == \"help\":\r\n print(\"To move in a given direction 
type: go <DIRECTION>\")\r\n print(\"To pick up an item type: take <ITEM>\")\r\n print(\"To drop an item type: drop <ITEM>\")\r\n print(\"To use an item type: use <ITEM>\")\r\n print(\"To look at something of interest type: view <ITEM>\")\r\n print(\"to attack a character type: attack <CHARACTER> with <item>\")\r\n print(\"to : attack <CHARACTER> with <item>\")\r\n print(\"To quit the game type: quit\\n\")\r\n wrap_print(\"\"\"Verb variations are supported, so 'run south', or 'inspect item' are valid inputs.\"\"\")\r\n wrap_print(\"\"\"Items and characters with multiple words in their name are also supported like regular items.\"\"\")\r\n\r\n elif command[0] == \"quit\":\r\n if len(command) == 1:\r\n wrap_print(\"goodbye!\")\r\n global playing\r\n playing = False\r\n\r\n else:\r\n wrap_print(\"That makes no sense.\")", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def on_command(self, game) -> None:\n pass", "def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True", "def error_check(command):\r\n\r\n # TODO\r", "async def custom(self, ctx):\n if ctx.invoked_subcommand is None:\n raise commands.CommandNotFound(\"Subcommand '{}' does not exist.\".format(ctx.subcommand_passed))", "def _find_next_commands(self, task_ex):\n raise NotImplementedError", "def handle_command(self, command, channel, user):\r\n response = \"Hello. Type \\\"@hexbot help\\\" for more information\"\r\n command = command.split()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n if command[0] == self.HELP_COMMAND:\r\n response = self.help()\r\n elif command[0] == self.DEBUG_COMMAND:\r\n response = self.debug(command, channel);\r\n elif command[0] == self.ASSASSIN_COMMAND:\r\n command.pop(0)\r\n response = self.assassin(command, channel, user);\r\n \r\n return response", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if 
self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "async def on_message(message):\n #Before doing anything\n #Check to see if the message started with the command character\n if not message.content.startswith(commandCharacter):\n #If it didn't, return\n return\n \n #Ensure the bot wasn't the one who sent the message\n if message.author == client.user:\n #If it was, return\n return\n \n #Kill is checked by default (cannot be disabled)\n if message.content.startswith(commandCharacter+'kill'):\n await client.send_message(message.channel, 'Goodbye Forever...')\n await client.logout()\n os.system('stty sane')\n exit(0)\n \n #Parse through the list of all enabled commands\n for command in enabledCommands:\n #We want to ignore case when comparing the message content\n messageContent = message.content.lower()\n #If the message matches one of our commands, we will handle it\n #Requires whitespace after command name\n if messageContent.startswith(commandCharacter+command):\n await handleCommand(client, message, voicePlayerList)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def get_command(self, player):\n last_output = player._program_output[-1]\n if last_output == \"COMMAND\":\n return self._cmd_main(player)\n elif last_output == \"SHIELD CONTROL INOPERABLE\": # I don;t think this can happen. 
It always prints \"COMMAND\" after an error\n # TODO Should check all the error messages to COMMAND, like \"SHIELD CONTROL INOPERABLE\", and handle them.\n return self._cmd_main(player) # Pick a different command.\n elif last_output == \"PHOTON TORPEDO COURSE (1-9)\":\n return self._cmd_torpedos(player)\n elif last_output == \"COMPUTER ACTIVE AND AWAITING COMMAND\":\n return self._cmd_computer(player)\n elif last_output == \"COURSE (0-9)\":\n return self._cmd_course(player)\n elif last_output.endswith(\"NUMBER OF UNITS TO SHIELDS\"):\n return self._cmd_shield_units(player)\n elif last_output == \"WARP FACTOR (0-8)\" or last_output == 'WARP FACTOR (0-0.2)':\n return self._cmd_warp(player)\n elif last_output == ' INITIAL COORDINATES (X,Y)' or last_output == ' FINAL COORDINATES (X,Y)':\n return self._cmd_coords(player)\n elif last_output == \"NUMBER OF UNITS TO FIRE\":\n return self._cmd_pha_units(player)\n elif last_output == \"LET HIM STEP FORWARD AND ENTER 'AYE'\":\n return self._cmd_aye(player)\n elif last_output == \"WILL YOU AUTHORIZE THE REPAIR ORDER (Y/N)\":\n return self._cmd_repair(player)\n\n raise Exception(F\"Unknown prompt in trek_bot: '{last_output}'\")", "def handle_command(robot_name, command):\n (command_name, arg) = split_command_input(command)\n if \" \" in arg:\n (digit, rev) = arg.split(' ')\n\n\n if command_name == 'off':\n return False\n elif command_name == 'help':\n (do_next, command_output) = do_help()\n elif command_name == 'forward':\n (do_next, command_output) = do_forward(robot_name, int(arg))\n elif command_name == 'back':\n (do_next, command_output) = do_back(robot_name, int(arg))\n elif command_name == 'right':\n (do_next, command_output) = do_right_turn(robot_name)\n elif command_name == 'left':\n (do_next, command_output) = do_left_turn(robot_name)\n elif command_name == 'sprint':\n (do_next, command_output) = do_sprint(robot_name, int(arg))\n elif command_name == 'replay' and arg == 'silent':\n (do_next, command_output) = replay_silent(robot_name,command)\n elif command_name == 'replay' and arg == 'reversed':\n (do_next, command_output) = do_replay_reversed(robot_name,command)\n elif command_name == 'replay' and arg == 'reversed silent':\n (do_next, command_output) = do_Replay_reversed_silent(robot_name,command)\n elif command_name == 'replay' and is_int(arg):\n (do_next, command_output) = replay_limit_range(robot_name,command)\n elif command_name == 'replay' and '-' in arg:\n (do_next, command_output) = replay_limit_parameters(robot_name,command)\n elif command_name == 'replay' and arg == '':\n (do_next, command_output) = replay_commands(robot_name,command)\n elif command_name == 'replay' and is_int(digit) and rev == 'reversed':\n (do_next, command_output) = do_replay_limit_reversed(robot_name,command)\n elif command_name == 'replay' and is_int(digit) and rev == 'silent':\n (do_next, command_output) = do_replay_limit_range_silent(robot_name,command)\n \n \n\n print(command_output)\n show_position(robot_name)\n return do_next", "def test_unsupportedCommand(self):\n self.server.lineReceived(b\"001 HULLABALOO\")\n self.assertEqual(self.transport.value(),\n b\"001 BAD Unsupported command\\r\\n\")", "def _process_command(self, command, opts):\n command_type = postproc.get_structure_type(command)\n if not opts.Ignore_motion and command_type == MOTION_COMMAND:\n return _process_motion_command(command, opts)\n elif not opts.Ignore_IOs and command_type == IO_COMMAND:\n return _process_io_command(command, opts)", "def handle_command(command, channel, user):\n 
response = GENERAL_COMMANDS[\"unsupported\"]\n \n if command.lower() in CONTRACT_COMMANDS:\n response = CONTRACT_COMMANDS[command.lower()]\n\n elif command.lower() in GENERAL_COMMANDS:\n response = GENERAL_COMMANDS[command.lower()]\n \n slack_client.api_call(\"chat.postMessage\", channel=channel,\n text=response, as_user=True)", "def _execute_impl(self, commands):\n raise NotImplementedError(\"abstract method\")", "def ConsoleRun(self, command, sender):\n pass", "async def on_command_error(self, ctx, error):\n\n if hasattr(ctx.command, 'on_error'):\n return\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n error = getattr(error, 'original', error)\n\n if isinstance(error, ignored):\n return\n \n elif isinstance(error, commands.DisabledCommand):\n return await ctx.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} has been disabled.'))\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n return await ctx.author.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} can not be used in DMs.'))\n except:\n pass\n\n\n elif isinstance(error, bot_tools.AdminCheckFailure):\n return await ctx.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} can only be used by admins.'))\n\n elif isinstance(error, bot_tools.OwnerCheckFailure):\n return await ctx.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} can only be used by the server owner.'))\n\n\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "async def handle_unexpected_error(ctx: Context, e: errors.CommandError) -> None:\n await ctx.send(\n f\"Sorry, an unexpected error occurred. 
Please let us know!\\n\\n\"\n f\"```{e.__class__.__name__}: {e}```\"\n )\n\n ctx.bot.stats.incr(\"errors.unexpected\")\n\n with push_scope() as scope:\n scope.user = {\n \"id\": ctx.author.id,\n \"username\": str(ctx.author)\n }\n\n scope.set_tag(\"command\", ctx.command.qualified_name)\n scope.set_tag(\"message_id\", ctx.message.id)\n scope.set_tag(\"channel_id\", ctx.channel.id)\n\n scope.set_extra(\"full_message\", ctx.message.content)\n\n if ctx.guild is not None:\n scope.set_extra(\n \"jump_to\",\n f\"https://discordapp.com/channels/{ctx.guild.id}/{ctx.channel.id}/{ctx.message.id}\"\n )\n\n log.error(f\"Error executing command invoked by {ctx.message.author}: {ctx.message.content}\", exc_info=e)", "def cmd(self, cmd):\n return cmd", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def func(self):\n try:\n if not self.switches or \"all\" in self.switches:\n self.list_favor()\n elif \"set\" in self.switches or \"add\" in self.switches:\n self.add_favor()\n elif \"remove\" in self.switches:\n self.remove_favor()\n else:\n raise CommandError(\"Invalid switch.\")\n except CommandError as err:\n self.msg(err)\n else:\n self.mark_command_used()", "def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)", "def test_bad_command1(self):\n with self.assertRaises(ValueError):\n command = Command('Fake Command1')", "def normal(self):\n self.run_command('normal')", "def at_post_cmd(self):\n pass", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. 
All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "async def _run_command(self, command, *args, **kwargs):\n pass", "async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ):\n log.debug(\"The Error Handler was invoked to handle an error\")\n\n trace = \"\".join(\n traceback.format_exception(type(error), error, error.__traceback__)\n )\n trace = trace.strip()\n\n if hasattr(ctx.command, \"on_error\"):\n log.debug(\"Invoked, but will not override command's own error handler\")\n return\n\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n log.debug(\"Invoked, but will not override cog's own error handler\")\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n ignored = (commands.CommandNotFound,)\n\n if isinstance(error, ignored):\n log.debug(f\"Ignored exception {type(error)} - {error}\")\n return\n\n # Check for specific exceptions to be handled\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f\"{ctx.command} has been disabled.\")\n\n elif isinstance(error, commands.CommandOnCooldown):\n try:\n await ctx.send(str(error))\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.author.send(\n f\"{ctx.command} can not be used in Private Messages.\"\n )\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.errors.CheckFailure):\n log.debug(f\"A command was called, but a check failed. Trace: \\n{trace}\")\n\n elif isinstance(error, commands.MissingRequiredArgument):\n log.debug(f\"A command was missing a required argument. Trace: \\n{trace}\")\n try:\n await ctx.send(\"```\\nUsage:\\n\" + ctx.command.help + \"```\")\n except discord.HTTPException:\n pass\n\n elif isinstance(error, merrors.MiltonInputError):\n # Send feedback to user\n try:\n await ctx.send(error.msg)\n except discord.HTTPException:\n pass\n\n else:\n # All other Errors not returned come here.\n # Skip the prompt line\n if \"CommandInterface\" in self.bot.cogs:\n print(\"\")\n\n log.error(f\"Ignoring exception in command {ctx.command}:\\n\" f\"{trace}\")\n\n # Re-print the handle for the CLI cog\n if \"CommandInterface\" in self.bot.cogs:\n print(\">> \", end=\"\")" ]
[ "0.7336675", "0.7285146", "0.7252042", "0.70243955", "0.6982735", "0.6982735", "0.6982735", "0.6982735", "0.6879094", "0.67902136", "0.67638505", "0.6750508", "0.67460364", "0.6706385", "0.66952014", "0.66615903", "0.6648451", "0.6611464", "0.65597415", "0.6558557", "0.653128", "0.65279436", "0.65165573", "0.65025306", "0.649691", "0.64261323", "0.6405249", "0.6402391", "0.6397524", "0.63842523", "0.6376105", "0.6362293", "0.6362117", "0.6360058", "0.63345855", "0.6332264", "0.63263464", "0.6324785", "0.6322544", "0.6314247", "0.6307266", "0.63062817", "0.629657", "0.62789416", "0.6276269", "0.62732035", "0.6269491", "0.62609917", "0.6251915", "0.6249648", "0.6238897", "0.62323505", "0.622903", "0.62172127", "0.6208194", "0.6203508", "0.61996347", "0.6199404", "0.6188536", "0.6185449", "0.61796224", "0.6176149", "0.6173438", "0.61700815", "0.61666787", "0.6162986", "0.6159328", "0.6156853", "0.61547744", "0.6151212", "0.61412597", "0.6134187", "0.6125218", "0.61247694", "0.6099776", "0.609394", "0.6090887", "0.60888946", "0.60884947", "0.608669", "0.60835564", "0.60790735", "0.6073376", "0.6063006", "0.60616076", "0.6057159", "0.6050891", "0.60504895", "0.6046879", "0.60424966", "0.6041711", "0.6039592", "0.60388565", "0.6022692", "0.6015751", "0.5998989", "0.5998414", "0.5991389", "0.59834796", "0.59807724", "0.5969405" ]
0.0
-1
Coverage summary tuple with amount of cases and covered cases. May be used to determine coverage percentage
def coverage(self): return 0, 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_project_test_coverage(self) -> None:\n print_statistics = {}\n total_number_columns = 0\n number_columns_without_tests = 0\n\n for model_name in self.dbt_tests.keys():\n columns = self.dbt_tests[model_name]\n\n model_number_columns = 0\n model_columns_without_tests = 0\n\n for column in columns:\n total_number_columns += 1\n model_number_columns += 1\n\n if len(column[\"tests\"]) == 0:\n number_columns_without_tests += 1\n model_columns_without_tests += 1\n\n print_statistics[model_name] = self.calculate_coverage_percentage(\n misses=model_columns_without_tests, total=model_number_columns\n )\n\n print_statistics[\"\"] = \"\"\n print_statistics[\"Total\"] = self.calculate_coverage_percentage(\n misses=number_columns_without_tests, total=total_number_columns\n )\n\n self.create_table(\n title=\"Test Coverage\",\n columns=[\"Model Name\", r\"% coverage\"],\n data=print_statistics,\n )", "def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n COV.html_report()\n COV.erase()\n return 0\n return 1", "def task_coverage():\n return {\n 'actions': ['py.test --cov nikola --cov-report term-missing tests/'],\n 'verbosity': 2,\n }", "def coverage_stats(self) -> (int, int):\n covered = sum(1 for line in self.source_code if line.coverage > 0)\n lines = sum(1 for line in self.source_code if line.coverage >= 0)\n return (covered, lines)", "def derive_project_coverage(self) -> None:\n self.get_project_column_description_coverage()\n self.get_project_test_coverage()", "def cov():\n tests = unittest.TestLoader().discover('tests')\n result = unittest.TextTestRunner(verbosity=1).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def show_coverage_report(bv, save_output=False):\n if no_coverage_warn():\n return\n num_functions, blocks_covered, blocks_total = covdb.get_overall_function_coverage()\n title = \"Coverage Report for %s\" % covdb.module_name\n report = \"%d Functions, %d blocks covered of %d total\\n\" % (num_functions, blocks_covered, blocks_total)\n embedded_css = '''<style type=\"text/css\" media=\"screen\">\n\ntable {\n table-layout: fixed;\n width: 100%;\n border-collapse: collapse;\n white-space: nowrap;\n}\n\ntable th, td {\n border: 1px solid gray;\n padding: 4px;\n white-space: nowrap;\n overflow: hidden;\n text-overflow: ellipsis;\n color: #e0e0e0;\n}\n\ntable tr:nth-child(even) {\n background-color: #242424;\n}\ntable tr:nth-child(odd) {\n background-color: #2a2a2a;\n}\n\ntable th {\n font: bold;\n background-color: #181818;\n}\n\na:link { color: #80c6e9; }\n\n</style>\\n'''\n report_html = (\"<h3>%d Functions, %d blocks covered of %d total</h3>\\n\" %\n (num_functions, blocks_covered, blocks_total))\n column_titles = ['Start Address', 'Function Name', 'Coverage Percent', 'Blocks Covered / Total', 'Complexity']\n report_html += (\"<table>\\n<tr>%s</tr>\\n\" % ''.join('<th>%s</th>' % title for title in column_titles))\n function_dict = {f.name: f for f in bv.functions}\n name_dict = {}\n for f in bv.functions:\n name_dict[f.name] = f.symbol.short_name\n max_name_length = max([len(name) for name in name_dict.values()])\n 
for mangled_name, stats in sorted(covdb.function_stats.items(), key=lambda x: x[1].coverage_percent, reverse=True):\n name = name_dict[mangled_name]\n pad = \" \" * (max_name_length - len(name))\n function_addr = function_dict[mangled_name].start\n report += \" 0x%08x %s%s : %.2f%% coverage\\t( %-3d / %3d blocks)\\n\" % \\\n (function_addr, name, pad, stats.coverage_percent, stats.blocks_covered, stats.blocks_total)\n # build the html table row one item at a time, then combine them\n function_link = '<a href=\"binaryninja://?expr=0x%x\">0x%08x</a>' % (function_addr, function_addr)\n function_name = html_escape(name)\n coverage_percent = '%.2f%% coverage' % stats.coverage_percent\n blocks_covered = '%d / %d blocks' % (stats.blocks_covered, stats.blocks_total)\n row_data = [function_link, function_name, coverage_percent, blocks_covered, str(stats.complexity)]\n table_row = '<tr>' + ''.join('<td>%s</td>' % item for item in row_data) + '</tr>'\n report_html += table_row\n\n report_html += \"</table>\\n\"\n report_html = '<html>\\n<head>\\n%s\\n</head>\\n<body>\\n%s\\n</body>\\n</html>' % (embedded_css, report_html)\n\n # Save report if it's too large to display or if user asks\n target_dir, target_filename = os.path.split(bv.file.filename)\n html_file = os.path.join(target_dir, 'coverage-report-%s.html' % target_filename)\n choices = [\"Cancel Report\", \"Save Report to File\", \"Save Report and Open in Browser\"]\n choice = 0\n save_file = 1\n save_and_open = 2\n if len(report_html) > 1307673: # if Qt eats even one little wafer more, it bursts\n choice = interaction.get_choice_input(\n \"Qt can't display a report this large. Select an action.\",\n \"Generated report too large\",\n choices)\n if choice in [save_file, save_and_open]:\n save_output = True\n else:\n bv.show_html_report(title, report_html, plaintext=report)\n\n if save_output:\n with open(html_file, 'w') as f:\n f.write(report_html)\n log.log_info(\"[*] Saved HTML report to %s\" % html_file)\n if choice == save_file:\n interaction.show_message_box(\"Report Saved\",\n \"Saved HTML report to: %s\" % html_file,\n enums.MessageBoxButtonSet.OKButtonSet,\n enums.MessageBoxIcon.InformationIcon)\n if choice == save_and_open:\n open_new_browser_tab(\"file://\" + html_file)", "def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def get_project_total_test_coverage(self) -> str:\n number_not_documented_columns = 0\n number_of_columns = 0\n\n for description in self.dbt_definitions.values():\n if description == COLUMN_NOT_DOCUMENTED:\n number_not_documented_columns += 1\n number_of_columns += 1\n\n return self.calculate_coverage_percentage(\n misses=number_not_documented_columns,\n total=number_of_columns,\n )", "def percent_covered(self):\n out = self.coverage\n return out and out.cover", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def test(coverage):\n print('success')\n pass", "def cov():\n cov = coverage.coverage(branch=True, include='project/*')\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n 
cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def coverage(session):\n session.install(\"coverage[toml]\", \"codecov\")\n session.run(\"coverage\", \"xml\", \"--fail-under=0\")\n session.run(\"codecov\", *session.posargs)", "def calculate_coverage_percentage(self, misses: int, total: int) -> str:\n if total == 0:\n return \"0.0\"\n\n percentage_failure = round((1 - (misses / total)) * 100, 1)\n return str(percentage_failure)", "def cover(ctx, html=False):\n header(cover.__doc__)\n extra = \"--cov-report html\" if html else \"\"\n with ctx.cd(ROOT):\n ctx.run(\n \"pytest --benchmark-skip --cov flask_restx --cov-report term --cov-report xml {0}\".format(\n extra\n ),\n pty=True,\n )", "def get_project_column_description_coverage(self) -> None:\n print_statistics = {}\n for model_name, path in self.all_dbt_models.items():\n schema_content = open_yaml(path)\n\n number_documented_columns = len(\n self.get_documented_columns(\n schema_content=schema_content,\n model_name=model_name,\n )\n )\n\n number_not_documented_columns = len(\n self.get_not_documented_columns(\n schema_content=schema_content,\n model_name=model_name,\n )\n )\n\n print_statistics[model_name] = self.calculate_coverage_percentage(\n misses=number_not_documented_columns,\n total=(number_documented_columns + number_not_documented_columns),\n )\n\n print_statistics[\"\"] = \"\"\n print_statistics[\"Total\"] = self.get_project_total_test_coverage()\n\n self.create_table(\n title=\"Documentation Coverage\",\n columns=[\"Model Name\", r\"% coverage\"],\n data=print_statistics,\n )", "def get_coverage_report(details=False)->str:\n model = get_coverage_report_model()\n\n out = StringIO() \n formatter = TextCoverageReportFormatter(model, out)\n formatter.details = details\n formatter.report()\n \n return out.getvalue()", "def cov():\n cov = coverage.coverage(\n branch=True,\n include='project/*',\n omit=\"*/__init__.py\"\n )\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print 'Coverage Summary:'\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def derive_model_coverage(self) -> None:\n self.get_model_column_description_coverage()\n self.get_model_test_coverage()", "def cov(test_class):\n if test_class == 'all':\n tests = unittest.TestLoader().discover('project/tests')\n else:\n # note, test module must be imported above, doing lazily for now\n test_module = globals()[test_class]\n tests = unittest.TestLoader().loadTestsFromTestCase(test_module)\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = 
create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def current_nbc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(nbc_cov_dict[layer.name])\n total = total + np.size(nbc_cov_dict[layer.name])\n return covered / float(total)", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def get_test_cases_coverage(session_id):\n tc_stats={}\n tc_stats_list=[]\n total_executed=0\n sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!=\"null\"'\n params={\"sid\":session_id}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n tests=c.fetchall()\n conn.close()\n if len(tests)>0:\n for t in tests:\n total_executed=0\n sql=\"SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n files=c.fetchall()\n conn.close()\n for f in files:\n line_count=get_executable_lines_count_for_file(f[0])\n # get executions\n sql=\"SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid\"\n params={\"sid\":session_id,\"tid\":t[0],\"fid\":f[0]}\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql,params)\n executed=c.fetchone()\n conn.close()\n total_executed+=executed[0]\n # save test case and it's executions\n tc_stats={}\n tc_stats[\"test_id\"]=t[0]\n tc_stats[\"total_executed\"]=total_executed\n tc_stats[\"total_executed\"]\n \n tc_stats_list.append(tc_stats)\n return tc_stats_list", "def main(plot):\n plot_coverage(plot)\n return 0", "def coverage(context):\n context.run(\" \".join([\n \"python -m pytest\",\n \"--cov=%s\" % PACKAGE_NAME,\n \"--cov-report html\",\n \"--cov-branch\",\n \"--cov-fail-under=75\"\n ]))", "def overallCoverage(dataset, embeddings_col):\n from sparknlp.internal import _EmbeddingsOverallCoverage\n from sparknlp.common import CoverageResult\n return CoverageResult(_EmbeddingsOverallCoverage(dataset, embeddings_col).apply())", "def coverage(ctx):\n ctx.run(\"coverage run --source {PROJECT_NAME} -m pytest\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"coverage report -m\")\n ctx.run(\"coverage html\")", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if 
outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats", "def run_test_summary1c():\n print()\n print('--------------------------------------------------')\n print('Testing the summary1c function:')\n print('--------------------------------------------------')\n\n format_string = ' summary1c( {} )'\n test_results = [0, 0] # Number of tests passed, failed.\n\n # Test 1:\n expected = 1 + 2 + 5 + 7 # which is 15\n sequence = (20, 23, 29, 30, 33, 29, 100, 2, 4)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 2:\n expected = 1 + 4 + 6 # which is 11\n sequence = (23, 29, 30, 33, 29, 100, 2)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 3:\n expected = 16\n sequence = (20, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30, 2)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 4:\n expected = 5\n sequence = (29, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 5:\n expected = 5\n sequence = (30, 33, 29, 17, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 6:\n expected = 2\n sequence = (30, 33, 13, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 7:\n expected = 0\n sequence = (30, 33, 4, 10, 21, 100, 99, 40, 30, 30)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 8:\n expected = 3\n sequence = (5, 3, 3)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 9:\n expected = 1\n sequence = (5, 3)\n print_expected_result_of_test([sequence], expected, test_results,\n 
format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 10:\n expected = 0\n sequence = (5,)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 11:\n expected = 0\n sequence = ()\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n # Test 12:\n expected = 0\n sequence = (4,)\n print_expected_result_of_test([sequence], expected, test_results,\n format_string)\n actual = summary1c(sequence)\n print_actual_result_of_test(expected, actual, test_results)\n\n print_summary_of_test_results(test_results)", "def get_model_test_coverage(self) -> None:\n # Init variables\n model_number_columns = 0\n model_columns_without_tests = 0\n untested_columns = []\n\n columns = self.dbt_tests.get(self.model_name)\n\n if not columns:\n logger.info(\n f\"There is no documentation entry for '{self.model_name}' in your schema.yml files. \"\n \"You might need to run `dbt-sugar doc` first.\"\n )\n return\n\n for column in columns:\n model_number_columns += 1\n if len(column[\"tests\"]) == 0:\n model_columns_without_tests += 1\n untested_columns.append(column[\"name\"])\n\n percentage_not_tested_columns = self.calculate_coverage_percentage(\n misses=model_columns_without_tests, total=model_number_columns\n )\n\n data = self.print_nicely_the_data(\n data=untested_columns, total=percentage_not_tested_columns\n )\n\n self.create_table(\n title=\"Test Coverage\", columns=[\"Untested Columns\", r\"% coverage\"], data=data\n )", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats", "def testViewCoverageData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n covRefDbList = []\n covSampleList = []\n entryCountD = {}\n for entryId in entryD:\n for _, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n\n for _, aD in analD.items():\n entryCountD[entryId] = True\n covRefDb = aD[\"coverage_inst_refdb\"]\n covSample = aD[\"coverage_inst_entity\"]\n if covRefDb is not None:\n covRefDb = 0.0 if covRefDb < 0.0 else covRefDb\n covRefDb = 1.0 if covRefDb > 1.0 else covRefDb\n covRefDbList.append(covRefDb)\n if covSample is not None:\n covSample = 0.0 if covSample < 0.0 else covSample\n covSample = 1.0 if covSample > 1.0 else covSample\n covSampleList.append(covSample)\n #\n logger.info(\"covRefDbList %d covSampleList %d\", len(covRefDbList), len(covSampleList))\n #\n cu = DisorderChartUtils()\n cu.doHistogramChart(\n covRefDbList,\n plotPath=self.__plotCoverageRefDb,\n yPlotScale=\"log\",\n yPlotMax=100000,\n 
yPlotMin=1000,\n xPlotMin=0.0,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Reference Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageRefDb,\n \"UniProt reference sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \"\n % (len(covRefDbList), len(entryCountD)),\n )\n cu.doHistogramChart(\n covSampleList,\n plotPath=self.__plotCoverageSample1,\n yPlotScale=\"log\",\n xPlotMin=0.0,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n yPlotMax=100000,\n yPlotMin=1000,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Sample Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageSample1,\n \"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(covSampleList), len(entryCountD)),\n )\n #\n cu.doHistogramChart(\n covSampleList,\n plotPath=self.__plotCoverageSample2,\n yPlotScale=\"log\",\n yPlotMax=100000,\n yPlotMin=1000,\n xPlotMin=0.8,\n xPlotMax=1.001,\n xPlotIncr=0.1,\n # yPlotMax=100000,\n xPlotLabel=\"Coverage Fraction\",\n yPlotLabel=\"Protein Instances\",\n plotTitle=\"Sample Sequence Coverage\",\n )\n self.__writeLegend(\n self.__plotCoverageSample1,\n \"Sample sequence coverage for all (%d) protein sequences (%d X-ray structures with resolution limit < 3.5 Angstoms) \" % (len(covSampleList), len(entryCountD)),\n )\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_coverage(self) -> None:\n coverage = self.portfolio_coverage_tvp.get_portfolio_coverage(self.data, PortfolioAggregationMethod.WATS)\n self.assertAlmostEqual(coverage, 32.0663, places=4,\n msg=\"The portfolio coverage was not correct\")", "def to_html(self) -> str:\n source_name = escape(self.source_name)\n (covered, lines) = self.coverage_stats()\n lines_stats = \"{} / {} ({} lines of code)\".format(covered, lines, len(self.source_code))\n (br_covered, br_count, calls_covered, calls_count) = self.branch_stats()\n branch_stats = \"{} / {}\".format(br_covered, br_count)\n call_stats = \"{} / {}\".format(calls_covered, calls_count)\n (fn_covered, fn_count) = self.function_stats()\n fn_stats = \"{} / {}\".format(fn_covered, fn_count)\n\n self.decode_cpp_function_names()\n\n result = [\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <title>Coverage report of file \"\"\" + source_name + \"\"\"</title>\n <style type=\"text/css\">\n /*<![CDATA[*/\n .cov-health-zero td { color: white; }\n .cov-health-zero a { color: #CCCCFF; }\n .cov-health-zero a:visited { color: #FFCCFF; }\n .cov-health-zero:nth-child(odd) td { background-color: #CC0000; }\n .cov-health-zero:nth-child(even) td { background-color: #DD0000; }\n .cov-health-na td { color: silver; }\n .cov-health-na td:nth-child(2) { visibility: hidden; }\n .branch { cursor: help; }\n .branch-taken { color: silver; }\n .branch-taken:hover { color: black; }\n .branch-not-taken { color: red; }\n .branch-not-taken:hover { color: maroon; }\n #source tbody td:last-child, #funcs tbody td:first-child\n { text-align: left; font-family: monospace; white-space: pre; }\n .sortable { border-collapse: collapse; }\n div { width: 100%; overflow: hidden; }\n .sortable td { text-align: right; padding-left: 2em; }\n .sortable tbody tr:nth-child(odd) { background-color: #FFFFCC; }\n .sortable tbody tr:nth-child(even) { background-color: 
#FFFFDD; }\n #source tbody tr:hover td:last-child { font-weight: bold; }\n #source tbody td:first-child { max-width: 7em; font-size: smaller; word-wrap: break-word; }\n #source tbody td:nth-child(2) { font-size: smaller; color: silver; }\n #summary { float: right; border-collapse: collapse; }\n #summary td { border: 1px solid black; }\n caption { font-weight: bold; }\n /*]]>*/\n </style>\n <script src=\"sorttable.js\"></script>\n </head>\n <body>\n <p><a href=\"index.html\">&lArr; Back</a> | Go to line #<input type=\"number\" id=\"goto\" /></p>\n <h1>\"\"\" + source_name + \"\"\"</h1>\n <div>\n <table id=\"summary\">\n <caption>Summary</caption>\n <tr><td>Lines</td><td>\"\"\" + lines_stats + \"\"\"</td></tr>\n <tr><td>Branches</td><td>\"\"\" + branch_stats + \"\"\"</td></tr>\n <tr><td>Calls</td><td>\"\"\" + call_stats + \"\"\"</td></tr>\n <tr><td><a href=\"#functions\">Functions</a></td><td>\"\"\" + fn_stats + \"\"\"</td></tr>\n </ul>\n </table>\n <table class=\"sortable\" id=\"source\">\n <thead><tr><th>Branches</th><th>Cov</th><th>Line</th><th class=\"sorttable_nosort\">Source</th></tr></thead>\n <tbody>\n \"\"\"]\n result.extend(line.to_html() for line in self.source_code)\n result.append(\"\"\"\n </tbody>\n </table>\n </div>\n <h2 id=\"functions\">Functions</h2>\n <div>\n <table class=\"sortable\" id=\"funcs\">\n <thead><tr><th>Function</th><th>Calls</th><th>Ret.</th><th>Blk. Exec.</th></tr></thead>\n <tbody>\"\"\")\n result.extend(func.to_html() for func in self.source_functions)\n result.append(\"\"\"\n </tbody>\n </table>\n </div>\n <script>\n //<![CDATA[\n document.getElementById('goto').onchange = function()\n {\n location = \"#line-\" + this.value;\n }\n //]]>\n </script>\n </body>\n </html>\n \"\"\")\n return '\\n'.join(result)", "def get_model_column_description_coverage(self) -> None:\n not_documented_columns = self.get_not_documented_columns(\n schema_content=self.model_content,\n model_name=self.model_name,\n ).keys()\n\n number_not_documented_columns = len(not_documented_columns)\n number_documented_columns = len(\n self.get_documented_columns(\n schema_content=self.model_content,\n model_name=self.model_name,\n )\n )\n number_columns = number_documented_columns + number_not_documented_columns\n\n # This means that they are not columns, and we want to skip the printing.\n if number_columns == 0:\n return\n\n percentage_not_documented_columns = self.calculate_coverage_percentage(\n misses=number_not_documented_columns,\n total=number_columns,\n )\n logger.debug(\n f\"percentage_not_documented_columns for '{self.model_name}': {percentage_not_documented_columns}\"\n )\n\n data = self.print_nicely_the_data(\n data=list(not_documented_columns), total=percentage_not_documented_columns\n )\n\n self.create_table(\n title=\"Documentation Coverage\",\n columns=[\"Undocumented Columns\", r\"% coverage\"],\n data=data,\n )", "def main():\n coverage = calculate_code_coverage()\n platform = os.uname()[0]\n if coverage < CODE_COVERAGE_GOAL[platform]:\n data = {\n 'expected': CODE_COVERAGE_GOAL[platform],\n 'observed': coverage,\n }\n print '\\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\\033[0m' % data\n sys.exit(1)", "def current_knc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(knc_cov_dict[layer.name])\n total = total + np.size(knc_cov_dict[layer.name])\n return covered / float(total)", "def get_testcase_summary(output):\n print(\"Inside Test Summary\")\n re_tc_summary = 
re.compile(r\"^\\[(\\d+\\.\\d+)\\][^\\]+\\{\\{(__testcase_summary);(\\d+);(\\d+)\\}\\}\")\n #re_tc_summary = re.compile(r\"^\\[(\\d+\\.\\d+)\\][^\\{]+\\{\\{(__testcase_summary);(\\d+);(\\d+)\\}\\}\")\n print(\"re_tc_summary =\",re_tc_summary.pattern)\n #print(dir(re_tc_summary))\n \n for line in output.splitlines():\n print \"line=\",line\n m = re_tc_summary.search(line)\n print (\"m=\",m.groups())\n if m:\n _, _, passes, failures = m.groups()\n return int(passes), int(failures)\n return None", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def report_coverage(fp=None, details=False):\n if fp is None:\n fp = sys.stdout\n fp.write(get_coverage_report(details))", "def define_coverage(self, id=None, units=None, standard_name=None, coverage_dimensions=None):", "def pytest_report_header(config):\n circle_node_total, circle_node_index = read_circleci_env_variables()\n return \"CircleCI total nodes: {}, this node index: {}\".format(circle_node_total, circle_node_index)", "def coverage(session) -> None:\n session.install(\".[test]\", \"pytest-cov\")\n session.run(\n \"pytest\", \"-n\", \"auto\", \"--cov=./\", \"--cov-report=xml\", *session.posargs\n )", "def to_html(self) -> str:\n coverage_class = 'zero' if self.called == 0 else 'all'\n return '''<tr id=\"func-{}\" class=\"cov-health-{}\">\n <td><a href=\"#line-{}\">{}</a></td>\n <td>{}</td><td>{}%</td><td>{}%</td>\n </tr>\\n'''.format(\n self.name, coverage_class, self.linenum, self.pretty_name, self.called,\n self.returned, self.blocks\n )", "def query_coverage(query):\n length = query_length(query)\n coverage = (int(query['alignment length'])/length) * 100\n return coverage", "def test_summary_report(self):\n self.driver.get('http://psl-outbreak.herokuapp.com/report')\n self.driver.find_element_by_id('summary_report_cases').click()", "def test_coverage_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-2.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_1(inst2)", "def summary_string(self) -> str:", "def get_coverage_stats(\n contig_depth_file, contig_fasta, contig_read_counts_file, contig_stats_out\n):\n print(\"getting coverage stats\")\n # add other files if requested\n # read counts\n logger.info(\"Parsing read count file: {}\".format(contig_read_counts_file))\n read_count_table = pandas.read_csv(\n contig_read_counts_file, delim_whitespace=True, names=[\"ReadCount\", \"Contig\"]\n ).set_index(\"Contig\")\n\n # convert base by base depth data into coverage\n logger.info(\"Parsing read depth file: {}\".format(contig_depth_file))\n mapping_depth_table = get_samtool_depth_table(contig_depth_file, contig_fasta,)\n contig_stats = mapping_depth_table.join(read_count_table, how=\"left\").fillna(0)\n\n for col in [\"Length\", \"ReadCount\", \"MaxCov\", \"MinCov\", \"CumuLength\"]:\n if col in contig_stats.columns:\n contig_stats[col] = contig_stats[col].astype(int)\n\n logger.info(\"Writing coverage table to: {}\".format(contig_stats_out))\n contig_stats.to_csv(contig_stats_out, sep=\"\\t\", float_format=\"%0.2f\")", "def analyze_coverage(results, outcomes, allow_list, 
full_coverage):\n available = check_test_cases.collect_available_test_cases()\n for key in available:\n hits = outcomes[key].hits() if key in outcomes else 0\n if hits == 0 and key not in allow_list:\n if full_coverage:\n results.error('Test case not executed: {}', key)\n else:\n results.warning('Test case not executed: {}', key)\n elif hits != 0 and key in allow_list:\n # Test Case should be removed from the allow list.\n if full_coverage:\n results.error('Allow listed test case was executed: {}', key)\n else:\n results.warning('Allow listed test case was executed: {}', key)", "def generate_cobertura_xml(self, coverage_data):\n\n dom_impl = minidom.getDOMImplementation()\n doctype = dom_impl.createDocumentType(\"coverage\", None,\n \"http://cobertura.sourceforge.net/xml/coverage-03.dtd\")\n document = dom_impl.createDocument(None, \"coverage\", doctype)\n root = document.documentElement\n summary = coverage_data['summary']\n self._attrs(root, {\n 'branch-rate': self._percent(summary['branches-total'],\n summary['branches-covered']),\n 'branches-covered': str(summary['branches-covered']),\n 'branches-valid': str(summary['branches-total']),\n 'complexity': '0',\n 'line-rate': self._percent(summary['lines-total'],\n summary['lines-covered']),\n 'lines-valid': str(summary['lines-total']),\n 'timestamp': coverage_data['timestamp'],\n 'version': '1.9'\n })\n\n sources = self._el(document, 'sources', {})\n source = self._el(document, 'source', {})\n source.appendChild(document.createTextNode(self.base_dir))\n sources.appendChild(source)\n\n root.appendChild(sources)\n\n packages_el = self._el(document, 'packages', {})\n\n packages = coverage_data['packages']\n for package_name, package_data in list(packages.items()):\n package_el = self._el(document, 'package', {\n 'line-rate': package_data['line-rate'],\n 'branch-rate': package_data['branch-rate'],\n 'name': package_name\n })\n classes_el = self._el(document, 'classes', {})\n for class_name, class_data in list(package_data['classes'].items()):\n class_el = self._el(document, 'class', {\n 'branch-rate': self._percent(class_data['branches-total'],\n class_data['branches-covered']),\n 'complexity': '0',\n 'filename': class_name,\n 'line-rate': self._percent(class_data['lines-total'],\n class_data['lines-covered']),\n 'name': class_data['name']\n })\n\n # Process methods\n methods_el = self._el(document, 'methods', {})\n for method_name, hits in list(class_data['methods'].items()):\n method_el = self._el(document, 'method', {\n 'name': method_name,\n 'signature' : '',\n 'hits': hits\n })\n methods_el.appendChild(method_el)\n\n # Process lines\n lines_el = self._el(document, 'lines', {})\n lines = list(class_data['lines'].keys())\n lines.sort()\n for line_number in lines:\n line_el = self._el(document, 'line', {\n 'branch': class_data['lines'][line_number]['branch'],\n 'hits': str(class_data['lines'][line_number]['hits']),\n 'number': str(line_number)\n })\n if class_data['lines'][line_number]['branch'] == 'true':\n total = int(class_data['lines'][line_number]['branches-total'])\n covered = int(class_data['lines'][line_number]['branches-covered'])\n percentage = int((covered * 100.0) / total)\n line_el.setAttribute('condition-coverage',\n '{0}% ({1}/{2})'.format(\n percentage, covered, total))\n lines_el.appendChild(line_el)\n\n class_el.appendChild(methods_el)\n class_el.appendChild(lines_el)\n classes_el.appendChild(class_el)\n package_el.appendChild(classes_el)\n packages_el.appendChild(package_el)\n root.appendChild(packages_el)\n\n return 
document.toprettyxml()", "def trivial_cover(regions_count, clinics_count, clinics):\n clinics_built = [0]*range(0, clinics_count)\n coverted = set()\n\n for clinic in clinics:\n clinics_built[clinic.index] = 1\n coverted |= set(clinic.regions)\n if len(coverted) >= regions_count:\n break # We are done, we cover all the regions\n\n # Calculamos el costo total de construcción\n total_costs = sum([clinic.cost*clinics_built[clinic.index] for clinic in clinics])\n \n # Convertimos la solución en el formato esperado\n output_data = str(total_cost) + '\\n'\n output_data += ' '.join(map(str, clinics_built))\n\n return output_data", "def summary(self) -> str:\n pass", "def calculate_coverage(path, alignment, number_of_fastas):\n\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/' + alignment\n fastas_iterator = parse_multifasta_file(path_to_alignment, number_of_fastas)\n fastas = []\n targer_name, target_seq = next(fastas_iterator)\n fastas.append(target_seq)\n length_of_target = 0\n for i in target_seq:\n if i != '-':\n length_of_target += 1\n for i in range(1, number_of_fastas):\n name, seq = next(fastas_iterator)\n fastas.append(seq)\n coverage = 0\n for i in range(len(fastas[0])):\n for j in range(1, len(fastas)):\n if fastas[0][i] != '-' and fastas[j][i] != '-':\n coverage += 1\n break\n coverage_percent = round(coverage / length_of_target * 100, 2)\n return coverage_percent", "def summary(self):\n raise NotImplementedError", "def test_coverage_4(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_4(inst2)", "def compute_filecoverage(self):\n result = dict()\n for filename, fns in self.point_symbol_info.items():\n file_points = []\n for fn, points in fns.items():\n file_points.extend(points.keys())\n covered_points = self.covered_points & set(file_points)\n result[filename] = int(math.ceil(\n len(covered_points) * 100 / len(file_points)))\n return result", "def recordCoverage( options, data ):\n for c in data.chrNames:\n data.mafWigDict[ c ]['columnsInBlocks'] = 0\n for m in data.mafBlocksByChrom[ c ]:\n if m.refEnd > m.refStart:\n data.mafWigDict[ c ]['columnsInBlocks'] += ( m.refEnd + 1 ) - m.refStart\n else:\n data.mafWigDict[ c ]['columnsInBlocks'] += ( m.refStart + 1 ) - m.refEnd", "def test_coverage_2(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-selfpay.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_2(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_2(inst2)", "def html_it():\n import coverage\n cov = coverage.coverage()\n cov.start()\n import here # pragma: nested\n cov.stop() # pragma: nested\n cov.html_report(directory=\"../html_other\")", "def _cmd_coverage(args):\n pset = coverage.do_coverage(\n args.interval,\n args.bam_file,\n args.count,\n args.min_mapq,\n args.processes,\n args.fasta,\n )\n if not args.output:\n # Create an informative but 
unique name for the coverage output file\n bambase = core.fbase(args.bam_file)\n bedbase = core.fbase(args.interval)\n tgtbase = (\n \"antitargetcoverage\" if \"anti\" in bedbase.lower() else \"targetcoverage\"\n )\n args.output = f\"{bambase}.{tgtbase}.cnn\"\n if os.path.exists(args.output):\n args.output = f\"{bambase}.{bedbase}.cnn\"\n core.ensure_path(args.output)\n tabio.write(pset, args.output)", "def summary(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def summarize(self):\n\n def increment_summary(summary_obj, case_obj):\n \"\"\"increment ReportSummary count was ReportCase status\n\n Whatever the status of the case object, the corresponding property\n will be incremented by 1 in the summary object\n\n Args:\n summary_obj (ReportSummary): summary object to increment\n case_obj (ReportCase): case object\n \"\"\"\n summary_obj.increment(case_obj.get_status())\n\n summary = ReportSummary()\n [increment_summary(summary, case) for case in self.cases]\n self.summary = summary", "def test_get_vulnerability_occurrences_summary(self):\n pass", "def header_summary(\n self, \n router_context,\n tests_by_status\n ):\n raise MissingOverload", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def print_coverage(lengths):\n\n primerAlen = 1330\n primerBlen = 1353\n primerClen = 1237\n\n print(\"SRR ID\\tPrimer A\\tPrimer B\\tPrimer C\")\n for s in lengths:\n sys.stdout.write(s)\n sys.stdout.write(\"\\t{}\".format(1.0 * lengths[s][\"PrimerA\"]/primerAlen))\n sys.stdout.write(\"\\t{}\".format(1.0 * lengths[s][\"PrimerB\"]/primerBlen))\n sys.stdout.write(\"\\t{}\\n\".format(1.0 * lengths[s][\"PrimerC\"]/primerClen))", "def pytest_terminal_summary(self, terminalreporter, exitstatus):\n # pylint: disable=unused-argument\n terminalreporter.section(\"Test Information\")\n for test, info in self._info.items():\n for datum in info:\n terminalreporter.write(\"{}: {}\\n\".format(test, datum))", "def test_coverage_3(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-ehic.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_3(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_3(inst2)", "def summarise(self):\n self.summary = az.summary(self.trace, var_names=[\"~chol\"], round_to=2)\n print(self.summary)\n return self.summary", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def get_coverage(self):\n if len(self) == 1:\n return self.subacqs[0].get_coverage()\n return np.array([self.subacqs[i].get_coverage() for i in range(len(self))])", "def _calc_coverage(self, cds_aln):\n # Aligned region is part of a read that intersects with cds.\n coverage = 0\n for aln_reg in cds_aln.aligned_regions.values(): # aln_reg is of type CdsAlnSublocation\n location = aln_reg.location # location is of type Location\n coverage += location.length()\n coverage = coverage / float(Location.from_location_str(cds_aln.cds.location).length())\n return coverage", "def 
summary_stats(tile_summary):\n return \"Original Dimensions: %dx%d\\n\" % (tile_summary.orig_w, tile_summary.orig_h) + \\\n \"Original Tile Size: %dx%d\\n\" % (tile_summary.orig_tile_w, tile_summary.orig_tile_h) + \\\n \"Scale Factor: 1/%dx\\n\" % tile_summary.scale_factor + \\\n \"Scaled Dimensions: %dx%d\\n\" % (tile_summary.scaled_w, tile_summary.scaled_h) + \\\n \"Scaled Tile Size: %dx%d\\n\" % (tile_summary.scaled_tile_w, tile_summary.scaled_tile_w) + \\\n \"Total Mask: %3.2f%%, Total Tissue: %3.2f%%\\n\" % (\n tile_summary.mask_percentage(), tile_summary.tissue_percentage) + \\\n \"Tiles: %dx%d = %d\\n\" % (tile_summary.num_col_tiles, tile_summary.num_row_tiles, tile_summary.count) + \\\n \" %5d (%5.2f%%) tiles >=%d%% tissue\\n\" % (\n tile_summary.high, tile_summary.high / tile_summary.count * 100, TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >=%d%% and <%d%% tissue\\n\" % (\n tile_summary.medium, tile_summary.medium / tile_summary.count * 100, TISSUE_LOW_THRESH,\n TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >0%% and <%d%% tissue\\n\" % (\n tile_summary.low, tile_summary.low / tile_summary.count * 100, TISSUE_LOW_THRESH) + \\\n \" %5d (%5.2f%%) tiles =0%% tissue\" % (tile_summary.none, tile_summary.none / tile_summary.count * 100)", "def calculate_coverage(length_total, length_query, length_subject, option_cov=\"mean\"):\n if option_cov == \"mean\":\n cov = length_total / ((length_query + length_subject) / 2.0)\n elif option_cov == \"subject\":\n cov = length_total / length_subject\n elif option_cov == \"query\":\n cov = length_total / length_query\n elif option_cov == \"shortest\":\n cov = length_total / min(length_query, length_subject)\n elif option_cov == \"longest\":\n cov = length_total / max(length_query, length_subject)\n\n return cov", "def html_index(source_files: iter([SourceFile]), compile_root: str) -> str:\n def single_summary(source_file: SourceFile) -> str:\n (covered, lines) = source_file.coverage_stats()\n (br_covered, br_count, _, _) = source_file.branch_stats()\n (fn_covered, fn_count) = source_file.function_stats()\n (coverage_percent, coverage_health) = to_percentage(covered, lines, 90, 75)\n (branch_percent, branch_health) = to_percentage(br_covered, br_count, 75, 50)\n (fn_percent, fn_health) = to_percentage(fn_covered, fn_count, 90, 75)\n\n\n return '''<tr>\n <td><a href=\"{}\">{}</a></td>\n <td class=\"cov-health-{}\" title=\"{}/{}\">{}%</td>\n <td class=\"cov-health-{}\" title=\"{}/{}\">{}%</td>\n <td class=\"cov-health-{}\" title=\"{}/{}\">{}%</td>\n </tr>'''.format(\n to_html_filename(source_file.source_name),\n escape(source_file.source_name),\n coverage_health, covered, lines, coverage_percent,\n branch_health, br_covered, br_count, branch_percent,\n fn_health, fn_covered, fn_count, fn_percent\n )\n\n title = escape(compile_root)\n\n html_res = [\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>Coverage report for \"\"\" + title + \"\"\"</title>\n <style type=\"text/css\">\n /*<![CDATA[*/\n .cov-health-all { background-color: #80FF80; }\n .cov-health-zero { background-color: black; color: white; }\n .cov-health-good { background-color: yellow; }\n .cov-health-normal { background-color: orange; }\n .cov-health-bad { background-color: red; }\n td { text-align: right; padding: 0.1em 0.5em; }\n td:first-child { text-align: left; }\n table { border-collapse: collapse; }\n tr { border: 1px solid black; }\n /*]]>*/\n </style>\n <script src=\"sorttable.js\"></script>\n </head>\n <body>\n <h1>Coverage report for \"\"\" + title + \"\"\"</h1>\n 
<div><table class=\"sortable\">\n <thead><tr><th>File</th><th>Lines</th><th>Branch</th><th>Functions</th></tr></thead>\n <tbody>\n \"\"\"]\n\n html_res.extend(single_summary(s) for s in source_files)\n html_res.append('</tbody></table></div></body></html>')\n\n return '\\n'.join(html_res)", "def summary(self):\n return ''", "def test_summaryRsrcsNoHeader(self):\n self.summaryRsrcsNoHeader(\"alert\")\n self.summaryRsrcsNoHeader(\"dashboard\")", "def summary_stats(self):\n capital_gains = self.df['values'].iloc[-1].sum() - self.tc.starting_cash\n total_return = capital_gains / self.tc.starting_cash\n days_invested = (self.df.index[-1] - self.df.index[0]).days\n annualized_returns = (total_return + 1) ** (365 / days_invested) - 1\n annualized_volatility = self.df['returns'].std() * (252 ** 0.5)\n sharpe = annualized_returns / annualized_volatility\n num_trades = self.trades.shape[0]\n stats = pd.Series(\n data=[capital_gains, total_return, annualized_returns, annualized_volatility, sharpe, num_trades],\n index=['Capital Gains', 'Total Return', 'Annualized Return', 'Annualized Volatility', 'Sharpe Ratio',\n 'Number of Trades']\n )\n return stats", "def report_totals(output):\n groups = (STATS_PATC.match(line) for line in output.splitlines())\n tuples = (g.groups() for g in groups if g)\n\n results = [0,0,0,0,0]\n for t in tuples:\n results[0] += int(t[0]) # total\n results[1] += int(t[1]) # failures\n results[2] += int(t[2]) # errors\n results[3] += int(t[3]) # skipped\n results[4] += float(t[4]) # elapsed time\n\n print 'Tests run: %d, Failures: %d, Errors: %d, Skipped: %d, '\\\n 'Time elapsed: %.2f' % tuple(results)", "def get_coverage_report_model()->CoverageReport:\n covergroups = CoverageRegistry.inst().covergroup_types()\n\n db = MemFactory.create() \n save_visitor = CoverageSaveVisitor(db)\n now = datetime.now\n save_visitor.save(TestData(\n UCIS_TESTSTATUS_OK,\n \"UCIS:simulator\",\n ucis.ucis_Time()), covergroups)\n\n return CoverageReportBuilder.build(db)", "def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)", "def coverage(text: str) -> float:\n words = set(text.split(' '))\n return len([w for w in words if frequency(w) != 0]) / len(words) * 100", "def metadata_reporter(self):\n logging.info('Creating summary report')\n header = '{}\\n'.format(','.join(self.headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += 
GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if 
sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # PipelineVersion\n data += self.commit + ','\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)", "def get_summary_stats(self, output_csv=None):\n\n contig_size_list = []\n\n self.summary_info[\"ncontigs\"] = len(self.contigs)\n\n for contig_id, sequence in self.contigs.items():\n\n logger.debug(\"Processing contig: {}\".format(contig_id))\n\n # Get contig sequence size\n contig_len = len(sequence)\n\n # Add size for average contig size\n contig_size_list.append(contig_len)\n\n # Add to total assembly length\n self.summary_info[\"total_len\"] += contig_len\n\n # Add to average gc\n self.summary_info[\"avg_gc\"].append(\n sum(map(sequence.count, [\"G\", \"C\"])) / contig_len\n )\n\n # Add to missing data\n self.summary_info[\"missing_data\"] += sequence.count(\"N\")\n\n # Get average contig size\n logger.debug(\"Getting average contig size\")\n self.summary_info[\"avg_contig_size\"] = \\\n sum(contig_size_list) / len(contig_size_list)\n\n # Get average gc content\n logger.debug(\"Getting average GC content\")\n self.summary_info[\"avg_gc\"] = \\\n sum(self.summary_info[\"avg_gc\"]) / len(self.summary_info[\"avg_gc\"])\n\n # Get N50\n logger.debug(\"Getting N50\")\n cum_size = 0\n for l in sorted(contig_size_list, reverse=True):\n cum_size += l\n if cum_size >= self.summary_info[\"total_len\"] / 2:\n self.summary_info[\"n50\"] = l\n break\n\n if output_csv:\n logger.debug(\"Writing report to csv\")\n # Write summary info to CSV\n with open(output_csv, \"w\") as fh:\n summary_line = \"{}, 
{}\\\\n\".format(\n self.sample, \",\".join(\n [str(x) for x in self.summary_info.values()]))\n fh.write(summary_line)", "def test_concentration_profile(self):\n # TODO: add an output for average particle concentration", "def cowreport():\n central = pytz.timezone(\"America/Chicago\")\n yesterday = (utc() - datetime.timedelta(days=1)).astimezone(central)\n midnight = yesterday.replace(hour=0, minute=0)\n midutc = midnight.astimezone(pytz.UTC)\n begints = midutc.strftime(\"%Y-%m-%dT%H:%M\")\n endts = (midutc + datetime.timedelta(hours=24)).strftime(\"%Y-%m-%dT%H:%M\")\n api = (\n f\"http://iem.local/api/1/cow.json?begints={begints}&endts={endts}&\"\n \"phenomena=SV&phenomena=TO&lsrtype=SV&lsrtype=TO\"\n )\n data = requests.get(api, timeout=60).json()\n st = data[\"stats\"]\n if st[\"events_total\"] == 0:\n text = \"No SVR+TOR Warnings Issued.\"\n html = f\"<h3>IEM Cow Report</h3><pre>{text}</pre>\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n return txt, html\n\n vp = st[\"events_verified\"] / float(st[\"events_total\"]) * 100.0\n text = (\n f\"SVR+TOR Warnings Issued: {st['events_total']:3.0f} \"\n f\"Verified: {st['events_verified']:3.0f} [{vp:.1f}%]\\n\"\n \"Polygon Size Versus County Size \"\n f\"[{st['size_poly_vs_county[%]']:.1f}%]\\n\"\n \"Average Perimeter Ratio \"\n f\"[{st['shared_border[%]']:.1f}%]\\n\"\n \"Percentage of Warned Area Verified (15km) \"\n f\"[{st['area_verify[%]']:.1f}%]\\n\"\n \"Average Storm Based Warning Size \"\n f\"[{st['avg_size[sq km]']:.0f} sq km]\\n\"\n f\"Probability of Detection(higher is better) [{st['POD[1]']:.2f}]\\n\"\n f\"False Alarm Ratio (lower is better) [{st['FAR[1]']:.2f}]\\n\"\n f\"Critical Success Index (higher is better) [{st['CSI[1]']:.2f}]\\n\"\n )\n\n html = f\"<h3>IEM Cow Report</h3><pre>{text}</pre>\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n\n return txt, html", "def run(self):\n cmd = 'coverage run setup.py test && coverage report -m'\n check_call(cmd, shell=True)", "def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')", "def parse_coverage(depth_filename, allow_missing=True):\n\n delims = [ 0, 10, 100, 1000, 2000, 10000]\n nbins = len(delims)+1\n\n bin_labels = ['0'] + [f\"{delims[i-1]+1}x-{delims[i]}x\" for i in range(1,nbins-1)] + [f\"> {delims[-1]}x\"]\n bin_labels = [ f\"Fraction with {l} coverage\" for l in bin_labels ]\n\n ret = {\n 'bin_labels': bin_labels,\n 'bin_fractions': [ None for b in range(nbins) ],\n 'mean_coverage': None,\n 'qc_meancov': 'FAIL',\n 'qc_cov100': 'FAIL',\n 'qc_cov1000': 'FAIL',\n 'cov100': 0\n }\n\n if file_is_missing(depth_filename, allow_missing):\n return ret\n\n coverage = []\n for line in open(depth_filename):\n t = line.split('\\t')\n assert len(t) == 3\n coverage.append(int(float(t[2].strip(\"\\n\"))))\n\n coverage = np.array(coverage)\n bin_assignments = np.searchsorted(np.array(delims), coverage, side='left')\n bin_fractions = np.bincount(bin_assignments, minlength=nbins) / float(len(coverage))\n assert bin_fractions.shape == (nbins,)\n\n\n ret['cov100'] = np.mean(coverage >= 100)\n ret['bin_fractions'] = [ xround(f,3) for f in bin_fractions ]\n ret['mean_coverage'] = xround(np.mean(coverage), 1)\n ret['qc_meancov'] = \"PASS\" if (np.mean(coverage) >= 2000) else \"FAIL\"\n ret['qc_cov100'] = \"PASS\" if (np.mean(coverage >= 100) >= 0.9) else \"FAIL\"\n ret['qc_cov1000'] = \"PASS\" if (np.mean(coverage >= 1000) >= 0.9) else \"WARN\"\n\n return ret", "def do(self, callback_name, *args):\n value_dict = 
self._evaluator.evaluate(self.data_stream)\n print(\"Train test coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n print(\"{0}:{1}\".format(key, value * self.coverage))", "def test_run_coverage(self):\n cmd = GreenTestCommand(Distribution())\n cmd.coverage = True\n cmd.ensure_finalized()\n cmd.run()\n self.assertThat(_subprocess_call_args(), Contains(\"-r\"))", "def cuv(ctx, coverage_fname, exclude, branch):\n if coverage_fname is None:\n coverage_fname = find_coverage_data('.')\n # coverage_fname still could be None\n\n cfg = Config()\n ctx.obj = cfg\n\n cfg.nice_width = min(80, shutil.get_terminal_size()[0])\n cfg.exclude = exclude\n\n cfg.branch = branch\n if coverage_fname is not None:\n cfg.data = coverage.Coverage(data_file=coverage_fname)\n cfg.data.load()\n else:\n raise click.UsageError(\n \"No coverage data. Do you have a .coverage file?\"\n )", "def test_summary_success(self):\n\n summary_data_key = 'summary_data'\n\n response = self.send_request(view_name='upload_summary_view', params={'upload_id': 1})\n context_data = response.context_data\n self.assertTrue(summary_data_key in context_data)\n\n summary_data = context_data[summary_data_key]\n self.assertEquals(3, len(summary_data))\n\n self.assertEqual(Decimal('100.0'), summary_data[0].pre_tax_amount)", "def _coverage(self, chr, limit, nbins):\n\n c = np.zeros(nbins, dtype=np.int)\n chr_start, chr_stop = self.refs[chr][1:]\n bin_size = float((limit[1] - limit[0]) / nbins)\n\n for i in range(chr_start, chr_stop):\n read_start = self.lines[i][3]\n read_len = len(self.lines[i][9])\n\n start_bin = int((read_start - limit[0]) / bin_size)\n stop_bin = int((read_start + read_len - limit[0]) / bin_size)\n\n # print start_bin, stop_bin\n c[start_bin:stop_bin + 1] += 1\n \n return c", "def getTotalCaseAndControlCounts(genotypesFilename):\r\n\r\n\tcomphetSuffix = \"\"\r\n\tif \"comphet\" in genotypesFilename:\r\n\t\tcomphetSuffix = \" (#1)\"\r\n\r\n\t# We read through the whole file. 
Might take a while, but easier than dealing with all edge cases.\r\n\tmaxCoveredCasePercentage = 0\r\n\tmaxCoveredControlPercentage = 0\r\n\treader = csv.reader(open(genotypesFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor variant in reader:\r\n\r\n\t\tvariant = dict(zip(header, variant))\r\n\t\tcasePercentage = float(variant[\"Covered Case Percentage\" + comphetSuffix])/100.0\r\n\t\tif casePercentage > maxCoveredCasePercentage:\r\n\t\t\tmaxCoveredCasePercentage = casePercentage\r\n\t\t\tcoveredCases = int(variant[\"Covered Case\" + comphetSuffix])\r\n\t\t\ttotalCases = int(round(coveredCases/casePercentage))\r\n\r\n\t\tcontrolPercentage = float(variant[\"Covered Ctrl Percentage\" + comphetSuffix])/100.0\r\n\t\tif controlPercentage > maxCoveredControlPercentage:\r\n\t\t\tmaxCoveredControlPercentage = controlPercentage\r\n\t\t\tcoveredControls = int(variant[\"Covered Ctrl\" + comphetSuffix])\r\n\t\t\ttotalControls = int(round(coveredControls/controlPercentage))\r\n\treturn totalCases, totalControls", "def generate_report():\n if os.path.isdir(\"build/coverage\"):\n shutil.rmtree(\"build/coverage\")\n commands = '''\nscons -uij32 --optimization=coverage controller/cplusplus_test\nlcov --base-directory build/coverage --directory build/coverage -c -o build/coverage/controller_test.info\ngenhtml -o build/coverage/controller/test_coverage -t test --num-spaces 4 build/coverage/controller_test.info\n'''\n for cmd in commands.splitlines():\n cmd_args = cmd.split()\n if (len(cmd_args) == 0):\n continue\n cmd = cmd_args[0]\n cmd_path = find_executable(cmd)\n if not cmd_path:\n continue\n pid = os.fork()\n if pid == 0:\n # Avoid stdout buffering by execing command into child process.\n os.execv(cmd_path, cmd_args)\n os.waitpid(pid, 0)" ]
[ "0.71685416", "0.68137187", "0.6776906", "0.6677319", "0.6650848", "0.6639017", "0.6626381", "0.65998113", "0.6589576", "0.65476686", "0.6533306", "0.6496852", "0.6492893", "0.64699256", "0.64314204", "0.64311635", "0.64148486", "0.63928735", "0.6356816", "0.63496864", "0.63363206", "0.6288406", "0.6266407", "0.62500393", "0.62024015", "0.61881036", "0.61556363", "0.6153905", "0.61179703", "0.6105436", "0.61032206", "0.6089348", "0.6077586", "0.6036977", "0.6015071", "0.60113275", "0.60109013", "0.59781253", "0.5977678", "0.5970312", "0.5964489", "0.5956227", "0.59155035", "0.5900403", "0.5882995", "0.5879575", "0.587582", "0.58629966", "0.5821904", "0.5814581", "0.57977504", "0.577273", "0.575227", "0.5749255", "0.5747918", "0.5735202", "0.5731492", "0.5707693", "0.5705943", "0.57026005", "0.5685508", "0.5683569", "0.5677856", "0.5675753", "0.56738245", "0.5668328", "0.5667812", "0.5660833", "0.56594783", "0.564732", "0.56437427", "0.5641916", "0.56372786", "0.5635899", "0.56292456", "0.5608846", "0.55929166", "0.55740994", "0.55711234", "0.5544603", "0.55395234", "0.55232507", "0.55186677", "0.55178505", "0.55129856", "0.55111057", "0.5505072", "0.55043805", "0.54955375", "0.5492022", "0.54883343", "0.5482901", "0.54788476", "0.5476473", "0.54708874", "0.54668874", "0.5465019", "0.5464465", "0.54550874", "0.54434067" ]
0.7372552
0
Coverage handler for incoming messages
def receive_message(self, context, message): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_handler(controller, msg, pkt):\n pass", "def handle(self, message):", "def handle_message(self, message):", "def processMessage(self, *args, **kwargs):\r\n pass", "def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()", "def handle_message(self, msg):\n pass", "def _report_message(message, level, request, extra_data, payload_data):\n if not _check_config():\n return\n\n filtered_message = events.on_message(message,\n request=request,\n extra_data=extra_data,\n payload_data=payload_data,\n level=level)\n\n if filtered_message is False:\n return\n\n data = _build_base_data(request, level=level)\n\n # message\n data['body'] = {\n 'message': {\n 'body': filtered_message\n }\n }\n\n if extra_data:\n extra_data = extra_data\n data['body']['message'].update(extra_data)\n\n request = _get_actual_request(request)\n _add_request_data(data, request)\n _add_person_data(data, request)\n _add_lambda_context_data(data)\n data['server'] = _build_server_data()\n\n if payload_data:\n data = dict_merge(data, payload_data, silence_errors=True)\n\n payload = _build_payload(data)\n send_payload(payload, payload.get('access_token'))\n\n return data['uuid']", "def process_messages(self):\n pass", "def process_message(self, msg, src):", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def test_messages(self):\n pass", "def handleMessage(msg):", "def __data_handler__(self, msg):\n print(msg)", "def test_sendimmessages(self):\n pass", "def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)", "def test_handle_request_get(self):\n # setup\n incoming_message = cast(\n HttpMessage,\n self.build_incoming_message(\n message_type=HttpMessage,\n performative=HttpMessage.Performative.REQUEST,\n to=self.skill_id,\n sender=self.sender,\n method=self.get_method,\n url=self.url,\n version=self.version,\n headers=self.headers,\n body=self.body,\n ),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.http_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(1)\n\n mock_logger.assert_any_call(\n logging.INFO,\n \"received http request with method={}, url={} and body={!r}\".format(\n incoming_message.method, incoming_message.url, incoming_message.body\n ),\n )\n\n # _handle_get\n message = self.get_message_from_outbox()\n has_attributes, error_str = self.message_has_attributes(\n actual_message=message,\n message_type=HttpMessage,\n performative=HttpMessage.Performative.RESPONSE,\n to=incoming_message.sender,\n sender=incoming_message.to,\n version=incoming_message.version,\n status_code=200,\n status_text=\"Success\",\n headers=incoming_message.headers,\n body=json.dumps({\"tom\": {\"type\": \"cat\", \"age\": 10}}).encode(\"utf-8\"),\n )\n assert has_attributes, error_str\n\n mock_logger.assert_any_call(\n logging.INFO,\n f\"responding with: {message}\",\n )", "def test(coverage):\n print('success')\n pass", "def process(self, message: Message, **kwargs: Any) -> None:", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n 
#format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def message_handler(self, dest, source, message):\n pass", "def handle_message(self, data, channel):\n pass", "def process(self, payload, status_code=0):", "def on_message(data):\n pass", "def _handleIncomingDataAnalysis(self, msg: str):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleIncomingDataAnalysis method is being called\")\n\t\tad = DataUtil.jsonToActuatorData(self, msg)\n\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == 
\"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif 
request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay 
Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))", "def handle_delivery(channel, method, header, body):\n print(body)", "def ceilometer_callback(self, ch, method, properties, body):\n payload = json.loads(body)\n try:\n message_body = json.loads(payload['oslo.message'])\n samples = message_body['args']['data']\n #print \"--------------------------------------------------\"\n self.pool.spawn_n(self.zabbix_sender.consume_samples,samples)\n except Exception,e:\n log.warn(str(e))", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def _handle_message(self, msg):\n self.event('message', msg)", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n 
# Overwrite this function to do something with the message!", "def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )", "def MessageHandlerMethod(**kwargs):\n data: dict = kwargs['data']\n bus: AbstractPikaBus = kwargs['bus']\n payload: dict = kwargs['payload']\n print(payload)\n if payload['reply']:\n payload['reply'] = False\n bus.Reply(payload=payload)", "def test_send(self):\n # Required to get useful test names\n super(TestCisPlyOutput_local, self).test_send()", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def get_message():\n # Only run xray in the AWS Lambda environment\n if runs_on_aws_lambda():\n xray_subsegment = xray_recorder.current_subsegment()\n xray_subsegment.put_annotation(\"key\", \"value\")\n # Sample metadata\n # subsegment.put_metadata(\"operation\", \"metadata\", \"python object/json\")\n xray_recorder.end_subsegment()", "def handleMessage(self, message):\n\n if 'started' in message.tags:\n self.handleMessage_started(message)\n\n elif 'deployment_computed' in message.tags:\n self.handleMessage_computed(message)\n\n elif 'deployment_end' in message.tags:\n self.handleMessage_end(message)", "def agent_message(self, in_message):\n\n logging.debug(\"Received %s\" % in_message)\n\n if in_message.startswith(\"start_testing\"):\n self._start_testing()\n\n elif in_message.startswith(\"finish_testing\"):\n epoch = int(in_message.split(\" \")[1]) \n self._finish_testing(epoch)\n else:\n return \"I don't know how to respond to your message\"", "def test_handle_request_post(self):\n # setup\n incoming_message = cast(\n HttpMessage,\n self.build_incoming_message(\n message_type=HttpMessage,\n performative=HttpMessage.Performative.REQUEST,\n to=self.skill_id,\n sender=self.sender,\n method=self.post_method,\n url=self.url,\n version=self.version,\n headers=self.headers,\n body=self.body,\n ),\n )\n\n # operation\n with patch.object(self.logger, \"log\") as mock_logger:\n self.http_handler.handle(incoming_message)\n\n # after\n self.assert_quantity_in_outbox(1)\n\n mock_logger.assert_any_call(\n logging.INFO,\n \"received http request with method={}, url={} and body={!r}\".format(\n incoming_message.method, incoming_message.url, incoming_message.body\n ),\n )\n\n # _handle_post\n message = self.get_message_from_outbox()\n has_attributes, error_str = self.message_has_attributes(\n actual_message=message,\n message_type=HttpMessage,\n performative=HttpMessage.Performative.RESPONSE,\n to=incoming_message.sender,\n sender=incoming_message.to,\n version=incoming_message.version,\n status_code=200,\n status_text=\"Success\",\n headers=incoming_message.headers,\n body=self.body,\n )\n assert has_attributes, error_str\n\n mock_logger.assert_any_call(\n logging.INFO,\n f\"responding with: {message}\",\n )", "def 
test_handle_weather_message_calls_current(self):\n pass", "def test_dispatch_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_inbound(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.inbound'), [msg])", "def consume(self, handler) -> None:\n pass # pragma: no cover", "def event_handler(self, response):\n pass", "def test_handler(self):\n mock_sqr: SequenceRun = SequenceRunFactory()\n\n workflow: dict = bcl_convert.handler({\n 'gds_volume_name': mock_sqr.gds_volume_name,\n 'gds_folder_path': mock_sqr.gds_folder_path,\n 'seq_run_id': mock_sqr.run_id,\n 'seq_name': mock_sqr.name,\n }, None)\n\n logger.info(\"-\" * 32)\n logger.info(\"Example bcl_convert.handler lambda output:\")\n logger.info(json.dumps(workflow))\n\n # assert bcl convert workflow launch success and save workflow run in db\n workflows = Workflow.objects.all()\n self.assertEqual(1, workflows.count())", "def test_filter(self, logger: Logger, mocker: MockerFixture) -> None:\n task = OctaveTask()\n task.session_id = \"123\"\n handler = OutputHandler(task)\n logger.addHandler(handler)\n\n send_func = mocker.patch(\"matl_online.tasks.OutputHandler.send\")\n\n logger.warning(\"warning\")\n logger.error(\"error\")\n logger.debug(\"debug\")\n\n assert len(handler.contents) == 0\n send_func.assert_not_called()", "def test_dispatch_raw(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [])\n msg = msg_helper.make_inbound('message')\n yield worker_helper.dispatch_raw('fooconn.foo', msg)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.foo'), [msg])", "def on_message(channel, method_frame, header_frame, body):\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n publisher = Publisher()\n\n message = body.decode(\"utf8\")\n print(message)\n logging.info(message)\n\n response = format_response(code=\"ERR400\", status=\"error\", message=\"\", files_ids=[], action=\"\")\n\n try:\n data = json.loads(message)\n action = data[\"action\"]\n if action in actions:\n threading.Thread(target=actions[action], args=(data, )).start()\n else:\n response[\"action\"] = action\n response[\"message\"] = \"This action does not exist on server.\"\n publisher.send_message(json.dumps(response))\n\n except json.JSONDecodeError:\n response[\"code\"] = \"ERR500\"\n response[\"message\"] = error = \"Invalid JSON file\"\n print(error)\n publisher.send_message(json.dumps(response))", "def handle_messages():\n print(\"Handling Messages\")\n payload = request.get_data()\n for sender, incoming_message, payload in messaging_events(payload):\n # The following statements check which options the user selected\n # Response handler contains \"templates\" for the various messages\n user_name = get_full_name(sender, PAT)\n if \"hei\" in incoming_message.lower() or \"hallo\" in incoming_message.lower() or \"yo\" in incoming_message.lower()\\\n or \"hi\" in incoming_message.lower():\n send_message(PAT, send_message(PAT, response_handler.greeting_message(sender, user_name)))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n elif payload == 
\"change subject\" or \"change subject\" in incoming_message.lower():\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"help\" in incoming_message.lower():\n\n send_message(PAT, response_handler.text_message(sender, \"Are you lost ...? \"))\n send_message(PAT, response_handler.text_message(sender, \"You can change course at any time simply by \"\n \"writing the course code on the form: [TAG][CODE]\\n\"\n \"ex. TDT4120\"))\n send_message(PAT, response_handler.text_message(sender, \"If you want to see your currently selected course \"\n \"and other information type 'Status'.\"))\n send_message(PAT, response_handler.text_message(sender, \"You can also type 'Hei' or 'Hallo' at any time \"\n \"to receive a greeting that shows your options.\"))\n send_message(PAT, response_handler.text_message(sender, \"Here is a list of commands you can use. This is \"\n \"recommended for the experienced user:\\n\"\n \"Change subject\\n\"\n \"Give feedback\\n\"\n \"How did today's lecture go?\\n\"\n \"Get schedule\\n\"\n \"Get info\\n\"\n \"All lectures\\n\"\n \"A specific lecture\\n\"\n \"You can type most of the commands in chat. Just \"\n \"give it a try!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"status\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n user = get_full_name(sender, PAT)\n lecture_id_current = lecture_methods.get_lecture_from_date(year, week, day, subject)\n lecture = feedback_methods.get_lecture_object(lecture_id_current)\n\n if user_methods.has_user(user_name):\n sub = user_methods.get_subject_from_user(user_name) + \" : \" + \\\n subject_info.course_name(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.user_info(sender, user_name, sub))\n if feedback_methods.user_has_feedback_for_lecture(user, lecture):\n send_message(PAT, response_handler.text_message(sender, \"You have given feedback for \"\n + subject + \"today. Well done! Be proud of \"\n \"yourself and remember to check in \"\n \"tomorrow.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please press 'Give Feedback' or write it in the \"\n \"chat to do so.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"We seem to not be able to detect you in the database. 
\"\n \"Please report this to the staff!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n # Checks if the subject has lectures in the database, adds them if not.\n\n elif payload == \"give feedback\" or \"give feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.give_feedback_choice(sender))\n\n elif payload == \"lecture speed\" or \"lecture speed\" in incoming_message.lower():\n\n subject = user_methods.get_subject_from_user(user_name)\n\n if lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added.\"))\n send_message(PAT, response_handler.lec_feed(sender))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" does not exist. Likely due to the subject having \"\n \"no lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif payload == \"evaluation_questions\" or \"lecture questions\" in incoming_message.lower():\n # User wants to give feedback for a lecture.\n subject = user_methods.get_subject_from_user(user_name)\n payload = \"evaluation_questions\" # if user typed 'lecture questions' the payload will be None\n\n if lecture_methods.check_lecture_in_db(subject):\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because there \"\n \"is no lecture today, or because you have already \"\n \"given feedback for this lecture.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n schedule = subject_info.get_schedule(subject)\n if schedule:\n database_entry = subject_info.gather_lecture_information(schedule)\n lecture_methods.add_lecture_information_db(database_entry)\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \" were not in the database. It is now added\"))\n if feedback_methods.user_can_give_feedback_evaluation(user_name,\n user_methods.get_subject_from_user(\n user_name)):\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Feedback can not be given either because \"\n \"there is no lecture today, or because you\"\n \" have already given feedback for this lecture.\"\n \"\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n send_message(PAT, response_handler.text_message(sender, \"Lectures for the subject \" + subject +\n \"does not exist. 
Likely due to the subject having \"\n \"no \"\n \"lectures this semester.\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n\n elif \"too slow\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '0'\n message_response = \"too slow\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"it's all right\" in incoming_message.lower() or \"its all right\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '1'\n message_response = \"It's all right\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif \"too fast\" in incoming_message.lower():\n # Adds feedback if the subject has a lecture on the given day\n # and if the user has not already given feedback\n payload = '2'\n message_response = \"too fast\"\n if feedback_methods.add_entry(user_name, user_methods.get_subject_from_user(user_name), payload):\n send_message(PAT, response_handler.text_message(sender, \"You chose: \" + \"'\" +\n message_response + \"'\" + \"\\nFeedback Received!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"There is either no lecture active in the selected\"\n \" subject, or you have already given feedback\"\n \" to the active lecture.\\nFeedback denied!\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif (\"today\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"todays\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()) or \\\n (\"today's\" in incoming_message.lower() and \"lecture\" in incoming_message.lower()):\n # Gathers the correct information about the date.\n year = feedback_methods.get_year()\n week = feedback_methods.get_week()\n day = feedback_methods.get_day()\n subject = user_methods.get_subject_from_user(user_name)\n # Gathers the feedback from today's lecture:\n if lecture_methods.check_lecture_in_db(subject):\n feedback_list = feedback_methods.get_single_lecture_feed(year, week, day, 
subject)\n if feedback_list[0] is not None:\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"No feedback for the given lecture on this date. \"\n \"Please try again at a later date.\"))\n send_message(PAT,\n response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.text_message(sender, \"No lecture present in the database. \"\n \"Please provide some feedback and try again.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get schedule\" or \"get schedule\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n schedule = subject_info.printable_schedule(subject_info.get_schedule(subject))\n if len(schedule) > 640:\n msg_list = message_split.message_split(schedule)\n for msg in msg_list:\n print(msg)\n send_message(PAT, response_handler.text_message(sender, msg))\n else:\n send_message(PAT, response_handler.text_message(sender, schedule))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get info\" or \"get info\" in incoming_message.lower():\n subject = user_methods.get_subject_from_user(user_name)\n send_message(PAT, response_handler.text_message(sender,\n subject_info.printable_course_info(\n subject_info.get_course_json(subject))))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"get feedback\" or \"get feedback\" in incoming_message.lower():\n send_message(PAT, response_handler.get_feedback_specific_or_all(sender))\n\n elif payload == \"all_lectures\" or \"all lectures\" in incoming_message.lower():\n # The user wants to see feedback for all lectures in the selected subject\n subject = user_methods.get_subject_from_user(user_name)\n if not lecture_methods.check_lecture_in_db(subject):\n send_message(PAT, response_handler.text_message(sender, \"Course has no feedback.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n feedback, feedbackevaluation = feedback_methods.get_all_subject_feed(subject)\n if len(feedback) > 0:\n percent_list = bot_feedback.generate_percent_for_speed(feedback)\n send_message(PAT, response_handler.all_feedback_speed(sender, subject, percent_list))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture speed.\"))\n if len(feedbackevaluation) > 0:\n percent_list_questions = bot_feedback.generate_percent_for_questions(feedbackevaluation)\n\n send_message(PAT, response_handler.all_feedback_questions(sender, subject, percent_list_questions))\n else:\n send_message(PAT,\n response_handler.text_message(sender, \"Course has no feedback for lecture questions.\"))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload == \"a_specific_lecture\" or \"a specific lecture\" in incoming_message.lower():\n # Let the user choose what year to get feedback from.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n if len(years) > 0:\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n else:\n 
send_message(PAT, response_handler.text_message(sender, 'No feedback for the selected subject.'))\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif payload is not None:\n # Underneath are check that use .split() on the payload.\n if \"evaluation_questions\" in payload.split()[0]:\n payload_split = payload.split()\n if len(payload_split) == 1:\n # 1st question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 2:\n # 2nd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 3:\n # 3rd question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 4:\n # 4th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 5:\n # 5th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 6:\n # 6th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 7:\n # 7th question\n send_message(PAT, response_handler.lecture_feedback_questions(sender, payload))\n elif len(payload_split) == 8:\n # store feedback.\n subject = user_methods.get_subject_from_user(user_name)\n if feedback_methods.add_feedback_evaluation(user_name, subject, int(payload_split[1]),\n int(payload_split[2]), int(payload_split[3]),\n int(payload_split[4]), int(payload_split[5]),\n int(payload_split[6]), int(payload_split[7])):\n # Storing the feedback succeeded.\n send_message(PAT, response_handler.text_message(sender, 'Feedback received!'))\n send_message(PAT, response_handler.has_course(sender, subject))\n else:\n # Storing the feedback failed.\n send_message(PAT, response_handler.text_message(sender,\n \"There is either no lecture active in the \"\n \"selected subject, or you have already given \"\n \"feedback to the active lecture.\\n Feedback \"\n \"denied!\"))\n send_message(PAT, response_handler.has_course(sender, subject))\n pass\n\n elif \"get_lecture_feedback_year\" in payload.split()[0]:\n # Let the user choose what semester to get feedback from.\n semesters = []\n if lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 1, 17, int(payload.split()[1])):\n semesters.append('Spring')\n elif lecture_feedback_db_methods.check_lecture_semester(user_methods.get_subject_from_user(user_name),\n 32, 49, int(payload.split()[1])):\n semesters.append('Fall')\n if len(semesters) > 0:\n send_message(PAT, response_handler.get_feedback_semester(sender, payload.split()[1], semesters))\n else:\n # Take the user one step up to choose a different year.\n years = lecture_feedback_db_methods.get_year(user_methods.get_subject_from_user(user_name))\n send_message(PAT, response_handler.get_feedback_year(sender, years))\n\n elif \"get_lecture_feedback_semester\" in payload.split()[0]:\n # Let the user choose what weeks to get feedback from.\n\n week_list = lecture_feedback_db_methods.get_lecture_weeks(user_methods.get_subject_from_user(user_name),\n int(payload.split()[1]), payload.split()[2])\n if len(week_list) > 8:\n send_message(PAT, response_handler.get_feedback_month(sender, payload.split()[1], week_list))\n else:\n send_message(PAT, response_handler.get_feedback_week(sender, payload.split()[1], week_list))\n\n elif \"get_lecture_feedback_month\" in payload.split()[0]:\n 
# Let the user select week\n week_list = []\n payload_split = payload.split()\n for i in range(2, len(payload_split)):\n week_list.append(int(payload_split[i].rstrip(',')))\n\n send_message(PAT, response_handler.get_feedback_week(sender, payload_split[1], week_list))\n\n elif \"get_lecture_feedback_week\" in payload.split()[0]:\n # Lets the user select day\n lecture_days = lecture_feedback_db_methods.get_day_of_lecture_in_week(\n user_methods.get_subject_from_user(user_name), payload.split()[1], payload.split()[2])\n\n send_message(PAT, response_handler.get_feedback_day(sender, payload.split()[1], lecture_days,\n payload.split()[2]))\n\n elif \"get_lecture_feedback_day\" in payload.split()[0]:\n\n subject = user_methods.get_subject_from_user(user_name)\n # Gives the user feedback from the selected day.\n feedback_list = feedback_methods.get_single_lecture_feed(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n feedback_questions_list = feedback_methods.get_single_lecture_feedback_questions(payload.split()[1],\n payload.split()[2],\n payload.split()[3],\n subject)\n\n if len(feedback_list[1]) > 0: # Checks if there is feedback in the variable.\n send_message(PAT, response_handler.present_single_lecture_feedback(sender, feedback_list))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture speed.\"))\n if len(feedback_questions_list) > 0: # Checks if there is feedback in the variable.\n feedback_questions = bot_feedback.generate_percent_for_questions(feedback_questions_list)\n send_message(PAT,\n response_handler.present_single_lecture_feedback_questions(sender, feedback_questions))\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"This lecture has no feedback for lecture \"\n \"questions.\"))\n\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n elif ime_data_fetch.subject_exists_boolean(incoming_message.upper().split()[0]):\n if user_methods.has_user(user_name):\n user_methods.add_subject(user_name, incoming_message.split()[0])\n else:\n user_methods.add_user(user_name, incoming_message.split()[0])\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n\n else:\n send_message(PAT, response_handler.text_message(sender,\n \"Type 'help' to see what you can do with L.I.M.B.O.\\n If \"\n \"you tried to enter a subject-code and got this message,\"\n \" you either misspelled it or the subject you are looking \"\n \"for is not a subject at NTNU.\"))\n if user_methods.has_user(user_name):\n send_message(PAT, response_handler.has_course(sender, user_methods.get_subject_from_user(user_name)))\n else:\n send_message(PAT, response_handler.no_course(sender))\n\n return \"ok\"", "def message_received_handler(pdu, **kwargs):\n\n logging.warning('Message received handler (Override me)')", "def _process_message(self, obj):\n pass", "def callback(ch, method, properties, body):\n requestParams = json.loads(body.decode('utf-8'))\n # print(\"inside the callback\")\n arg1 = int(requestParams[0])\n arg2 = int(requestParams[1])\n result = whaleClassifier.test(arg1, arg2)\n # what this does it publish the RESULT to the exchange (as producers of content \n # cannot send stuff directly to queues, they send to exchanges and then exchanges \n # send to queues. 
Note Exchange='' is default exchange which then sends to the\n # queue that is listed on the ROUTING_KEY argument.)\n ch.basic_publish(exchange='', \n routing_key=results_queue, \n body=json.dumps(result),\n properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n ))\n # ch.basic_ack(delivery_tag=method.delivery_tag) #need this line so that we don't resend this same message again the next time\n # we start up this script. Which eventually clogs up memory", "def test_message_user():", "def handle(self, data):\n pass", "def onMessage(self, payload, isBinary):", "def testIgnoreMessage(self):\n\n self.logger.accept('c',self.logger.foo)\n self.logger.accept('c',self.logger.bar)\n self.logger.ignore('c')\n messager.send('c')\n # No methods should have been called.\n self.assertEqual(self.logger.log,[])", "def __call__(self, test_case, response, **assertions):\n self.assert_x_sendfile_response(test_case, response)\n for key, value in iteritems(assertions):\n assert_func = getattr(self, 'assert_%s' % key)\n assert_func(test_case, response, value)", "def callback():\n signature = request.headers['X-Line-Signature']\n body = request.get_data(as_text=True)\n logger.info('Request body: %s', body)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n logger.exception(\n 'Invalid signature. Please check your channel access token/channel secret.')\n abort(400)\n\n return 'OK'", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "def handle_request(self):\n try:\n content_type = self.headers.get('content-type')\n\n if content_type != 'application/json':\n self.write_empty_response(400)\n return\n\n content_len = int(self.headers.get('content-length', 0))\n\n # If content was provided, then parse it\n if content_len > 0:\n message = json.loads(self.rfile.read(content_len))\n else:\n self.write_empty_response(400)\n return\n\n helper.log_info(f'Incoming POST from {self.client_address[0]}: {message}')\n\n aspect_type = message['aspect_type']\n object_id = message['object_id']\n object_type = message['object_type']\n # make owner_id a str to avoid issues with athlete_checkpoint dict\n owner_id = str(message['owner_id'])\n\n athlete_checkpoint = helper.get_check_point(\"webhook_updates\") or {}\n\n # We only care about activity updates. 
New activities are pulled in automatically as strava_api input restarts.\n if aspect_type == 'update' and object_type == 'activity':\n if owner_id not in athlete_checkpoint:\n athlete_checkpoint[owner_id] = []\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n else:\n athlete_checkpoint[owner_id].append(object_id)\n helper.save_check_point(\"webhook_updates\", athlete_checkpoint)\n helper.log_debug(f'webhooks_updates checkpoint: {helper.get_check_point(\"webhook_updates\")}')\n\n # Send data to Splunk\n data = json.dumps(message)\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n\n # Strava API expects a 200 response\n self.write_empty_response(200)\n\n # Restart strava_api inputs to pull in the data unless it's a delete, as the input doesn't do anything with that anyway.\n if aspect_type != 'delete':\n self.restart_input('strava_api', self.SESSION_KEY)\n helper.log_info(f'Reloading Strava API input to retrieve updated activity {object_id} for athlete {owner_id}.')\n\n except Exception as ex:\n helper.log_error(f'Something went wrong in handle request: {ex}')", "def __call__ (self, event, payload):\n\n logging.info ('\\n\\nReceived Event: '+ str(event) + '\\nPayload: ' + str(payload))\n\n try:\n\n if event == 'AlertHandler:StartDebug':\n logging.getLogger().setLevel(logging.DEBUG)\n logging.info ('Logging level changed to DEBUG Mode')\n\n elif event == 'AlertHandler:EndDebug':\n logging.getLogger().setLevel(logging.INFO)\n logging.info ('Logging level changed to INFO Mode')\n \n elif event in self.args['AlertEvent'].keys():\n handler = retrieveHandler(self.args['AlertEvent'][event],'AlertHandler')\n handler(payload)\n\n except Exception, ex: \n \n logging.error('Exception Caught while handling the event: ' + str(event) + ' payload: ' + str(payload) ) \n logging.error(str(ex))\n\n return", "def handler(event, context):\n pub_sub_message = base64.b64decode(event['data']).decode('utf-8')\n\n if pub_sub_message == 'executor':\n LOGGER.debug('POST: %s', EVENTS_EXECUTION_ENDPOINT)\n response = requests.post(EVENTS_EXECUTION_ENDPOINT, json={'type': 'POLICY'},\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n elif pub_sub_message == 'validator':\n LOGGER.debug('POST: %s', EVENTS_VALIDATION_ENDPOINT)\n response = requests.post(EVENTS_VALIDATION_ENDPOINT,\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n else:\n LOGGER.warn('Unexpected message from PubSub: %s', pub_sub_message)\n return", "def test_dispatch_outbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.outbound'), [])\n msg = msg_helper.make_outbound('message')\n yield worker_helper.dispatch_outbound(msg, 'fooconn')\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.outbound'), [msg])", "def on_delivered(self, frame):\n pass", "def test_create_message_with_succes(self, mock_client): \n\n event = {\n 'operation': 'createMessage', \n 'arguments': {\n 'template': 'my-sample-geofence-id',\n 'input': {\n 'service': 'APNS',\n 'action': 'OPEN_APP',\n 'title': 'Sample Title',\n 'body': 'This is a sample body'\n }\n }\n }\n\n response = {\n \"Arn\": f'arn:aws:mobiletargeting:eus-east-1:SOME_ACCOUNT_ID:templates/my-sample-geofence-id/PUSH',\n \"RequestID\": 
\"some-request-id\",\n \"Message\": 'some message' \n }\n\n mock_client().create_push_template.return_value = response\n response = manageMessages.handler(event, None)\n\n self.assertTrue(response)\n self.assertEqual(response['status'], 'MESSAGE_CREATED')", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "def handle_send_message(self, message_header, message):\n pass", "def verify_as_target(self, message_handler):", "def handle(self, rsm_ctx):\n pass", "def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))", "def test_sample_status_custom(self):\n self.app = self.make_app(argv = ['report', 'sample_status', self.examples[\"project\"], self.examples[\"flowcell\"], '--debug', '--customer_reference', 'MyCustomerReference', '--uppnex_id', 'MyUppnexID', '--ordered_million_reads', '10', '--phix', '{1:0.1, 2:0.2}'],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n s_param_map = {x[\"scilifelab_name\"]:x for x in data[\"s_param\"]}\n self.assertEqual(s_param_map['P001_101_index3']['uppnex_project_id'], 'MyUppnexID')\n self.assertEqual(s_param_map['P001_101_index3']['customer_reference'], 'MyCustomerReference')\n self.assertEqual(s_param_map['P001_101_index3']['ordered_amount'], 10)", "def testWholeRequest(self):\n body = self.protocol.encode_message(self.request_message)\n self.Reinitialize(input=body,\n content_type=self.content_type)\n self.factory.add_request_mapper(self.mapper())\n self.service_handler.handle('POST', '/my_service', 'method1')\n VerifyResponse(self,\n self.service_handler.response,\n '200',\n 'OK',\n self.protocol.encode_message(self.response_message),\n self.content_type)", "def commands_coverage_server():\n try:\n coverage()\n coverage_server()\n except KeyboardInterrupt:\n logger.info(\"Command canceled\")", "def test_send(self):\n # Required to get useful test names\n super(TestCisObjOutput_local, self).test_send()", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "async def testsay(self, ctx, *, message):\n await ctx.send(message)", "def test_message_group():", "def obj_received(self, obj):\n\n # TODO do something like handler registry\n\n if isinstance(obj, pb.Ping):\n self.handle_ping(obj)\n\n elif isinstance(obj, pb.Pong):\n self.handle_pong(obj)\n\n elif isinstance(obj, pb.ACS):\n if self.factory.config.failure != 'omission':\n res = self.factory.acs.handle(obj, self.remote_vk)\n self.process_acs_res(res, obj)\n\n elif isinstance(obj, pb.TxReq):\n self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)\n\n elif 
isinstance(obj, pb.TxResp):\n self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationReq):\n self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationResp):\n self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.SigWithRound):\n self.factory.tc_runner.handle_sig(obj, self.remote_vk)\n\n elif isinstance(obj, pb.CpBlock):\n self.factory.tc_runner.handle_cp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Cons):\n self.factory.tc_runner.handle_cons(obj, self.remote_vk)\n\n elif isinstance(obj, pb.AskCons):\n self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)\n\n # NOTE messages below are for testing, bracha/mo14 is normally handled by acs\n\n elif isinstance(obj, pb.Bracha):\n if self.factory.config.failure != 'omission':\n self.factory.bracha.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Mo14):\n if self.factory.config.failure != 'omission':\n self.factory.mo14.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Dummy):\n logging.info(\"NODE: got dummy message from {}\".format(b64encode(self.remote_vk)))\n\n else:\n raise AssertionError(\"invalid message type {}\".format(obj))\n\n self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()", "def setUp(self):\n self.hex_data = \"0251112233445566778899a1a2a3a4a5a6a7a8a9aaabacadae\"\n self.message_id = 0x51\n self.bytes_data = bytearray(unhexlify(self.hex_data))\n self.address = Address(\"112233\")\n self.target = Address(\"445566\")\n self.flags = MessageFlags(0x77)\n self.cmd1 = int(0x88)\n self.cmd2 = int(0x99)\n self.user_data = UserData(unhexlify(\"a1a2a3a4a5a6a7a8a9aaabacadae\"))\n\n self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_data)\n set_log_levels(\n logger=\"info\",\n logger_pyinsteon=\"info\",\n logger_messages=\"info\",\n logger_topics=False,\n )", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def receive_message(self, message):", "def handle_inbound_message():\n data = json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sms_call_me(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n elif \"media\" in data[0][\"message\"]:\n handle_inbound_media_mms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"], data[0][\"message\"][\"media\"])\n else:\n handle_inbound_sms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n else:\n print(data)\n return \"\"", "def callback(ch, method, properties, body):\n print(f\" [x] Received {str(body)} kW.\")\n\n try:\n timestamp = properties.timestamp\n current_time = datetime.utcfromtimestamp(timestamp).replace(\n tzinfo=timezone.utc\n )\n except AttributeError:\n # If we don't get a timestamp from the broker, add a timestamp here.\n current_time = datetime.now().replace(tzinfo=timezone.utc)\n\n pv_photovoltaic = generate_pv_output(current_time)\n\n report_item = PVMeterReportItem(\n timestamp=current_time.isoformat(),\n pv_meter=int(body),\n pv_photovoltaic=pv_photovoltaic,\n )\n generate_report(report_item)\n\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def incoming(self, 
msg):\n hdr = msg.header\n\n # Signals:\n if hdr.message_type is MessageType.signal:\n key = (hdr.fields.get(HeaderFields.path, None),\n hdr.fields.get(HeaderFields.interface, None),\n hdr.fields.get(HeaderFields.member, None)\n )\n cb = self.signal_callbacks.get(key, None)\n if cb is not None:\n cb(msg.body)\n return\n\n # Method returns & errors\n reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)\n reply_handle = self.awaiting_reply.pop(reply_serial, None)\n if reply_handle is not None:\n if hdr.message_type is MessageType.method_return:\n reply_handle.set_result(msg.body)\n return\n elif hdr.message_type is MessageType.error:\n reply_handle.set_exception(DBusErrorResponse(msg))\n return\n\n if self.on_unhandled:\n self.on_unhandled(msg)", "def test_send(self, logger: Logger, mocker: MockerFixture) -> None:\n identifier = \"123\"\n task = OctaveTask()\n task.session_id = identifier\n handler = OutputHandler(task)\n logger.addHandler(handler)\n\n emit = mocker.patch(\"matl_online.tasks.socket.emit\")\n\n logger.info(\"test1\")\n logger.info(\"[STDERR]error\")\n handler.send()\n\n assert emit.called == 1\n assert len(emit.call_args) == 2\n\n event, payload = emit.call_args[0]\n\n expected_data = {\n \"session\": identifier,\n \"data\": [\n {\"type\": \"stdout\", \"value\": \"test1\"},\n {\"type\": \"stderr\", \"value\": \"error\"},\n ],\n }\n\n assert payload == expected_data\n assert event == \"status\"\n assert emit.call_args[1].get(\"room\") == identifier", "def _process_msg(cls, msg):\n raise NotImplementedError", "def test_get_request_output(self):\n pass", "def on_message(self, unused_channel, basic_deliver, properties, body):\n\n start = time.time()\n self.invocations += 1\n\n logger.info(\n u\"[{}] received message #{} from exchange {}: {}\".format(self.bot_id,\n basic_deliver.delivery_tag, self.exchange,\n body.decode('utf-8')))\n\n self.statsd.incr(self.statsd_prefix + \"message.receive\")\n\n # Ack the message before processing to tell rabbit we got it.\n # TODO before sending ack we should persist the message in a local queue to avoid the possibility of losing it\n self.acknowledge_message(basic_deliver.delivery_tag)\n\n try:\n\n try:\n json_body = json.loads(body)\n\n except ValueError as ve:\n logger.exception(\n \"[{}] Invalid JSON received from exchange: {} error: {} msg body: []\".format(self.bot_id,\n self.exchange,\n ve.message, body))\n raise\n\n else:\n response_messages = self.callback_func(json_body)\n\n if response_messages is None:\n response_messages = []\n\n logger.info(\"[{}] Sending {} response messages\".format(self.bot_id, len(response_messages)))\n\n for message in response_messages:\n self._channel.basic_publish(exchange=message.get('exchange', self.exchange),\n routing_key=message.get('queue', self.queue_name),\n body=message.get('body'))\n logger.info(\"[{}] published message {}\".format(self.bot_id, message))\n self.statsd.incr(self.statsd_prefix + \"message.publish\")\n\n except Exception as e:\n msg = \"[{}] Unexpected error - {}, message {}, from exchange {}. 
sending to error queue {}\"\n self.statsd.incr(self.statsd_prefix + \"message.error\")\n logger.exception(msg.format(self.bot_id, e, body, self.exchange, self.error_queue_name))\n self._channel.basic_publish(exchange='',\n routing_key=self.error_queue_name,\n body=body)\n\n\n exec_time_millis = int((time.time() - start) * 1000)\n self.total_execution_time += exec_time_millis\n\n logger.debug(\"Consumer {0} message handling time: {1}ms\".format(self.consumer_id, exec_time_millis))\n\n # if we have processed 100 messages, log out the average execution time at INFO then reset the total\n if self.invocations % 100 == 0:\n average_execution_time = self.total_execution_time / 100\n logger.info(\"Consumer {0} Avg message handling time (last 100): {1}ms\".format(self.consumer_id, average_execution_time))\n self.total_execution_time = 0\n\n self.statsd.timing(self.statsd_prefix + 'message.process.time', int((time.time() - start) * 1000))", "def test_base_logging(self):\n\n n = nodes.BaseNode(log_output=True)\n n.channel = FakeChannel(self.loop)\n\n m = generate_msg(message_content='test')\n\n ret = self.loop.run_until_complete(n.handle(m))\n\n # Check return\n self.assertTrue(isinstance(ret, message.Message))\n self.assertEqual(ret.payload, 'test', \"Base node not working !\")\n self.assertEqual(n.processed, 1, \"Processed msg count broken\")\n\n n.channel.logger.log.assert_any_call(10, 'Payload: %r', 'test')\n n.channel.logger.log.assert_called_with(10, 'Meta: %r', {'question': 'unknown'})", "def handle(self):\n self.app.logger.info('==== handle github event: %s', self.event)\n # self.app.logger.info('data send: %s', json.dumps(self.data, indent=2))\n if self.event == 'ping':\n return {'msg': 'Hi!'}\n else:\n task_match = []\n repo_config = self.get_repo_config()\n if repo_config:\n for task_config in repo_config['tasks']:\n event_hit = False\n if self.event == 'push':\n event_hit = self._is_task_push(task_config)\n elif self.event == 'pull_request':\n event_hit = self._is_task_pull_request(task_config)\n if event_hit:\n task_match.append(task_config)\n # work start execute here...\n for task in task_match:\n self.app.logger.info(\"event hit, start tasks under %s/%s...\", self.repo_meta['owner'], self.repo_meta['name'])\n self._jenkins_build(task)\n pass\n return \"OK\"", "def test_filter_messages(self):\n pass", "def setUp(self):\n h = self.MyTestHandler()\n h.request = Request.blank('/rpc/')\n h.response = Response()\n self.handler = h", "def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)", "def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)", "def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)", "def handle_msg(self, state_id, msg):\n pass", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "async def _response_handler(self):", "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, 
sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)" ]
[ "0.6550436", "0.6415692", "0.6320498", "0.60273343", "0.60133165", "0.60033965", "0.6000999", "0.59604585", "0.595996", "0.5894243", "0.58883953", "0.58782285", "0.58495873", "0.5848556", "0.5842221", "0.5829234", "0.58113396", "0.5810897", "0.57167685", "0.57069564", "0.5706161", "0.5698894", "0.56914896", "0.56771106", "0.5666061", "0.56043994", "0.56010646", "0.56002337", "0.558659", "0.55622864", "0.55611587", "0.5559642", "0.5556685", "0.55503064", "0.55351305", "0.55343854", "0.5522892", "0.55211914", "0.5520903", "0.5505625", "0.5474699", "0.54700255", "0.54620135", "0.5458619", "0.5437926", "0.5433857", "0.5432834", "0.54267186", "0.542601", "0.5425983", "0.54205334", "0.5411932", "0.5403868", "0.53916717", "0.53839374", "0.53775245", "0.53764075", "0.53755134", "0.53749615", "0.5370837", "0.53660035", "0.5365036", "0.5362513", "0.535948", "0.5347617", "0.5343025", "0.5340501", "0.5339317", "0.53389806", "0.533551", "0.53353685", "0.533126", "0.5327873", "0.5321106", "0.53175485", "0.53175485", "0.53169966", "0.5310814", "0.5306644", "0.52980655", "0.5297261", "0.5294621", "0.5291826", "0.5291036", "0.52905077", "0.52761286", "0.5270015", "0.52665377", "0.5264968", "0.52505183", "0.52504665", "0.52502227", "0.5248917", "0.52374893", "0.52315503", "0.52315503", "0.5227989", "0.52258396", "0.52220947", "0.52177423" ]
0.5589948
28
Encodes the input sequence and returns the hidden state from the last step of the encoder RNN.
def encode(self, x):
    _, hid = self.encoder(x)  #All RNN classes output a tuple of 2 objects: the output of the RNN first and the hidden state from the last item in
    return hid                #the input sequence second. We're only interested in the hidden state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _encode(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)", "def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)", "def _encode_back(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)", "def encode(self,x,x_len):\n\n ## Check to see if batch_size parameter is fixed or base on input batch\n cur_batch_size = x.size()[1]\n encode_init_state = self.encoder.initialize_hidden_state(cur_batch_size)\n encoder_state, encoder_outputs = self.encoder.forward(x, encode_init_state, x_len)\n\n return encoder_outputs, encoder_state", "def _encode(self):\n with tf.variable_scope('encoding'):\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\n tf.get_variable_scope().reuse_variables()\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)", "def _add_input_encoder(self, inputs, seq_len):\n with tf.variable_scope(\"encoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, 
state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n\n return fw_states, bw_states, final_fw, final_bw", "def encode_input(self, x_tensor, inp_lens_tensor):\r\n input_emb = self.input_emb.forward(x_tensor)\r\n enc_output_each_word, enc_context_mask, enc_final_states = self.encoder(input_emb, inp_lens_tensor)\r\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\r\n # print('lest go', enc_final_states_reshaped[1].shape)\r\n return enc_output_each_word, enc_context_mask, enc_final_states_reshaped", "def encode(self, inputs, masks):\n with tf.variable_scope(\"encoder\") as scope_encoder:\n #compute sequence length\n sequence_lengths = tf.reduce_sum(masks, axis = 1) \n #create a forward cell\n fw_cell = tf.contrib.rnn.LSTMCell(self.size)\n\n #pass the cells to bilstm and create the bilstm\n bw_cell = tf.contrib.rnn.LSTMCell(self.size)\n output, final_state = tf.nn.bidirectional_dynamic_rnn(fw_cell, \\\n bw_cell, inputs, \\\n sequence_length = sequence_lengths, \\\n dtype = tf.float32, \\\n parallel_iterations = 256)\n output_lstm = tf.concat([output[0], output[1]], axis = -1)\n final_state_lstm = tf.concat([final_state[0], final_state[1]], axis = -1)\n return output_lstm, final_state_lstm", "def _add_encoder(self, encoder_inputs, seq_len):\n with tf.variable_scope('encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs) # concatenate the forwards and backwards states\n return encoder_outputs, fw_st, bw_st", "def encode(self, state):\n raise NotImplementedError", "def build_encoder(self):\n with tf.variable_scope(\"encoder\") as scope:\n length1 = tf.to_int32(tf.reduce_sum(self.encode_mask1, 1), name=\"length1\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n thought_vectors1 = tf.concat(states, 1, name=\"thought_vectors1\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors1 = tf.identity(state, name=\"thought_vectors1\")\n \n scope.reuse_variables()\n\n length2 = tf.to_int32(tf.reduce_sum(self.encode_mask2, 1), name=\"length2\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = 
self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n thought_vectors2 = tf.concat(states, 1, name=\"thought_vectors2\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors2 = tf.identity(state, name=\"thought_vectors2\")\n\n self.thought_vectors1 = thought_vectors1\n self.thought_vectors2 = thought_vectors2", "def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded", "def encoder(self, tensor):\n with tf.variable_scope(\"encoder\"):\n tensor = tf.nn.embedding_lookup(self.embedding, tensor)\n cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units)\n outputs, state = tf.nn.dynamic_rnn(cell, tensor, sequence_length=self.seq_len, dtype=tf.float32)\n output = outputs[:,-1,:]\n output = tf.nn.l2_normalize(output, -1)\n\n return output", "def forward(self, input, hidden, give_gates=False, debug=False):\n\n emb = self.encoder(input)\n if emb.dim()<3:\n emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n\n if give_gates:\n if debug:\n return decoded, hidden, extras, emb\n else:\n return decoded, hidden, extras\n else:\n if debug:\n return decoded, hidden, emb\n else:\n return decoded, hidden", "def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = 
self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)", "def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):\n\n batch_size = 1\n layer_states = []\n for rnn in rnns:\n hidden_size = rnn.weight_hh.size()[1]\n \n # h_0 of shape (batch, hidden_size)\n # c_0 of shape (batch, hidden_size)\n if rnn.weight_hh.is_cuda:\n h_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n c_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n else:\n h_0 = torch.zeros(batch_size,hidden_size)\n c_0 = torch.zeros(batch_size,hidden_size)\n\n layer_states.append((h_0, c_0))\n\n outputs = []\n for token in sequence:\n rnn_input = embedder(token)\n (cell_states, hidden_states), output, layer_states = forward_one_multilayer(rnns,rnn_input,layer_states,dropout_amount)\n\n outputs.append(output)\n\n return (cell_states, hidden_states), outputs", "def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:\n # (seq_len, batch_size, num_embed)\n data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,\n use_sequence_length=True)\n # (seq_length, batch, cell_num_hidden)\n hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)\n # (seq_length, batch, cell_num_hidden)\n hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)\n # (seq_length, batch, cell_num_hidden)\n hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,\n use_sequence_length=True)\n # (seq_length, batch, 2 * cell_num_hidden)\n hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name=\"%s_rnn\" % self.prefix)\n\n return hidden_concat", "def encode_input_for_decoder(x_tensor, inp_lens_tensor, model_input_emb: EmbeddingLayer, model_enc: RNNEncoder):\n input_emb = model_input_emb.forward(x_tensor)\n (enc_output_each_word, enc_context_mask, enc_final_states) = model_enc.forward(input_emb, inp_lens_tensor)\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\n return (enc_output_each_word, enc_context_mask, enc_final_states_reshaped)", "def run_encoder(self, sess, batch):\n feed_dict = self._make_feed_dict(batch, just_enc=True) \n (enc_states, dec_in_state, global_step) = sess.run(\n [self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\n\n # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\n dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\n return enc_states, dec_in_state", "def encoder_one_way(self, cell, x, seq_len, init_state=None):\n # Output is the outputs at all time steps, state is the last state\n with tf.variable_scope(\"dynamic_rnn\"):\n outputs, state = tf.nn.dynamic_rnn(\\\n cell, x, sequence_length=seq_len, initial_state=init_state,\n dtype=self.floatX)\n # state is a StateTuple class with properties StateTuple.c and StateTuple.h\n return outputs, state", "def 
encode(self, input):\n h = np.zeros(self.hidden_size) \n \n preactivation = np.dot(self.W.T, input) + self.b\n sigmoid(preactivation, h)\n \n return h", "def encode(self, sequence):\n fwd_states, bwd_states = self.encode_fwd_bwd(sequence)\n bwd_states = bwd_states[::-1]\n return [dy.concatenate([fwd_states[i], bwd_states[i]]) for i in range(len(fwd_states))]", "def forward(self, inp, state):\n emb = self.drop(self.encoder(inp))\n y, state_next = self.rnn(emb, state)\n y = self.drop(y)\n y = self.decoder(y)\n return y, state_next", "def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):\n\n # compute context vector using attention mechanism\n #we only want the hidden, not the cell state of the lstm CZW, hence the hidden[0]\n query = hidden[0][-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]\n context, attn_probs = self.attention(\n query=query, proj_key=proj_key,\n value=encoder_hidden, mask=src_mask)\n\n # update rnn hidden state\n rnn_input = torch.cat([prev_embed, context], dim=2)\n output, hidden = self.rnn(rnn_input, hidden)\n \n pre_output = torch.cat([prev_embed, output, context], dim=2)\n pre_output = self.dropout_layer(pre_output)\n pre_output = self.pre_output_layer(pre_output)\n\n return output, hidden, pre_output", "def encoder(enc_input, attn_bias, n_layer, n_head,\n d_key, d_value, d_model, d_inner_hid, pos_enc,\n preporstprocess_dropout, attention_dropout,\n relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n for i in range(n_layer):\n enc_output = encoder_layer(enc_input, attn_bias, n_head,\n d_key, d_value, d_model,d_inner_hid, pos_enc,\n prepostprocess_dropout, attention_dropout,relu_dropout,\n preprocess_cmd, postprocess_cmd\n )\n enc_input = enc_output\n enc_output = pre_process_layer(enc_output,\n preprocess_cmd, preporstprocess_dropout)\n return enc_output", "def build_sentence_encoder(self, raw_encoder_input, input_seq_len):\n with tf.variable_scope('text_encoder'):\n self.embedding = \\\n tf.get_variable(\n \"embedding\", initializer=tf.random_uniform(\n [self.config.word_voc_size,\n self.config.word_embedding_space_size],\n -self.config.TRAIN.SENCODER.none_rnn_para_initial_max,\n self.config.TRAIN.SENCODER.none_rnn_para_initial_max))\n inputs = tf.nn.embedding_lookup(self.embedding, raw_encoder_input)\n\n # now it is [MAX_SEQ_LENGTH, batch_size, embedding_length]\n input_batch_order = tf.transpose(inputs, [1, 0, 2])\n\n # now it is [MAX_SEQ_LENGTH * batch_size, embedding_length]\n input_batch_order = tf.reshape(\n input_batch_order, [-1, self.config.word_embedding_space_size])\n\n # now it is LIST OF [BATCH_SIZE, embedding_length]\n encoder_input = tf.split(0, self.config.seq_max_len,\n input_batch_order)\n\n # the encoder part\n encode_gru_cell = tf.nn.rnn_cell.GRUCell(\n self.config.encoder_dimension)\n # big news: The state is final state, output is a list of tensor.\n # We don't to do that\n _, sentence_rep = tf.nn.rnn(encode_gru_cell, encoder_input,\n dtype=tf.float32,\n sequence_length=input_seq_len)\n self.sentence_rep = sentence_rep\n self.sentence_rep = tf.nn.l2_normalize(self.sentence_rep, 1)\n return", "def encode(self, src_seq, src_lens):\n src_embed = self.word_embedding(src_seq)\n src_encodings, final_states = self.encoder_lstm(src_embed, src_lens)\n\n return src_encodings, final_states, src_embed", "def _add_seq2seq(self):\n mode = self._mode\n vsize = self._vocab.size() # size of the vocabulary\n\n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = 
tf.random_uniform_initializer(-config.rand_unif_init_mag, config.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=config.trunc_norm_init_std)\n\n # Add embedding matrix (shared by the encoder and decoder inputs)\n with tf.variable_scope('embedding'):\n embedding = tf.get_variable('embedding', [vsize, config.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n if mode==\"train\": self._add_emb_vis(embedding) # add to tensorboard\n emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_dec_inputs = tf.nn.embedding_lookup(embedding, self._dec_batch) # tensor with shape (batch_size, max_dec_steps, emb_size)\n #emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)\n\n # Add the encoder.\n enc_fw_states, enc_bw_states, enc_fw, enc_bw = self._add_input_encoder(emb_enc_inputs, self._enc_lens)\n\n print(\"Encoder FW\", enc_fw_states.shape)\n print(\"Encoder BW\", enc_bw_states.shape)\n raise Exception(\"testing mode\")\n\n #reshape encoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]\n enc_fw_states = tf.reshape(enc_fw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_fw_states)[1]])\n enc_bw_states = tf.reshape(enc_bw_states, [config.batch_size, config.hidden_dim * tf.shape(enc_bw_states)[1]])\n\n\n # python run.py --mode=decode --data_path=data/chunked/train_1/train_1_*.bin --vocab_path=data/vocab_1 --exp_name=full1isto1\n\n # Add the decoder.\n dec_fw_states, dec_bw_states = self._add_input_decoder(emb_dec_inputs, self._dec_lens, enc_fw, enc_bw)\n\n #reshape decoder states from [batch_size, input_size, hidden_dim] to [batch_size, input_size * hidden_dim]\n dec_fw_states = tf.reshape(dec_fw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_fw_states)[1]])\n dec_bw_states = tf.reshape(dec_bw_states, [config.batch_size, config.hidden_dim * tf.shape(dec_bw_states)[1]])\n #print(\"Decoder FW\", dec_fw_states.shape)\n #print(\"Decoder BW\", dec_bw_states.shape)\n\n\n #enc_c = tf.concat(axis=1, values=[enc_fw.c, enc_bw.c])\n #enc_h = tf.concat(axis=1, values=[enc_fw.h, enc_bw.h])\n #dec_c = tf.concat(axis=1, values=[dec_fw.c, dec_bw.c])\n #dec_h = tf.concat(axis=1, values=[dec_fw.h, dec_bw.h])\n\n final_encoding = tf.concat(axis=1, values=[enc_fw_states, enc_bw_states, dec_fw_states, dec_bw_states])\n #print(\"Final encoding\", final_encoding.shape)\n #raise Exception(\"Test\")\n dims_final_enc = tf.shape(final_encoding)\n\n \"\"\"\n #convo_input = tf.concat(axis=1, values=[enc_c, enc_h, dec_c, dec_h])\n input_layer = tf.reshape(final_encoding, [config.batch_size, dims_final_enc[1], 1])\n print(\"Convolution input shape\", input_layer.shape)\n\n conv1 = tf.layers.conv1d(\n inputs=input_layer,\n filters=8,\n kernel_size=5,\n padding=\"same\",\n activation=tf.nn.relu)\n conv1 = tf.layers.batch_normalization(conv1)\n print(\"Convolution1 output shape\", conv1.shape)\n\n pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2)\n print(\"Pool1 output shape\", pool1.shape)\n\n conv2 = tf.layers.conv1d(\n inputs=pool1,\n filters=16,\n kernel_size=5,\n padding=\"same\",\n activation=tf.nn.relu)\n\n\n conv2 = tf.layers.batch_normalization(conv2)\n print(\"Convolution2 output shape\", conv2.shape)\n\n pool2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2)\n print(\"Pool2 
output shape\", pool2.shape)\n\n dims_pool2 = tf.shape(pool2)\n\n pool2_flat = tf.reshape(pool2, [config.batch_size, dims_pool2[1] * 16])\n print(\"Pool2_flat output shape\", pool2_flat.shape)\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n \"\"\"\n #raise Exception(\"testing mode\")\n\n #dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode==\"train\")\n #print(\"Dense output shape\", dense.shape)\n\n #raise Exception(\"Just testing\")\n # Add the output projection to obtain the vocabulary distribution\n with tf.variable_scope('output_projection'):\n w = tf.get_variable('w', [dims_final_enc[1], 2], dtype=tf.float32, initializer=self.trunc_norm_init)\n bias_output = tf.get_variable('bias_output', [2], dtype=tf.float32, initializer=self.trunc_norm_init)\n #concatenate abstract and article outputs [batch_size, hidden_dim*4]\n\n\n #get classification output [batch_size, 1] default on last axis\n self._logits = tf.matmul(final_encoding, w) + bias_output\n #self._logits = tf.layers.dense(final_encoding, 2, kernel_initializer=self.trunc_norm_init, bias_initializer=self.trunc_norm_init)\n #self._prob = tf.nn.softmax(logits, \"class_prob\")\n\n if mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'):\n #self._prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self._targets)\n #class_weights = tf.constant([0.1, 5.])\n self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._targets, logits=self._logits))\n #self._loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self._targets, logits=self._logits, pos_weight=class_weights))\n tf.summary.scalar('loss', self._loss)\n\n\n\n #if mode == \"decode\":", "def encode(self, input_):\n return self.encoder(input_)", "def encode(self, seq):", "def forward(self, x):\n # Get results of encoder network\n q = self.encode_nn(x)\n\n return q", "def pretrain_forward(self, inp):\n return self.encoder(inp)", "def build_encoder(self):\n \n # some general variables concerning the current processed batch\n batch_size=self.image_embeddings.get_shape()[0]\n sentence_length = self.config.sentence_length # == self.seq_embeddings.get_shape()[2]\n max_text_length = tf.shape(self.seq_embeddings)[1] # maximum text length for this batch\n \n # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the\n # modified LSTM in the \"Show and Tell\" paper has no biases and outputs\n # new_c * sigmoid(o).\n \n # create an lstm cell that will process a sentence (a sequence of tokens)\n lstm_cell_sentences = tf.nn.rnn_cell.BasicLSTMCell(\n num_units=self.config.sentence_embedding_size, state_is_tuple=True) # num_units describes the size of the internal memory cell (but it is also the output size)\n \n # we also need an lstm cell that will process a sequence of sentences (a text)\n lstm_cell_text = tf.nn.rnn_cell.BasicLSTMCell(\n num_units=self.config.article_embedding_size, state_is_tuple=True)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all lstm cells\n lstm_cell_sentences = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell_sentences,\n input_keep_prob=self.config.dropout_keep_prob_encoder,\n output_keep_prob=self.config.dropout_keep_prob_encoder)\n lstm_cell_text = tf.nn.rnn_cell.DropoutWrapper(\n lstm_cell_text,\n input_keep_prob=self.config.dropout_keep_prob_encoder,\n output_keep_prob=self.config.dropout_keep_prob_encoder)\n\n with tf.variable_scope(\"lstm_sentence_encode\", initializer=self.initializer) 
as lstm_scope:\n # we use the image embedding only to feed the text lstm with image information\n # The sentences are initialized with a zero state\n \n # Set the initial LSTM state.\n initial_state_sentences = lstm_cell_sentences.zero_state(\n batch_size=batch_size, dtype=tf.float32)\n\n # At first, generate a mask for all sentences. \n # This will allow us to specify the individual length of each sentence \n # This lengths are fed into tf.nn.dynamic_rnn, which will produce zero outputs for \n # all padded tokens.\n # Note, that self.input_seqs contains a zero for each padded token (zero is not in the vocabulary)\n zeros = tf.zeros_like(self.input_seqs)\n self.sentence_mask = tf.select(tf.greater(self.input_seqs, zeros) , tf.ones_like(self.input_seqs), zeros) # type int64\n\n #self.sentence_mask = tf.cast(self.sentence_mask, tf.int32)\n \n # In the following, we run a hierarchical approach:\n # Tokens of a sentence are mapped onto an embedding vector through lstm_cell_sentences\n # The resulting sentence embeddings are passed though lstm_cell_text to gather text embeddings\n \n # Since we have to generate an embedding for each sentence in a text, we need a loop somehow.\n # But the number of sentences in a text is dynamically determined for each batch (max_text_length).\n # Therefore, we cannot use unpack and a python loop. Instead we use the while_loop control method of TF.\n \n \n # The output of lstm_cell_sentences will be stored in this matrix, but only \n # the lstm output of the last not padded word in a sentence\n lstm_outputs_sentences = tf.zeros(tf.pack([batch_size, max_text_length, self.config.sentence_embedding_size])) # tf.pack is a hotfix, since a normal array passing would not work as max_text_length is a tensor\n #lstm_outputs_sentences = tf.zeros([batch_size, max_text_length, self.config.embedding_size])\n \n # Allow the LSTM variables to be reused.\n #lstm_scope.reuse_variables()\n\n # now we compute the lstm outputs for each token sequence (sentence) in the while loop body\n def body(i,n,los):\n \"\"\"Compute lstm outputs for sentences i (sentences with index i in text) of current batch.\n\n Inputs:\n i: control variable of loop (runs from 0 to n-1)\n n: max_text_length\n los: lstm_outputs_sentences\n\n Outputs:\n i: incremented\n n: unchanged\n los: input with updated values in index i of second dimension\n \"\"\"\n # extract correct lstm input (i-th sentence from each batch)\n #es = tf.slice(self.seq_embeddings,[0,i,0,0],[batch_size, 1, sentence_length, self.config.word_embedding_size])\n es = tf.slice(self.seq_embeddings,tf.pack([0,i,0,0]),tf.pack([batch_size, 1, sentence_length, self.config.word_embedding_size]))\n es = tf.squeeze(es, axis=1) # get rid of sentence index dimension\n es = tf.reshape(es, tf.pack([batch_size, sentence_length, self.config.word_embedding_size])) # dirty hack, to ensure that shape is known (needed by further methods)\n\n # extract masks of sentences i\n sm = tf.slice(self.sentence_mask,tf.pack([0,i,0]),tf.pack([batch_size, 1, sentence_length]))\n sm = tf.squeeze(sm, axis=1)\n # compute sentence lengths\n sm = tf.reduce_sum(sm, 1)\n sm = tf.reshape(sm, tf.pack([batch_size])) # dirty hack, to ensure that shape is known\n\n # feed i-th sentences through lstm\n lstm_outputs_sentences_tmp, _ = tf.nn.dynamic_rnn(cell=lstm_cell_sentences,\n inputs=es,\n sequence_length=sm,\n initial_state=initial_state_sentences,\n dtype=tf.float32,\n scope=lstm_scope)\n # lstm_outputs_sentences_tmp has shape (batch_size, sentence_length, sentence_embedding_size\n 
# lstm_outputs_sentences_tmp contains an output for each token in the sentences, but we are only interested in the \n # output of the last token of a sentence\n \n # Now we extract only those outputs (output of last token, which is not a padded token) from lstm_outputs_sentences_tmp\n\n # sm contains the length of each sentence, meaning we can access the right output with the index (length - 1)\n # Note, that the actual masks where reduced to lengths in the above statements.\n sm = tf.sub(sm, 1) # sentence mask contains now the index of the last token in each sentence\n # Those sentence, that have zero tokens (padded sentences) have now an index of -1. We have to set them back to 0\n # which are simply zero outputs of the lstm\n zeros = tf.zeros_like(sm)\n sm = tf.select(tf.less(sm, zeros) , zeros, sm)\n\n # We use tf.gather_nd to extract the desired outputs from lstm_outputs_sentences_tmp.\n # Therefore, we have to produce the \"indices\" parameter of this method first.\n # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_sentences\n # Hence the innermost dimension must be a 2D vector: (batch, token) <- index of desired embedding in lstm_outputs_sentences\n # for sentence with index (batch, i) in self.seq_embeddings\n\n # We generate for each of the two indices a seperate matrix and concatenate them at the end\n sm = tf.expand_dims(sm, 1)\n sm = tf.cast(sm, dtype=tf.int32)\n\n # use tf.range to generate the equivalence of sm for batch indices\n #batch_indices = tf.range(0, batch_size)\n batch_indices = tf.constant(np.arange(int(batch_size)), dtype=tf.int32)\n batch_indices = tf.expand_dims(batch_indices, 1) \n\n # then use tf.concat to generate the actual tensor, that can be used to gather the right outputs from lstm_outputs_sentences_tmp\n gather_indices = tf.concat(1, [batch_indices, sm])\n\n # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct ouput\n lstm_outputs_sentences_tmp = tf.gather_nd(lstm_outputs_sentences_tmp, gather_indices)\n lstm_outputs_sentences_tmp = tf.expand_dims(lstm_outputs_sentences_tmp, 1) \n\n # add the current output to our list of outputs\n los = tf.concat(1, [tf.slice(los, tf.pack([0,0,0]), tf.pack([batch_size, i, self.config.sentence_embedding_size])),\n lstm_outputs_sentences_tmp,\n tf.slice(los, tf.pack([0,i+1,0]), tf.pack([batch_size,n-i-1,self.config.sentence_embedding_size]))])\n \n return i+1,n,los\n\n def condition(i,n,los):\n \"\"\"Break condition for while loop\n\n Inputs:\n i: control variable of loop (runs from 0 to n-1)\n n: max_text_length\n los: lstm_outputs_sentences\n\n Outputs:\n Ture, if body should be run.\n \"\"\"\n\n return i < n\n\n result = tf.while_loop(condition, body, loop_vars=[0, max_text_length, lstm_outputs_sentences])\n lstm_outputs_sentences = result[2] \n \n with tf.variable_scope(\"lstm_text_encode\", initializer=self.initializer) as lstm_scope: \n \n # Feed the image embeddings to set the initial LSTM state.\n zero_state_text = lstm_cell_text.zero_state(\n batch_size=batch_size, dtype=tf.float32)\n _, initial_state_text = lstm_cell_text(self.image_embeddings, zero_state_text)\n \n # Allow the LSTM variables to be reused.\n lstm_scope.reuse_variables()\n \n # lstm_outputs_sentences has now the last lstm output for each sentence in the batch (output of last unpadded token)\n # Its shape is (batch_size, max_text_length, sentence_embedding_size)\n \n # Now we use the sentence embeddings to generate text embeddings\n # 
Run the batch of sentence embeddings through the LSTM.\n self.sentence_sequence_length = tf.reduce_sum(self.input_mask, 1)\n lstm_outputs_text, _ = tf.nn.dynamic_rnn(cell=lstm_cell_text,\n inputs=lstm_outputs_sentences,\n sequence_length=self.sentence_sequence_length,\n initial_state=initial_state_text,\n dtype=tf.float32,\n scope=lstm_scope)\n # lstm_outputs_text has now the lstm output of each sentence_embedding,\n # where the output of the last unpadded sentence_embedding is considered as the text embedding.\n # Note, that we could also call it article embedding, since it comprises the information of the \n # text and the image.\n # Its shape is (batch_size, max_text_length, article_embedding_size)\n\n # extract the text embedding from lstm_outputs_text\n \n # sequence_length contains the length of each text, meaning we can access the right output with the index (length - 1)\n last_sentence = tf.sub(self.sentence_sequence_length, 1) # sentence mask contains now the index of the last unpadded sentence in each text\n\n # We use tf.gather_nd to extract the desired outputs from lstm_outputs_text.\n # Therefore, we have to produce the \"indices\" parameter of this method first.\n # The elements of the last dimension in this matrix determine the indices for gathering slices from lstm_outputs_text\n # Hence the innermost dimension must be a 2D vector: (batch, sentence)\n\n # We generate for each of the two indices a seperate matrix and concatenate them at the end\n last_sentence = tf.expand_dims(last_sentence, 1)\n\n # use tf.range to generate the equivalence of sm for batch indices\n batch_indices = tf.range(0, batch_size)\n batch_indices = tf.expand_dims(batch_indices, 1) \n\n # then use tf.concat to generate the actual tensor, that can be used to gather the right outputs from lstm_outputs_text\n gather_indices = tf.concat(1, [batch_indices, last_sentence])\n \n # now we can consider the elements (of the last dimension) of gather_indices as indices for the correct ouput\n self.article_embeddings = tf.gather_nd(lstm_outputs_text, gather_indices)\n \n # As the image information might have gone lost in the hierarchical rnn, the reader might reconsider it.\n if self.config.reconsider_image:\n with tf.variable_scope(\"reconsider_image\", initializer=self.initializer, reuse=None) as reconsider_image_scope: \n # concat current article embedding with image_embedding and map them through an fully connected layer onto a new embedding\n article_image_concat = tf.concat(1, [self.article_embeddings, self.image_embeddings])\n \n self.article_embeddings = tf.contrib.layers.fully_connected(\n inputs=article_image_concat,\n num_outputs=self.config.article_embedding_size,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=reconsider_image_scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n self.article_embeddings = tf.nn.dropout(self.article_embeddings, self.config.dropout_keep_prob_encoder)\n \n # self.article_embeddings contains now the text/article embedding for each article in the batch\n # Its shape is (batch_size, article_embedding_size)\n \n # All variables up until this point are shared with the autoencoder. 
So these are the variables\n # (the whole encoder network) that we want to restore/share.\n self.autoencoder_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)", "def encoder_inference(self, features, states):\n with tf.name_scope(f\"{self.name}_encoder\"):\n outputs = tf.expand_dims(features, axis=0)\n outputs, new_states = self.encoder.recognize(outputs, states)\n return tf.squeeze(outputs, axis=0), new_states", "def encode(data, encoder):\n # Get the list of hidden depths\n\thd = encoder.hidden_depths\n # Find the middle hidden layer\n\tmiddle_layer_index = (len(hd)-1)/2\n # Initialize empty container for the encoded data\n\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\n\tfor i, d_ in enumerate(data):\n # feed forward, get all the activations, and just keep\n # the middle layer, which is the encoding\n\t\tx, z_container, x_container = encoder.ff(d_,True,True)\n\t\tx_encoded = x_container[1+middle_layer_index]\n\t\tdata_encoded[i] = x_encoded\n\t#\n\treturn data_encoded", "def dis_encoder_seq2seq(hparams):\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n ## Encoder forward variables.\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n return variable_mapping", "def encode(self, n_dimension=2, learning_rate=0.01, training_epochs=10, batch_size=400):\n X = tf.placeholder(tf.float32,[None, self.n_input])\n tf.set_random_seed(50)\n \n \n n_hidden_layer1 = int(math.pow(2, int(2*math.log(self.n_input,2)/3+math.log(n_dimension,2)/3)))\n n_hidden_layer2 = int(math.pow(2, int(math.log(self.n_input,2)/3+2*math.log(n_dimension,2)/3)))\n n_hidden_layer3 = n_dimension\n \n weights = {\n 'encoder_w1':tf.Variable(tf.random_normal([self.n_input, n_hidden_layer1])),\n 'encoder_w2':tf.Variable(tf.random_normal([n_hidden_layer1, n_hidden_layer2])),\n 'encoder_w3':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer3])),\n \n 'decoder_w1':tf.Variable(tf.random_normal([n_hidden_layer3, n_hidden_layer2])),\n 'decoder_w2':tf.Variable(tf.random_normal([n_hidden_layer2, n_hidden_layer1])),\n 'decoder_w3':tf.Variable(tf.random_normal([n_hidden_layer1, self.n_input])),\n }\n \n biases = {\n 'encoder_b1':tf.Variable(tf.random_normal([n_hidden_layer1])),\n 'encoder_b2':tf.Variable(tf.random_normal([n_hidden_layer2])),\n 'encoder_b3':tf.Variable(tf.random_normal([n_hidden_layer3])),\n \n 'decoder_b1':tf.Variable(tf.random_normal([n_hidden_layer2])),\n 'decoder_b2':tf.Variable(tf.random_normal([n_hidden_layer1])),\n 
'decoder_b3':tf.Variable(tf.random_normal([self.n_input])),\n }\n \n \n def encoder(x):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_w1']), biases['encoder_b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_w2']), biases['encoder_b2']))\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_w3']), biases['encoder_b3']))\n \n return layer_3\n\n def decoder(x):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_w1']), biases['decoder_b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_w2']), biases['decoder_b2']))\n layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_w3']), biases['decoder_b3']))\n \n return layer_3\n \n encoder_op = encoder(X)\n decoder_op = decoder(encoder_op)\n\n y_pred = decoder_op\n y_true = X\n\n cost = tf.reduce_mean(tf.pow(y_pred - y_true, 2))\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n \n \n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n n_batch = int(self.data.shape[0]/batch_size)\n for epoch in tqdm(range(training_epochs)):\n for batch_idx in range(n_batch):\n start = batch_idx * batch_size\n stop = start + batch_size\n _, encoder_result = sess.run([optimizer, encoder_op], feed_dict={X: self.data[start:stop]})\n self.X_test = sess.run(encoder_op, feed_dict={X:self.data})\n self.X_cost = sess.run(cost, feed_dict={X:self.data})\n \n return self.X_test, self.X_cost", "def encode(self, X):\r\n return self._encoder.predict(X)", "def forward(self, input, last_hidden, last_context, encoder_outputs):\r\n # input: B x 1 x d, last_hidden: (num_layers * num_directions) x B x h\r\n # last_context: B x 1 x h, encoder_outputs: B x S x h\r\n\r\n # output = embedded\r\n rnn_input = torch.cat((input, last_context), 2) # B x 1 x (d + h)\r\n output, hidden = self.rnn(rnn_input, last_hidden) # output: B x 1 x h\r\n\r\n # calculate attention from current RNN state and all encoder outputs; apply to encoder outputs\r\n attn_weights = self.attn(output, encoder_outputs) # B x S\r\n context = attn_weights.unsqueeze(1).bmm(encoder_outputs) # B x 1 x h\r\n\r\n # final output layer (next word prediction) using the RNN hidden state and context vector\r\n output = f.log_softmax(self.out(torch.cat((context.squeeze(1), output.squeeze(1)), 1)), 1)\r\n\r\n # Return final output, hidden state, and attention weights (for visualization)\r\n return output, hidden, context, attn_weights", "def encode(self, game_state: ssm.SnakeStateMachine) -> int:\n state = [e.encode(game_state) for e in self._encoders]\n return self._state2id[tuple(state)]", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def forward(self,\n state,\n encoder_out=None,\n encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n need_attn=False,\n need_head_weights=False):\n\n # need_attn must be True if need_head_weights\n need_attn = True if need_head_weights else need_attn\n print('encoder padding {}, self padding {}'.format(encoder_padding_mask, self_attn_padding_mask.size()))\n residual = 
state.clone()\n # print('self attention')\n state, _ = self.self_attn(query=state,\n key=state,\n value=state,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n '''\n ___QUESTION-6-DESCRIBE-E-START___\n How does encoder attention differ from self attention? What is the difference between key_padding_mask \n and attn_mask? If you understand this difference, then why don't we need to give attn_mask here?\n '''\n '''\n The encoder attention is making the target input word pay attention to the source sequence from encoder, while the self attention is making the input word pay attention to the words in other positions of the input sequence.\n The key_padding mask masks padded tokens ⟨pad⟩ so the model does not attend to these positions, while the attn mask masks the following tokens at each position to ensure the decoder do not look forward into the sequence.\n In encoder attention, we want the decoder to pay attention to the entire source sequence. The attn mask is not needed to mask the subsequent positions because it is not paying attention to itself.\n\n '''\n # print('encoder attention')\n state, attn = self.encoder_attn(query=state,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n need_weights=need_attn or (not self.training and self.need_attn))\n '''\n ___QUESTION-6-DESCRIBE-E-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.encoder_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state, attn", "def _build_encoder(self, hparams):\n\t\tnum_layers = self.num_encoder_layers\n\t\tnum_redisual_layers = self.num_encoder_residual_layers\n\n\t\twith tf.variable_scope('encoder') as _:\n\t\t\tself.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)\n\n\t\t\tif hparams.encoder_type == 'uni':\n\t\t\t\t_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_redisual_layers))\n\t\t\t\t# 1. build a list of cells\n\t\t\t\tcell = self._build_encoder_cell(hparams, num_layers, num_redisual_layers)\n\t\t\t\t# 2. 
forward\n\t\t\t\t# encoder_outputs: [batch, time, hidden]\n\t\t\t\t# encoder_state: ([batch, hidden] for _ in range(layers))\n\t\t\t\tencoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n\t\t\t\t\tcell,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\t\t\telif hparams.encoder_type == 'bi':\n\t\t\t\tif not num_layers % 2 == 0:\n\t\t\t\t\t_error('Bi-directional requires num_layers={} should be divided by 2'.format(num_layers))\n\t\t\t\t\traise ValueError\n\t\t\t\tnum_bi_layers = int(num_layers / 2)\n\t\t\t\tnum_bi_residual_layers = num_bi_layers - 1\n\t\t\t\t_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))\n\n\t\t\t\tcell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\t\t\t\tcell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\n\t\t\t\t# bi_outputs: (fw, bw): fw: [batch, seq, hidden]\n\t\t\t\t# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]\n\t\t\t\tbi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\t\tcell_fw,\n\t\t\t\t\tcell_bw,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\n\t\t\t\tif num_bi_layers == 1:\n\t\t\t\t\tencoder_state = bi_state\n\t\t\t\telse:\n\t\t\t\t\tencoder_state = []\n\t\t\t\t\tfor layer_id in range(num_bi_layers):\n\t\t\t\t\t\tencoder_state.append(bi_state[0][layer_id])\t\t# fw state in layer id\n\t\t\t\t\t\tencoder_state.append(bi_state[1][layer_id])\t\t# bw state in layer id\n\t\t\t\t\tencoder_state = tuple(encoder_state)\n\t\t\t\tencoder_outputs = tf.concat(bi_outputs, -1)\t\t# [batch, seq, hidden * 2]\n\t\t\telse:\n\t\t\t\t_error('Unknow encoder type: {}'.format(hparams.encoder_type))\n\t\t\t\traise ValueError\n\t\t\n\t\treturn encoder_outputs, encoder_state", "def encode(self, x):\n self.eval()\n x = torch.as_tensor(x).unsqueeze(0)\n if self.do_mt:\n enc_output, _ = self.encoder_mt(x, None)\n else:\n enc_output, _ = self.encoder(x, None)\n return enc_output.squeeze(0)", "def encoder_layer(enc_input, attn_bias, n_head, d_key,\n d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,\n attention_dropout, relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n attn_output = multi_head_attention(\n pre_process_layer(enc_input, preprocess_cmd, prepostprocess_dropout),\n None, None, attn_bias, d_key, d_value, d_model, pos_enc,\n n_head, attention_dropout\n )\n attn_output = post_process_layer(enc_input, attn_output,\n postprocess_cmd, prepostprocess_dropout)\n ffd_output = positionwise_feed_forward(\n pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),\n d_inner_hid, d_model, relu_dropout\n )\n return post_process_layer(attn_output, ffd_output,\n postprocess_cmd, prepostprocess_dropout)", "def transparent_forward(self, input, hidden, give_gates=False, debug=False):\n\n lseq, nseq = input.shape\n ispad = (input == self.padding)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. 
\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\n emb = self.encoder(input)\n for t in range(lseq):\n if give_gates:\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\n Z[t,:,:] = ZR[0].squeeze(0).T\n R[t,:,:] = ZR[1].squeeze(0).T\n else:\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\n dec = self.decoder(out)\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\n H[t,:,:] = hidden.squeeze(0).T\n O[t,:,:] = dec.squeeze(0)\n\n if give_gates:\n if debug:\n return O, H, Z, R, emb\n else:\n return O, H, Z, R\n else:\n if debug:\n return O, H, emb\n else:\n return O, H", "def __encoder_lstm(self, x, x_lengths):\n embedded_x = self.input_embedding.forward(x) # (input_seq_len x batch x embed_dim)\n embedded_x = self.embedding_dropout.forward(embedded_x)\n\n # pack and unpack the padded batch for the encoder\n packed_x = nn.utils.rnn.pack_padded_sequence(embedded_x, x_lengths)\n h, _ = self.encoder.forward(packed_x) # (input_seq_len x batch x 2*encoder_state_dim)\n unpacked_h, _ = nn.utils.rnn.pad_packed_sequence(h)\n\n return unpacked_h", "def forward(self,\n state,\n encoder_out=None,\n encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n need_attn=False,\n need_head_weights=False):\n\n # need_attn must be True if need_head_weights\n need_attn = True if need_head_weights else need_attn\n\n residual = state.clone()\n state, _ = self.self_attn(query=state,\n key=state,\n value=state,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n '''\n ___QUESTION-6-DESCRIBE-E-START___\n How does encoder attention differ from self attention? What is the difference between key_padding_mask\n and attn_mask? 
If you understand this difference, then why don't we need to give attn_mask here?\n\n Encoder attention differs from self-attention in that it attends to the\n output embeddings of the encoder instead of the embeddings in the decoder.\n key_padding_mask is used to adjust the length of the sentences, whereas\n attn_mask prevents the decoder from attending to future positions.\n We do not use attn_mask while attending to the decoder since we want all\n the embeddings in the decoder to have access to all the encoder output\n embeddings.\n '''\n state, attn = self.encoder_attn(query=state,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n need_weights=need_attn or (not self.training and self.need_attn))\n '''\n ___QUESTION-6-DESCRIBE-E-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.encoder_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state, attn", "def _init_rnn_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple(\n [self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def build_model(self):\n # Define model inputs for the encoder/decoder stack\n x_enc = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\"x_enc\")\n x_dec = Input(shape=(self.seq_len_out, self.output_feature_amount), name=\"x_dec\")\n\n # Add noise\n x_dec_t = GaussianNoise(0.2)(x_dec)\n\n input_conv2 = Conv1D(filters=64, kernel_size=5, strides=2, activation='relu', padding='same')\n input_conv1 = Conv1D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same', name=\"last_conv_layer\")\n\n input_conv2_out = input_conv2(x_enc)\n input_conv1_out = input_conv1(input_conv2_out)\n\n # Define the encoder GRU, which only has to return a state\n encoder_gru = GRU(self.state_size, return_sequences=True, return_state=True, name=\"encoder_gru\")\n encoder_out, encoder_state = encoder_gru(input_conv1_out)\n\n # Decoder GRU\n decoder_gru = GRU(self.state_size, return_state=True, return_sequences=True,\n name=\"decoder_gru\")\n # Use these definitions to calculate the outputs of out encoder/decoder stack\n dec_intermediates, decoder_state = decoder_gru(x_dec_t, initial_state=encoder_state)\n\n # Define the attention layer\n attn_layer = AttentionLayer(name=\"attention_layer\")\n attn_out, attn_states = attn_layer([encoder_out, dec_intermediates])\n\n # Concatenate decoder and attn out\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([dec_intermediates, attn_out])\n\n # Define the dense layer\n dense = Dense(self.output_feature_amount, activation='linear', name='output_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Define the encoder/decoder stack model\n encdecmodel = tsModel(inputs=[x_enc, x_dec], outputs=decoder_pred)\n\n # Define the separate encoder model for inferencing\n encoder_inf_inputs = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\"encoder_inf_inputs\")\n\n input_conv2_inf = input_conv2(encoder_inf_inputs)\n input_conv1_inf_out = 
input_conv1(input_conv2_inf)\n\n encoder_inf_out, encoder_inf_state = encoder_gru(input_conv1_inf_out)\n encoder_model = tsModel(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n # Define the separate encoder model for inferencing\n decoder_inf_inputs = Input(shape=(1, self.output_feature_amount), name=\"decoder_inputs\")\n encoder_inf_states = Input(shape=(encdecmodel.get_layer('last_conv_layer').output_shape[1], self.state_size), name=\"decoder_inf_states\")\n decoder_init_state = Input(shape=(self.state_size,), name=\"decoder_init\")\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = tsModel(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return encoder_model, decoder_model, encdecmodel", "def make_encoder(self, input_size: int, latent_size: int) -> nn.Module:\n pass", "def _encode_event_idx(self, event_idx, step_idx):\n enc_dec = self.trans_model._config.encoder_decoder\n input_ = np.zeros(enc_dec.input_size)\n input_[event_idx] = 1.0\n\n offset = enc_dec._one_hot_encoding.num_classes\n n = step_idx + 1\n for i in range(enc_dec._binary_counter_bits):\n input_[offset] = 1.0 if (n // 2 ** i) % 2 else -1.0\n offset += 1\n\n return np.expand_dims(input_, 0)", "def forward(self, trg_embed, encoder_hidden, encoder_final, \n src_mask, trg_mask, hidden=None, max_len=None):\n \n # the maximum number of steps to unroll the RNN\n #print(\"czw src mask\", src_mask.size())\n #print(\"czw trg embed\", trg_embed.size())\n #print(\"czw encoder_hidden\", encoder_hidden.size())\n #print(\"czw encoder_final\", encoder_final[0].size())\n if max_len is None:\n max_len = trg_embed.size(1)\n\n # initialize decoder hidden state\n if hidden is None:\n hidden = self.init_hidden(encoder_final)\n \n # pre-compute projected encoder hidden states\n # (the \"keys\" for the attention mechanism)\n # this is only done for efficiency\n proj_key = self.attention.key_layer(encoder_hidden)\n \n # here we store all intermediate hidden states and pre-output vectors\n decoder_states = []\n pre_output_vectors = []\n \n # unroll the decoder RNN for max_len steps\n for i in range(max_len):\n prev_embed = trg_embed[:, i].unsqueeze(1)\n output, hidden, pre_output = self.forward_step(\n prev_embed, encoder_hidden, src_mask, proj_key, hidden)\n decoder_states.append(output)\n pre_output_vectors.append(pre_output)\n\n decoder_states = torch.cat(decoder_states, dim=1)\n pre_output_vectors = torch.cat(pre_output_vectors, dim=1)\n return decoder_states, hidden, pre_output_vectors # [B, N, D]", "def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = 
tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour 
=tf.stack(idx_list, axis=1) # permutations\r\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\r\n self.entropies = tf.add_n(entropies)\r\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\r\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\r\n \r\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\r", "def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')", "def decode(self, input_size):\n output = np.zeros(input_size)\n \n preactivation = np.dot(self.W, self.h) + self.c\n sigmoid(preactivation, output)\n \n return output", "def make_encoder(opt, embeddings, intent_size, output_size, use_history=False, hidden_depth=1, identity=None,\n hidden_size=None):\n # encoder = StateEncoder(intent_size=intent_size, output_size=output_size,\n # state_length=opt.state_length, extra_size=3 if opt.dia_num>0 else 0 )\n\n # intent + price\n diaact_size = (intent_size+1)\n extra_size = 3 + 2\n if hidden_size is None:\n hidden_size = opt.hidden_size\n if not opt.use_utterance:\n embeddings = None\n if use_history:\n extra_size = 3\n # + pmask\n diaact_size += 1\n if identity is None:\n encoder = HistoryIDEncoder(None, diaact_size * 2, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth, rnn_state=True)\n else:\n # encoder = HistoryIDEncoder(identity, diaact_size*2+extra_size, embeddings, output_size,\n # hidden_depth=hidden_depth)\n encoder = HistoryIDEncoder(identity, diaact_size * 2, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth, rnn_state=True)\n else:\n if identity is None:\n encoder = CurrentEncoder(diaact_size*opt.state_length+extra_size, embeddings, output_size,\n hidden_depth=hidden_depth)\n else:\n extra_size = 3\n # + pmask\n diaact_size += 1\n encoder = HistoryIDEncoder(identity, diaact_size * opt.state_length, extra_size, embeddings, output_size,\n hidden_depth=hidden_depth)\n\n return encoder", "def encoder_bi(self, cell_fw, cell_bw, x, seq_len, init_state_fw=None,\n init_state_bw=None):\n # Output is the outputs at all time steps, state is the last state\n with tf.variable_scope(\"bidirectional_dynamic_rnn\"):\n outputs, state = tf.nn.bidirectional_dynamic_rnn(\\\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=x,\n sequence_length=seq_len,\n initial_state_fw=init_state_fw,\n initial_state_bw=init_state_bw,\n dtype=self.floatX)\n # outputs: a tuple(output_fw, output_bw), all sequence hidden states,\n # each as tensor of shape [batch,time,units]\n # Since we don't need the outputs separate, we concat here\n outputs = tf.concat(outputs,2)\n outputs.set_shape([None, None, self.bi_encoder_hidden])\n # 
If LSTM cell, then \"state\" is not a tuple of Tensors but an\n # LSTMStateTuple of \"c\" and \"h\". Need to concat separately then new\n if \"LSTMStateTuple\" in str(type(state[0])):\n c = tf.concat([state[0][0],state[1][0]],axis=1)\n h = tf.concat([state[0][1],state[1][1]],axis=1)\n state = tf.contrib.rnn.LSTMStateTuple(c,h)\n else:\n state = tf.concat(state,1)\n # Manually set shape to Tensor or all hell breaks loose\n state.set_shape([None, self.bi_encoder_hidden])\n return outputs, state", "def forward(self, inp, hidden=None, give_gates=False, debug=False, readout_time=None):\n\n if self.recoder is None:\n emb = inp\n else:\n emb = self.recoder(inp)\n\n if hidden is None:\n hidden = self.init_hidden(inp.shape[1])\n # if emb.dim()<3:\n # emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n # print(output.shape)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n if readout_time is None:\n decoded = decoded[-1,...] # assume only final timestep matters\n\n if give_gates:\n return decoded, hidden, extras\n else:\n return decoded, hidden", "def get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n\n # handle -1 cases\n ll_ = (last_word_indices != -1).long()\n last_word_indices = last_word_indices * ll_\n\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output", "def make_prediction(self, previous_timesteps_x, previous_y):\n # Get the state from the Encoder using the previous timesteps for x\n # Expand the previous timesteps, we must make the input a batch (going from shape (100, 149) to (1, 100, 149))\n enc_outs, enc_last_state = self.encoder.predict(np.expand_dims(previous_timesteps_x, axis=0))\n dec_state = enc_last_state\n\n # Initialize the outputs on the previous y so we have something to feed the net\n # It might be neater to feed a start symbol instead\n dec_out = np.expand_dims(previous_y, axis=0)\n outputs = []\n attention_weights = []\n for i in range(self.seq_len_out):\n dec_out, attention, dec_state = self.decoder.predict([enc_outs, dec_state, dec_out])\n outputs.append(dec_out)\n\n # Add attention weights\n attention_weights.append(attention)\n\n # Reshape and transpose attention weights so they make more sense\n attention_weights = np.reshape(np.stack(attention_weights), newshape=(self.seq_len_out,\n self.encoder.get_layer(\"last_conv_layer\")\n .output_shape[1])).transpose()\n\n # Concatenate the outputs, as they are batches\n # For example, going from a list of (1,1,1) to one unit of (1,100,1)\n # So we take the 0th element from the batch which are our outputs\n return 
np.concatenate(outputs, axis=1)[0], attention_weights", "def encoder(self, inputs):\n pass", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def forward(self,\n input,\n hidden,\n encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_state = hidden[0] if isinstance(hidden, tuple) else hidden\n attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.rnn(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights", "def add_model(self):\n\n b_sz = tf.shape(self.encoder_input)[0]\n tstp_en = tf.shape(self.encoder_input)[1]\n tstp_de = tf.shape(self.decoder_input)[1]\n\n encoder_dropout_input = tf.nn.dropout(self.encoder_input, self.ph_dropout, name='encoder_Dropout')\n decoder_dropout_input = tf.nn.dropout(self.decoder_input, self.ph_dropout, name='decoder_Dropout')\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size)\n \"\"\"#(batch_size, num_sentence, hidden_size)\"\"\"\n encoder_outputs, state = tf.nn.dynamic_rnn(lstm_cell, encoder_dropout_input, self.encoder_tstps, \n dtype=tf.float32, swap_memory=True, time_major=False, scope = 'rnn_encode')\n self.state=state\n with tf.variable_scope('decoder') as vscope:\n decoder_outputs, _ = tf.nn.dynamic_rnn(lstm_cell, decoder_dropout_input, self.decoder_tstps, #(batch_size, time_steps, hidden_size)\n initial_state=state, dtype=tf.float32, swap_memory=True, time_major=False, scope='rnn_decode')\n \n with tf.variable_scope('rnn_decode'):\n #tf.reshape(self.ph_decoder_label, shape=(-1, 1)) #(batch_size*time_steps, 1)\n encoder_outputs_reshape = tf.reshape(encoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_0') #(batch_size*time_steps, hidden_size)\n decoder_outputs_reshape = tf.reshape(decoder_outputs, shape=(-1, self.config.hidden_size), name='add_model_reshape_1') #(batch_size*time_steps_1, hidden_size)\n encoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(encoder_outputs_reshape, output_size=self.config.hidden_size, #(#(batch_size*time_steps, hidden_size))\n bias=False, scope='Ptr_W1')\n decoder_outputs_linear_reshape = tf.nn.rnn_cell._linear(decoder_outputs_reshape, output_size=self.config.hidden_size, #(#(batch_size*time_steps, hidden_size))\n bias=False, scope='Ptr_W2')\n encoder_outputs_linear = tf.reshape(encoder_outputs_linear_reshape, tf.shape(encoder_outputs), name='add_model_reshape_2')\n decoder_outputs_linear = tf.reshape(decoder_outputs_linear_reshape, tf.shape(decoder_outputs), name='add_model_reshape_3')\n \n encoder_outputs_linear_expand = tf.expand_dims(encoder_outputs_linear, 1) #(b_sz, 1, tstp_en, h_sz)\n decoder_outputs_linear_expand = tf.expand_dims(decoder_outputs_linear, 2) #(b_sz, 
tstp_de, 1, h_sz)\n \n after_add = tf.tanh(encoder_outputs_linear_expand + decoder_outputs_linear_expand) #(b_sz, tstp_de, tstp_en, h_sz)\n \n after_add_reshape = tf.reshape(after_add, shape=(-1, self.config.hidden_size), name='add_model_reshape_4')\n \n after_add_linear_reshape = tf.nn.rnn_cell._linear(after_add_reshape, output_size=1, #(b_sz*tstp_de*tstp_en, 1)\n bias=False, scope='Ptr_v')\n after_add_linear = tf.reshape(after_add_linear_reshape, shape=tf.shape(after_add)[:3], name='add_model_reshape_5') #(b_sz, tstp_de, tstp_en)\n\n en_length_mask = tf.sequence_mask(self.encoder_tstps, #(b_sz, tstp_en)\n maxlen=tf.shape(after_add_linear)[-1], dtype=tf.bool)\n en_length_mask = tf.expand_dims(en_length_mask, 1) #(b_sz, 1, tstp_en)\n en_length_mask = tf.tile(en_length_mask, [1, tstp_de, 1])\n\n logits = tf.select(en_length_mask, after_add_linear,\n tf.ones_like(after_add_linear) * (-np.Inf)) # shape(b_sz, tstp_de, tstp_en)\n \n flat_logits = tf.reshape(logits, shape=[b_sz * tstp_de, tstp_en])\n\n vscope.reuse_variables()\n outputs_ta, _, _ = self.decoder(lstm_cell, state, encoder_outputs, encoder_dropout_input, scope='rnn_decode')\n outputs = outputs_ta.pack() #(time_steps, batch_size)\n outputs = tf.transpose(outputs, [1, 0]) #(batch_size, time_steps)\n \n state = tf.concat(1, state)\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.hidden_size, state_is_tuple=False)\n beam_outputs, beam_seq, beam_prob = self.beam_decoder(lstm_cell, state, encoder_outputs, \n encoder_dropout_input, beam_size=self.config.beam_size, scope='rnn_decode')\n \n self.logits = logits\n self.encoder_outputs = encoder_outputs\n self.beam_seq = beam_seq\n self.beam_prob = beam_prob\n return flat_logits, outputs, beam_outputs", "def __call__(self, sequence):\n fwd_states, bwd_states = self.encode_fwd_bwd(sequence)\n return dy.concatenate([fwd_states[-1], bwd_states[-1]])", "def __init__(self, input_size, hidden_size, bidirection, config):\r\n super(Encoder, self).__init__()\r\n\r\n self.config = config\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.bidirection = bidirection\r\n\r\n if self.config.model in ['LSTM', 'GRU']:\r\n self.rnn = getattr(nn, self.config.model)(self.input_size, self.hidden_size, self.config.nlayer_enc,\r\n batch_first=True, dropout=self.config.dropout,\r\n bidirectional=self.bidirection)\r\n else:\r\n try:\r\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[self.config.model]\r\n except KeyError:\r\n raise ValueError(\"\"\"An invalid option for `--model` was supplied,\r\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\r\n self.rnn = nn.RNN(self.input_size, self.hidden_size, self.config.nlayers, nonlinearity=nonlinearity,\r\n batch_first=True, dropout=self.config.dropout, bidirectional=self.bidirection)", "def init_hidden_state(self, encoder_out: torch.Tensor):\n pass", "def forward(self, *args): # noqa: R0914\r\n encoder_out, (hn, cn) = self.unified_encoder(*args)\r\n device = hn.device\r\n non_sequential_cont_decoded = self.mlp_non_seq_cont(hn)\r\n non_sequential_cat_decoded = []\r\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\r\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\r\n\r\n hn = torch.unsqueeze(hn, 0)\r\n cn = torch.unsqueeze(cn, 0)\r\n # decoded is the output prediction of timestep i-1 of the decoder\r\n decoded = torch.zeros(encoder_out.shape[0], int(\r\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\r\n seq_cont_decoded = torch.Tensor(device=device)\r\n 
seq_cat_decoded = []\r\n for _ in range(self.unified_encoder.seq_cat_count):\r\n seq_cat_decoded.append(torch.Tensor(device=device))\r\n\r\n for _ in range(encoder_out.shape[1]):\r\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\r\n # Predict all categorical columns\r\n out_cat_onehot = []\r\n if self.unified_encoder.seq_cat_count != 0:\r\n for idx, out in enumerate(out_cat):\r\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\r\n seq_cat_decoded[idx] = torch.cat(\r\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\r\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\r\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\r\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\r\n else:\r\n decoded = out_cont\r\n seq_cont_decoded = torch.cat(\r\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\r\n\r\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded", "def encode(input):\n return ModelEncoder().encode(input)", "def generate_encoder(input_shape: Tuple[int]=(100,1), lstm_units:int = 100, latent_dim:int=20)->tf.keras.Model:\n\n input = tf.keras.layers.Input(shape=input_shape , name=\"encoder_input\")\n #create a bi-directional LSTM layer\n encoded = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=lstm_units, return_sequences=True))(input)\n encoded = tf.keras.layers.Flatten()(encoded)\n encoded = tf.keras.layers.Dense(units=latent_dim, name=\"latent_encoding\")(encoded)\n encoded = tf.keras.layers.Reshape(target_shape=(latent_dim, 1) , name=\"output_encoder\")(encoded)\n\n model = tf.keras.Model(inputs=input, outputs=encoded, name=\"encoder\")\n\n return model", "def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, 
tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores", "def _basic_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n feed_previous,\n dtype=dtypes.float32,\n scope=None):\n with variable_scope.variable_scope(scope or \"basic_rnn_seq2seq\"):\n enc_cell = copy.deepcopy(cell)\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\n if feed_previous:\n return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)\n else:\n return _rnn_decoder(decoder_inputs, enc_state, cell)", "def get_rnn_init_state(combiner_outputs: Dict[str, torch.Tensor], sequence_reducer: SequenceReducer, num_layers: int) ->torch.Tensor:\n if ENCODER_OUTPUT_STATE not in combiner_outputs:\n encoder_output_state = combiner_outputs[HIDDEN]\n else:\n encoder_output_state = combiner_outputs[ENCODER_OUTPUT_STATE]\n if isinstance(encoder_output_state, tuple):\n if len(encoder_output_state) == 2:\n encoder_output_state = encoder_output_state[0]\n elif len(encoder_output_state) == 4:\n encoder_output_state = torch.mean([encoder_output_state[0], encoder_output_state[2]])\n else:\n raise ValueError(f'Invalid sequence decoder inputs with keys: {combiner_outputs.keys()} with extracted encoder ' + f'state: {encoder_output_state.size()} that was invalid. 
Please double check the compatibility ' + 'of your encoder and decoder.')\n if len(encoder_output_state.size()) > 3:\n raise ValueError('Init state for RNN decoders only works for 1d or 2d tensors (encoder_output).')\n if len(encoder_output_state.size()) == 3:\n encoder_output_state = sequence_reducer(encoder_output_state)\n return repeat_2D_tensor(encoder_output_state, num_layers)", "def rnn_with_embedding(self,cell,init_state,input_seq,\n input_seq_len,reuse=None,\n scope=\"RNN\"): \n with tf.variable_scope(scope,reuse=reuse) as vs:\n log(vs.name+\"/Encoding sequences\")\n with tf.device('/cpu:0'):\n emb = tf.get_variable(\"emb\",\n [self.vocab_size,self.hidden_size],\n dtype=tf.float32)\n un_emb = tf.get_variable(\"unemb\",\n [self.hidden_size,self.vocab_size],\n tf.float32)\n # We need a bias\n un_emb_b = tf.get_variable(\"unemb_b\",\n [self.vocab_size],\n dtype=tf.float32)\n \n assert scope+\"/emb:0\" in emb.name,\\\n \"Making sure the reusing is working\"\n emb_input_seq = tf.nn.embedding_lookup(\n emb,input_seq)\n emb_input_list = tf.unpack(\n tf.transpose(emb_input_seq,[1,0,2]))\n \n # RNN pass\n if init_state is None:\n init_state = cell.zero_state(\n tf.shape(emb_input_list[0])[0],tf.float32)\n \n emb_output_list, final_state = tf.nn.rnn(\n cell,emb_input_list,initial_state=init_state,\n sequence_length=input_seq_len)\n\n # We shift the predicted outputs, because at\n # each word we're trying to predict the next.\n emb_output_list = emb_output_list[:-1]\n \n # Unembedding\n output_list = [tf.matmul(t,un_emb) + un_emb_b\n for t in emb_output_list]\n outputs = tf.transpose(tf.pack(output_list),[1,0,2])\n\n return outputs, final_state", "def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):\n\n lseq = inp.shape[0]\n nseq = inp.shape[1]\n # ispad = (input == self.padding)\n\n if hidden is None:\n hidden = self.init_hidden(nseq)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. 
\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\n if self.recoder is None:\n emb = inp\n else:\n emb = self.recoder(inp)\n for t in range(lseq):\n if give_gates:\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\n Z[t,:,:] = ZR[0].squeeze(0).T\n R[t,:,:] = ZR[1].squeeze(0).T\n else:\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\n dec = self.decoder(out)\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\n H[t,:,:] = hidden.squeeze(0).T\n O[t,:,:] = dec.squeeze(0)\n\n if give_gates:\n if debug:\n return O, H, Z, R, emb\n else:\n return O, H, Z, R\n else:\n if debug:\n return O, H, emb\n else:\n return O, H", "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n # TODO: Implement Function\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\n return f_output", "def _basic_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n feed_previous,\n dtype=dtypes.float32,\n scope=None):\n with variable_scope.variable_scope(scope or \"basic_rnn_seq2seq\"):\n enc_cell = copy.deepcopy(cell)\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\n if feed_previous:\n return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)\n else:\n return _rnn_decoder(decoder_inputs, enc_state, cell)", "def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\n with tf.variable_scope(\"decoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\n\n return fw_states, bw_states", "def forward(self, source, out_seq_len = None):\n # source seems to be a (1,1,2)\n\n batch_size = source.shape[0]\n seq_len = source.shape[1]\n if out_seq_len is None:\n out_seq_len = seq_len\n\n \n #############################################################################\n # TODO: #\n # Implement the forward pass of the Seq2Seq model. Please refer to the #\n # following steps: #\n # 1) Get the last hidden representation from the encoder. Use it as #\n # the first hidden state of the decoder #\n # 2) The first input for the decoder should be the <sos> token, which #\n # is the first in the source sequence. #\n # 3) Feed this first input and hidden state into the decoder # \n # one step at a time in the sequence, adding the output to the #\n # final outputs. #\n # 4) Update the input and hidden weights being fed into the decoder #\n # at each time step. The decoder output at the previous time step # \n # will have to be manipulated before being fed in as the decoder #\n # input at the next time step. 
#\n #############################################################################\n output, hidden = self.encoder(source)\n outputs = torch.zeros(batch_size, out_seq_len, self.decoder.output_size, device=self.device)\n # initialize -- batch size = 128, seq_len = 20.\n output, hidden = self.decoder(source[:, 0], hidden)\n # output of shape -- batch size,\n #outputs.size() = [20 , 5893]\n #output.size() = [ 128, 5893]\n\n\n #simple:\n # output.size() = (8)\n # outputs.size() = (2,8)\n outputs[:, 0, :] = output\n output_idx = outputs[:,0,:].argmax(1)\n output_idx = output_idx.unsqueeze(1)\n for i in range(1, out_seq_len):\n output, hidden = self.decoder(output_idx , hidden)\n outputs[:,i,:] = output\n output_idx = outputs[:,i,:].argmax(1)\n output_idx = output_idx.unsqueeze(1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return outputs", "def get_rnn_init_state(\n combiner_outputs: Dict[str, torch.Tensor], sequence_reducer: SequenceReducer, num_layers: int\n) -> torch.Tensor:\n if ENCODER_OUTPUT_STATE not in combiner_outputs:\n # Use the combiner's hidden state.\n encoder_output_state = combiner_outputs[HIDDEN]\n else:\n # Use the encoder's output state.\n encoder_output_state = combiner_outputs[ENCODER_OUTPUT_STATE]\n if isinstance(encoder_output_state, tuple):\n if len(encoder_output_state) == 2:\n # LSTM encoder. Use the hidden state and ignore the cell state.\n encoder_output_state = encoder_output_state[0]\n elif len(encoder_output_state) == 4:\n # Bi-directional LSTM encoder. Use the average of hidden states and ignore cell state.\n encoder_output_state = torch.mean([encoder_output_state[0], encoder_output_state[2]])\n else:\n raise ValueError(\n f\"Invalid sequence decoder inputs with keys: {combiner_outputs.keys()} with extracted encoder \"\n + f\"state: {encoder_output_state.size()} that was invalid. 
Please double check the compatibility \"\n + \"of your encoder and decoder.\"\n )\n\n if len(encoder_output_state.size()) > 3:\n raise ValueError(\"Init state for RNN decoders only works for 1d or 2d tensors (encoder_output).\")\n\n if len(encoder_output_state.size()) == 3:\n # Reduce to [batch_size, hidden_size].\n encoder_output_state = sequence_reducer(encoder_output_state)\n\n return repeat_2D_tensor(encoder_output_state, num_layers)", "def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.SELU(),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.SELU(),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.SELU(),\n nn.ConvTranspose2d(32, 3, 4, 2, 1), # B, nc, 64, 64\n nn.ReLU()\n )", "def encode(self, x: Tensor) ->Tensor:\n return self.encoder(x)[0]", "def encode(self) -> str:\n return Activation._encoder.encode(self)", "def encoder(list_of_str, key):\n tokenized = self.tokenizer.encode_commands(list_of_str)\n hidden = self.tokenizer.tokenize(tokenized)\n hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1) # correct for bididrectional\n return hidden", "def forward(self,\n input,\n hidden):\n embedded = self.embedding(input).view(1, 1, -1)\n output = F.relu(embedded)\n output, hidden = self.rnn(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden", "def encode(self,\n data: mx.sym.Symbol,\n data_length: mx.sym.Symbol,\n seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:\n # data: (batch_size, seq_len, num_hidden)\n data = mx.sym.FullyConnected(data=data,\n num_hidden=self.config.cnn_config.num_hidden,\n no_bias=True,\n flatten=False,\n weight=self.i2h_weight)\n\n # Multiple layers with residual connections:\n for layer in self.layers:\n data = data + layer(data, data_length, seq_len)\n return data, data_length, seq_len", "def __call__(self, encoder_hidden_states):\n params = self.dec_params\n search_params = self.search_params\n\n lm_params = self.lm_params\n get_top_k_fn = self.top_k_setup_with_lm(encoder_hidden_states)\n\n x = params.embedding[data_utils.GO_ID]\n x_lm = lm_params.embedding[data_utils.GO_ID]\n\n # Initialize Decoder states\n h_size = params.dec_lstm_w.shape[1]/4\n zero_dec_state = (np.zeros(h_size), np.zeros(h_size))\n\n dec_lm_h_size = params.lm_lstm_w.shape[1]/4\n zero_dec_lm_state = (np.zeros(dec_lm_h_size), np.zeros(dec_lm_h_size))\n\n # Initialize LM state\n lm_h_size = lm_params.lstm_w.shape[1]/4\n zero_lm_state = (np.zeros(lm_h_size), np.zeros(lm_h_size))\n\n zero_attn = np.zeros(encoder_hidden_states.shape[1])\n\n # Maintain a tuple of (output_indices, score, encountered EOS?)\n output_list = []\n final_output_list = []\n k = search_params.beam_size # Represents the current beam size\n step_count = 0\n\n # Run step 0 separately\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\n get_top_k_fn(x, x_lm, [zero_dec_state, zero_dec_lm_state, zero_lm_state],\n zero_attn, beam_size=k)\n for idx in xrange(top_k_indices.shape[0]):\n output_tuple = (BeamEntry([top_k_indices[idx]], state_list, context_vec),\n top_k_model_scores[idx])\n if top_k_indices[idx] == data_utils.EOS_ID:\n final_output_list.append(output_tuple)\n # Decrease the beam size once EOS is encountered\n k -= 1\n else:\n output_list.append(output_tuple)\n\n step_count += 1\n while step_count < 
120 and k > 0:\n # These lists store the states obtained by running the decoder\n # for 1 more step with the previous outputs of the beam\n next_dec_states = []\n next_context_vecs = []\n\n score_list = []\n model_score_list = []\n index_list = []\n for candidate, cand_score in output_list:\n x = params.embedding[candidate.get_last_output()]\n x_lm = lm_params.embedding[candidate.get_last_output()]\n\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\n get_top_k_fn(x, x_lm, candidate.get_dec_state(),\n candidate.get_context_vec(), beam_size=k)\n\n next_dec_states.append(state_list)\n next_context_vecs.append(context_vec)\n\n index_list.append(top_k_indices)\n score_list.append(top_k_scores + cand_score)\n model_score_list.append(top_k_model_scores + cand_score)\n\n # Score of all k**2 continuations\n all_scores = np.concatenate(score_list, axis=0)\n all_model_scores = np.concatenate(model_score_list, axis=0)\n # All k**2 continuations\n all_indices = np.concatenate(index_list, axis=0)\n\n # Find the top indices among the k^^2 entries\n top_k_indices = np.argpartition(all_scores, -k)[-k:]\n next_k_indices = all_indices[top_k_indices]\n top_k_scores = all_model_scores[top_k_indices]\n # The original candidate indices can be found by dividing by k.\n # Because the indices are of the form - i * k + j, where i\n # represents the ith output and j represents the jth top index for i\n orig_cand_indices = np.divide(top_k_indices, k, dtype=np.int32)\n\n new_output_list = []\n\n for idx in xrange(k):\n orig_cand_idx = int(orig_cand_indices[idx])\n # BeamEntry of the original candidate\n orig_cand = output_list[orig_cand_idx][0]\n next_elem = next_k_indices[idx]\n # Add the next index to the original sequence\n new_index_seq = orig_cand.get_index_seq() + [next_elem]\n dec_state = next_dec_states[orig_cand_idx]\n context_vec = next_context_vecs[orig_cand_idx]\n\n output_tuple = (BeamEntry(new_index_seq, dec_state, context_vec),\n top_k_scores[idx] +\n search_params.word_ins_penalty*len(new_index_seq))\n if next_elem == data_utils.EOS_ID:\n # This sequence is finished. Put the output on the final list\n # and reduce beam size\n final_output_list.append(output_tuple)\n k -= 1\n else:\n new_output_list.append(output_tuple)\n\n output_list = new_output_list\n step_count += 1\n\n final_output_list += output_list\n\n best_output = max(final_output_list, key=lambda output_tuple: output_tuple[1])\n output_seq = best_output[0].get_index_seq()\n return np.stack(output_seq, axis=0)", "def forward(self, input, hidden):\r\n output, hidden = self.rnn(input, hidden)\r\n output = f.log_softmax(self.out(output.squeeze(1)), 1)\r\n return output, hidden", "def forward(self, state, encoder_padding_mask):\n residual = state.clone()\n\n '''\n ___QUESTION-6-DESCRIBE-D-START___\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor \n be after multi-head attention? HINT: formulate your answer in terms of \n constituent variables like batch_size, embed_dim etc...\n '''\n '''\n The encoder padding mask is used to mask the ⟨pad⟩ token which is padded to the input sequences to make the sequences in the same lengths each batch. 
Thus the word of input sequence will not pay attention to these padded tokens.\n The shape of state is (tgt_time_steps * batch_size * embed_dim)\n '''\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\n '''\n ___QUESTION-6-DESCRIBE-D-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state", "def get_recurrent_encoder(config: RecurrentEncoderConfig, prefix: str) -> 'Encoder':\n # TODO give more control on encoder architecture\n encoder_seq = EncoderSequence([], config.dtype)\n\n if config.conv_config is not None:\n encoder_seq.append(ConvolutionalEmbeddingEncoder, config=config.conv_config,\n prefix=prefix + C.CHAR_SEQ_ENCODER_PREFIX)\n if config.conv_config.add_positional_encoding:\n # If specified, add positional encodings to segment embeddings\n encoder_seq.append(AddSinCosPositionalEmbeddings,\n num_embed=config.conv_config.num_embed,\n scale_up_input=False,\n scale_down_positions=False,\n prefix=\"%s%sadd_positional_encodings\" % (prefix, C.CHAR_SEQ_ENCODER_PREFIX))\n encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.TIME_MAJOR)\n else:\n encoder_seq.append(ConvertLayout, target_layout=C.TIME_MAJOR, num_hidden=0)\n\n if config.reverse_input:\n encoder_seq.append(ReverseSequence, infer_hidden=True)\n\n if config.rnn_config.residual:\n utils.check_condition(config.rnn_config.first_residual_layer >= 2,\n \"Residual connections on the first encoder layer are not supported\")\n\n # One layer bi-directional RNN:\n encoder_seq.append(BiDirectionalRNNEncoder,\n rnn_config=config.rnn_config.copy(num_layers=1),\n prefix=prefix + C.BIDIRECTIONALRNN_PREFIX,\n layout=C.TIME_MAJOR)\n\n if config.rnn_config.num_layers > 1:\n # Stacked uni-directional RNN:\n # Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.\n remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,\n first_residual_layer=config.rnn_config.first_residual_layer - 1)\n encoder_seq.append(RecurrentEncoder,\n rnn_config=remaining_rnn_config,\n prefix=prefix + C.STACKEDRNN_PREFIX,\n layout=C.TIME_MAJOR)\n\n encoder_seq.append(ConvertLayout, infer_hidden=True, target_layout=C.BATCH_MAJOR)\n\n return encoder_seq", "def getEncode(self, img):\n img_ = self.preprocess(img)\n fv = self.model_.predict(img_)\n fv = fv.reshape(-1, 1)\n return fv", "def forward(self, *args):\r\n enc_src, _, _ = self.unified_encoder(*args)\r\n enc_src = enc_src.view(enc_src.shape[0], -1)\r\n y_pred = self.mlp(enc_src)\r\n return y_pred", "def forward(self, state, encoder_padding_mask):\n residual = state.clone()\n\n '''\n ___QUESTION-6-DESCRIBE-D-START___\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor\n be after multi-head attention? HINT: formulate your answer in terms of\n constituent variables like batch_size, embed_dim etc...\n\n The purpose of encoder_padding_mask is to account for the fact that the\n source sentences in the batch are of different length. 
The output shape\n of state tensor will be [src_time_steps, batch_size, embed_dim].\n '''\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\n '''\n ___QUESTION-6-DESCRIBE-D-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state", "def gru_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):\n with tf.variable_scope(\"gru_seq2seq_bid_encoder\"):\n if inputs is not None:\n inputs_length = common_layers.length_from_embedding(inputs)\n # Flatten inputs.\n inputs = common_layers.flatten4d3d(inputs)\n # LSTM encoder.\n _, final_encoder_state = gru_bid_encoder(\n inputs, inputs_length, hparams, train, \"encoder\")\n else:\n inputs_length = None\n final_encoder_state = None\n # LSTM decoder.\n shifted_targets = common_layers.shift_right(targets)\n # Add 1 to account for the padding added to the left from shift_right\n targets_length = common_layers.length_from_embedding(shifted_targets) + 1\n hparams_decoder = copy.copy(hparams)\n hparams_decoder.hidden_size = 2 * hparams.hidden_size\n decoder_outputs, _ = gru(\n common_layers.flatten4d3d(shifted_targets),\n targets_length,\n hparams_decoder,\n train,\n \"decoder\",\n initial_state=final_encoder_state)\n return tf.expand_dims(decoder_outputs, axis=2)", "def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x", "def _encode(self, src_token_ids, padding_mask, training=False):\n src_seq_len = tf.shape(src_token_ids)[1]\n\n # [batch_size, src_seq_len, hidden_size]\n src_token_embeddings = self._embedding_logits_layer(\n src_token_ids, 'embedding')\n\n # [src_seq_len, hidden_size]\n positional_encoding = utils.get_positional_encoding(\n src_seq_len, self._hidden_size)\n src_token_embeddings += positional_encoding\n src_token_embeddings = self._encoder_dropout_layer(\n src_token_embeddings, training)\n\n encoder_outputs = self._encoder(\n src_token_embeddings, padding_mask, training)\n return encoder_outputs", "def gru_bid_encoder(inputs, sequence_length, hparams, train, name):\n\n with tf.variable_scope(name):\n cell_fw = tf.nn.rnn_cell.MultiRNNCell(\n [_dropout_gru_cell(hparams, train)\n for _ in range(hparams.num_hidden_layers)])\n\n cell_bw = tf.nn.rnn_cell.MultiRNNCell(\n [_dropout_gru_cell(hparams, train)\n for _ in range(hparams.num_hidden_layers)])\n\n ((encoder_fw_outputs, encoder_bw_outputs),\n (encoder_fw_state, encoder_bw_state)) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw,\n cell_bw,\n inputs,\n sequence_length,\n dtype=tf.float32,\n time_major=False)\n\n encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)\n encoder_states = []\n\n for i in range(hparams.num_hidden_layers):\n encoder_state = tf.concat(\n values=(encoder_fw_state[i], encoder_bw_state[i]),\n axis=1,\n name=\"bidirectional_concat\")\n\n encoder_states.append(encoder_state)\n\n encoder_states = tuple(encoder_states)\n return encoder_outputs, encoder_states", "def lstm_encoder(sequence, lstm,\n seq_lens=None, init_states=None, embedding=None):\n # transpose batch tensor to fit lstm format\n # sequence size [batch size,max_seq_len]\n batch_size = sequence.size(0)\n max_seq_len = 
sequence.size(1)\n batch_first = lstm.batch_first\n\n if not batch_first: # embedding and transpose input sequence tensor\n sequence = sequence.transpose(0, 1)\n\n # emb_sequence size [batch size,max_seq_len,emb_dim]\n emb_sequence = (embedding(sequence) if embedding is not None\n else sequence)\n # reorder batch tensor along batch dim\n if not seq_lens is None: # reorder input sequence tensor along batch dim\n # (max_sen_len, batch_size, lstm_input_size) 按照batch_size维度,根据文本实际长度(句子数量)降序排列\n assert batch_size == len(seq_lens)\n sort_ind = sorted(range(len(seq_lens)),\n key=lambda i: seq_lens[i], reverse=True) # 确定排序索引\n seq_lens = [seq_lens[i] for i in sort_ind] # 根据排序索引 对序列真实长度进行排序\n sequence = reorder_sequence(emb_sequence, sort_ind,\n lstm.batch_first) # 根据排序索引对tensor batch dim进行排序\n\n # init hidden state and cell state for lstm\n if init_states is None: # 初始化lstm中的hidden state 和 cell state\n device = sequence.device\n init_states = init_lstm_states(lstm, batch_size, device)\n else:\n init_states = (init_states[0].contiguous(),\n init_states[1].contiguous())\n\n if not seq_lens is None: # Encode & Reorder Back\n packed_seq = nn.utils.rnn.pack_padded_sequence(emb_sequence, # 压缩lstm输入序列,保留输入序列更多有效序列\n seq_lens,\n batch_first=batch_first) # https://www.cnblogs.com/sbj123456789/p/9834018.html\n packed_out, final_states = lstm(packed_seq.to(init_states[0].dtype), init_states) # encode\n lstm_out, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=batch_first,\n total_length=max_seq_len)\n # (max_sent_len, batch_size, emb_dim)\n\n sort_ind = sorted(range(len(seq_lens)),\n key=lambda i: seq_lens[i], reverse=True) # 确定排序索引\n back_map = {ind: i for i, ind in enumerate(sort_ind)} # 结构为{之前索引: 当前索引}, 将编码之后的结果按照索引对应回输入索引\n reorder_ind = [back_map[i] for i in range(len(seq_lens))] # 生成逆排序索引,对应于sort_ind\n lstm_out = reorder_sequence(lstm_out, reorder_ind,\n batch_first) # 根据逆排序索引对tensor batch dim进行排序 (max_sent_len, batch_size, lstm_size)\n # final_states = reorder_lstm_states(final_states, reorder_ind)\n else:\n lstm_out, final_states = lstm(sequence, init_states)\n\n # transpose\n return lstm_out, final_states # (seq_len, batch, embedding) (hidden_layer* direction_num, batch, hidden_size)", "def forward(self, src, mask):\n bs = src.shape[0]\n src = src.permute(2, 0, 1)\n m = src \n enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, bs, 1)\n for layer in self.encoder_layers:\n m = layer(m,\n pos=enc_embed,\n src_mask = mask\n )\n return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)" ]
[ "0.683423", "0.6820723", "0.6807832", "0.675225", "0.66331315", "0.66327035", "0.65157163", "0.6451546", "0.643727", "0.6415194", "0.6381192", "0.6375881", "0.6340776", "0.6330177", "0.6329643", "0.63056594", "0.62932295", "0.6283715", "0.62812126", "0.6278949", "0.6253341", "0.62327033", "0.6228094", "0.614302", "0.6141615", "0.6141152", "0.61335343", "0.61076015", "0.6090194", "0.60574174", "0.60550815", "0.60222626", "0.60119456", "0.60115296", "0.6004813", "0.59613115", "0.59505916", "0.59314317", "0.59203756", "0.5897832", "0.58709806", "0.58677506", "0.5821649", "0.5816332", "0.5806003", "0.57888347", "0.5775479", "0.57695806", "0.5765", "0.5749095", "0.5738066", "0.5732439", "0.5729389", "0.5712179", "0.5696801", "0.56803876", "0.5679941", "0.56747496", "0.5670142", "0.56660837", "0.5665208", "0.5663571", "0.56617135", "0.5639985", "0.56144094", "0.5610349", "0.5599126", "0.5596373", "0.559421", "0.5585892", "0.5558495", "0.55540025", "0.55429024", "0.55340666", "0.5529411", "0.5523737", "0.55168015", "0.5512574", "0.5509761", "0.5500576", "0.5495655", "0.54809475", "0.54686475", "0.546436", "0.5464311", "0.54629815", "0.5455897", "0.5452167", "0.54379433", "0.54273444", "0.5425739", "0.54247963", "0.54190356", "0.54174435", "0.5415367", "0.541156", "0.54029465", "0.5391497", "0.53867686", "0.5386355" ]
0.7965193
0
Gives access to the hidden state of the individual components of the input batch. encode() encodes the whole batch of sequences in one call, but decoding is performed for every batch sequence individually, so this method becomes necessary.
def get_encoded_item(self, encoded, index):
    # for vanilla RNN and GRU, since they have a hidden state represented as a single tensor
    ##return encoded[:, index:index+1]
    # for LSTM, since it has a hidden state represented as a tuple of two tensors: the cell state and the hidden state
    return encoded[0][:, index:index+1].contiguous(), encoded[1][:, index:index+1].contiguous()
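A minimal usage sketch of the idea above, assuming a PyTorch LSTM encoder; the free-standing helper, tensor shapes, and module setup below are illustrative assumptions rather than part of the original snippet:

import torch

def get_encoded_item(encoded, index):
    # GRU / vanilla RNN: the hidden state is a single tensor of shape (num_layers, batch, hidden)
    if isinstance(encoded, torch.Tensor):
        return encoded[:, index:index + 1].contiguous()
    # LSTM: the hidden state is an (h, c) tuple; slice both along the batch dimension
    h, c = encoded
    return h[:, index:index + 1].contiguous(), c[:, index:index + 1].contiguous()

# Encode the whole batch once, then pull out one sequence's state for per-item decoding.
encoder = torch.nn.LSTM(input_size=8, hidden_size=16)
inputs = torch.randn(5, 3, 8)                      # (seq_len, batch=3, input_size)
_, hidden = encoder(inputs)                        # hidden is an (h, c) tuple
per_item_states = [get_encoded_item(hidden, i) for i in range(3)]
print(per_item_states[0][0].shape)                 # torch.Size([1, 1, 16])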
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, x):\n _, hid = self.encoder(x) #All RNN classes output a tuple of 2 objects: the output of the RNN first and the hidden state from the last item in\n return hid #the input sequence second. We're only interested in the hidden state", "def _encode(self):\n with tf.variable_scope('encoding'):\n self.sep_p_encodes, _ = bilstm_layer(self.p_emb, self.p_length, self.hidden_size)\n tf.get_variable_scope().reuse_variables()\n self.sep_q_encodes, _ = bilstm_layer(self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, 1-self.dropout)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, 1-self.dropout)", "def init_hidden_state(self, encoder_out: torch.Tensor):\n pass", "def initialize_hidden_state(self):\n return tf.zeros(shape=(self.batch_size, self.enc_units))", "def forward(\r\n self,\r\n input_ids,\r\n encoder_hidden_states,\r\n encoder_padding_mask,\r\n decoder_padding_mask,\r\n decoder_causal_mask,\r\n past_key_values=None,\r\n use_cache=False,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n **unused,\r\n ):\r\n\r\n if \"decoder_cached_states\" in unused:\r\n warnings.warn(\r\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_cached_states\")\r\n if \"decoder_past_key_values\" in unused:\r\n warnings.warn(\r\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_past_key_values\")\r\n\r\n # check attention mask and invert\r\n if encoder_padding_mask is not None:\r\n encoder_padding_mask = invert_mask(encoder_padding_mask)\r\n\r\n # embed positions\r\n positions = self.embed_positions(input_ids, use_cache=use_cache)\r\n\r\n if use_cache:\r\n input_ids = input_ids[:, -1:]\r\n positions = positions[:, -1:]\r\n\r\n x = self.embed_tokens(input_ids) * self.embed_scale\r\n if self.do_blenderbot_90_layernorm:\r\n x = self.layernorm_embedding(x)\r\n x += positions\r\n else:\r\n x += positions\r\n x = self.layernorm_embedding(x)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n # decoder layers\r\n all_hidden_states = () if output_hidden_states else None\r\n all_self_attns = () if output_attentions else None\r\n enc_dec_all_attn = () if output_attentions else None\r\n next_decoder_cache = []\r\n for idx, decoder_layer in enumerate(self.layers):\r\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\r\n if output_hidden_states:\r\n all_hidden_states += (x,)\r\n dropout_probability = random.uniform(0, 1)\r\n if self.training and (dropout_probability < self.layerdrop):\r\n continue\r\n\r\n layer_state = past_key_values[idx] if past_key_values is not None else None\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n x, layer_self_attn, layer_past,_ = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n \"\"\"\r\n\r\n #isidora - start - replace _ with enc_dec_attn to get the 
encoder-decoder attn weights\r\n x, layer_self_attn, layer_past, enc_dec_attn = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n #isidora - end\r\n\r\n\r\n if use_cache:\r\n next_decoder_cache.append(layer_past.copy())\r\n\r\n if output_attentions:\r\n all_self_attns += (layer_self_attn,)\r\n enc_dec_all_attn += (enc_dec_attn,)\r\n\r\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\r\n x = self.layer_norm(x)\r\n\r\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n if output_hidden_states:\r\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n next_cache = next_decoder_cache if use_cache else None\r\n\r\n #isidora - start - return enc_dec_all_attn instead of decoder outputs\r\n return enc_dec_all_attn\r\n #isidora - end\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n if not return_dict:\r\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\r\n return BaseModelOutputWithPast(\r\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\r\n )\r\n \"\"\"", "def init_hidden(self, batch_size):\r\n \r\n self.hidden_state = (\r\n torch.zeros(((1+self.bidirectional)*self.num_layers,\r\n batch_size,\r\n self.hidden_size)).to(self.device),\r\n torch.zeros(((1+self.bidirectional)*self.num_layers, \r\n batch_size, \r\n self.hidden_size)).to(self.device))", "def _prepare_attended_output(self,\n decoder_hidden_state: torch.Tensor,\n state: Dict[str, torch.Tensor]) -> torch.Tensor:\n # Ensure mask is also a FloatTensor. 
Or else the multiplication within\n # attention will complain.\n # shape: (batch_size, max_input_sequence_length)\n\n encoder_outputs = state[\"encoder_outputs\"]\n source_mask = state[\"source_mask\"]\n prev_attention = state[\"attention\"]\n att_keys = state[\"att_keys\"]\n att_values = state[\"att_values\"]\n\n # shape: (batch_size, max_input_sequence_length)\n mode = \"soft\" if self.training else \"hard\"\n if isinstance(self._attention, MonotonicAttention):\n encoder_outs: Dict[str, torch.Tensor] = {\n \"value\": state[\"encoder_outputs\"],\n \"mask\": state[\"source_mask\"]\n }\n\n monotonic_attention, chunk_attention = self._attention(\n encoder_outs, decoder_hidden_state, prev_attention, mode=mode)\n # shape: (batch_size, encoder_output_dim)\n attended_output = util.weighted_sum(\n encoder_outputs, chunk_attention)\n attention = monotonic_attention\n elif isinstance(self._attention, StatefulAttention):\n attended_output, attention = self._attention(decoder_hidden_state,\n att_keys, att_values, source_mask)\n else:\n attention = self._attention(\n decoder_hidden_state, source_mask)\n attended_output = util.weighted_sum(\n encoder_outputs, attention)\n\n return attended_output, attention", "def call(self, inputs, output_hidden_states = False, training = False):\n if isinstance(inputs, (list, tuple)):\n input_ids = inputs[0]\n token_type_ids = inputs[1] if len(inputs) > 1 else None\n attention_mask = inputs[2] if len(inputs) > 2 else None\n \n elif isinstance(inputs, dict):\n input_ids = inputs['input_ids']\n token_type_ids = inputs.get('token_type_ids', None)\n attention_mask = inputs.get('attention_mask', None)\n else:\n raise ValueError('The type of inputs should be list or dictionary.')\n \n input_shape = shape_list(input_ids)\n \n# last_hidden_state = tf.ones(input_shape + (self.config.hidden_size))\n# output = tf.ones(input_shape + (self.config.hidden_size,))\n# logits = tf.ones(input_shape + (self.config.vocab_size,))\n# pooler_output = tf.ones((input_shape[0], self.config.hidden_size))\n \n hidden_states = [] if output_hidden_states else None\n output = self.embeddings(input_ids, token_type_ids, training = training)\n \n if output_hidden_states:\n hidden_states.append(output)\n\n if self.causal_attention:\n attention_mask = tf.constant(lower_triangle_matrix(input_shape[-1]))\n attention_mask = tf.reshape(attention_mask, shape = (1, 1, input_shape[-1], input_shape[-1]))\n \n else:\n if attention_mask is None:\n attention_mask = tf.constant(1.0, shape = input_shape, dtype = 'float32')\n # attention_mask now has shape (batches, sequence_len),\n # we need to covert it to (batches, 1, 1, sequence_len)\n # so that it will broadcast to (batches, num_attention_heads, sequence_len, sequence_len)\n attention_mask = tf.reshape(attention_mask, shape = (-1, 1, 1, input_shape[-1]))\n\n \n \n last_hidden_state, layer_outputs = self.encoder(output, attention_mask, output_hidden_states = output_hidden_states, training = training)\n if output_hidden_states:\n hidden_states.extend(layer_outputs)\n \n pooler_output = self.pooler(tf.gather(last_hidden_state, indices = 0, axis = 1)) if self.pooler else None\n logits = self.lm_head(last_hidden_state) if self.lm_head else None\n\n res = {'sequence_output': last_hidden_state,\n 'pooler_output': pooler_output,\n 'logits': logits,\n 'hidden_states': hidden_states}\n\n self.built = True\n\n return {k : v for k, v in res.items() if v is not None}", "def _bridge_bidirectional_hidden(self, hidden):\n num_layers = hidden.size(0) // 2\n _, batch_size, 
hidden_size = hidden.size()\n return hidden.view(num_layers, 2, batch_size, hidden_size)\\\n .transpose(1, 2).contiguous().view(num_layers, batch_size, hidden_size * 2)", "def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):\n\n # compute context vector using attention mechanism\n #we only want the hidden, not the cell state of the lstm CZW, hence the hidden[0]\n query = hidden[0][-1].unsqueeze(1) # [#layers, B, D] -> [B, 1, D]\n context, attn_probs = self.attention(\n query=query, proj_key=proj_key,\n value=encoder_hidden, mask=src_mask)\n\n # update rnn hidden state\n rnn_input = torch.cat([prev_embed, context], dim=2)\n output, hidden = self.rnn(rnn_input, hidden)\n \n pre_output = torch.cat([prev_embed, output, context], dim=2)\n pre_output = self.dropout_layer(pre_output)\n pre_output = self.pre_output_layer(pre_output)\n\n return output, hidden, pre_output", "def _fix_enc_hidden(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h", "def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = 
self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)", "def init_hidden_state(self,batch_size):\n h = torch.zeros(batch_size,self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size,self.decoder_dim).to(device)\n return h, c", "def _TransformHidden(self, _):\n raise NotImplementedError()", "def process_state_batch(self, batch):\n return np.squeeze(batch, axis=1)", "def _encode_back(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)", "def init_hidden_state(self, encoder_out):\n init_internal_state = []\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out)\n c = self.init_c(mean_encoder_out)\n init_internal_state.append((h, c))\n\n for i in range(1, self.decoder_number_layers):\n init_internal_state.append((\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device),\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device)\n ))\n return init_internal_state", "def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')", "def run_encoder(self, sess, batch):\n feed_dict = self._make_feed_dict(batch, just_enc=True) \n (enc_states, dec_in_state, global_step) = sess.run(\n [self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\n\n # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\n dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\n return enc_states, dec_in_state", "def _make_hidden(self, batch_size):\n hidden = torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device)\n return hidden", "def process_hidden_layers(self, x, training):\n restricted_to_final_seq = False\n for layer_ix, layer in enumerate(self.hidden_layers):\n if type(layer) == Dense:\n if self.return_final_seq_only and not 
restricted_to_final_seq:\n x = x[:, -1, :]\n restricted_to_final_seq = True\n x = layer(x)\n else:\n x = layer(x)\n if self.batch_norm:\n x = self.batch_norm_layers[layer_ix](x, training=False)\n if self.dropout != 0.0 and training: x = self.dropout_layer(x)\n return x, restricted_to_final_seq", "def _make_hidden(self, batch_size):\n hidden = (torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device),\n torch.zeros(self.nb_layers, batch_size, self.hidden_size).to(device))\n return hidden", "def forward(self, hidden_states):\n # Common transformations (dense layer, layer norm + activation function) performed on text, KG and protein data\n # transform is initialized in the parent BigBirdLMPredictionHead class\n hidden_states = self.transform(hidden_states)\n\n # The first part is processed with the text decoder, the second with the entity decoder, and the third with the\n # protein decoder to map to the text, kg, and protein vocab size, respectively\n text_hidden_states_to_vocab = self.text_decoder(hidden_states[:, : self.kg_start_idx])\n ent_hidden_states_to_kg_vocab = self.entity_decoder(\n hidden_states[:, self.kg_start_idx : self.prot_start_idx]\n )\n prot_hidden_states_to_prot_vocab = self.prot_decoder(\n hidden_states[:, self.prot_start_idx :]\n )\n\n return (\n text_hidden_states_to_vocab,\n ent_hidden_states_to_kg_vocab,\n prot_hidden_states_to_prot_vocab,\n )", "def initialize_hidden_state(self, batch_size):\n return tf.zeros((batch_size, self.enc_units))", "def init_hidden_state(self, encoder_out):\r\n mean_encoder_out = encoder_out.mean(dim=1)\r\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\r\n c = self.init_c(mean_encoder_out)\r\n return h, c", "def _decode(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, 2 * self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], 2 * self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)", "def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2b\n ### TODO - Implement the forward pass of the character decoder.\n # print(\"=====input.size\",input.size())\n char_embedded= self.decoderCharEmb(input)\n # print(\"=====char_embedded.size\",char_embedded.size())\n out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\n # print(\"=====out.size\",out.size()) #dimensions (seq_length, batch, hidden_size)\n \n out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n o_proj = self.char_output_projection(out_batch_first)\n scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n return scores,dec_hidden\n ### END YOUR CODE ", "def encoder(list_of_str, key):\n tokenized = self.tokenizer.encode_commands(list_of_str)\n hidden = self.tokenizer.tokenize(tokenized)\n hidden = hidden.permute(1, 0, 2).reshape(hidden.size(1), -1) # correct for bididrectional\n return hidden", "def _encode(self):\n with tf.variable_scope('passage_encoding'):\n self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)\n with tf.variable_scope('question_encoding'):\n self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)\n if self.use_dropout:\n self.sep_p_encodes = 
tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)\n self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)", "def forward(self, input, hidden, give_gates=False, debug=False):\n\n emb = self.encoder(input)\n if emb.dim()<3:\n emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n\n if give_gates:\n if debug:\n return decoded, hidden, extras, emb\n else:\n return decoded, hidden, extras\n else:\n if debug:\n return decoded, hidden, emb\n else:\n return decoded, hidden", "def get_num_hidden(self) -> int:\n return self.encoders[-1].get_num_hidden()", "def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)", "def get_representation(output):\n\n # shape: (seq_len, vocab_size)\n hidden_states = output[1]\n\n token_embeddings = torch.stack(hidden_states, dim=0)\n # remove dimension 1 (batches)\n token_embeddings = torch.squeeze(token_embeddings, dim=1)\n # swap dimension 0 and 1\n token_embeddings = token_embeddings.permute(1, 0, 2)\n # the last hidden layer output (2+seq_len, 768)\n hidden_states = [token[-1] for token in token_embeddings]\n\n return hidden_states", "def _bert_encoder(self, sentence, attn_mask):\n output = self.bert(sentence, attn_mask)\n embedding = output['hidden_states'][-1]\n\n feats = self.hidden2tag(embedding)\n return feats", "def transparent_forward(self, input, hidden, give_gates=False, debug=False):\n\n lseq, nseq = input.shape\n ispad = (input == self.padding)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. 
\n O = torch.zeros(lseq, nseq, self.decoder.out_features)\n emb = self.encoder(input)\n for t in range(lseq):\n if give_gates:\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\n Z[t,:,:] = ZR[0].squeeze(0).T\n R[t,:,:] = ZR[1].squeeze(0).T\n else:\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\n dec = self.decoder(out)\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\n H[t,:,:] = hidden.squeeze(0).T\n O[t,:,:] = dec.squeeze(0)\n\n if give_gates:\n if debug:\n return O, H, Z, R, emb\n else:\n return O, H, Z, R\n else:\n if debug:\n return O, H, emb\n else:\n return O, H", "def reset_hidden(self, batch_size):\n\n hidden = {}\n hidden[\"h\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n hidden[\"c\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n return hidden", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def process_state_batch(self, batch):\n return batch", "def process_state_batch(self, batch):\n return batch", "def get_reconstructed_input(self, hidden):\r\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)", "def get_reconstructed_input(self, hidden):\r\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)", "def forward(self, encoding, encoding_lens, hidden, y_labels=None, y_lens=None):\n # split into keys and values\n # keys [B,T,K], values [B,T,V]\n keys, values = torch.split(encoding, [self.key_dim, self.value_dim], dim=-1)\n\n hidden = hidden.unsqueeze(0)\n\n if y_labels is not None and y_lens is not None:\n return self.__forward_train(keys, values, hidden, encoding_lens, y_labels, y_lens)\n else:\n return self.__forward_inference(keys, values, hidden)", "def __call__(self, encoder_hidden_states):\n params = self.dec_params\n search_params = self.search_params\n\n lm_params = self.lm_params\n get_top_k_fn = self.top_k_setup_with_lm(encoder_hidden_states)\n\n x = params.embedding[data_utils.GO_ID]\n x_lm = lm_params.embedding[data_utils.GO_ID]\n\n # Initialize Decoder states\n h_size = params.dec_lstm_w.shape[1]/4\n zero_dec_state = (np.zeros(h_size), np.zeros(h_size))\n\n dec_lm_h_size = params.lm_lstm_w.shape[1]/4\n zero_dec_lm_state = (np.zeros(dec_lm_h_size), np.zeros(dec_lm_h_size))\n\n # Initialize LM state\n lm_h_size = lm_params.lstm_w.shape[1]/4\n zero_lm_state = (np.zeros(lm_h_size), np.zeros(lm_h_size))\n\n zero_attn = np.zeros(encoder_hidden_states.shape[1])\n\n # Maintain a tuple of (output_indices, score, encountered EOS?)\n output_list = []\n final_output_list = []\n k = search_params.beam_size # Represents the current beam size\n step_count = 0\n\n # Run step 0 separately\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\n get_top_k_fn(x, x_lm, [zero_dec_state, zero_dec_lm_state, zero_lm_state],\n zero_attn, beam_size=k)\n for idx in xrange(top_k_indices.shape[0]):\n output_tuple = (BeamEntry([top_k_indices[idx]], state_list, context_vec),\n top_k_model_scores[idx])\n if top_k_indices[idx] == data_utils.EOS_ID:\n final_output_list.append(output_tuple)\n # Decrease the beam size once EOS is encountered\n k -= 1\n else:\n output_list.append(output_tuple)\n\n step_count += 1\n while step_count < 120 and k > 0:\n # These lists store the states obtained by running the decoder\n # for 1 more step with the previous outputs of the beam\n 
next_dec_states = []\n next_context_vecs = []\n\n score_list = []\n model_score_list = []\n index_list = []\n for candidate, cand_score in output_list:\n x = params.embedding[candidate.get_last_output()]\n x_lm = lm_params.embedding[candidate.get_last_output()]\n\n top_k_indices, top_k_model_scores, top_k_scores, state_list, context_vec =\\\n get_top_k_fn(x, x_lm, candidate.get_dec_state(),\n candidate.get_context_vec(), beam_size=k)\n\n next_dec_states.append(state_list)\n next_context_vecs.append(context_vec)\n\n index_list.append(top_k_indices)\n score_list.append(top_k_scores + cand_score)\n model_score_list.append(top_k_model_scores + cand_score)\n\n # Score of all k**2 continuations\n all_scores = np.concatenate(score_list, axis=0)\n all_model_scores = np.concatenate(model_score_list, axis=0)\n # All k**2 continuations\n all_indices = np.concatenate(index_list, axis=0)\n\n # Find the top indices among the k^^2 entries\n top_k_indices = np.argpartition(all_scores, -k)[-k:]\n next_k_indices = all_indices[top_k_indices]\n top_k_scores = all_model_scores[top_k_indices]\n # The original candidate indices can be found by dividing by k.\n # Because the indices are of the form - i * k + j, where i\n # represents the ith output and j represents the jth top index for i\n orig_cand_indices = np.divide(top_k_indices, k, dtype=np.int32)\n\n new_output_list = []\n\n for idx in xrange(k):\n orig_cand_idx = int(orig_cand_indices[idx])\n # BeamEntry of the original candidate\n orig_cand = output_list[orig_cand_idx][0]\n next_elem = next_k_indices[idx]\n # Add the next index to the original sequence\n new_index_seq = orig_cand.get_index_seq() + [next_elem]\n dec_state = next_dec_states[orig_cand_idx]\n context_vec = next_context_vecs[orig_cand_idx]\n\n output_tuple = (BeamEntry(new_index_seq, dec_state, context_vec),\n top_k_scores[idx] +\n search_params.word_ins_penalty*len(new_index_seq))\n if next_elem == data_utils.EOS_ID:\n # This sequence is finished. 
Put the output on the final list\n # and reduce beam size\n final_output_list.append(output_tuple)\n k -= 1\n else:\n new_output_list.append(output_tuple)\n\n output_list = new_output_list\n step_count += 1\n\n final_output_list += output_list\n\n best_output = max(final_output_list, key=lambda output_tuple: output_tuple[1])\n output_seq = best_output[0].get_index_seq()\n return np.stack(output_seq, axis=0)", "def _decode_back(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)\n\n outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.start_probs), axis=2),\n tf.expand_dims(tf.nn.softmax(self.end_probs), axis=1))\n outer = tf.matrix_band_part(outer, 0, -1)\n self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)\n self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)", "def get_state(self):\n state = super().get_state()\n state.update({\n 'num_of_fields': self.num_of_fields,\n 'embedding_dim': self.embedding_dim})\n return state", "def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)", "def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = 
torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores", "def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)", "def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)", "def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = 
tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour =tf.stack(idx_list, axis=1) # permutations\r\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\r\n self.entropies = tf.add_n(entropies)\r\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\r\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\r\n \r\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\r", "def _replace_appropriate_hidden_state_rows(hidden_state: Union[Tuple[torch.Tensor, torch.Tensor]],\n new_hidden_state: Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\n packed_sequence_info: PackedSequenceInfo, iteration: int,\n num_batches: int) -> Union[Tuple[torch.Tensor, torch.Tensor]]:\n if packed_sequence_info:\n # In the case of PackedSequence, certain inputs in the batch need to be ignored, depending on\n # sequence length for that input and which timestep we are in.\n # In our implementation, we still feed the full batch into the rnn_impl_map function, but\n # instead of replacing all rows of cell_hx (each row corresponds to an output for an item in the\n # batch), we replace only rows which correspond to valid batch inputs. This is the same as how\n # hx behaves in actual Pytorch implementation when using PackedSequence.\n current_batch_size = packed_sequence_info.batch_sizes[iteration]\n if current_batch_size == num_batches:\n # All items in the input batch are valid, so we can replace the entire hidden state.\n hidden_state = new_hidden_state\n else:\n # Not all items in the input batch are valid. 
Replace the first number of rows in the hidden\n # state corresponding to the number of valid items, and keep the remaining rows unchanged.\n if isinstance(hidden_state, tuple):\n hidden_state = (torch.cat((new_hidden_state[0][:current_batch_size - num_batches],\n hidden_state[0][current_batch_size - num_batches:])),\n torch.cat((new_hidden_state[1][:current_batch_size - num_batches],\n hidden_state[1][current_batch_size - num_batches:])))\n else:\n hidden_state = torch.cat((new_hidden_state[:current_batch_size - num_batches],\n hidden_state[current_batch_size - num_batches:]))\n else:\n hidden_state = new_hidden_state\n return hidden_state", "def hidden(self):\n return self._hidden", "def transparent_forward(self, inp, hidden=None, give_gates=False, debug=False):\n\n lseq = inp.shape[0]\n nseq = inp.shape[1]\n # ispad = (input == self.padding)\n\n if hidden is None:\n hidden = self.init_hidden(nseq)\n\n H = torch.zeros(lseq, self.nhid, nseq)\n if give_gates:\n Z = torch.zeros(lseq, self.nhid, nseq)\n R = torch.zeros(lseq, self.nhid, nseq)\n \n # because pytorch only returns hidden activity in the last time step,\n # we need to unroll it manually. \n O = torch.zeros(lseq, nseq, self.decoder.out_features)\n if self.recoder is None:\n emb = inp\n else:\n emb = self.recoder(inp)\n for t in range(lseq):\n if give_gates:\n out, hidden, ZR = self.rnn(emb[t:t+1,...], hidden, give_gates=True)\n Z[t,:,:] = ZR[0].squeeze(0).T\n R[t,:,:] = ZR[1].squeeze(0).T\n else:\n out, hidden = self.rnn(emb[t:t+1,...], hidden)\n dec = self.decoder(out)\n # naan = torch.ones(hidden.squeeze(0).shape)*np.nan\n # H[t,:,:] = torch.where(~ispad[t:t+1,:].T, hidden.squeeze(0), naan).T\n H[t,:,:] = hidden.squeeze(0).T\n O[t,:,:] = dec.squeeze(0)\n\n if give_gates:\n if debug:\n return O, H, Z, R, emb\n else:\n return O, H, Z, R\n else:\n if debug:\n return O, H, emb\n else:\n return O, H", "def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None, st=\"<s>\", ed=\"</s>\", k=3):\n\t\tbatch_size = enc_states.shape[0]\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tbeams = [Beam(k, self.vocab, hidden[:,i,:], self.device) for i in range(batch_size)]\n\n\t\t\tfor i in range(self.max_trg_len):\n\t\t\t\tfor j in range(batch_size):\n\t\t\t\t\tlogits, hidden = self.decoderStep(enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_hidden_state(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_current_word())\n\t\t\t\t\tlogLikelihood = torch.log(F.softmax(logits, dim=-1))\n\t\t\t\t\tbeams[j].advance(logLikelihood, hidden)\n\n\t\t\tallHyp, allScores = [], []\n\t\t\tn_best = 1\n\t\t\tfor b in range(batch_size):\n\t\t\t\tscores, ks = beams[b].sort_best()\n\n\t\t\t\tallScores += [scores[:n_best]]\n\t\t\t\thyps = [beams[b].get_hyp(k) for k in ks[:n_best]]\n\t\t\t\tallHyp.append(hyps)\n\n\t\t\treturn allHyp\n\t\t\t# return sentences\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:, i])\n\t\t\t\tlogits[:, i, :] = logit.squeeze()\n\t\t\treturn logits", "def _hidden_activation(self, inputs):\n if self.act_enc is None:\n act_enc = lambda x: x\n else:\n act_enc = self.act_enc\n return act_enc(self._mappings(inputs))", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the 
layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n\n # Input should be provided as (n_batch, n_time_steps, n_features)\n # but scan requires the iterable dimension to be first\n # So, we need to dimshuffle to (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, *range(2, input.ndim))\n seq_len, num_batch = input.shape[0], input.shape[1]\n\n # When we are not precomputing the input, we also need to pass the\n # input-to-hidden parameters to step\n non_seqs = L.get_all_params(self.input_to_hidden)\n\n # Create single recurrent computation step function\n def step(input_n, hid_previous, *args):\n hid_pre = L.get_output(\n self.input_to_hidden,{self.input_to_hidden_input : input_n,\n self.input_to_hidden_hidden : hid_previous}, **kwargs)\n\n # Clip gradients\n if self.grad_clipping:\n hid_pre = theano.gradient.grad_clip(\n hid_pre, -self.grad_clipping, self.grad_clipping)\n\n return hid_pre\n\n def step_masked(input_n, mask_n, hid_previous, *args):\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n hid = step(input_n, hid_previous, *args)\n hid_out = T.switch(mask_n, hid, hid_previous)\n return [hid_out]\n\n if mask is not None:\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n if not isinstance(self.hid_init, L.Layer):\n # The code below simply repeats self.hid_init num_batch times in\n # its first dimension. Turns out using a dot product and a\n # dimshuffle is faster than T.repeat.\n dot_dims = (list(range(1, self.hid_init.ndim - 1)) +\n [0, self.hid_init.ndim - 1])\n hid_init = T.dot(T.ones((num_batch, 1)),\n self.hid_init.dimshuffle(dot_dims))\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])[0]\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n go_backwards=self.backwards,\n outputs_info=[hid_init],\n non_sequences=non_seqs,\n truncate_gradient=self.gradient_steps,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, *range(2, hid_out.ndim))\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n\n return hid_out", "def _init_rnn_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple(\n [self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def gru_encoder(cell, embedding, init_state, batch_input, batch_mask):\n #batch_size = batch_input.get_shape()[0]\n #state = tf.zeros([batch_size, options['state_size']], tf.float32) # 
initialize the state\n outputs = []\n #split_inputs = tf.split(1, batch_input.get_shape()[0], batch_input)\n \n with tf.device(\"/cpu:0\"):\n embedded_list = tf.nn.embedding_lookup(embedding, batch_input)\n #embedded_list = batch_mask * tf.transpose(embedded_list, [2, 0, 1]) # Add mask to change embedding into zeros\n #embedded_list = tf.transpose(embedded_list, [2, 1, 0])\n embedded_list = tf.transpose(embedded_list, [1, 0, 2])\n embedded_list = tf.unpack(embedded_list) # list of embedding\n \n # min_sequence_length = tf.reduce_min(seq_len)\n #max_sequence_length = tf.reduce_max(seq_len)\n\n state = init_state\n for time, (embedded, i_mask) in enumerate(zip(embedded_list, tf.unpack(tf.transpose(batch_mask)))):\n #embedded = tf.nn.embedding_lookup(embedding, tf.reshape(inputs, [-1])) # deprecated\n #embedded = embedded * tf.reshape(tf.convert_to_tensor(batch_mask[:, time], tf.float32), [batch_size, 1]) # deprecated\n #copy_cond = (time >= seq_len)\n #new_output, new_state = cell(embedded, state)\n output, state = cell(embedded, state)#tf.select(copy_cond, zero_output, new_output), tf.select(copy_cond, state, new_state)\n output = tf.expand_dims(i_mask, 1) * output\n outputs.append(output)\n #outputs = batch_mask * tf.transpose(tf.pack(outputs), [2, 0, 1])\n #outputs = tf.unpack(tf.transpose(outputs, [2, 1, 0]))\n return outputs, state", "def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.W.T) + self.bv)", "def forward(self, hidden: Union[torch.Tensor, Tuple[torch.Tensor, ...]]) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:\n # First, map the non-tuple version to a 1-tuple for easier processing.\n # We will undo this at the end\n if not isinstance(hidden, tuple):\n hidden = (hidden,)\n\n batch_size, hidden_size = hidden[0].size()\n\n # If we are going to share parameters across the forward and backward\n # directions, then we need to separate them in the tensors\n if self.share_bidirectional_parameters:\n # shape: (batch_size, 2, encoder_hidden_size // 2)\n hidden = tuple(h.view(batch_size, 2, -1) for h in hidden)\n\n # Apply the bridge\n output = tuple(layer(h) for layer, h in zip(self.layers, hidden))\n\n # Reshape the tensors if the parameters are shared\n if self.share_bidirectional_parameters:\n # shape: (batch_size, decoder_hidden_size)\n output = tuple(h.view(batch_size, -1) for h in output)\n\n # Undo the tuple if there's only 1 element\n if len(output) == 1:\n output = output[0]\n return output", "def encode(self, state):\n raise NotImplementedError", "def init_hidden(self, encoder_final):\n\n #print(\"encoder final shape\")\n #print(encoder_final[0].size())\n if encoder_final is None:\n return None # start with zeros\n\n return (torch.tanh(self.bridge_hidden(encoder_final[0])),\n torch.tanh(self.bridge_cell(encoder_final[1])))", "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n # TODO: Implement Function\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\n return f_output", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerDecoderBlock(\n hidden_size=self.hidden_size,\n 
num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=(\"layer_%d\" % i)))\n super(TransformerDecoder, self).build(unused_input_shapes)", "def encode(data, encoder):\n # Get the list of hidden depths\n\thd = encoder.hidden_depths\n # Find the middle hidden layer\n\tmiddle_layer_index = (len(hd)-1)/2\n # Initialize empty container for the encoded data\n\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\n\tfor i, d_ in enumerate(data):\n # feed forward, get all the activations, and just keep\n # the middle layer, which is the encoding\n\t\tx, z_container, x_container = encoder.ff(d_,True,True)\n\t\tx_encoded = x_container[1+middle_layer_index]\n\t\tdata_encoded[i] = x_encoded\n\t#\n\treturn data_encoded", "def forward(self, *args): # noqa: R0914\r\n encoder_out, (hn, cn) = self.unified_encoder(*args)\r\n device = hn.device\r\n non_sequential_cont_decoded = self.mlp_non_seq_cont(hn)\r\n non_sequential_cat_decoded = []\r\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\r\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\r\n\r\n hn = torch.unsqueeze(hn, 0)\r\n cn = torch.unsqueeze(cn, 0)\r\n # decoded is the output prediction of timestep i-1 of the decoder\r\n decoded = torch.zeros(encoder_out.shape[0], int(\r\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\r\n seq_cont_decoded = torch.Tensor(device=device)\r\n seq_cat_decoded = []\r\n for _ in range(self.unified_encoder.seq_cat_count):\r\n seq_cat_decoded.append(torch.Tensor(device=device))\r\n\r\n for _ in range(encoder_out.shape[1]):\r\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\r\n # Predict all categorical columns\r\n out_cat_onehot = []\r\n if self.unified_encoder.seq_cat_count != 0:\r\n for idx, out in enumerate(out_cat):\r\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\r\n seq_cat_decoded[idx] = torch.cat(\r\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\r\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\r\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\r\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\r\n else:\r\n decoded = out_cont\r\n seq_cont_decoded = torch.cat(\r\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\r\n\r\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded", "def init_hidden_states(self, encoder_out):\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out)\n\n return h, c", "def get_final_encoder_states(encoder_outputs: torch.Tensor,\n mask: torch.Tensor,\n bidirectional: bool = False) -> torch.Tensor:\n # These are the indices of the last words in the sequences (i.e. length sans padding - 1). 
We\n # are assuming sequences are right padded.\n # Shape: (batch_size,)\n last_word_indices = mask.sum(1).long() - 1\n\n # handle -1 cases\n ll_ = (last_word_indices != -1).long()\n last_word_indices = last_word_indices * ll_\n\n batch_size, _, encoder_output_dim = encoder_outputs.size()\n expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)\n # Shape: (batch_size, 1, encoder_output_dim)\n final_encoder_output = encoder_outputs.gather(1, expanded_indices)\n final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)\n if bidirectional:\n final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]\n final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]\n final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)\n return final_encoder_output", "def get_state(self):\n state = super().get_state()\n state.update({\n 'num_of_fields': self.num_of_fields,\n 'hash_size': self.hash_size,\n 'embedding_dim': self.embedding_dim})\n return state", "def decode(self):\n for layer in self.layers:\n layer.decode()", "def decode(self):\n for layer in self.layers:\n layer.decode()", "def forward(self,\n input,\n hidden,\n encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n # attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_state = hidden[0] if isinstance(hidden, tuple) else hidden\n attn_weights = F.softmax(attn_state[0] @ encoder_outputs.squeeze().t(), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.permute(1, 0, 2))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.rnn(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights", "def zero_state(self, batch_size):\n del batch_size\n p = self.params\n if p.left_context != 1 or p.right_context != 0:\n msg = ('Streaming implementation of chunkwise attention with left context'\n 'or right context is not supported yet')\n raise NotImplementedError(msg)\n return py_utils.NestedMap()", "def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n rnnten = initializer(shape=(self.batch, self.units))\n return rnnten", "def extract_hidden_states(self, output):\n \n # Extracting the forward and backward hidden states from the last BiLSTM layer\n # output (batch_size, sequence length, 2 * hidden dim)\n output_fw = output[:,:,0:self._hidden_size]\n output_bw = output[:,:,self._hidden_size:]\n \n hidden_states = torch.cat((output_fw, output_bw),-1)\n \n return hidden_states", "def dev_network(self):\n freeze_model(self.eval_net)\n for data_set_name, data_set in self.data_to_dev.items():\n #print(data_set_name)\n valid_iter = make_data_iter(\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\n shuffle=False, train=False)\n valid_sources_raw = data_set.src\n\n \n # don't track gradients during validation\n r_total = 0\n roptimal_total = 0\n all_outputs = []\n i_sample = 0\n\n for valid_batch in iter(valid_iter):\n # run as during training to get validation loss (e.g. 
xent)\n\n batch = Batch(valid_batch, self.pad_index, use_cuda=self.use_cuda)\n\n encoder_output, encoder_hidden = self.model.encode(\n batch.src, batch.src_lengths,\n batch.src_mask)\n\n # if maximum output length is \n # not globally specified, adapt to src len\n if self.max_output_length is None:\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\n\n batch_size = batch.src_mask.size(0)\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\n dtype=torch.long)\n output = []\n hidden = self.model.decoder._init_hidden(encoder_hidden)\n prev_att_vector = None\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\n\n # pylint: disable=unused-variable\n for t in range(self.max_output_length):\n \n\n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\n # print(\"state on t = \", t, \" : \" , state)\n\n # decode one single step\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=batch.src_mask,\n trg_embed=self.model.trg_embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\n \n if self.state_type == 'hidden':\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu()[0]\n else:\n state = torch.FloatTensor(prev_att_vector.squeeze(1).detach().cpu().numpy()[0])\n\n logits = self.eval_net(state)\n logits = logits.reshape([1,1,-1]) \n #print(type(logits), logits.shape, logits)\n next_word = torch.argmax(logits, dim=-1) \n a = next_word.squeeze(1).detach().cpu().numpy()[0]\n prev_y = next_word\n \n output.append(next_word.squeeze(1).detach().cpu().numpy())\n prev_y = next_word\n \n # check if previous symbol was <eos>\n is_eos = torch.eq(next_word, self.eos_index)\n finished += is_eos\n # stop predicting if <eos> reached for all elements in batch\n if (finished >= 1).sum() == batch_size:\n break\n stacked_output = np.stack(output, axis=1) # batch, time\n\n #decode back to symbols\n decoded_valid_in = self.model.trg_vocab.arrays_to_sentences(arrays=batch.src,\n cut_at_eos=True)\n decoded_valid_out_trg = self.model.trg_vocab.arrays_to_sentences(arrays=batch.trg,\n cut_at_eos=True)\n decoded_valid_out = self.model.trg_vocab.arrays_to_sentences(arrays=stacked_output,\n cut_at_eos=True)\n \n \n\n hyp = stacked_output\n\n r = self.Reward(batch.trg, hyp , show = False)\n \n if i_sample == 0 or i_sample == 3 or i_sample == 6:\n print(\"\\n Sample \", i_sample, \"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\")\n print(\"Target: \", batch.trg, decoded_valid_out_trg)\n print(\"Eval : \", stacked_output, decoded_valid_out, \"\\n\")\n print(\"Reward: \", r)\n\n #r = self.Reward1(batch.trg, hyp , show = False)\n r_total += sum(r[np.where(r > 0)])\n if i_sample ==0:\n roptimal = self.Reward(batch.trg, batch.trg , show = False)\n roptimal_total += sum(roptimal[np.where(roptimal > 0)])\n \n all_outputs.extend(stacked_output)\n i_sample += 1\n\n assert len(all_outputs) == len(data_set)\n\n # decode back to symbols\n decoded_valid = self.model.trg_vocab.arrays_to_sentences(arrays=all_outputs,\n cut_at_eos=True)\n\n # evaluate with metric on full dataset\n join_char = \" \" if self.level in [\"word\", \"bpe\"] else \"\"\n valid_sources = [join_char.join(s) for s in data_set.src]\n valid_references = [join_char.join(t) for t in data_set.trg]\n valid_hypotheses = [join_char.join(t) for t in decoded_valid]\n\n # 
post-process\n if self.level == \"bpe\":\n valid_sources = [bpe_postprocess(s) for s in valid_sources]\n valid_references = [bpe_postprocess(v)\n for v in valid_references]\n valid_hypotheses = [bpe_postprocess(v) for\n v in valid_hypotheses]\n\n # if references are given, evaluate against them\n if valid_references:\n assert len(valid_hypotheses) == len(valid_references)\n\n current_valid_score = 0\n if self.eval_metric.lower() == 'bleu':\n # this version does not use any tokenization\n current_valid_score = bleu(valid_hypotheses, valid_references)\n elif self.eval_metric.lower() == 'chrf':\n current_valid_score = chrf(valid_hypotheses, valid_references)\n elif self.eval_metric.lower() == 'token_accuracy':\n current_valid_score = token_accuracy(\n valid_hypotheses, valid_references, level=self.level)\n elif self.eval_metric.lower() == 'sequence_accuracy':\n current_valid_score = sequence_accuracy(\n valid_hypotheses, valid_references)\n else:\n current_valid_score = -1\n\n self.dev_network_count += 1\n self.tb_writer.add_scalar(\"dev/dev_reward\",\n r_total, self.dev_network_count)\n self.tb_writer.add_scalar(\"dev/dev_bleu\",\n current_valid_score, self.dev_network_count)\n \n print(self.dev_network_count ,' r_total and score: ', r_total , current_valid_score)\n\n \n unfreeze_model(self.eval_net)\n return current_valid_score", "def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):\n\n batch_size = 1\n layer_states = []\n for rnn in rnns:\n hidden_size = rnn.weight_hh.size()[1]\n \n # h_0 of shape (batch, hidden_size)\n # c_0 of shape (batch, hidden_size)\n if rnn.weight_hh.is_cuda:\n h_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n c_0 = torch.cuda.FloatTensor(batch_size,hidden_size).fill_(0)\n else:\n h_0 = torch.zeros(batch_size,hidden_size)\n c_0 = torch.zeros(batch_size,hidden_size)\n\n layer_states.append((h_0, c_0))\n\n outputs = []\n for token in sequence:\n rnn_input = embedder(token)\n (cell_states, hidden_states), output, layer_states = forward_one_multilayer(rnns,rnn_input,layer_states,dropout_amount)\n\n outputs.append(output)\n\n return (cell_states, hidden_states), outputs", "def init_hidden(self, batch_size):\n return torch.zeros(()), torch.zeros(())", "def _decode_train(self, decoder, _encoder_output, _features, labels):\r\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\r\n labels[\"target_ids\"])\r\n\r\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\"target_len\"]-1)", "def __call__(self, batch):\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x[0]) for x in batch]),\n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n inputs_padded = torch.LongTensor(len(batch), max_input_len)\n inputs_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n input_id = batch[ids_sorted_decreasing[i]][0]\n inputs_padded[i, :input_id.shape[0]] = input_id\n\n phonemes_padded = torch.LongTensor(len(batch), max_input_len)\n phonemes_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n phoneme_id = batch[ids_sorted_decreasing[i]][1]\n phonemes_padded[i, :phoneme_id.shape[0]] = phoneme_id\n\n # Right zero-pad mel-spec\n num_mels = batch[0][2].size(0)\n max_target_len = max([x[2].size(1) for x in batch])\n if max_target_len % self.n_frames_per_step != 0:\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\n assert max_target_len % self.n_frames_per_step 
== 0\n\n # include mel padded and gate padded\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\n mel_padded.zero_()\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\n gate_padded.zero_()\n output_lengths = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n mel = batch[ids_sorted_decreasing[i]][2]\n mel_padded[i, :, :mel.size(1)] = mel\n gate_padded[i, mel.size(1)-1:] = 1\n output_lengths[i] = mel.size(1)\n\n return input_lengths, inputs_padded, phonemes_padded, mel_padded, gate_padded, output_lengths", "def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded", "def init_hidden(self):\n # TODO ========================\n # initialize the hidden states to zero\n\n initial_hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n return initial_hidden # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)", "def hidden(self, value):\n if value is not None:\n value.get_shape().assert_is_compatible_with(self._output_shape)\n self._hidden = value", "def forward(self, batch: torch.LongTensor,\n hidden_start: torch.FloatTensor = None) -> torch.FloatTensor:\n\n # max_len = x.size(1)\n # x,label = batch\n # batch_size x max_len x embedding_dim\n x_embedded = self.embedding(batch)\n # x_drop = self.dropout\n x_drop = self.dropout(x_embedded)\n\n # compute hidden states and logits for each time step\n # hidden_states_list = []\n # prev_hidden = hidden_start\n hidden_state = self.rnn(x_drop)[0]\n # print(hidden_state)\n # print(hidden_state[0].shape)\n # print(hidden_state[1].shape)\n\n # hidden_state = hidden_state.permute(2,1,0)\n # hidden_state_maxPooled = F.max_pool1d(hidden_state,hidden_state.shape[2])\n # hidden_state_maxPooled = hidden_state.permute(2,1,0)\n hidden_state_pooled, _ = torch.max(hidden_state, dim=1)\n\n output = self.get_logits(hidden_state_pooled)\n\n # Loss = self.loss(output, y)\n\n # hidden_state = softmax(logits(hidden_state))\n\n # batch_size x max_len x rnn_size\n # hidden_states = torch.stack(hidden_states_list, dim=1)\n\n return output", "def unbucketed_next(self):\n # Initialize batch containers\n label_batch = list()\n enc_input_batch = list()\n dec_input_batch = list()\n # Fill individual batches by iterating over the entire data source\n if self.sent_id < self.get_length():\n while len(enc_input_batch) < self.opt.batch_size:\n try:\n indexed_sent = self.data[self.sent_id]\n label_item = indexed_sent[1:]\n enc_input_item = indexed_sent[1:]\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\n enc_input_item.reverse()\n dec_input_item = indexed_sent[:-1]\n label_batch.append(label_item)\n enc_input_batch.append(enc_input_item)\n dec_input_batch.append(dec_input_item)\n self.sent_id += 1\n except IndexError:\n break\n else:\n raise IndexError\n return label_batch, enc_input_batch, dec_input_batch", "def encode(self, images):\n\n i = 0\n N = len(images)\n embs = None\n\n while True:\n end = min(N, i + self.batch_size)\n batch = images[i: end]\n\n size = end - i\n if size < self.batch_size:\n batch += self._input_padding[:self.batch_size - 
size]\n\n if embs is None:\n embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\n else:\n _embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\n embs = np.vstack((embs, _embs))\n\n i += self.batch_size\n\n if i >= N - 1:\n break\n\n return embs", "def __call__(self, batch):\r\n # Right zero-pad all one-hot text sequences to max input length\r\n input_lengths, ids_sorted_decreasing = torch.sort(\r\n torch.LongTensor([len(x[0]) for x in batch]),\r\n dim=0, descending=True)\r\n max_input_len = input_lengths[0]\r\n\r\n text_padded = torch.LongTensor(len(batch), max_input_len)\r\n text_padded.zero_()\r\n for i in range(len(ids_sorted_decreasing)):\r\n text = batch[ids_sorted_decreasing[i]][0]\r\n text_padded[i, :text.size(0)] = text\r\n\r\n # Right zero-pad mel-spec\r\n num_mels = batch[0][1].size(0)\r\n max_target_len = max([x[1].size(1) for x in batch])\r\n if max_target_len % self.n_frames_per_step != 0:\r\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\r\n assert max_target_len % self.n_frames_per_step == 0\r\n\r\n # include mel padded and gate padded\r\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\r\n mel_padded.zero_()\r\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\r\n gate_padded.zero_()\r\n output_lengths = torch.LongTensor(len(batch))\r\n for i in range(len(ids_sorted_decreasing)):\r\n mel = batch[ids_sorted_decreasing[i]][1]\r\n mel_padded[i, :, :mel.size(1)] = mel\r\n gate_padded[i, mel.size(1)-1:] = 1\r\n output_lengths[i] = mel.size(1)\r\n\r\n return text_padded, input_lengths, mel_padded, gate_padded, \\\r\n output_lengths", "def forward(self, state, encoder_padding_mask):\n residual = state.clone()\n\n '''\n ___QUESTION-6-DESCRIBE-D-START___\n What is the purpose of encoder_padding_mask? What will the output shape of `state' Tensor \n be after multi-head attention? HINT: formulate your answer in terms of \n constituent variables like batch_size, embed_dim etc...\n '''\n '''\n The encoder padding mask is used to mask the ⟨pad⟩ token which is padded to the input sequences to make the sequences in the same lengths each batch. 
Thus the word of input sequence will not pay attention to these padded tokens.\n The shape of state is (tgt_time_steps * batch_size * embed_dim)\n '''\n state, _ = self.self_attn(query=state, key=state, value=state, key_padding_mask=encoder_padding_mask)\n '''\n ___QUESTION-6-DESCRIBE-D-END___\n '''\n\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.self_attn_layer_norm(state)\n\n residual = state.clone()\n state = F.relu(self.fc1(state))\n state = F.dropout(state, p=self.activation_dropout, training=self.training)\n state = self.fc2(state)\n state = F.dropout(state, p=self.dropout, training=self.training)\n state += residual\n state = self.final_layer_norm(state)\n\n return state", "def get_reconstructed_input(self, hidden):\n return T.nnet.sigmoid(T.dot(hidden, self.w2) + self.b2)", "def _inference_initial_state(self, encoder_outputs, encoder_decoder_attention_bias):\n\n with tf.variable_scope(\"inference_initial_state\"):\n n_layers = self.attention_layers\n n_heads = self.attention_heads\n batch_size = tf.shape(encoder_outputs)[0]\n n_features = self.num_mels + self.num_freq\n\n state = {\n \"iteration\": tf.constant(0),\n \"inputs\": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),\n \"finished\": tf.cast(tf.zeros([batch_size]), tf.bool),\n \"alignment_positions\": tf.zeros([n_layers, batch_size, n_heads, 1],\n dtype=tf.int32),\n \"outputs\": {\n \"spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.zeros([0, 0, 0, 0, 0])\n ],\n \"stop_token_logits\": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),\n \"lengths\": tf.zeros([batch_size], dtype=tf.int32),\n \"mag_spec\": tf.zeros([batch_size, 0, self.num_freq * self.reduction_factor])\n },\n \"encoder_outputs\": encoder_outputs,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias\n }\n\n state_shape_invariants = {\n \"iteration\": tf.TensorShape([]),\n \"inputs\": tf.TensorShape([None, None, n_features * self.reduction_factor]),\n \"finished\": tf.TensorShape([None]),\n \"alignment_positions\": tf.TensorShape([n_layers, None, n_heads, None]),\n \"outputs\": {\n \"spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.TensorShape([None, None, None, None, None]),\n ],\n \"stop_token_logits\": tf.TensorShape([None, None, 1 * self.reduction_factor]),\n \"lengths\": tf.TensorShape([None]),\n \"mag_spec\": tf.TensorShape([None, None, None])\n },\n \"encoder_outputs\": encoder_outputs.shape,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias.shape\n }\n\n return state, state_shape_invariants", "def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = 
self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.num_mels + self.num_freq\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = tf.concat(\n [state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n weights = []\n for index, attention in enumerate(self.attentions):\n if isinstance(attention, AttentionBlock):\n weights.append(attention.multiheaded_attention.attention_weights)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state", "def decoder(self, embedded_inputs, decoder_input0,\n decoder_hidden0, encoder_outputs):\n pass", "def encode_input(self, x_tensor, inp_lens_tensor):\r\n input_emb = self.input_emb.forward(x_tensor)\r\n enc_output_each_word, enc_context_mask, enc_final_states = self.encoder(input_emb, inp_lens_tensor)\r\n enc_final_states_reshaped = (enc_final_states[0].unsqueeze(0), enc_final_states[1].unsqueeze(0))\r\n # print('lest go', enc_final_states_reshaped[1].shape)\r\n return enc_output_each_word, enc_context_mask, enc_final_states_reshaped", "def forward(\r\n self,\r\n input_ids,\r\n attention_mask: torch.Tensor,\r\n token_type_ids: torch.Tensor\r\n ):\r\n ### YOUR CODE HERE\r\n output = self.bert(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n )\r\n\r\n sequence_output = output[0] # the last hidden state (batch, sequence_length, hidden_size)\r\n logits = self.qa_outputs(sequence_output)\r\n start_logits, end_logits = logits.split(1, dim=-1)\r\n start_logits = start_logits.squeeze(-1)\r\n end_logits = end_logits.squeeze(-1)\r\n\r\n outputs = (start_logits, end_logits) # + output[2:]\r\n\r\n return outputs\r\n ### END YOUR CODE", "def build_graph(self):\n with vs.variable_scope(\"context\"):\n context_encoder = 
RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n context_hiddens = context_encoder.build_graph(self.context_embs,\n self.context_mask) # (batch_size, context_len, hidden_size*2)\n\n with vs.variable_scope(\"question\"):\n question_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n question_hiddens = question_encoder.build_graph(self.qn_embs,\n self.qn_mask) # (batch_size, question_len, hidden_size*2)\n question_last_hidden = tf.reshape(question_hiddens[:, -1, :], (-1, 2 * self.FLAGS.hidden_size))\n question_last_hidden = tf.contrib.layers.fully_connected(question_last_hidden,\n num_outputs=self.FLAGS.hidden_size)\n # Use context hidden states to attend to question hidden states\n\n # attn_output is shape (batch_size, context_len, hidden_size*2)\n # The following is BiDAF attention\n if self.FLAGS.use_bidaf:\n attn_layer = BiDAF(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens,\n self.context_mask) # (batch_size, context_len, hidden_size * 6)\n else: # otherwise, basic attention\n attn_layer = BasicAttn(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n _, attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens)\n # Concat attn_output to context_hiddens to get blended_reps\n blended_reps = tf.concat([context_hiddens, attn_output], axis=2) # (batch_size, context_len, hidden_size*4)\n\n blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size)\n\n decoder = RNNDecoder(self.FLAGS.batch_size, self.FLAGS.hidden_size, self.ans_vocab_size, self.FLAGS.answer_len,\n self.ans_embedding_matrix, self.keep_prob, sampling_prob=self.sampling_prob,\n schedule_embed=self.FLAGS.schedule_embed, pred_method=self.FLAGS.pred_method)\n (self.train_logits, self.train_translations, _), \\\n (self.dev_logits, self.dev_translations, self.attention_results) = decoder.build_graph(blended_reps_final, question_last_hidden,\n self.ans_embs, self.ans_mask, self.ans_ids,\n self.context_mask)", "def sample_n(self, method, batch, max_sample_length, sample_num):\r\n inp = batch.text\r\n inp_len_np = batch.length.cpu().numpy()\r\n\r\n pad_inp1 = torch.LongTensor([self.fw_start_token] * inp.size(1)).view(1,-1)\r\n pad_inp2 = torch.LongTensor([self.pad_token] * inp.size(1)).view(1,-1)\r\n\r\n if self.gpu >= 0:\r\n inp = inp.to(self.gpu)\r\n pad_inp1 = pad_inp1.to(self.gpu)\r\n pad_inp2 = pad_inp2.to(self.gpu)\r\n\r\n padded_inp = torch.cat([pad_inp1, inp, pad_inp2], 0)\r\n padded_inp[inp_len_np + 1] = self.bw_start_token\r\n\r\n assert padded_inp.max().item() < self.n_vocab + 2\r\n assert inp_len_np[0] + 2 <= padded_inp.size(0)\r\n padded_enc_out = self.encoder(padded_inp, inp_len_np + 2) # [T+2,B,H]\r\n\r\n # extract forward hidden state\r\n assert 0 <= batch.fw_pos.item() - 1 <= padded_enc_out.size(0) - 1\r\n assert 0 <= batch.fw_pos.item() <= padded_enc_out.size(0) - 1\r\n fw_hidden = padded_enc_out.index_select(0,batch.fw_pos - 1)\r\n fw_hidden = torch.cat([fw_hidden[:,:,:self.hidden_size],fw_hidden[:,:,self.hidden_size:]], 0)\r\n fw_next_token = padded_inp.index_select(0,batch.fw_pos).view(1,-1)\r\n\r\n # extract backward hidden state\r\n assert 0 <= batch.bw_pos.item() + 3 <= padded_enc_out.size(0) - 1\r\n assert 0 <= batch.bw_pos.item() + 2 <= padded_enc_out.size(0) - 1\r\n bw_hidden = padded_enc_out.index_select(0,batch.bw_pos + 3)\r\n bw_hidden = torch.cat([bw_hidden[:,:,:self.hidden_size], 
bw_hidden[:,:,self.hidden_size:]], 0)\r\n bw_next_token = padded_inp.index_select(0,batch.bw_pos + 2).view(1,-1)\r\n\r\n fw_sample_outputs = self.sample_n_sequences(method, 'fw', fw_next_token, fw_hidden, max_sample_length, sample_num)\r\n bw_sample_outputs = self.sample_n_sequences(method, 'bw', bw_next_token, bw_hidden, max_sample_length, sample_num)\r\n\r\n self.filter_special_tokens(fw_sample_outputs)\r\n self.filter_special_tokens(bw_sample_outputs)\r\n\r\n return fw_sample_outputs, bw_sample_outputs" ]
[ "0.6531696", "0.6165337", "0.61568475", "0.59570867", "0.59406155", "0.5908961", "0.58463246", "0.5821808", "0.5770365", "0.57613736", "0.57613736", "0.5760088", "0.5760088", "0.5760088", "0.5757003", "0.57509893", "0.5750539", "0.57451725", "0.570995", "0.5698639", "0.5691191", "0.5663548", "0.5658067", "0.563007", "0.5612284", "0.56113315", "0.5608394", "0.5575342", "0.5572906", "0.555832", "0.55566925", "0.55418396", "0.5522725", "0.55199784", "0.5515714", "0.55051684", "0.5504612", "0.54856265", "0.5482307", "0.5471251", "0.546024", "0.54584426", "0.54535824", "0.54535824", "0.5449207", "0.5449207", "0.54463977", "0.54393405", "0.54261863", "0.54207176", "0.54178464", "0.54089063", "0.5385386", "0.5385386", "0.5374253", "0.53654444", "0.536061", "0.5335899", "0.5332261", "0.5322773", "0.5317808", "0.53157264", "0.5314026", "0.53129804", "0.53081757", "0.5292727", "0.52859914", "0.5280081", "0.52565163", "0.52455044", "0.5240575", "0.5234109", "0.5234089", "0.52310425", "0.52274734", "0.52274734", "0.52236813", "0.5212277", "0.5212023", "0.5211154", "0.521001", "0.5199415", "0.518788", "0.5187849", "0.51868826", "0.5184745", "0.51797944", "0.5174779", "0.5173124", "0.51716137", "0.51612455", "0.51600647", "0.5158575", "0.5141562", "0.5122168", "0.5121686", "0.5120307", "0.5110036", "0.5109149", "0.5105331", "0.510459" ]
0.0
-1
Performs a single decoding step for one example. It passes the decoder the hidden state and the tensor with the embedding vector for the input token. The decoder's result is passed to the output net to obtain the logits for every item in the dictionary. It returns those logits and the new hidden state produced by the decoder.
def decode_one(self, hid, input_x):
    out, new_hid = self.decoder(input_x.unsqueeze(0), hid)
    out = self.output(out)
    return out.squeeze(dim=0), new_hid
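For context, a minimal greedy-decoding sketch showing how a step function like decode_one above could be driven in a loop. It assumes self.decoder is a single-step RNN (e.g. a GRU) and self.output maps hidden states to vocabulary logits; the names model.emb, SOS_ID and EOS_ID are hypothetical and do not come from the snippet above.

import torch

def greedy_decode(model, hid, sos_id, eos_id, max_len=50):
    # Start from the start-of-sequence token (batch size 1).
    token = torch.tensor([sos_id])
    result = []
    for _ in range(max_len):
        emb = model.emb(token)                     # [1, embed_dim] embedding of the current token
        logits, hid = model.decode_one(hid, emb)   # [1, vocab_size] logits, updated hidden state
        token = logits.argmax(dim=-1)              # greedy choice over the vocabulary
        if token.item() == eos_id:
            break
        result.append(token.item())
    return result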
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode():\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(True)\n model.batch_size = 1 # We decode one sentence at a time.\n init_model(sess, model)\n\n # Load vocabularies.\n vocab, rev_vocab = data_utils.get_vocabulary(FLAGS.data_dir, FLAGS.words,\n FLAGS.word_embeddings, FLAGS.vocab_size)\n\n # Decode from standard input.\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n while sentence:\n # Get token-ids for the input sentence.\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), vocab,\n data_utils.basic_word_tokenizer)\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(buckets))\n if buckets[b][0] > len(token_ids)])\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n # Get output logits for the sentence.\n _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True)\n # This is a greedy decoder - outputs are just argmaxes of output_logits.\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n # If there is an EOS symbol in outputs, cut them at that point.\n if data_utils.EOS_ID in outputs:\n outputs = outputs[:outputs.index(data_utils.EOS_ID)]\n # Print out the network's response to the input.\n join = \" \" if FLAGS.words else \"\"\n print(join.join([tf.compat.as_str(rev_vocab[output]) for output in outputs]))\n print(\"> \", end=\"\")\n sys.stdout.flush()\n sentence = sys.stdin.readline()", "def decoding_fn(decoder_input, cache, **kwargs):\n index = kwargs['index']\n # [batch_size * beam_width, 1, hidden_size]\n decoder_input = self._embedding_logits_layer(decoder_input, 'embedding')\n decoder_input += timing_signal[index:index + 1]\n\n # [batch_size * beam_width, 1, hidden_size]\n decoder_outputs = self._decoder(decoder_input,\n cache['encoder_outputs'],\n tf.zeros((1, 1, 1, index + 1), \n dtype='float32'),\n cache['padding_mask'],\n training=False,\n cache=cache)\n\n # [[batch_size * beam_width, 1, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, mode='logits')\n logits = tf.squeeze(logits, axis=1)\n return logits, cache", "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n # TODO: Implement Function\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\n return f_output", "def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,\n end_of_sequence_id, max_target_sequence_length,\n vocab_size, output_layer, batch_size, keep_prob):\n # Convert the start_ids to be a vector with batch size (the go id repeated batch size times)\n start_ids = tf.tile([start_of_sequence_id], [batch_size])\n # Create the embedding helper.\n embedding_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n dec_embeddings, start_ids, end_of_sequence_id)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(\n dec_cell, embedding_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n basic_decoder,maximum_iterations=max_target_sequence_length)\n return f_output", "def 
decoder(self, tensor, reuse=False):\n\n outputs, predictions = [], []\n\n with tf.variable_scope(\"decoder\", reuse=reuse) as scope:\n\n\n # add gausian noise\n decoder_input = gaussian_noise_layer(tensor, 0.2)\n encoder_dim = tensor.get_shape().as_list()[-1]\n W = tf.get_variable(\"decoder_last_weight\", [self.num_units + encoder_dim, self.voca_size])\n b = tf.get_variable(\"decoder_last_bias\", [self.voca_size])\n # time-major: [batch_size, max_len, num_units] --> [max_len, batch_size, num_units]\n # decoder_input = tf.transpose(decoder_input, [1,0,2])\n cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=False)\n # initial_state = state = decoder_input\n initial_state = tf.zeros([self.batch_size, self.num_units])\n initial_state = tf.concat([initial_state, decoder_input], 1)\n\n\n for i in range(self.max_len):\n if i == 0:\n # start of sequence\n input_ = tf.nn.embedding_lookup(self.embedding, tf.ones([self.batch_size], dtype=tf.int32))\n state = initial_state\n\n else:\n scope.reuse_variables()\n input_ = tf.nn.embedding_lookup(self.embedding, prediction)\n\n output, state = cell(input_, state)\n output = tf.concat([output, tensor], -1)\n output = tf.nn.xw_plus_b(output, W, b)\n\n prediction = tf.argmax(output, axis=1)\n\n outputs.append(output)\n predictions.append(prediction)\n\n predictions = tf.transpose(tf.stack(predictions), [1,0])\n outputs = tf.stack(outputs)\n\n return predictions, outputs", "def _build_decoder(self, encoder_outputs, encoder_state, hparams):\n\t\ttgt_sos_id = tf.cast(tf.constant(hparams.sos_id), tf.int32)\n\t\ttgt_eos_id = tf.cast(tf.constant(hparams.eos_id), tf.int32)\n\n\t\tmaximum_iterations = self._get_infer_maximum_iterations(hparams)\n\n\t\t# Decoder\n\t\twith tf.variable_scope('decoder') as decoder_scope:\n\t\t\tcell, decoder_initial_state = self._build_decoder_cell(hparams, encoder_state)\n\t\t\t\n\t\t\tlogits = tf.no_op()\n\t\t\tdecoder_outputs = None\n\n\t\t\t# Train or Eval\n\t\t\tif self.mode != 'infer':\n\t\t\t\tdecoder_emb_input = tf.nn.embedding_lookup(self.embedding_decoder, self.decoder_input_data)\n\n\t\t\t\t# helper\n\t\t\t\thelper = tf.contrib.seq2seq.TrainingHelper(\n\t\t\t\t\tdecoder_emb_input, self.seq_length_decoder_input_data)\n\t\t\t\t\n\t\t\t\t# decoder\n\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\tcell,\n\t\t\t\t\thelper,\n\t\t\t\t\tdecoder_initial_state)\n\t\t\t\t\n\t\t\t\t# dynamic decoding\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\t\n\t\t\t\tsample_id = outputs.sample_id\n\t\t\t\tlogits = self.output_layer(outputs.rnn_output)\n\t\t\telse:\n\t\t\t\tinfer_mode = hparams.infer_mode\n\t\t\t\tstart_tokens = tf.fill([self.batch_size], tgt_sos_id)\n\t\t\t\tend_token = tgt_eos_id\n\t\t\t\t_info(' decoder by infer_mode={} beam_width={}'.format(infer_mode, hparams.beam_width))\n\n\t\t\t\tif infer_mode == 'greedy':\n\t\t\t\t\thelper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n\t\t\t\t\t\tself.embedding_decoder, start_tokens, end_token)\n\t\t\t\telif infer_mode == 'beam_search':\n\t\t\t\t\tbeam_width = hparams.beam_width\n\t\t\t\t\tlength_penalty_weight = hparams.length_penalty_weight\n\t\t\t\t\tcoverage_penalty_weight = hparams.coverage_penalty_weight\n\n\t\t\t\t\t# beam search do not require helper\n\t\t\t\t\tmy_decoder = 
tf.contrib.seq2seq.BeamSearchDecoder(\n\t\t\t\t\t\tcell=cell,\n\t\t\t\t\t\tembedding=self.embedding_decoder,\n\t\t\t\t\t\tstart_tokens=start_tokens,\n\t\t\t\t\t\tend_token=end_token,\n\t\t\t\t\t\tinitial_state=decoder_initial_state,\n\t\t\t\t\t\tbeam_width=beam_width,\n\t\t\t\t\t\toutput_layer=self.output_layer,\n\t\t\t\t\t\tlength_penalty_weight=length_penalty_weight,\n\t\t\t\t\t\tcoverage_penalty_weight=coverage_penalty_weight)\n\t\t\t\telse:\n\t\t\t\t\t_error('Unknown infer_mode {}'.format(infer_mode))\n\t\t\t\t\traise ValueError\n\t\t\t\t\n\t\t\t\tif infer_mode != 'beam_search':\n\t\t\t\t\tmy_decoder = tf.contrib.seq2seq.BasicDecoder(\n\t\t\t\t\t\tcell,\n\t\t\t\t\t\thelper,\n\t\t\t\t\t\tdecoder_initial_state,\n\t\t\t\t\t\toutput_layer=self.output_layer)\t\t# apply to the RNN output prior to storing the result or sampling\n\t\t\t\t\n\t\t\t\toutputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n\t\t\t\t\tmy_decoder,\n\t\t\t\t\tmaximum_iterations=maximum_iterations,\n\t\t\t\t\tswap_memory=True,\n\t\t\t\t\tscope=decoder_scope)\n\t\t\t\n\t\t\t\tif infer_mode == 'beam_search':\n\t\t\t\t\tsample_id = outputs.predicted_ids\n\t\t\t\telse:\n\t\t\t\t\tlogits = outputs.rnn_output\n\t\t\t\t\tsample_id = outputs.sample_id\n\n\t\treturn logits, sample_id, final_context_state", "def _decode_train(self, decoder, _encoder_output, _features, labels):\r\n target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,\r\n labels[\"target_ids\"])\r\n\r\n return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels[\"target_len\"]-1)", "def _decode(self, tgt_token_ids, encoder_outputs, padding_mask):\n tgt_seq_len = tf.shape(tgt_token_ids)[1]\n\n # [batch_size, tgt_seq_len, hidden_size]\n tgt_token_embeddings = self._embedding_logits_layer(\n tgt_token_ids, 'embedding')\n\n # [tgt_seq_len, hidden_size]\n positional_encoding = utils.get_positional_encoding(\n tgt_seq_len, self._hidden_size)\n tgt_token_embeddings += positional_encoding\n tgt_token_embeddings = self._decoder_dropout_layer(\n tgt_token_embeddings, training=True) \n\n look_ahead_mask = utils.get_look_ahead_mask(tgt_seq_len)\n\n # [batch_size, tgt_seq_len, hidden_size]\n decoder_outputs = self._decoder(tgt_token_embeddings, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training=True)\n\n # [batch_size, tgt_seq_len, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, 'logits')\n return logits", "def _decode(self, input_dict):\n encoder_outputs = input_dict['encoder_output']['outputs']\n enc_src_lengths = input_dict['encoder_output']['src_length']\n if self._mode == 'train':\n spec = (\n input_dict['target_tensors'][0]\n if 'target_tensors' in input_dict\n else None\n )\n spec_length = (\n input_dict['target_tensors'][1]\n if 'target_tensors' in input_dict\n else None\n )\n\n _batch_size = tf.shape(encoder_outputs)[0]\n\n training = self._mode == 'train'\n regularizer = self.params.get('regularizer', None)\n\n if self.params.get('enable_postnet', True):\n if 'postnet_conv_layers' not in self.params:\n raise ValueError(\n 'postnet_conv_layers must be passed from config file if postnet is'\n 'enabled'\n )\n\n num_audio_features = self._n_feats\n\n output_projection_layer = tf.layers.Dense(\n name='output_proj', units=num_audio_features, use_bias=True\n )\n stop_token_projection_layer = tf.layers.Dense(\n name='stop_token_proj', units=1, use_bias=True\n )\n\n prenet = None\n if self.params.get('enable_prenet', True):\n prenet = Prenet(\n self.params.get('prenet_units', 256),\n 
self.params.get('prenet_layers', 2),\n self.params.get('prenet_dropout', 0.5),\n self.params.get('prenet_enable_dropout', True),\n self.params.get('prenet_activation', tf.nn.relu),\n self.params['dtype'],\n )\n\n cell_params = {}\n cell_params['num_units'] = self.params['decoder_cell_units']\n decoder_cells = [\n single_cell(\n cell_class=self.params['decoder_cell_type'],\n cell_params=cell_params,\n zoneout_prob=self.params.get('zoneout_prob', 0.0),\n dp_output_keep_prob=1.0\n - self.params.get('dropout_prob', 0.1),\n training=training,\n )\n for _ in range(self.params['decoder_layers'])\n ]\n\n if self.params['attention_type'] is not None:\n attention_mechanism = self._build_attention(\n encoder_outputs,\n enc_src_lengths,\n self.params.get('attention_bias', False),\n )\n\n attention_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n attentive_cell = AttentionWrapper(\n cell=attention_cell,\n attention_mechanism=attention_mechanism,\n alignment_history=True,\n output_attention='both',\n )\n\n decoder_cell = attentive_cell\n\n if self.params['attention_type'] is None:\n decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n if self._mode == 'train':\n train_and_not_sampling = True\n helper = TacotronTrainingHelper(\n inputs=spec,\n sequence_length=spec_length,\n prenet=None,\n model_dtype=self.params['dtype'],\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n elif self._mode == 'eval' or self._mode == 'infer':\n train_and_not_sampling = False\n inputs = tf.zeros(\n (_batch_size, 1, num_audio_features),\n dtype=self.params['dtype'],\n )\n helper = TacotronHelper(\n inputs=inputs,\n prenet=None,\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n else:\n raise ValueError('Unknown mode for decoder: {}'.format(self._mode))\n decoder = TacotronDecoder(\n decoder_cell=decoder_cell,\n helper=helper,\n initial_decoder_state=decoder_cell.zero_state(\n _batch_size, self.params['dtype']\n ),\n attention_type=self.params['attention_type'],\n spec_layer=output_projection_layer,\n stop_token_layer=stop_token_projection_layer,\n prenet=prenet,\n dtype=self.params['dtype'],\n train=train_and_not_sampling,\n )\n\n if self._mode == 'train':\n maximum_iterations = tf.reduce_max(spec_length)\n else:\n maximum_iterations = tf.reduce_max(enc_src_lengths) * 10\n\n outputs, final_state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n # outputs, final_state, sequence_lengths, final_inputs = dynamic_decode(\n decoder=decoder,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n swap_memory=self.params.get('use_swap_memory', False),\n output_time_major=self.params.get('time_major', False),\n parallel_iterations=self.params.get('parallel_iterations', 32),\n )\n\n decoder_output = outputs.rnn_output\n stop_token_logits = outputs.stop_token_output\n\n with tf.variable_scope('decoder'):\n # If we are in train and doing sampling, we need to do the projections\n if train_and_not_sampling:\n decoder_spec_output = output_projection_layer(decoder_output)\n stop_token_logits = stop_token_projection_layer(\n decoder_spec_output\n )\n decoder_output = decoder_spec_output\n\n ## Add the post net ##\n if self.params.get('enable_postnet', True):\n dropout_keep_prob = self.params.get(\n 'postnet_keep_dropout_prob', 0.5\n )\n\n top_layer = decoder_output\n for i, conv_params in enumerate(self.params['postnet_conv_layers']):\n ch_out = conv_params['num_channels']\n kernel_size = conv_params['kernel_size'] # [time, freq]\n strides = 
conv_params['stride']\n padding = conv_params['padding']\n activation_fn = conv_params['activation_fn']\n\n if ch_out == -1:\n ch_out = self._n_feats\n\n top_layer = conv_bn_actv(\n layer_type='conv1d',\n name='conv{}'.format(i + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=activation_fn,\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=self.params.get(\n 'postnet_data_format', 'channels_last'\n ),\n bn_momentum=self.params.get('postnet_bn_momentum', 0.1),\n bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),\n )\n top_layer = tf.layers.dropout(\n top_layer,\n rate=1.0 - dropout_keep_prob,\n training=training,\n )\n\n else:\n top_layer = tf.zeros(\n [\n _batch_size,\n maximum_iterations,\n outputs.rnn_output.get_shape()[-1],\n ],\n dtype=self.params['dtype'],\n )\n\n if regularizer and training:\n vars_to_regularize = []\n vars_to_regularize += attentive_cell.trainable_variables\n vars_to_regularize += (\n attention_mechanism.memory_layer.trainable_variables\n )\n vars_to_regularize += output_projection_layer.trainable_variables\n vars_to_regularize += (\n stop_token_projection_layer.trainable_variables\n )\n\n for weights in vars_to_regularize:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == tf.float16:\n tf.add_to_collection(\n 'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )\n\n if self.params.get('enable_prenet', True):\n prenet.add_regularization(regularizer)\n\n if self.params['attention_type'] is not None:\n alignments = tf.transpose(\n final_state.alignment_history.stack(), [1, 2, 0]\n )\n else:\n alignments = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n spectrogram_prediction = decoder_output + top_layer\n\n mag_spec_prediction = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n stop_token_prediction = tf.sigmoid(stop_token_logits)\n outputs = [\n decoder_output,\n spectrogram_prediction,\n alignments,\n stop_token_prediction,\n sequence_lengths,\n mag_spec_prediction,\n ]\n\n return {'outputs': outputs, 'stop_token_prediction': stop_token_logits}", "def decoder(self, embedded_inputs, decoder_input0,\n decoder_hidden0, encoder_outputs):\n pass", "def decode(self, targets, encoder_outputs, encoder_attn_bias, input_shape,\n training):\n with tf.name_scope('decode'):\n length = tf.shape(targets)[1]\n decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(\n length)\n encoder_outputs = tf.reshape(\n encoder_outputs, [input_shape[0], -1, self._hparams['hidden_size']])\n decoder_inputs = tf.pad(\n targets, [[0, 0], [1, 0]], constant_values=input_utils.START)\n\n # Remove last element.\n decoder_inputs = decoder_inputs[:, :-1]\n decoder_inputs = self._word_embedding_layer(decoder_inputs)\n\n with tf.name_scope('add_pos_encoding'):\n pos_encoding = self._position_embedding_layer(decoder_inputs)\n decoder_inputs += pos_encoding\n\n if training:\n decoder_inputs = tf.nn.dropout(\n decoder_inputs, rate=self._hparams['layer_postprocess_dropout'])\n\n decoder_outputs = self._decoder(\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n encoder_attn_bias,\n training=training)\n logits = self._word_layer(decoder_outputs)\n return logits", "def forward(\r\n self,\r\n input_ids,\r\n encoder_hidden_states,\r\n encoder_padding_mask,\r\n decoder_padding_mask,\r\n 
decoder_causal_mask,\r\n past_key_values=None,\r\n use_cache=False,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n **unused,\r\n ):\r\n\r\n if \"decoder_cached_states\" in unused:\r\n warnings.warn(\r\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_cached_states\")\r\n if \"decoder_past_key_values\" in unused:\r\n warnings.warn(\r\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\r\n FutureWarning,\r\n )\r\n past_key_values = unused.pop(\"decoder_past_key_values\")\r\n\r\n # check attention mask and invert\r\n if encoder_padding_mask is not None:\r\n encoder_padding_mask = invert_mask(encoder_padding_mask)\r\n\r\n # embed positions\r\n positions = self.embed_positions(input_ids, use_cache=use_cache)\r\n\r\n if use_cache:\r\n input_ids = input_ids[:, -1:]\r\n positions = positions[:, -1:]\r\n\r\n x = self.embed_tokens(input_ids) * self.embed_scale\r\n if self.do_blenderbot_90_layernorm:\r\n x = self.layernorm_embedding(x)\r\n x += positions\r\n else:\r\n x += positions\r\n x = self.layernorm_embedding(x)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n\r\n # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n # decoder layers\r\n all_hidden_states = () if output_hidden_states else None\r\n all_self_attns = () if output_attentions else None\r\n enc_dec_all_attn = () if output_attentions else None\r\n next_decoder_cache = []\r\n for idx, decoder_layer in enumerate(self.layers):\r\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\r\n if output_hidden_states:\r\n all_hidden_states += (x,)\r\n dropout_probability = random.uniform(0, 1)\r\n if self.training and (dropout_probability < self.layerdrop):\r\n continue\r\n\r\n layer_state = past_key_values[idx] if past_key_values is not None else None\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n x, layer_self_attn, layer_past,_ = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n \"\"\"\r\n\r\n #isidora - start - replace _ with enc_dec_attn to get the encoder-decoder attn weights\r\n x, layer_self_attn, layer_past, enc_dec_attn = decoder_layer(\r\n x,\r\n encoder_hidden_states,\r\n encoder_attn_mask=encoder_padding_mask,\r\n decoder_padding_mask=decoder_padding_mask,\r\n layer_state=layer_state,\r\n causal_mask=decoder_causal_mask,\r\n output_attentions=output_attentions,\r\n )\r\n #isidora - end\r\n\r\n\r\n if use_cache:\r\n next_decoder_cache.append(layer_past.copy())\r\n\r\n if output_attentions:\r\n all_self_attns += (layer_self_attn,)\r\n enc_dec_all_attn += (enc_dec_attn,)\r\n\r\n if self.layer_norm: # if config.add_final_layer_norm (mBART)\r\n x = self.layer_norm(x)\r\n\r\n # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)\r\n if output_hidden_states:\r\n all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)\r\n x = x.transpose(0, 1)\r\n encoder_hidden_states = encoder_hidden_states.transpose(0, 1)\r\n\r\n next_cache = 
next_decoder_cache if use_cache else None\r\n\r\n #isidora - start - return enc_dec_all_attn instead of decoder outputs\r\n return enc_dec_all_attn\r\n #isidora - end\r\n\r\n #isidora - in comment\r\n \"\"\"\r\n if not return_dict:\r\n return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)\r\n return BaseModelOutputWithPast(\r\n last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns\r\n )\r\n \"\"\"", "def call(self,\n inputs,\n cache=None,\n decode_loop_step=None,\n padded_decode=False):\n attention_bias = inputs[\"attention_bias\"]\n target_ids = inputs[\"target_ids\"]\n all_encoder_outputs = inputs[\"all_encoder_outputs\"]\n self_attention_bias = inputs[\"self_attention_bias\"]\n if not isinstance(all_encoder_outputs, list):\n all_encoder_outputs = [all_encoder_outputs]\n\n target_embeds = self.embedding_lookup(target_ids)\n if decode_loop_step is None:\n target_embeds = self.embedding_postprocessor(target_embeds)\n else:\n target_embeds = self._decoding_step_time_signal(target_embeds,\n decode_loop_step)\n decoder_inputs = dict(\n decoder_inputs=target_embeds,\n encoder_outputs=all_encoder_outputs,\n self_attention_mask=self_attention_bias,\n attention_mask=attention_bias)\n if self.multi_channel_cross_attention:\n decoder_inputs[\"doc_attention_probs\"] = inputs[\"doc_attention_probs\"]\n decode_outputs, cache = self.decoder(\n decoder_inputs, cache, decode_loop_step if padded_decode else None)\n return decode_outputs", "def decode(self,\n decoder_input,\n encoder_output,\n encoder_decoder_attention_bias,\n decoder_self_attention_bias,\n hparams,\n cache=None,\n nonpadding=None,\n losses=None):\n del losses\n # TODO(dehghani): enable caching.\n del cache\n\n decoder_input = tf.nn.dropout(decoder_input,\n 1.0 - hparams.layer_prepostprocess_dropout)\n\n # No caching in Universal Transformers!\n (decoder_output, dec_extra_output) = (\n my_universal_transformer_util.universal_transformer_decoder(\n decoder_input,\n encoder_output,\n decoder_self_attention_bias,\n encoder_decoder_attention_bias,\n hparams,\n nonpadding=nonpadding,\n save_weights_to=self.attention_weights))\n\n # Expand since t2t expects 4d tensors.\n return tf.expand_dims(decoder_output, axis=2), dec_extra_output", "def attention_decoder(decoder_inputs,\n attention_states,\n cell,\n output_size=None,\n dtype=None,\n scope=None):\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if output_size is None:\n output_size = cell.output_size\n \n # ==================================scope=================================================\n with variable_scope.variable_scope(scope or \"TemporalAttn\", dtype=dtype) as scope:\n \n dtype = scope.dtype\n batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n attn_size = attention_states.get_shape()[2].value\n \n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = array_ops.reshape(attention_states, [-1, attn_length, 1, attn_size])\n # U_d * h_i for i in range(T) (filter)\n u = variable_scope.get_variable(\"AttnDecoderU\", [1, 1, attn_size, attn_size], dtype=dtype)\n hidden_features = nn_ops.conv2d(hidden, u, [1, 1, 1, 1], \"SAME\")\n \n v = variable_scope.get_variable(\"AttnDecoderV\", [attn_size], dtype=dtype)\n \n # how to get the initial_state\n initial_state_size = array_ops.stack([batch_size, cell.output_size])\n initial_state = 
[array_ops.zeros(initial_state_size, dtype=dtype) for _ in xrange(2)]\n state = initial_state\n \n w = variable_scope.get_variable(\"AttnDecoderW\", [2*cell.output_size, attn_size], dtype=dtype)\n b = variable_scope.get_variable(\"AttnDecoderb\", [attn_size], dtype=dtype)\n \n # beta_scalar = variable_scope.get_variable(\"BetaScalar\", [attn_length])\n \n def attention(query, step):\n \"\"\"\n Put attention masks on hidden using hidden_features and query.\n \"\"\"\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * math_ops.tanh(hidden_features + _tmp), [2, 3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta\n\n outputs = []\n attns = []\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, 0)\n attns.append(attn_t)\n # =============================recurrent===========================\n for i, inp in enumerate(decoder_inputs):\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n \n # LSTM_d([\\tilde{\\mathbf{h}}_{t}; \\mathbf{y}_t], \\hat{\\mathbf{y}}_{t}, \\mathbf{s}^d_{t})\n with variable_scope.variable_scope(\"DecoderOutput\"):\n x = tf.concat([inp, h_t], 1)\n cell_output, state = cell(x, state)\n outputs.append(cell_output)\n\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, i+1)\n attns.append(attn_t)\n \n with variable_scope.variable_scope(\"AttnDecoderOutput\"):\n inputs = tf.concat([cell_output, h_t], 1)\n output = Linear(inputs, output_size, True)(inputs)\n outputs.append(output)\n \n return outputs, state, attns", "def decode(prev_hidden: torch.tensor, source_hiddens: torch.tensor, prev_context: torch.tensor,\n input: int, model: Seq2SeqAttentionModel) -> (\n torch.tensor, torch.tensor, torch.tensor, torch.tensor):\n\n decode_in = torch.cat((model.target_embedding_matrix[input], prev_context))\n hidden_out = model.decoder_gru.forward(decode_in, prev_hidden)\n # passing the top layer of encoder and decoder hidden dims\n attention_weights = model.attention.forward(source_hiddens[:,-1,:], hidden_out[-1])\n context = torch.mm(attention_weights.unsqueeze(dim=0),source_hiddens[:,-1,:]).squeeze()\n log_probs = model.output_layer.forward(torch.cat((hidden_out[-1].squeeze(),context)))\n return log_probs, hidden_out, context, attention_weights", "def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = 
tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.num_mels + self.num_freq\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = tf.concat(\n [state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n weights = []\n for index, attention in enumerate(self.attentions):\n if isinstance(attention, AttentionBlock):\n weights.append(attention.multiheaded_attention.attention_weights)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state", "def _build_decoding_fn(self, max_decode_length):\n # [max_decode_length, hidden_size]\n timing_signal = utils.get_positional_encoding(\n max_decode_length, self._hidden_size)\n timing_signal = tf.cast(timing_signal, 'float32')\n\n def decoding_fn(decoder_input, cache, **kwargs):\n \"\"\"Computes the logits of the next decoded token ids.\n\n Args:\n decoder_input: int tensor of shape [batch_size * beam_width, 1], the \n decoded tokens at index `i`.\n cache: dict of entries\n 'encoder_outputs': tensor of shape \n [batch_size * beam_width, src_seq_len, hidden_size],\n 'padding_mask': tensor of shape\n [batch_size * beam_width, 1, 1, src_seq_len],\n\n and entries with keys 'layer_0',...,'layer_[decoder_num_layers - 1]'\n where the value associated with key 'layer_*' is a dict with entries\n 'k': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'v': tensor of shape [batch_size * beam_width, seq_len, num_heads, \n size_per_head],\n 'tgt_tgt_attention': tensor of shape [batch_size * beam_width, \n num_heads, seq_len, seq_len],\n 'tgt_src_attention': tensor of shape [batch_size * beam_width, \n num_heads, seq_len, src_seq_len].\n Note `seq_len` is the running length of the growing decode sequence.\n kwargs: dict, storing the following additional keyword arguments.\n index -> int scalar tensor, the 
index of the `decoder_input` in the \n decoded sequence.\n\n Returns:\n logits: float tensor of shape [batch_size * beam_width, vocab_size].\n cache: a dict with the same structure as the input `cache`, except that\n the shapes of the values of key `k`, `v`, `tgt_tgt_attention`, \n `tgt_src_attention` are\n [batch_size * beam_width, seq_len + 1, num_heads, size_per_head],\n [batch_size * beam_width, seq_len + 1, num_heads, size_per_head],\n [batch_size * beam_width, num_heads, seq_len + 1, seq_len + 1],\n [batch_size * beam_width, num_heads, seq_len + 1, src_seq_len].\n \"\"\"\n index = kwargs['index']\n # [batch_size * beam_width, 1, hidden_size]\n decoder_input = self._embedding_logits_layer(decoder_input, 'embedding')\n decoder_input += timing_signal[index:index + 1]\n\n # [batch_size * beam_width, 1, hidden_size]\n decoder_outputs = self._decoder(decoder_input,\n cache['encoder_outputs'],\n tf.zeros((1, 1, 1, index + 1), \n dtype='float32'),\n cache['padding_mask'],\n training=False,\n cache=cache)\n\n # [[batch_size * beam_width, 1, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, mode='logits')\n logits = tf.squeeze(logits, axis=1)\n return logits, cache\n\n return decoding_fn", "def decode(\n self,\n encoded,\n encoder_input_tokens, # only needed for masks\n decoder_input_tokens,\n decoder_target_tokens,\n encoder_segment_ids=None,\n decoder_segment_ids=None,\n decoder_positions=None,\n enable_dropout=True,\n decode=False,\n max_decode_length=None):\n cfg = self.config\n\n # Make padding attention masks.\n if decode:\n # fast autoregressive decoding uses only a special encoder-decoder mask\n decoder_mask = None\n encoder_decoder_mask = layers.make_attention_mask(\n jnp.ones_like(decoder_target_tokens) > 0,\n encoder_input_tokens > 0,\n dtype=cfg.dtype)\n else:\n decoder_mask = layers.make_decoder_mask(\n decoder_target_tokens=decoder_target_tokens,\n dtype=cfg.dtype,\n decoder_segment_ids=decoder_segment_ids)\n encoder_decoder_mask = layers.make_attention_mask(\n decoder_target_tokens > 0, encoder_input_tokens > 0, dtype=cfg.dtype)\n\n # Add segmentation block-diagonal attention masks if using segmented data.\n if encoder_segment_ids is not None:\n if decode:\n raise ValueError(\n 'During decoding, packing should not be used but '\n '`encoder_segment_ids` was passed to `Transformer.decode`.')\n\n encoder_decoder_mask = layers.combine_masks(\n encoder_decoder_mask,\n layers.make_attention_mask(\n decoder_segment_ids,\n encoder_segment_ids,\n jnp.equal,\n dtype=cfg.dtype))\n\n logits = self.decoder(\n encoded,\n decoder_input_tokens=decoder_input_tokens,\n decoder_positions=decoder_positions,\n decoder_mask=decoder_mask,\n encoder_decoder_mask=encoder_decoder_mask,\n deterministic=not enable_dropout,\n decode=decode,\n max_decode_length=max_decode_length)\n return logits.astype(self.config.dtype)", "def decode(self, dec_state, words, **kwargs):\n with tf.name_scope(self.decoder2.name):\n (enc_out, enc_attn_mask, dec1_out, dec1_rdo, dec1_attn_mask,\n attnP, prev_out_seq, rdo) = dec_state\n\n out_seq = tf.concat([prev_out_seq, tf.expand_dims(words, 1)], 1)\n return self._decode_impl((enc_out, enc_attn_mask, dec1_out, dec1_rdo, dec1_attn_mask,\n attnP, out_seq, rdo), **kwargs)", "def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)", "def decode():\n\n with tf.device('/cpu:0'):\n dataset_test = SequenceDataset(\n subset=\"test\",\n config_dir=FLAGS.config_dir,\n data_dir=FLAGS.data_dir,\n batch_size=1,\n 
input_size=FLAGS.input_dim,\n output_size=FLAGS.output_dim,\n infer=True,\n name=\"dataset_test\")()\n\n model = TfModel(\n rnn_cell=FLAGS.rnn_cell,\n dnn_depth=FLAGS.dnn_depth,\n dnn_num_hidden=FLAGS.dnn_num_hidden,\n rnn_depth=FLAGS.rnn_depth,\n rnn_num_hidden=FLAGS.rnn_num_hidden,\n output_size=FLAGS.output_dim,\n bidirectional=FLAGS.bidirectional,\n rnn_output=FLAGS.rnn_output,\n cnn_output=FLAGS.cnn_output,\n look_ahead=FLAGS.look_ahead,\n mdn_output=FLAGS.mdn_output,\n mix_num=FLAGS.mix_num,\n name=\"tf_model\")\n\n # Build the testing model and get test output sequence.\n test_iterator = dataset_test.batched_dataset.make_one_shot_iterator()\n input_sequence, input_sequence_length = test_iterator.get_next()\n test_output_sequence_logits, test_final_state = model(\n input_sequence, input_sequence_length)\n\n show_all_variables()\n\n saver = tf.train.Saver()\n\n # Decode.\n with tf.Session() as sess:\n # Run init\n sess.run(tf.global_variables_initializer())\n\n if not restore_from_ckpt(sess, saver): sys.exit(-1)\n\n # Read cmvn to do reverse mean variance normalization\n cmvn = np.load(os.path.join(FLAGS.data_dir, \"train_cmvn.npz\"))\n\n num_batches = 0\n used_time_sum = frames_sum = 0.0\n while True:\n try:\n time_start = time.time()\n logits = sess.run(test_output_sequence_logits)\n time_end = time.time()\n\n used_time = time_end - time_start\n used_time_sum += used_time\n frame_num = logits.shape[1]\n frames_sum += frame_num\n\n # Squeeze batch dimension.\n logits = logits.squeeze(axis=0)\n\n if FLAGS.mdn_output:\n out_pi = logits[:, : FLAGS.mix_num]\n out_mu = logits[:, FLAGS.mix_num : (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim)]\n out_sigma = logits[:, (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim) :]\n\n max_index_pi = out_pi.argmax(axis=1)\n result_mu = []\n for i in xrange(out_mu.shape[0]):\n beg_index = max_index_pi[i] * FLAGS.output_dim\n end_index = (max_index_pi[i] + 1) * FLAGS.output_dim\n result_mu.append(out_mu[i, beg_index:end_index])\n logits = np.vstack(result_mu)\n\n sequence = logits * cmvn[\"stddev_labels\"] + cmvn[\"mean_labels\"]\n\n out_dir_name = os.path.join(FLAGS.save_dir, \"test\", \"cmp\")\n out_file_name =os.path.basename(\n dataset_test.tfrecords_lst[num_batches]).split('.')[0] + \".cmp\"\n out_path = os.path.join(out_dir_name, out_file_name)\n write_binary_file(sequence, out_path, with_dim=False)\n #np.savetxt(out_path, sequence, fmt=\"%f\")\n\n tf.logging.info(\n \"writing inferred cmp to %s (%d frames in %.4f seconds)\" % (\n out_path, frame_num, used_time))\n num_batches += 1\n except tf.errors.OutOfRangeError:\n break\n\n tf.logging.info(\"Done decoding -- epoch limit reached (%d \"\n \"frames per second)\" % int(frames_sum / used_time_sum))", "def decode(self, decoder_input, sampler_output):\n\n self.attention_hidden, self.attention_cell = self.attention_lstm(\n decoder_input, (self.attention_hidden, self.attention_cell))\n self.attention_hidden = F.dropout(\n self.attention_hidden, self.p_attention_dropout, self.training)\n\n self.decoder_hidden, self.decoder_cell = self.decoder_lstm(\n self.attention_hidden, (self.decoder_hidden, self.decoder_cell))\n self.decoder_hidden = F.dropout(\n self.decoder_hidden, self.p_decoder_dropout, self.training)\n\n # print(self.decoder_hidden.size())\n # print(self.decoder_hidden.size(), sampler_output.size())\n proj_input = torch.cat(\n (self.decoder_hidden, sampler_output), 1) # [B, 1024 + 1280]\n\n decoder_output = self.linear_projection(proj_input)\n\n return decoder_output", "def decoder(x, 
reuse=False):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n with tf.name_scope('Decoder'):\n d_dense_1 = tf.nn.relu(dense(x, z_dim, n_l2, 'd_dense_1'))\n d_dense_2 = tf.nn.relu(dense(d_dense_1, n_l2, n_l1, 'd_dense_2'))\n output = tf.nn.sigmoid(dense(d_dense_2, n_l1, input_dim, 'd_output'))\n return output", "def forward(self, input_token, target_token, timestep, *inputs):\n log_probs_per_model = []\n state_outputs = []\n next_state_input = len(self.models)\n vocab_reduction_module = self.models[0].decoder.vocab_reduction_module\n if vocab_reduction_module is not None:\n possible_translation_tokens = inputs[len(self.models)]\n next_state_input += 1\n else:\n possible_translation_tokens = None\n for i, model in enumerate(self.models):\n encoder_output = inputs[i]\n prev_hiddens = []\n prev_cells = []\n for _ in range(len(model.decoder.layers)):\n prev_hiddens.append(inputs[next_state_input])\n prev_cells.append(inputs[next_state_input + 1])\n next_state_input += 2\n prev_input_feed = inputs[next_state_input].view(1, -1)\n next_state_input += 1\n if self.enable_precompute_reduced_weights and hasattr(model.decoder, '_precompute_reduced_weights') and possible_translation_tokens is not None:\n reduced_output_weights = inputs[next_state_input:next_state_input + 2]\n next_state_input += 2\n else:\n reduced_output_weights = None\n src_length_int = int(encoder_output.size()[0])\n src_length = torch.LongTensor(np.array([src_length_int]))\n src_tokens = torch.LongTensor(np.array([[0] * src_length_int]))\n src_embeddings = encoder_output.new_zeros(encoder_output.shape)\n encoder_out = encoder_output, prev_hiddens, prev_cells, src_length, src_tokens, src_embeddings\n model.decoder._is_incremental_eval = True\n model.eval()\n incremental_state = {}\n utils.set_incremental_state(model.decoder, incremental_state, 'cached_state', (prev_hiddens, prev_cells, prev_input_feed))\n decoder_output = model.decoder(input_token.view(1, 1), encoder_out, incremental_state=incremental_state, possible_translation_tokens=possible_translation_tokens)\n logits, _, _ = decoder_output\n log_probs = F.log_softmax(logits, dim=2)\n log_probs_per_model.append(log_probs)\n next_hiddens, next_cells, next_input_feed = utils.get_incremental_state(model.decoder, incremental_state, 'cached_state')\n for h, c in zip(next_hiddens, next_cells):\n state_outputs.extend([h, c])\n state_outputs.append(next_input_feed)\n if reduced_output_weights is not None:\n state_outputs.extend(reduced_output_weights)\n average_log_probs = torch.mean(torch.cat(log_probs_per_model, dim=0), dim=0, keepdim=True)\n if possible_translation_tokens is not None:\n reduced_indices = torch.zeros(self.vocab_size).long().fill_(self.unk_token)\n possible_translation_token_range = torch._dim_arange(like=possible_translation_tokens, dim=0)\n reduced_indices[possible_translation_tokens] = possible_translation_token_range\n reduced_index = reduced_indices.index_select(dim=0, index=target_token)\n score = average_log_probs.view((-1,)).index_select(dim=0, index=reduced_index)\n else:\n score = average_log_probs.view((-1,)).index_select(dim=0, index=target_token)\n word_reward = self.word_rewards.index_select(0, target_token)\n score += word_reward\n self.input_names = ['prev_token', 'target_token', 'timestep']\n for i in range(len(self.models)):\n self.input_names.append(f'fixed_input_{i}')\n if possible_translation_tokens is not None:\n self.input_names.append('possible_translation_tokens')\n outputs = [score]\n self.output_names = ['score']\n for i in 
range(len(self.models)):\n self.output_names.append(f'fixed_input_{i}')\n outputs.append(inputs[i])\n if possible_translation_tokens is not None:\n self.output_names.append('possible_translation_tokens')\n outputs.append(possible_translation_tokens)\n for i, state in enumerate(state_outputs):\n outputs.append(state)\n self.output_names.append(f'state_output_{i}')\n self.input_names.append(f'state_input_{i}')\n return tuple(outputs)", "def test_inference(self):\n model = self.create_model()\n ex = self._create_example()\n\n embeddings = tf.get_variable(\n \"W_embed\", [model.target_vocab_info.total_size, self.input_depth])\n\n def make_input_fn(step_output):\n \"\"\"Looks up the predictions in the embeddings.\n \"\"\"\n return tf.nn.embedding_lookup(embeddings, step_output.predictions)\n\n decoder_input_fn = DynamicDecoderInputs(\n initial_inputs=tf.zeros(\n [self.batch_size, self.input_depth], dtype=tf.float32),\n make_input_fn=make_input_fn)\n\n decoder_output = model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=self.max_decode_length)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n decoder_output_ = sess.run(decoder_output)\n\n # Assert shapes are correct\n np.testing.assert_array_equal(decoder_output_.logits.shape, [\n self.batch_size, self.max_decode_length,\n model.target_vocab_info.total_size\n ])\n np.testing.assert_array_equal(decoder_output_.predictions.shape,\n [self.batch_size, self.max_decode_length])", "def compute_decoding(self, dec_inp, initial_h, initial_c, forward_encoder_states, backward_encoder_states, denseLayer, embedding):\r\n\t\toutputs = []\r\n\t\tdecoder_state={}\r\n\t\tdecoder_state[\"lstm\"] = self.lstm_cell.zero_state()\r\n\r\n\t\tattention_vector, alpha = self.null_attention_vectors(forward_encoder_states)\r\n\t\tdecoder_state['a'] = attention_vector\r\n\t\tdecoder_state['alpha'] = alpha\r\n\t\tdecoder_state[\"lstm\"]['h'] = initial_h\r\n\t\tdecoder_state[\"lstm\"]['c'] = initial_c\r\n\r\n\t\tfor i in range(self.max_decoding_length):\r\n\r\n\t\t\t#To keep track of the input for LRP when we transmit the relevance from the input\r\n\t\t\tdecoder_state[\"input_int\"] = dec_inp[i].reshape((len(dec_inp[i]),1))\r\n\r\n\t\t\t#Concatenating the input vector with the attention vector\r\n\t\t\tinput_ = np.concatenate([dec_inp[i].reshape((len(dec_inp[i]),1)), decoder_state['a']])\r\n\r\n\t\t\t#We compute the LSTM step\r\n\t\t\tdecoder_state[\"lstm\"] = self.lstm_cell.forward(decoder_state[\"lstm\"], input_)\r\n\r\n\t\t\t#We compute the output after the output layer\r\n\t\t\tdecoder_state[\"output\"] = denseLayer.compute_forward(decoder_state[\"lstm\"])\r\n\r\n\t\t\t# The decoder state stores the attention vector is had as an input\r\n\t\t\toutputs.append(decoder_state.copy())\r\n\r\n\t\t\t#We compute the attention vector used for the next step\r\n\t\t\tattention_vector, alpha = self.compute_attention(decoder_state[\"lstm\"], forward_encoder_states, backward_encoder_states)\r\n\t\t\tdecoder_state['a'] = attention_vector\r\n\t\t\tdecoder_state['alpha'] = alpha\r\n\r\n\r\n\t\tself.outputs = outputs\r\n\t\treturn outputs", "def call(self, inputs, output_hidden_states = False, training = False):\n if isinstance(inputs, (list, tuple)):\n input_ids = inputs[0]\n token_type_ids = inputs[1] if len(inputs) > 1 else None\n attention_mask = inputs[2] if len(inputs) > 2 else None\n \n elif 
isinstance(inputs, dict):\n input_ids = inputs['input_ids']\n token_type_ids = inputs.get('token_type_ids', None)\n attention_mask = inputs.get('attention_mask', None)\n else:\n raise ValueError('The type of inputs should be list or dictionary.')\n \n input_shape = shape_list(input_ids)\n \n# last_hidden_state = tf.ones(input_shape + (self.config.hidden_size))\n# output = tf.ones(input_shape + (self.config.hidden_size,))\n# logits = tf.ones(input_shape + (self.config.vocab_size,))\n# pooler_output = tf.ones((input_shape[0], self.config.hidden_size))\n \n hidden_states = [] if output_hidden_states else None\n output = self.embeddings(input_ids, token_type_ids, training = training)\n \n if output_hidden_states:\n hidden_states.append(output)\n\n if self.causal_attention:\n attention_mask = tf.constant(lower_triangle_matrix(input_shape[-1]))\n attention_mask = tf.reshape(attention_mask, shape = (1, 1, input_shape[-1], input_shape[-1]))\n \n else:\n if attention_mask is None:\n attention_mask = tf.constant(1.0, shape = input_shape, dtype = 'float32')\n # attention_mask now has shape (batches, sequence_len),\n # we need to covert it to (batches, 1, 1, sequence_len)\n # so that it will broadcast to (batches, num_attention_heads, sequence_len, sequence_len)\n attention_mask = tf.reshape(attention_mask, shape = (-1, 1, 1, input_shape[-1]))\n\n \n \n last_hidden_state, layer_outputs = self.encoder(output, attention_mask, output_hidden_states = output_hidden_states, training = training)\n if output_hidden_states:\n hidden_states.extend(layer_outputs)\n \n pooler_output = self.pooler(tf.gather(last_hidden_state, indices = 0, axis = 1)) if self.pooler else None\n logits = self.lm_head(last_hidden_state) if self.lm_head else None\n\n res = {'sequence_output': last_hidden_state,\n 'pooler_output': pooler_output,\n 'logits': logits,\n 'hidden_states': hidden_states}\n\n self.built = True\n\n return {k : v for k, v in res.items() if v is not None}", "def build(self, unused_input_shapes):\n if self.embedding_lookup is None:\n self.embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self.config.vocab_size,\n embedding_width=self.config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.config.initializer_range),\n name=\"target_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=False,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer=tf.keras.initializers.VarianceScaling(\n scale=self.config.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\"),\n name=\"embedding_postprocessor\")\n # Decoder can use a different intermediate size.\n self.multi_channel_cross_attention = self.config.get(\n \"multi_channel_cross_attention\", False)\n self.decoder = TransformerDecoder(\n num_hidden_layers=self.config.num_decoder_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_decoder_attn_heads,\n intermediate_size=self.config.decoder_intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=\"decoder\")\n super(Decoder, self).build(unused_input_shapes)", "def call(self, inputs, cache=None, decode_loop_step=None):\n decoder_inputs = 
inputs[\"decoder_inputs\"]\n encoder_outputs = inputs[\"encoder_outputs\"]\n self_attention_mask = inputs[\"self_attention_mask\"]\n attention_mask = inputs[\"attention_mask\"]\n decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3)\n batch_size = decoder_shape[0]\n decoder_length = decoder_shape[1]\n\n def _to_bert_self_attention_mask(matrix):\n \"\"\"[1, 1, target_len, target_len] -> [bs, target_len, target_len].\"\"\"\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [batch_size, 1, 1])\n return matrix\n\n def _to_bert_encdec_attention_mask(matrix):\n \"\"\"[bs, 1, 1, input_len] -> [bs, target_len, input_len].\"\"\"\n if self.multi_channel_cross_attention:\n matrix = tf.expand_dims(matrix, axis=2)\n matrix = tf.tile(matrix, [1, 1, decoder_length, 1])\n else:\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [1, decoder_length, 1])\n return matrix\n\n attention_mask = _to_bert_encdec_attention_mask(attention_mask)\n self_attention_mask = _to_bert_self_attention_mask(self_attention_mask)\n\n output_tensor = decoder_inputs\n for layer_idx in range(self.num_hidden_layers):\n if self.attend_to_last_layer:\n memory = encoder_outputs[-1]\n else:\n memory = encoder_outputs[layer_idx]\n if self.multi_channel_cross_attention:\n transformer_inputs = [\n output_tensor, memory, attention_mask, self_attention_mask,\n inputs[\"doc_attention_probs\"]\n ]\n else:\n transformer_inputs = [\n output_tensor, memory, attention_mask, self_attention_mask\n ]\n # Gets the cache for decoding.\n if cache is None:\n output_tensor, _ = self.layers[layer_idx](transformer_inputs)\n else:\n cache_layer_idx = str(layer_idx)\n output_tensor, cache[cache_layer_idx] = self.layers[layer_idx](\n transformer_inputs,\n cache=cache[cache_layer_idx],\n decode_loop_step=decode_loop_step)\n return output_tensor, cache", "def run_decoder(self, model_state: 'ModelState') -> Tuple[mx.nd.NDArray, mx.nd.NDArray, 'ModelState']:\n batch = mx.io.DataBatch(\n data=[model_state.prev_target_word_id.as_in_context(self.context)] + model_state.decoder_states,\n label=None,\n bucket_key=model_state.bucket_key,\n provide_data=self._get_decoder_data_shapes(model_state.bucket_key))\n self.decoder_module.forward(data_batch=batch, is_train=False)\n probs, attention_probs, *model_state.decoder_states = self.decoder_module.get_outputs()\n return probs, attention_probs, model_state", "def gru_training_decoder(cell, attention, sftm, embedding, states, in_mask, in_bool_mask, batch_output, out_mask):\n # batch wise\n l_state = generate_final_state(tf.pack(states), in_bool_mask) # initialize the state, the encoder's last state\n outputs, loss, possib, symbol = [], [], [], []\n sstates, _ = attention.shortcut(states)\n\n with tf.device(\"/cpu:0\"):\n embedded_list = tf.nn.embedding_lookup(embedding, batch_output)\n #embedded_list = out_mask * tf.transpose(embedded_list, [2, 0, 1]) # Add mask to change embedding into zeros\n #embedded_list = tf.transpose(embedded_list, [2, 1, 0])\n embedded_list = tf.transpose(embedded_list, [1, 0, 2])\n embedded_list = tf.unpack(embedded_list) # list of embedding\n\n for time, (embedded, target, t_mask) in enumerate(zip(embedded_list[:-1], tf.unpack(tf.transpose(batch_output))[1:], tf.unpack(tf.transpose(out_mask))[1:])):\n eij = []\n #embedded = tf.nn.embedding_lookup(embedding, tf.reshape(i, [-1])) # deprecated\n #embedded = embedded * tf.reshape(tf.convert_to_tensor(out_mask[:, time], tf.float32), [batch_size, 1]) # deprecated\n for h in sstates:\n 
eij.append(attention(l_state, h))\n eij = tf.concat(1, eij)\n alphaij = softmax_wt_mask(eij, in_mask) # Add mask to change embedding into zeros\n #alphaij = tf.nn.softmax(eij) # PROBLEM!!!!\n #alphaij = alphaij * in_mask\n ##### Debug\n #print sess.run(alphaij) #print in_mask #print sess.run(alphaij) #print states\n t_ = alphaij * tf.transpose(tf.pack(states)) # broadcastable\n t_ = tf.transpose(t_)\n ci = tf.reduce_sum(t_, 0)\n output, l_state = cell(embedded, l_state, ci)\n output = output * tf.expand_dims(t_mask, 1) # Add mask\n outputs.append(output)\n res = sftm(output, tf.cast(target, tf.int64), t_mask)\n symbol.append(res[0])\n possib.append(res[1])\n loss.append(res[2])\n #cost = tf.reduce_mean(tf.add_n(loss))\n total_size = tf.add_n(tf.unpack(tf.transpose(out_mask))[1:])\n total_size += 1e-12\n cost = tf.add_n(loss) / total_size\n cost = tf.reduce_mean(cost)\n return outputs, symbol, possib, cost, loss", "def decode():\n with io.open(FLAGS.predict_input_file, encoding='utf-8') as test_file:\n lines = test_file.readlines()\n # Get the largest sentence length to set an upper bound to the decoder.\n max_length = FLAGS.max_sentence_length\n # max_length = max([len(line) for line in lines])\n \n print(\"Building dynamic character-level ALLDATASET data...\", flush=True)\n dataset = ALLDATASET(\n train_input=FLAGS.train_input, train_output=FLAGS.train_output,\n dev_input=FLAGS.dev_input, dev_output=FLAGS.dev_output,\n predict_input_file=FLAGS.predict_input_file, \n parse_repeated=FLAGS.parse_repeated,\n max_input_length=max_length, max_label_length=max_length)\n \n print(\"Building computational graph...\", flush=True)\n graph = tf.Graph()\n with graph.as_default():\n \n tf.set_random_seed(1)\n random.seed(1)\n np.random.seed(1)\n\n m = Seq2Seq(\n num_types=dataset.num_types(),\n max_encoder_length=max_length, max_decoder_length=max_length,\n pad_id=dataset.type_to_ix['_PAD'],\n eos_id=dataset.type_to_ix['_EOS'],\n go_id=dataset.type_to_ix['_GO'],\n space_id=dataset.type_to_ix[(' ',)],\n ix_to_type=dataset.ix_to_type,\n batch_size=1, embedding_size=FLAGS.embedding_size,\n hidden_size=FLAGS.hidden_size, rnn_layers=FLAGS.rnn_layers,\n bidirectional_encoder=FLAGS.bidirectional_encoder,\n bidirectional_mode=FLAGS.bidirectional_mode,\n use_lstm=FLAGS.use_lstm, attention=FLAGS.attention,\n beam_size=FLAGS.beam_size, restore=True, model_output_dir=FLAGS.model_output_dir)\n \n with tf.Session(graph=graph) as sess:\n print(\"Restoring model...\", flush=True)\n m.start()\n print(\n \"Restored model (global step {})\".format(m.global_step.eval()),\n flush=True)\n with io.open(FLAGS.output_path, 'w', encoding='utf-8') as output_file:\n for line in lines:\n # if len(line) > max_length:\n # continue\n number_of_chars = len(line)\n completely_divisble = number_of_chars % FLAGS.max_sentence_length == 0\n\n if number_of_chars < FLAGS.max_sentence_length:\n parts = [line]\n else:\n parts = []\n count = 0\n last_word_end_index = 0\n\n line_copy = line\n while len(line_copy) != 0 and count < len(line_copy):\n if count == FLAGS.max_sentence_length:\n if last_word_end_index == 0:\n parts.append(line_copy[: count])\n line_copy = line_copy[count:]\n else:\n parts.append(line_copy[: last_word_end_index])\n line_copy = line_copy[last_word_end_index:]\n \n last_word_end_index = 0\n count = 0\n\n if line_copy[count] == \" \":\n last_word_end_index = count\n\n count += 1\n\n if not completely_divisble:\n parts.append(line_copy)\n \n result = \"\"\n for part in parts:\n ids = dataset.tokenize(part)\n while len(ids) < 
max_length:\n ids.append(dataset.type_to_ix['_PAD'])\n outputs = sess.run(m.generative_output, feed_dict={m.inputs: [ids]})\n top_line = untokenize_batch(dataset, outputs)[0]\n # Sequences of text will only be repeated up to 5 times.\n top_line = re.sub(r'(.+?)\\1{5,}', lambda m: m.group(1) * 5, top_line)\n result += top_line\n output_file.write(result + '\\n')\n print(\"PREDICTION:\", top_line, flush=True)\n print()", "def forward(self, inputs, decode_len=None):\n\n batch_size = inputs.size(0)\n input_dim = inputs.size(1)\n assert input_dim == self.input_dim, 'input dim should be {:d} but now: {:d}'.format(self.input_dim, input_dim)\n\n sourceL = inputs.size(2)\n\n if self.embed_input:\n # repeat embeddings across batch_size\n # result is [batch_size x input_dim x embedding_dim]\n # TODO: repeat or expand?\n embedding = self.embedding.repeat(batch_size, 1, 1)\n embedded_inputs = []\n # result is [batch_size, 1, input_dim, sourceL]\n ips = inputs.unsqueeze(1)\n\n for i in range(sourceL):\n # [batch_size x 1 x input_dim] * [batch_size x input_dim x embedding_dim]\n # result is [batch_size, embedding_dim]\n embedded_inputs.append(torch.bmm(\n ips[:, :, :, i].float(),\n embedding).squeeze(1))\n\n # Result is [sourceL x batch_size x embedding_dim]\n embedded_inputs = torch.cat(embedded_inputs).view(\n sourceL,\n batch_size,\n embedding.size(2))\n else:\n embedded_inputs = inputs.permute(2, 0, 1)\n\n (encoder_hx, encoder_cx) = init_zero_hidden(self.hidden_dim, inputs.is_cuda)\n encoder_hx = encoder_hx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n encoder_cx = encoder_cx.unsqueeze(0).repeat(embedded_inputs.size(1), 1).unsqueeze(0)\n \n # encoder forward pass\n enc_h, (enc_h_t, enc_c_t) = self.encoder(embedded_inputs, (encoder_hx, encoder_cx))\n\n enc_h_linear = enc_h.view(-1, self.hidden_dim)\n # enc_h_linear_2d = enc_h_linear.view(self.hidden_dim, -1)\n enc_action_scores = self.EncodeScore(enc_h_linear)\n enc_action_scores = enc_action_scores.view(-1, batch_size).permute(1, 0)\n dec_init_state = (enc_h_t[-1], enc_c_t[-1])\n \n # repeat decoder_in_0 across batch\n decoder_input = self.decoder_in_0.unsqueeze(0).repeat(embedded_inputs.size(1), 1)\n\n (head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores), dec_hidden_t = self.decoder(decoder_input,\n embedded_inputs,\n dec_init_state,\n enc_h, max_len=decode_len)\n #TODO: added conversion to tensors\n head_pointer_probs = torch.stack(head_pointer_probs)\n head_pointer_probs = head_pointer_probs.permute(1, 0, 2)\n tail_pointer_probs = torch.stack(tail_pointer_probs)\n tail_pointer_probs = tail_pointer_probs.permute(1, 0, 2)\n cls_scores = torch.stack(cls_scores)\n cls_scores = cls_scores.permute(1, 0, 2)\n head_positions = torch.stack(head_positions)\n head_positions = head_positions.permute(1, 0)\n tail_positions = torch.stack(tail_positions)\n tail_positions = tail_positions.permute(1, 0)\n\n\n\n return head_pointer_probs, head_positions, tail_pointer_probs, tail_positions, cls_scores, enc_action_scores", "def _build_decoding_cache(self, src_token_ids, batch_size):\n padding_mask = utils.get_padding_mask(src_token_ids, SOS_ID)\n encoder_outputs = self._encode(src_token_ids, padding_mask, training=False)\n size_per_head = self._hidden_size // self._num_heads\n src_seq_len = padding_mask.shape[-1] \n\n decoding_cache = {'layer_%d' % layer:\n {'k':\n tf.zeros([\n batch_size, 0, self._num_heads, size_per_head\n ], 'float32'),\n 'v':\n tf.zeros([\n batch_size, 0, self._num_heads, size_per_head\n ], 
'float32'),\n 'tgt_tgt_attention':\n tf.zeros([\n batch_size, self._num_heads, 0, 0], 'float32'), \n 'tgt_src_attention':\n tf.zeros([\n batch_size, self._num_heads, 0, src_seq_len], 'float32')\n\n } for layer in range(self._decoder._stack_size)\n }\n decoding_cache['encoder_outputs'] = encoder_outputs\n decoding_cache['padding_mask'] = padding_mask\n return decoding_cache", "def build_decoder(opt, embeddings):\n return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings)", "def decode(self, x, y):\n y = self.embedding(y)\n b, t, h = y.shape\n start = torch.zeros((b, 1, h))\n if self.is_cuda:\n start = start\n y = torch.cat([start, y], dim=1)\n y, _ = self.dec_rnn(y)\n x = x.unsqueeze(dim=2)\n y = y.unsqueeze(dim=1)\n out = self.fc1(x) + self.fc1(y)\n out = nn.functional.relu(out)\n out = self.fc2(out)\n out = nn.functional.log_softmax(out, dim=3)\n return out", "def _decode_train(self):\n\n # the basic idea is, we use golden sketch during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')", "def forward(self, encoder_output, encoded_captions, caption_lengths):\n \n batch_size = encoder_output.size(0)\n encoder_dim = encoder_output.size(-1)\n \n # Flatten image\n encoder_output = encoder_output.view(batch_size, -1, encoder_dim)\n \n #print(encoder_output.size())\n \n num_pixels = encoder_output.size(1)\n \n # Embedding\n embeddings = self.embedding(encoded_captions).type(torch.FloatTensor).to(device)\n #print(embeddings.size())\n \n #initial_states\n h, c = self.init_hidden_states(encoder_output)\n #print(h.size(), c.size())\n\n # Create tensors to hold word predicion scores and alphas\n predictions = torch.zeros(batch_size, max(caption_lengths), self.vocab_size) #.to(device)\n \n #print('prediction_length', predictions.size())\n alphas = torch.zeros(batch_size, max(caption_lengths), num_pixels) #.to(device)\n #print('alphas', alphas.size())\n # At each time-step, decode by\n # attention-weighing the encoder's output based on the decoder's previous hidden state output\n # then generate a new word in the decoder with the previous word and the attention weighted encoding\n for t in range(max(caption_lengths)):\n batch_size_t = sum([l > t for l in caption_lengths])\n att, alpha = self.attention(encoder_output[:batch_size_t],\n h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) # gating scalar, (batch_size_t, encoder_dim)\n att = gate * att\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], att], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) # (batch_size_t, decoder_dim)\n #preds = self.fc(self.dropout(h)) # (batch_size_t, vocab_size)\n \n h_embedded = self.linear_h(h)\n att_embedded = self.linear_z(att)\n preds = self.linear_o(self.dropout(embeddings[:batch_size_t, t, :] + h_embedded + att_embedded))\n 
\n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n \n #print(predictions.size()) \n return predictions, alphas", "def train_output(self, decoder_output, Y, enc_output, vocab_size, scope, reuse):\n logits = top(body_output=decoder_output,\n vocab_size = vocab_size,\n dense_size = self.config.hidden_units,\n scope=scope,\n shared_embedding = self.config.train.shared_embedding,\n reuse=reuse)\n\n with tf.variable_scope(scope, initializer=self._initializer, reuse=reuse):\n #logits = tf.layers.dense(decoder_output, self.config.dst_vocab_size)\n preds = tf.to_int32(tf.arg_max(logits, dimension=-1))\n mask = tf.to_float(tf.not_equal(Y, 0))\n acc = tf.reduce_sum(tf.to_float(tf.equal(preds, Y)) * mask) / tf.reduce_sum(mask)\n\n # Smoothed loss\n loss = smoothing_cross_entropy(logits=logits, labels=Y, vocab_size=vocab_size,\n confidence=1-self.config.train.label_smoothing)\n mean_loss = tf.reduce_sum(loss * mask) / (tf.reduce_sum(mask))\n\n kl_loss = tf.reduce_mean(tf.reduce_sum(tf.pow(enc_output, 2), axis=-1))\n\n mean_loss = mean_loss + self.config.kl_weight * kl_loss\n\n return acc, mean_loss", "def decoder(latent_samples, input_dim, output_dim=500, input_channels=1, output_channels=4, deconv_filters=[4,8,16], \n kernel_sizes=[3,3,3], deconv_strides=[1,1,1], act=tf.nn.relu, \n initializer=tf.contrib.layers.xavier_initializer()):\n x = tf.layers.dense(inputs=latent_samples, units=input_dim*input_dim, activation=act, \n kernel_initializer=initializer)\n x = tf.reshape(x, [-1, input_dim, input_dim, input_channels])\n x = tf.layers.conv2d_transpose(inputs=x, filters=deconv_filters[0], kernel_size=kernel_sizes[0], \n strides=deconv_strides[0], padding='same', activation=act, \n kernel_initializer=initializer)\n x = tf.layers.conv2d_transpose(inputs=x, filters=deconv_filters[1], kernel_size=kernel_sizes[1], \n strides=deconv_strides[1], padding='same', activation=act, \n kernel_initializer=initializer)\n x = tf.layers.conv2d_transpose(inputs=x, filters=deconv_filters[2], kernel_size=kernel_sizes[2], \n strides=deconv_strides[2], padding='same', activation=act, \n kernel_initializer=initializer)\n x = tf.contrib.layers.flatten(x)\n x = tf.layers.dense(inputs=x, units=output_dim*output_dim*output_channels, activation=act, \n kernel_initializer=initializer)\n color_imgs = tf.reshape(x, [-1, output_dim, output_dim, output_channels])\n return color_imgs", "def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):\n # Propagate through decoder\n step_logits, memories = decoding_function(next_ids, current_time_step, memories)\n # Calculate log probabilities for token prediction at current time-step\n step_scores = tf.nn.log_softmax(step_logits)\n # Determine next token to be generated, next_ids has shape [batch_size]\n if do_sample:\n next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)\n else:\n # Greedy decoding\n next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)\n # Collect scores associated with the selected tokens\n score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)\n decoded_score += tf.gather_nd(step_scores, score_coordinates)\n # Concatenate newly decoded token ID with the previously decoded ones\n decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)\n # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step\n next_ids = tf.expand_dims(next_ids, time_dim)\n # Check if generation 
has concluded with <EOS>\n # all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id)\n all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id)\n\n return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories", "def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2a\n embeddings = self.decoderCharEmb(input)\n out, new_hidden = self.charDecoder(embeddings, dec_hidden)\n scores = self.char_output_projection(out)\n return scores, new_hidden\n\n ### END YOUR CODE", "def call(self,\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n attention_bias,\n training,\n cache=None):\n # Run values\n outputs = self._transformer_decoder(\n decoder_inputs,\n encoder_outputs,\n decoder_self_attention_bias,\n attention_bias,\n training=training,\n cache=cache)\n return outputs", "def _decoder(self, inputs, z_dimension, mcd):\n \n latent_inputs = Input(shape=(z_dimension,), name=\"z_sampling\")\n x = latent_inputs\n x = Dense(\n self.hidden_size // 4,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 3,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size // 2,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n\n x = Dense(\n self.hidden_size,\n activation=self.activation_func,\n kernel_initializer=self.weight_init,\n )(x)\n x = Dropout(self.dropout_probability)(x, training=mcd)\n outputs = Dense(\n self.n_dims,\n activation=self.output_activation,\n kernel_initializer=self.weight_init,\n )(x)\n \n self.decoder = Model(latent_inputs, outputs, name=\"decoder\")\n \n outputs = self.decoder(self.encoder(inputs)[0])\n \n return self.decoder, outputs", "def greedyDecoder(self, enc_states, hidden, test=False, sentence=None, st='<s>', ed='</s>'):\n\t\tbatch_size = hidden.shape[1]\n\t\t# according to paper\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tword = torch.ones(batch_size, dtype=torch.long, device=self.device) * self.vocab[st]\n\t\t\twords = torch.zeros(batch_size, self.max_trg_len, dtype=torch.long, device=self.device)\n\t\t\tfor i in range(self.max_trg_len-1):\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, word)\n\t\t\t\tprobs = F.softmax(logit, dim=-1)\n\t\t\t\tword = torch.argmax(probs, dim=-1).squeeze()\n\t\t\t\twords[:,i] = word\n\t\t\twords[:,-1] = torch.ones(batch_size, dtype=torch.long, device=self.device) * self.vocab[ed]\n\t\t\treturn words\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len-1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:,i])\n\t\t\t\tlogits[:,i,:] = logit.squeeze()\n\t\t\treturn logits", "def forward(self, input, last_hidden, last_context, encoder_outputs):\r\n # input: B x 1 x d, last_hidden: (num_layers * num_directions) x B x h\r\n # last_context: B x 1 x h, encoder_outputs: B x S x h\r\n\r\n # output = embedded\r\n rnn_input = torch.cat((input, last_context), 2) # B x 1 x (d + h)\r\n output, hidden = self.rnn(rnn_input, last_hidden) # output: B x 1 x h\r\n\r\n # 
calculate attention from current RNN state and all encoder outputs; apply to encoder outputs\r\n attn_weights = self.attn(output, encoder_outputs) # B x S\r\n context = attn_weights.unsqueeze(1).bmm(encoder_outputs) # B x 1 x h\r\n\r\n # final output layer (next word prediction) using the RNN hidden state and context vector\r\n output = f.log_softmax(self.out(torch.cat((context.squeeze(1), output.squeeze(1)), 1)), 1)\r\n\r\n # Return final output, hidden state, and attention weights (for visualization)\r\n return output, hidden, context, attn_weights", "def load_decoder(checkpoint, decoder_cls,\n attn_model, embedding, HIDDEN_SIZE, VOC_SIZE, DECODER_N_LAYERS, DROPOUT, decoder_name):\n model = decoder_cls(attn_model, embedding, HIDDEN_SIZE, VOC_SIZE, DECODER_N_LAYERS, DROPOUT, gate=decoder_name)\n model.load_state_dict(checkpoint['de'])\n model.eval()\n return model", "def lfads_decode(params, lfads_hps, key, ib_mean, ib_logvar, ic_mean, ic_logvar,\n xenc_t, keep_rate):\n\n keys = random.split(key, 3)\n\n # Since the factors feed back to the controller,\n # factors_{t-1} -> controller_t -> sample_t -> generator_t -> factors_t\n # is really one big loop and therefor one RNN.\n ii0 = params['ii0']\n ii0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(ii0), ii0)\n # ii_t tanh'd at sampling time in the decode loop. \n c0 = params['con']['h0']\n ib = dists.diag_gaussian_sample(keys[0], ib_mean, ib_logvar,\n lfads_hps['var_min'])\n ib = np.where(lfads_hps['do_tanh_latents'], np.tanh(ib), ib) \n g0 = dists.diag_gaussian_sample(keys[1], ic_mean, ic_logvar,\n lfads_hps['var_min'])\n g0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(g0), g0)\n f0 = params['f0']\n\n # Make all the randomness for all T steps at once, it's more efficient.\n # The random keys get passed into scan along with the input, so the input\n # becomes of a 2-tuple (keys, actual input).\n T = xenc_t.shape[0]\n keys_t = random.split(keys[2], T)\n\n state0 = (c0, ii0, ib, g0, f0)\n decoder = partial(lfads_decode_one_step_scan, *(params, lfads_hps, keep_rate))\n _, state_and_returns_t = lax.scan(decoder, state0, (keys_t, xenc_t))\n return state_and_returns_t", "def inference(self, inputs):\n # test_2\n memory = self.get_go_frame(inputs)\n memory = self._update_memory(memory)\n\n self._init_states(inputs, mask=None)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments, t = [], [], [], 0\n while True:\n memory = self.prenet(memory)\n decoder_output, alignment, stop_token = self.decode(memory)\n stop_token = torch.sigmoid(stop_token.data)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token]\n alignments += [alignment]\n\n if stop_token > self.stop_threshold and t > inputs.shape[0] // 2:\n break\n if len(outputs) == self.max_decoder_steps:\n print(\" | > Decoder stopped with 'max_decoder_steps\")\n break\n\n memory = self._update_memory(decoder_output)\n t += 1\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n\n return outputs, alignments, stop_tokens", "def predict_step(batch, state, cache, eos_idx, config):\n\n logging.info('predict_step(batch=%s)', batch)\n variables = {'params': state.optimizer.target}\n model = models.Model(config)\n encoded, encoded_mask = model.apply(\n variables, batch, method=models.Model.encode)\n\n encoded_inputs = decode.flat_batch_beam_expand(encoded, config.beam_size)\n encoded_mask = decode.flat_batch_beam_expand(encoded_mask, config.beam_size)\n\n def tokens_ids_to_logits(flat_ids, flat_cache):\n \"\"\"Token slice to 
logits from decoder model.\"\"\"\n # --> [batch * beam, 1, vocab]\n flat_logits, new_vars = model.apply(\n {\n 'params': state.optimizer.target,\n 'cache': flat_cache\n },\n flat_ids,\n encoded_inputs,\n flat_ids > 0,\n encoded_mask,\n mutable=['cache'],\n method=models.Model.decode)\n new_flat_cache = new_vars['cache']\n # Remove singleton sequence-length dimension:\n # [batch * beam, 1, vocab] --> [batch * beam, vocab]\n flat_logits = flat_logits.squeeze(axis=1)\n return flat_logits, new_flat_cache\n\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n beam_seqs, _, = decode.beam_search(\n batch['token'],\n cache,\n tokens_ids_to_logits,\n beam_size=config.beam_size,\n alpha=0.6,\n eos_id=eos_idx,\n max_decode_len=config.max_decode_step)\n # Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension\n # sorted in increasing order of log-probability.\n # Return the highest scoring beam sequence.\n return beam_seqs[:, -1]", "def long_answer_prepare_decoder(inputs, targets, hparams):\n decoder_input = tf.concat([\n length_embedding(targets, hparams), inputs,\n common_layers.shift_left_3d(targets)], 1)\n if hparams.pos == \"timing\":\n decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n return decoder_input", "def decode(self, embeddings):\n def denormalize(img):\n _img = img + 1.0\n _img = _img * (255.0 / 2.0)\n return _img.astype(np.uint8)\n\n i = 0\n N = len(embeddings)\n imgs = []\n while True:\n end = min(N, i + self.batch_size)\n batch = embeddings[i: end]\n\n size = end - i\n if size < self.batch_size:\n batch += self._embed_padding[: self.batch_size - size]\n\n _imgs = self.sess.run(self.output_layer, feed_dict={self.embed_layer: batch})\n imgs += [denormalize(_imgs[i]) for i in range(size)]\n\n i += self.batch_size\n if i >= N - 1:\n break\n\n return imgs", "def _compute_output(self, decoder_output, attention_weights):\n # Compute attention weights & context vector\n attention_weights, context_vector = self.attention_layer(\n encoder_outputs=self.encoder_outputs,\n decoder_output=decoder_output,\n encoder_outputs_length=self.encoder_outputs_seq_len,\n attention_weights=attention_weights)\n\n # Input-feeding approach, this is used as inputs for the decoder\n attentional_vector = tf.contrib.layers.fully_connected(\n tf.concat([decoder_output, context_vector], axis=1),\n num_outputs=self.rnn_cell.output_size,\n activation_fn=tf.nn.tanh,\n weights_initializer=tf.truncated_normal_initializer(\n stddev=self.parameter_init),\n biases_initializer=None, # no bias\n scope=\"attentional_vector\")\n # NOTE: This makes the softmax smaller and allows us to synthesize\n # information between decoder state and attention context\n # see https://arxiv.org/abs/1508.04025v5\n\n # Softmax computation\n logits = tf.contrib.layers.fully_connected(\n attentional_vector,\n num_outputs=self.num_classes,\n activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(\n stddev=self.parameter_init),\n biases_initializer=tf.zeros_initializer(),\n scope=\"output_layer\")\n\n return attentional_vector, logits, attention_weights, context_vector", "def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n\n def _decode_predictions(input_key: str, output_key: str, beam=False):\n if input_key in output_dict:\n if beam:\n all_predicted_tokens = [list(map(self._indices_to_tokens, beams)) \n for beams in sanitize(output_dict[input_key])]\n else:\n all_predicted_tokens = 
list(map(self._indices_to_tokens, sanitize(output_dict[input_key])))\n output_dict[output_key] = all_predicted_tokens\n\n _decode_predictions(\"predictions\", \"predicted_tokens\", beam=True)\n _decode_predictions(\"ctc_predictions\", \"ctc_predicted_tokens\")\n _decode_predictions(\"rnnt_predictions\", \"rnnt_predicted_tokens\")\n _decode_predictions(\"target_tokens\", \"targets\")\n\n return output_dict", "def sequence_output_logits(self, decoded_outputs, num_units, vocab_size):\n # We need to get the sequence length for *this* batch, this will not be\n # equal for each batch since the decoder is dynamic. Meaning length is\n # equal to the longest sequence in the batch, not the max over data\n max_seq_len = tf.shape(decoded_outputs)[1]\n\n # Reshape to rank 2 tensor so timestep is no longer a dimension\n output = tf.reshape(decoded_outputs, [-1, num_units])\n\n # Get the logits\n logits = self.output_logits(output, num_units, vocab_size, \"seq_softmax\")\n\n # Reshape back to the original tensor shape\n logits = tf.reshape(logits, [-1, max_seq_len, vocab_size])\n return logits", "def forward(self, input, hidden, give_gates=False, debug=False):\n\n emb = self.encoder(input)\n if emb.dim()<3:\n emb = emb.unsqueeze(0)\n\n if give_gates:\n output, hidden, extras = self.rnn(emb, hidden, give_gates)\n else:\n output, hidden = self.rnn(emb, hidden)\n\n # decoded = self.softmax(self.decoder(output))\n decoded = self.decoder(output)\n\n if give_gates:\n if debug:\n return decoded, hidden, extras, emb\n else:\n return decoded, hidden, extras\n else:\n if debug:\n return decoded, hidden, emb\n else:\n return decoded, hidden", "def build_prediction_model(self):\n batch_size=self.image_embeddings.get_shape()[0]\n \n \n with tf.variable_scope(\"pred_layer_1\", initializer=self.initializer) as pred_scope_1:\n # We use a simple three layer fully-connected network for the actual prediction task\n # Hidden neuron sizes are randomly chosen \n \n first_pred = tf.contrib.layers.fully_connected(\n inputs=self.article_embeddings,\n num_outputs=32,\n activation_fn=tf.nn.relu,\n weights_initializer=self.initializer,\n scope=pred_scope_1)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n first_pred = tf.nn.dropout(first_pred, self.config.dropout_keep_prob_classifier)\n ''' \n \n with tf.variable_scope(\"pred_layer_2\", initializer=self.initializer) as pred_scope_2:\n second_pred = tf.contrib.layers.fully_connected(\n inputs=first_pred,\n num_outputs=16,\n activation_fn=tf.nn.relu,\n weights_initializer=self.initializer,\n scope=pred_scope_2)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n second_pred = tf.nn.dropout(second_pred, self.config.dropout_keep_prob_classifier)\n '''\n \n second_pred = first_pred\n \n ################################################\n # Predict Mutual Information\n ################################################\n \n with tf.variable_scope(\"predict_mi\", initializer=self.initializer) as mi_scope:\n mi_logits = None\n mi_prediction = None\n \n if self.config.mi_is_multiclass_problem:\n mi_logits = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=self.config.num_mi_labels,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=mi_scope)\n \n if self.mode != \"prediction\": \n # Compute loss\n mi_loss = 0.0\n \n # Do not punish all misclassifications equally.\n # Use the label distances defined in the config 
instead.\n if self.config.use_distance_aware_loss:\n # compute softmax to get probability of label l: p(l)\n mi_logits = tf.nn.softmax(mi_logits)\n \n # This part computes the Distance Aware loss function from section 4.4.1 of the thesis.\n # The loss allows to treat misclassifications differently based on a predefined\n # similarity metric defined between labels.\n # D is the symmetric matrix that defines the similarity d(l,t) for a label l\n # and a prediction t. The correct t is not known, only a softmax evidence for\n # all possible t. Therefore, we consider a whole column of D (corresponding to the correct\n # label l) and multiply this column by the softmax output to compute the loss value. \n D = tf.constant(self.config.mi_label_distances)\n indices = tf.expand_dims(self.mi_labels, 1)\n d = tf.gather_nd(D, indices) # contains the l-th column of D for each batch\n \n mi_loss = tf.reduce_sum(d*mi_logits) # d(l,t) * p(t) for all t\n \n # use cross entropy loss\n else:\n # Applies softmax to the unscaled inputs (logits) and then computes the soft-entropy loss: H(p,q) = - sum p(x) * log q(x)\n mi_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(mi_logits, self.mi_labels) \n\n mi_loss = tf.reduce_mean(mi_losses, name=\"mi_loss\") \n \n tf.contrib.losses.add_loss(mi_loss)\n \n else:\n \n # Consider the task as a regression problem and reduce its quadratic loss\n mi_prediction = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=1,\n activation_fn=None, # linear activation \n weights_initializer=self.initializer,\n scope=mi_scope)\n \n if self.mode != \"prediction\": \n mi_loss = tf.reduce_mean(tf.pow(mi_prediction - tf.to_float(self.mi_labels), 2))\n tf.contrib.losses.add_loss(mi_loss)\n \n ################################################\n # Predict Semantic Correlation\n ################################################\n \n with tf.variable_scope(\"predict_sc\", initializer=self.initializer) as sc_scope:\n sc_logits = None\n sc_prediction = None\n \n if self.config.sc_is_multiclass_problem:\n \n # Consider prediction of semantic correlation as a multiclass problem\n sc_logits = tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=self.config.num_sc_labels,\n activation_fn=tf.nn.relu, #None, # linear activation \n weights_initializer=self.initializer,\n scope=sc_scope)\n \n if self.mode != \"prediction\": \n # compute sc multiclass labels\n # scale to [0,1,2,3,4]\n multiclass_labels = tf.to_int64(self.sc_labels * 2 + 2)\n\n # Compute loss\n sc_loss = 0.0\n \n # Do not punish all misclassifications equally.\n # Use the label distances defined in the config instead.\n if self.config.use_distance_aware_loss:\n # compute softmax to get probability of label l: p(l)\n sc_logits = tf.nn.softmax(sc_logits)\n \n # see comment above for distance aware MI loss\n D = tf.constant(self.config.sc_label_distances)\n indices = tf.expand_dims(multiclass_labels, 1)\n d = tf.gather_nd(D, indices) # contains the l-th column of D for each batch\n \n sc_loss = tf.reduce_sum(d*sc_logits) # d(l,t) * p(t) for all t\n \n # use cross entropy loss\n else:\n # Applies softmax to the unscaled inputs (logits) and then computes the soft-entropy loss: H(p,q) = - sum p(x) * log q(x)\n sc_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(sc_logits, multiclass_labels) \n\n sc_loss = tf.reduce_mean(sc_losses, name=\"sc_loss\") \n \n tf.contrib.losses.add_loss(sc_loss)\n \n else:\n \n # Consider the task as a regression problem and reduce its quadratic loss\n \n sc_prediction 
= tf.contrib.layers.fully_connected(\n inputs=second_pred,\n num_outputs=1,\n activation_fn=None, # linear activation \n weights_initializer=self.initializer,\n scope=sc_scope)\n \n if self.mode != \"prediction\":\n sc_loss = tf.reduce_mean(tf.pow(sc_prediction - self.sc_labels, 2))\n tf.contrib.losses.add_loss(sc_loss)\n\n \n if self.mode != \"prediction\":\n \n self.total_loss = tf.contrib.losses.get_total_loss() \n \n self.mi_loss = mi_loss # used in evaluation\n self.sc_loss = sc_loss # used in evaluation\n \n # Add summaries.\n tf.summary.scalar(\"mi_loss\", mi_loss)\n tf.summary.scalar(\"sc_loss\", sc_loss)\n tf.summary.scalar(\"total_loss\", self.total_loss)\n \n \n if self.config.mi_is_multiclass_problem:\n self.mi_logits = mi_logits # used in evaluation\n else:\n self.mi_logits = mi_prediction # used in evaluation\n \n if self.config.sc_is_multiclass_problem:\n self.sc_logits = sc_logits # used in evaluation\n else:\n self.sc_logits = sc_prediction # used in evaluation\n \n for var in tf.trainable_variables():\n #print(var)\n #print(var.name)\n tf.summary.histogram(var.op.name, var)", "def step(self,\n session,\n encoder_inputs,\n decoder_inputs,\n targets,\n target_weights,\n encoder_sequence_length,\n decoder_sequence_length,\n input_keep_prob=1.0,\n output_keep_prob=1.0,\n mode='train'):\n input_feed = {}\n input_feed[self.encoder_inputs] = encoder_inputs\n input_feed[self.decoder_inputs] = decoder_inputs\n input_feed[self.targets] = targets\n input_feed[self.target_weights] = target_weights\n input_feed[self.encoder_sequence_length] = encoder_sequence_length\n input_feed[self.decoder_sequence_length] = decoder_sequence_length\n input_feed[self.input_keep_prob] = input_keep_prob\n input_feed[self.output_keep_prob] = output_keep_prob\n\n if mode == 'train':\n # training\n output_feed = [self.update, # SGD\n self.gradient_norm, # Gradient norm\n self.loss] # Loss for this batch\n outputs = session.run(output_feed, input_feed)\n # Gradient norm, loss, no outputs\n return outputs[1], outputs[2], None\n elif mode == 'test_rank':\n # testing as a ranker\n # TODO(ysu): separate mode for validation\n output_feed = [self.loss, # Loss for this batch\n self.outputs] # Output logits\n outputs = session.run(output_feed, input_feed)\n # No gradient norm, loss, outputs\n return None, outputs[0], outputs[1]\n elif mode == 'summarize':\n output_feed = self.merged_summary\n outputs = session.run(output_feed, input_feed)\n return outputs", "def __init__(self, num_mels=80, num_freq=513, prenet_hidden_size=512, decoder_hidden_size=512,\n attention_dropout=0.1,\n layer_postprocess_dropout=0.1, prenet_activation_fn=None, conv_layers_num=4,\n mag_conv_layers_num=4, prenet_layers=2,\n prenet_dropout=0.5,\n prenet_use_inference_dropout=False,\n cnn_dropout_prob=0.1,\n bn_momentum=0.95,\n bn_epsilon=-1e8,\n reduction_factor=2,\n attention_layers=4,\n self_attention_conv_params=None,\n attention_heads=1,\n attention_cnn_dropout_prob=0.5,\n window_size=4,\n back_step_size=0, kernel_size=5, regularizer=None,\n force_layers=None, dtype=tf.float32, name=\"centaur_decoder\", is_prediction=False, is_training=False,\n is_validation=False):\n self.kernel_size = kernel_size\n\n if force_layers is None:\n force_layers = [1, 3]\n self.is_validation = is_validation\n self.is_prediction = is_prediction\n self.name = name\n self.is_training = is_training\n self.prenet = None\n self.linear_projection = None\n self.attentions = []\n self.output_normalization = None\n self.conv_layers = []\n self.mag_conv_layers = []\n 
self.conv_layers_num = conv_layers_num\n self.mag_conv_layers_num = mag_conv_layers_num\n self.stop_token_projection_layer = None\n self.mel_projection_layer = None\n self.mag_projection_layer = None\n self.regularizer = regularizer\n self.num_mels = num_mels\n self.num_freq = num_freq\n self.reduction_factor = reduction_factor\n self.prenet_layers = prenet_layers\n self.prenet_hidden_size = prenet_hidden_size\n self.prenet_activation_fn = prenet_activation_fn if prenet_activation_fn else tf.nn.relu\n self.prenet_use_inference_dropout = prenet_use_inference_dropout\n self.prenet_dropout = prenet_dropout\n self.cnn_dropout_prob = cnn_dropout_prob\n self.dtype = dtype\n self.bn_momentum = bn_momentum\n self.bn_epsilon = bn_epsilon\n self.decoder_hidden_size = decoder_hidden_size\n self.attention_layers = attention_layers\n self.force_layers = force_layers\n\n self.window_size = window_size\n self.attention_heads = attention_heads\n self.attention_dropout = attention_dropout\n self.layer_postprocess_dropout = layer_postprocess_dropout\n self.attention_cnn_dropout_prob = attention_cnn_dropout_prob\n self.back_step_size = back_step_size\n if self_attention_conv_params is None:\n self_attention_conv_params = {\n \"kernel_size\": [self.kernel_size],\n \"stride\": [1],\n \"num_channels\": self.decoder_hidden_size,\n \"padding\": \"VALID\",\n \"is_causal\": True,\n \"activation_fn\": tf.nn.relu\n }\n self.self_attention_conv_params = self_attention_conv_params", "def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2b\n ### TODO - Implement the forward pass of the character decoder.\n # print(\"=====input.size\",input.size())\n char_embedded= self.decoderCharEmb(input)\n # print(\"=====char_embedded.size\",char_embedded.size())\n out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\n # print(\"=====out.size\",out.size()) #dimensions (seq_length, batch, hidden_size)\n \n out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n o_proj = self.char_output_projection(out_batch_first)\n scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n return scores,dec_hidden\n ### END YOUR CODE ", "def forward(self, input, dec_hidden=None):\n ### TODO - Implement the forward pass of the character decoder.\n\n # Get the embedding matrix of the given input\n char_embedding = self.decoderCharEmb(input)\n\n # Apply the LSTM to the input\n dec_state = self.charDecoder(char_embedding, dec_hidden)\n\n # Split the hidden states and cell states\n (dec_hiddens, dec_hidden) = dec_state\n\n # Apply the output projection to get the scores\n scores = self.char_output_projection(dec_hiddens)\n\n # Return the scores and dec_state (afte rthe LSTM)\n return (scores, dec_hidden)", "def decode(self, passage_vectors, question_vectors, init_with_question=True):\n with tf.variable_scope('pn_decoder'):\n fake_inputs = tf.zeros(\n [tf.shape(passage_vectors)[0], 2, 1]) # not used\n sequence_len = tf.tile([2], [tf.shape(passage_vectors)[0]])\n if init_with_question:\n random_attn_vector = tf.Variable(tf.random_normal([1, self.hidden_size]),\n trainable=True, name=\"random_attn_vector\")\n pooled_question_rep = tc.layers.fully_connected(\n attend_pooling(question_vectors,\n random_attn_vector, self.hidden_size),\n num_outputs=self.hidden_size, activation_fn=None\n )\n init_state = tc.rnn.LSTMStateTuple(\n pooled_question_rep, pooled_question_rep)\n else:\n init_state = None\n with tf.variable_scope('fw'):\n fw_cell = PointerNetLSTMCell(self.hidden_size, 
passage_vectors)\n fw_outputs, _ = custom_dynamic_rnn(\n fw_cell, fake_inputs, sequence_len, init_state)\n with tf.variable_scope('bw'):\n bw_cell = PointerNetLSTMCell(self.hidden_size, passage_vectors)\n bw_outputs, _ = custom_dynamic_rnn(\n bw_cell, fake_inputs, sequence_len, init_state)\n start_prob = (fw_outputs[0:, 0, 0:] + bw_outputs[0:, 1, 0:]) / 2\n end_prob = (fw_outputs[0:, 1, 0:] + bw_outputs[0:, 0, 0:]) / 2\n return start_prob, end_prob", "def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)", "def _build_decoder(self, hparams, inputs, initial_state, is_training):\n ## Decoder.\n with tf.variable_scope(\"trajectory_decoder\"):\n if hparams.decoder_type == \"fc\":\n regression = self._build_fc_decoder(hparams, inputs, is_training)\n final_states = None\n \n elif hparams.decoder_type == \"rnn\":\n list_dummy_input = []\n with tf.name_scope(\"dummy_input\"):\n for gpu_idx in range(self.num_gpu):\n with tf.device(tf.DeviceSpec(device_type=\"GPU\", device_index=gpu_idx)), tf.name_scope(\"tower_{:d}\".format(gpu_idx)):\n list_dummy_input.append(tf.zeros(tf.stack([self.target_length, self.batch_size[gpu_idx], 1])))\n \n with tf.variable_scope(\"rnn\"):\n if hparams.encoder_type == \"cnn\":\n with tf.variable_scope(\"rnn_initial_state\"):\n initial_state = self._make_initial_states(hparams, inputs)\n\n net, final_states = self._build_rnn_decoder(hparams, list_dummy_input, initial_state, is_training)\n\n with tf.name_scope(\"time_batch_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n \n with tf.variable_scope(\"projection\"):\n regression = self._build_output_projection(hparams, net, is_training)\n\n else:\n raise ValueError(\"Unknown decoder type {:s}.\".format(hparams.decoder_type))\n\n return regression, final_states", "def _inference_initial_state(self, 
encoder_outputs, encoder_decoder_attention_bias):\n\n with tf.variable_scope(\"inference_initial_state\"):\n n_layers = self.attention_layers\n n_heads = self.attention_heads\n batch_size = tf.shape(encoder_outputs)[0]\n n_features = self.num_mels + self.num_freq\n\n state = {\n \"iteration\": tf.constant(0),\n \"inputs\": tf.zeros([batch_size, 1, n_features * self.reduction_factor]),\n \"finished\": tf.cast(tf.zeros([batch_size]), tf.bool),\n \"alignment_positions\": tf.zeros([n_layers, batch_size, n_heads, 1],\n dtype=tf.int32),\n \"outputs\": {\n \"spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.zeros([batch_size, 0, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.zeros([0, 0, 0, 0, 0])\n ],\n \"stop_token_logits\": tf.zeros([batch_size, 0, 1 * self.reduction_factor]),\n \"lengths\": tf.zeros([batch_size], dtype=tf.int32),\n \"mag_spec\": tf.zeros([batch_size, 0, self.num_freq * self.reduction_factor])\n },\n \"encoder_outputs\": encoder_outputs,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias\n }\n\n state_shape_invariants = {\n \"iteration\": tf.TensorShape([]),\n \"inputs\": tf.TensorShape([None, None, n_features * self.reduction_factor]),\n \"finished\": tf.TensorShape([None]),\n \"alignment_positions\": tf.TensorShape([n_layers, None, n_heads, None]),\n \"outputs\": {\n \"spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"post_net_spec\": tf.TensorShape([None, None, self.num_mels * self.reduction_factor]),\n \"alignments\": [\n tf.TensorShape([None, None, None, None, None]),\n ],\n \"stop_token_logits\": tf.TensorShape([None, None, 1 * self.reduction_factor]),\n \"lengths\": tf.TensorShape([None]),\n \"mag_spec\": tf.TensorShape([None, None, None])\n },\n \"encoder_outputs\": encoder_outputs.shape,\n \"encoder_decoder_attention_bias\": encoder_decoder_attention_bias.shape\n }\n\n return state, state_shape_invariants", "def make_decoder(latent_tensor,\n output_shape,\n is_training=True,\n decoder_fn=gin.REQUIRED):\n with tf.variable_scope(\"decoder\"):\n return decoder_fn(\n latent_tensor=latent_tensor,\n output_shape=output_shape,\n is_training=is_training)", "def build_decoder(opt, embeddings):\n dec_type = \"ifrnn\" if opt.decoder_type == \"rnn\" and opt.input_feed \\\n else opt.decoder_type\n return str2dec[dec_type].from_opt(opt, embeddings)", "def _DecodeFn():\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()", "def predict_step(params,\n inputs,\n outputs,\n cache,\n eos_token,\n max_decode_len,\n beam_size,\n config):\n # Prepare transformer fast-decoder call for beam search: for beam search, we\n # need to set up our decoder model to handle a batch size equal to\n # batch_size * beam_size, where each batch item's data is expanded in-place\n # rather than tiled.\n flat_encoded = decode.flat_batch_beam_expand(\n models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n method=models.ProgramTransformer.encode),\n beam_size)\n\n encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)\n flat_encoded_padding_mask = decode.flat_batch_beam_expand(\n encoded_padding_mask, beam_size)\n\n def tokens_ids_to_logits(flat_ids, flat_cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # --> [batch * beam, 1, vocab]\n flat_logits, new_vars = models.ProgramTransformer(config).apply(\n 
{'params': params, 'cache': flat_cache},\n flat_ids,\n flat_encoded,\n flat_encoded_padding_mask,\n mutable=['cache'],\n method=models.ProgramTransformer.decode)\n new_flat_cache = new_vars['cache']\n # Remove singleton sequence-length dimension:\n # [batch * beam, 1, vocab] --> [batch * beam, vocab]\n flat_logits = flat_logits.squeeze(axis=1)\n return flat_logits, new_flat_cache\n\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n beam_seqs, _ = decode.beam_search(\n inputs,\n cache,\n tokens_ids_to_logits,\n beam_size=beam_size,\n alpha=0.6,\n bos_token=config.bos_token,\n eos_token=eos_token,\n max_decode_len=max_decode_len)\n\n # Beam search returns [n_batch, n_beam, n_length] with beam dimension\n # sorted in increasing order of log-probability.\n return beam_seqs", "def decode_one(*args, **kwargs):\n decoded_batch, out = decode_batch(*args, **kwargs)\n decoded_doc = decoded_batch[0]\n if out.enc_attn_weights is not None:\n out.enc_attn_weights = out.enc_attn_weights[:len(decoded_doc), 0, :]\n if out.ptr_probs is not None:\n out.ptr_probs = out.ptr_probs[:len(decoded_doc), 0]\n return decoded_doc, out", "def _add_input_decoder(self, inputs, seq_len, enc_fw, enc_bw):\n with tf.variable_scope(\"decoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True, initial_state_fw=enc_fw, initial_state_bw=enc_bw)\n\n return fw_states, bw_states", "def step(self, session, encoder_inputs, decoder_inputs, target_weights,\n bucket_id, forward_only):\n # Check if the sizes match.\n encoder_size, decoder_size = self.buckets[bucket_id]\n if len(encoder_inputs) != encoder_size:\n raise ValueError(\"Encoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(encoder_inputs), encoder_size))\n if len(decoder_inputs) != decoder_size:\n raise ValueError(\"Decoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(decoder_inputs), decoder_size))\n if len(target_weights) != decoder_size:\n raise ValueError(\"Weights length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(target_weights), decoder_size))\n # print('in model.step()')\n # print('a',bucket_id, encoder_size, decoder_size)\n\n # Input feed: encoder inputs, decoder inputs, target_weights, as provided.\n input_feed = {}\n for l in xrange(encoder_size):\n input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]\n for l in xrange(decoder_size):\n input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]\n input_feed[self.target_weights[l].name] = target_weights[l]\n # print(self.encoder_inputs[l].name)\n # print(self.decoder_inputs[l].name)\n # print(self.target_weights[l].name)\n\n # Since our targets are decoder inputs shifted by one, we need one more.\n last_target = self.decoder_inputs[decoder_size].name\n input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)\n # print('last_target', last_target)\n\n # Output feed: depends on whether we do a backward step or not.\n if not forward_only:\n output_feed = [self.updates[bucket_id], # Update Op that does SGD.\n self.gradient_norms[bucket_id], # Gradient norm.\n self.losses[bucket_id]] # Loss for this batch.\n else:\n output_feed = 
[self.losses[bucket_id]] # Loss for this batch.\n for l in xrange(decoder_size): # Output logits.\n output_feed.append(self.outputs[bucket_id][l])\n\n outputs = session.run(output_feed, input_feed)\n if not forward_only:\n return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.\n else:\n return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.", "def conv_decoder(encoder_output):\n namescope = 'conv_decoder'\n with tf.variable_scope(namescope):\n net = tf.layers.conv2d(encoder_output,\n filters=256,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=tf.nn.elu)\n net = tf.layers.conv2d(net,\n filters=C,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005),\n activation=None)\n return net", "def forward(self, combiner_outputs: Dict[str, torch.Tensor], target: torch.Tensor) ->torch.Tensor:\n decoder_hidden, decoder_cell_state = get_lstm_init_state(combiner_outputs, self.reduce_sequence, self.num_layers)\n batch_size = decoder_hidden.size()[1]\n decoder_input = self.decoder_input.repeat(batch_size)\n logits = self.logits.unsqueeze(0).repeat(batch_size, 1, 1)\n for di in range(self.max_sequence_length):\n decoder_output, decoder_hidden, decoder_cell_state = self.lstm_decoder(decoder_input, decoder_hidden, decoder_cell_state)\n logits[:, di, :] = decoder_output.squeeze(1)\n if target is None:\n _, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze(1).squeeze(1).detach()\n else:\n decoder_input = target[:, di]\n return logits", "def output_logits(self, decoded_outputs, num_units, vocab_size, scope):\n with tf.variable_scope(scope):\n w = tf.get_variable(\"weights\", [num_units, vocab_size],\n dtype=self.floatX, initializer=glorot())\n b = tf.get_variable(\"biases\", [vocab_size],\n dtype=self.floatX, initializer=tf.constant_initializer(0.0))\n\n logits = tf.matmul(decoded_outputs, w) + b\n return logits", "def decode(conv_output, i=0):\n\n conv_shape = tf.shape(conv_output)\n batch_size = conv_shape[0]\n output_size = conv_shape[1]\n\n conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))\n\n conv_raw_dxdy = conv_output[:, :, :, :, 0:2]\n conv_raw_dwdh = conv_output[:, :, :, :, 2:4]\n conv_raw_conf = conv_output[:, :, :, :, 4:5]\n conv_raw_prob = conv_output[:, :, :, :, 5: ]\n\n y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])\n x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])\n\n xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)\n xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, 3, 1])\n xy_grid = tf.cast(xy_grid, tf.float32)\n\n pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * STRIDES[i]\n pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) * STRIDES[i]\n pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)\n\n pred_conf = tf.sigmoid(conv_raw_conf)\n pred_prob = tf.sigmoid(conv_raw_prob)\n\n return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)", "def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.SELU(),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.SELU(),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n 
nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.SELU(),\n nn.ConvTranspose2d(32, 3, 4, 2, 1), # B, nc, 64, 64\n nn.ReLU()\n )", "def forward(self,\n input,\n hidden):\n embedded = self.embedding(input.squeeze())[:, None, :]\n semantics = self.semantic_embedding(input.squeeze())\n output = embedded\n output, hidden = self.rnn(output, hidden)\n return output, hidden, semantics", "def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None, st=\"<s>\", ed=\"</s>\", k=3):\n\t\tbatch_size = enc_states.shape[0]\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tbeams = [Beam(k, self.vocab, hidden[:,i,:], self.device) for i in range(batch_size)]\n\n\t\t\tfor i in range(self.max_trg_len):\n\t\t\t\tfor j in range(batch_size):\n\t\t\t\t\tlogits, hidden = self.decoderStep(enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_hidden_state(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_current_word())\n\t\t\t\t\tlogLikelihood = torch.log(F.softmax(logits, dim=-1))\n\t\t\t\t\tbeams[j].advance(logLikelihood, hidden)\n\n\t\t\tallHyp, allScores = [], []\n\t\t\tn_best = 1\n\t\t\tfor b in range(batch_size):\n\t\t\t\tscores, ks = beams[b].sort_best()\n\n\t\t\t\tallScores += [scores[:n_best]]\n\t\t\t\thyps = [beams[b].get_hyp(k) for k in ks[:n_best]]\n\t\t\t\tallHyp.append(hyps)\n\n\t\t\treturn allHyp\n\t\t\t# return sentences\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:, i])\n\t\t\t\tlogits[:, i, :] = logit.squeeze()\n\t\t\treturn logits", "def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, 
self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour =tf.stack(idx_list, axis=1) # permutations\r\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\r\n self.entropies = tf.add_n(entropies)\r\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\r\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\r\n \r\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\r", "def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n class_probabilities = F.softmax(output_dict['logits'], dim=-1) # softmax over the rows, dim=0 softmaxes over the columns\n output_dict['class_probabilities'] = class_probabilities\n\n predictions = class_probabilities.cpu().data.numpy()\n argmax_indices = numpy.argmax(predictions, axis=-1)\n labels = [self.vocab.get_token_from_index(x, namespace=\"labels\")\n for x in argmax_indices]\n output_dict['label'] = labels\n return 
output_dict", "def translate(\n self, x: tf.Tensor, encoder_inputs: TextEncoder, encoder_targets: TextEncoder\n ) -> tf.Tensor:\n batch_size = x.shape[0]\n max_seq_length = tf.reduce_max(\n base.translation_max_seq_lenght(x, encoder_inputs)\n )\n\n encoder_hidden = self.encoder.initialize_hidden_state(batch_size)\n encoder_output, encoder_hidden = self.encoder(x, encoder_hidden, False)\n decoder_hidden = encoder_hidden\n\n # The first words of each sentence in the batch is the start of sample token.\n words = (\n tf.zeros([batch_size, 1], dtype=tf.int64)\n + encoder_targets.start_of_sample_index\n )\n last_words = words\n\n has_finish_predicting = False\n reach_max_seq_lenght = False\n\n while not (has_finish_predicting or reach_max_seq_lenght):\n # Call the decoder and update the decoder hidden state\n decoder_output, decoder_hidden, _ = self.decoder(\n last_words, decoder_hidden, encoder_output, False\n )\n last_words = tf.expand_dims(decoder_output, 1)\n last_words = tf.math.argmax(last_words, axis=2)\n\n logger.debug(f\"New word {last_words}.\")\n\n # Append the newly predicted words into words.\n words = tf.concat([words, last_words], 1)\n\n # Compute the end condition of the while loop.\n end_of_sample = (\n np.zeros([batch_size, 1], dtype=np.int64)\n + encoder_targets.end_of_sample_index\n )\n has_finish_predicting = np.array_equal(last_words.numpy(), end_of_sample)\n reach_max_seq_lenght = words.shape[1] >= max_seq_length\n\n logger.debug(f\"Has finish predicting {has_finish_predicting}.\")\n logger.debug(f\"Has reach max sequence length {reach_max_seq_lenght}.\")\n\n return words", "def forward(self, input_var: Tensor, hidden: Optional[HiddenDict] = None, **additional: Dict) -> RecurrentOutput:\n\n if hidden is None:\n batch_size = input_var.shape[0]\n hidden = {l: self.init_hidden(batch_size, self.device) for l in range(self.num_layers)}\n\n embed = self.embeddings(input_var) # batch_size x seq_len x embedding_dim+\n embed = self.dropout_layer(embed)\n\n input_ = embed.squeeze(1)\n for l in range(self.num_layers):\n new_hidden = self.forward_step(l, hidden[l], input_)\n input_ = new_hidden[0] # New hidden state becomes input for next layer\n hidden[l] = new_hidden # Store for next step\n\n out = self.dropout_layer(input_)\n output = self.predict_distribution(out)\n\n return output, hidden", "def online_inference(self, sess, picture_ids, in_pictures, image_f_inputs,\n stop_word='<EOS>', c_v=None):\n # get stop word index from dictionary\n stop_word_idx = self.data_dict.word2idx['<EOS>']\n cap_list = [None] * in_pictures.shape[0]\n with tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE):\n _, states = self.decoder(gen_mode=True)\n init_state, out_state, sample = states\n cap_raw = []\n for i in range(len(in_pictures)):\n state = None\n cap_list[i] = {'image_id': picture_ids[i], 'caption': ' '}\n sentence = ['<BOS>']\n cur_it = 0\n gen_word_idx = 0\n cap_raw.append([])\n while (cur_it < self.params.gen_max_len):\n input_seq = [self.data_dict.word2idx[word] for word in sentence]\n feed = {self.captions: np.array(input_seq)[-1].reshape([1, 1]),\n self.lengths: [len(input_seq)],\n image_f_inputs: np.expand_dims(in_pictures[i], 0)}\n if self.c_i is not None:\n feed.update({self.c_i_ph: np.expand_dims(c_v[i], 0)})\n # for the first decoder step, the state is None\n if state is not None:\n feed.update({init_state: state})\n next_word_probs, state = sess.run([sample, out_state],\n feed)\n if self.params.sample_gen == 'greedy':\n next_word_probs = next_word_probs.ravel()\n t = 
self.params.temperature\n next_word_probs = next_word_probs**(\n 1/t) / np.sum(next_word_probs**(1/t))\n gen_word_idx = np.argmax(next_word_probs)\n elif self.params.sample_gen == 'sample':\n gen_word_idx = next_word_probs\n gen_word = self.data_dict.idx2word[gen_word_idx]\n sentence += [gen_word]\n cap_raw[i].append(gen_word_idx)\n cur_it += 1\n if gen_word_idx == stop_word_idx:\n break\n cap_list[i]['caption'] = ' '.join([word for word in sentence\n if word not in ['<BOS>', '<EOS>']])\n # print(cap_list[i]['caption'])\n return cap_list, cap_raw", "def _define_decoder(self):\n self.decoder = nn.Sequential(nn.Linear(self.encoding_shape, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 64*64*3, bias=False), nn.ReLU(),\n View((-1, 3, 64, 64)),\n )", "def deconv_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)\n d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)\n d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])\n d3 = tf.layers.conv2d_transpose(\n inputs=d2_reshaped,\n filters=64,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d4 = tf.layers.conv2d_transpose(\n inputs=d3,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d5 = tf.layers.conv2d_transpose(\n inputs=d4,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n d6 = tf.layers.conv2d_transpose(\n inputs=d5,\n filters=output_shape[2],\n kernel_size=4,\n strides=2,\n padding=\"same\",\n )\n return tf.reshape(d6, [-1] + output_shape)", "def loglik():\n enc_in_ = tf.one_hot(tf.gather(enc_in,batch_idx),S)\n dec_in_ = tf.one_hot(tf.gather(dec_in,batch_idx),S)\n dec_out_ = tf.one_hot(tf.gather(dec_out,batch_idx),S)\n \"\"\"mask for decoder input\"\"\"\n dec_mask = tf.expand_dims(tf.expand_dims(tf.reduce_sum(dec_in_,-1),-1),1)\n \"\"\"read input with encoder\"\"\"\n h_enc = BiLSTM(enc_in_,LSTM_encoder_params['enc_fwd'],LSTM_encoder_params['enc_bkwd'])\n h_enc_last_state = h_enc[:,-1,:]\n \"\"\"generate log probability of component membership from last LSTM state\"\"\"\n log_p_z = tf.nn.log_softmax(tf.einsum('nd,dk->nk',h_enc_last_state,params['W']),-1)\n \"\"\"concatenate K component-level embeddings to decoder input\"\"\"\n group_embedding = tf.tile(tf.expand_dims(tf.expand_dims(params['U'],1),0),[tf.shape(batch_idx)[0],1,T-1,1])\n dec_in_embedded = tf.concat([group_embedding*dec_mask,tf.tile(tf.expand_dims(dec_in_,1),[1,K,1,1])],-1)\n \"\"\"read decoder input into K-dimensional decoder\"\"\"\n h_dec = MultiLSTM(dec_in_embedded,LSTM_decoder_params['dec_fwd'])\n log_p_output = tf.nn.log_softmax(tf.einsum('nktd,ds->nkts',h_dec,params['V']),-1)\n \"\"\"compute loss for decoder output under all K components\"\"\"\n llik_z = tf.reduce_sum(tf.expand_dims(log_p_z,-1) + tf.reduce_sum(log_p_output*tf.expand_dims(dec_out_,1),-1),-1)\n \"\"\"marginalize out discrete parameter with log sum exp\"\"\"\n lliks = tf.reduce_logsumexp(llik_z,-1)\n return(tf.reduce_sum(lliks))", "def build_model(options,worddicts):\n opt_ret=dict()\n params=dict()\n word_xr1_mask=tf.reverse(word_x1_mask,[1])\n word_xr2_mask = tf.reverse(word_x2_mask, [1])\n\n\n\n #embedding layer\n word_embedding = norm_weight(options['n_words'], options['dim_word'])\n if options['embedding']:\n with 
open(options['embedding'], 'r',encoding='iso-8859-1') as f:\n for line in f:\n temp=line.split()\n word=temp[0]\n vector=temp[1:]\n if word in worddicts and worddicts[word]<options['n_words']:\n word_embedding[worddicts[word],:]=vector\n\n word_embedding_layer=tf.Variable(word_embedding,name='word_embedding')\n\n emb1=tf.nn.embedding_lookup(word_embedding_layer,word_x1,name='embedding_word_lookup1')\n emb2=tf.nn.embedding_lookup(word_embedding_layer,word_x2,name='embedding_word_lookup2')\n\n if options['use_dropout']:\n emb1=tf.cond(use_noise,lambda :tf.nn.dropout(emb1,0.5),lambda :emb1)\n emb2 = tf.cond(use_noise, lambda: tf.nn.dropout(emb2, 0.5), lambda: emb2)\n\n #1-layer LSTM\n print('LSTM result')\n for l in range(1):\n #param_init_lstm\n prefix = 'encoder_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim_word']\n else:\n nin = options['dim_word']+2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n #param_init_rlstm\n prefix = 'encoder_r_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim_word']\n else:\n nin = options['dim_word'] +2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n\n\n if l==0:\n ctx1=emb1\n ctx2=emb2\n else:\n ctx1=tf.concat([ctx1,emb1],axis=2)\n ctx2=tf.concat([ctx2,emb2],axis=2)\n\n print(ctx1)\n\n ctxr1=tf.reverse(ctx1,axis=[1])\n ctxr2=tf.reverse(ctx2,axis=[1])\n\n proj1=RNN_layer(ctx1,word_x1_mask,options,params,prefix='encoder_{}'.format(str(l+1)))\n projr1=RNN_layer(ctxr1,word_xr1_mask,options,params,prefix='encoder_r_{}'.format(str(l+1)))\n proj2=RNN_layer(ctx2,word_x2_mask,options,params,prefix='encoder_{}'.format(str(l+1)))\n projr2=RNN_layer(ctxr2,word_xr2_mask,options,params,prefix='encoder_r_{}'.format(str(l+1)))\n\n ctx1=tf.concat([proj1[0],projr1[0][::-1]],axis=len(projr1[0].shape)-1)\n ctx2 = tf.concat([proj2[0], projr2[0][::-1]], axis=len(projr2[0].shape) - 1)\n ctx1 = tf.transpose(ctx1, [1, 0, 2])\n ctx2 = tf.transpose(ctx2, [1, 0, 2])\n print(ctx1)\n\n ctx1=ctx1*word_x1_mask[:,:,None]\n ctx2 = ctx2 * word_x2_mask[:, :, None]\n def _step(h,x):\n return tf.matmul(x[0],x[1])\n temp=tf.zeros((tf.shape(ctx1)[1],tf.shape(ctx2)[1]))\n weight_martrix=tf.scan(_step,[ctx1,tf.transpose(ctx2,[0,2,1])],temp)\n weight_martrix_1=tf.exp(weight_martrix)*word_x2_mask[:,None,:]\n weight_martrix_2=tf.transpose(tf.exp(weight_martrix)*word_x1_mask[:,:,None],[0,2,1])\n weight_martrix_1=weight_martrix_1/tf.reduce_sum(weight_martrix_1,axis=2)[:,:,None]\n weight_martrix_2 = weight_martrix_2 / tf.reduce_sum(weight_martrix_2, axis=2)[:,:,None]\n\n ctx1_=tf.reduce_sum(weight_martrix_1[:,:,:,None]*ctx2[:,None,:,:],axis=2)\n ctx2_ = tf.reduce_sum(weight_martrix_2[:, :, :, None] * ctx1[:, 
None, :, :],axis=2)\n inp1=tf.concat([ctx1, ctx1_, ctx1*ctx1_, ctx1-ctx1_],axis=2)\n inp2 = tf.concat([ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_], axis=2)\n params = param_init_fflayer(options, params, prefix='projection',\n nin=options['dim'] * 8, nout=options['dim'], ortho=False)\n\n\n s=tf.shape(inp1)\n inp1 = tf.nn.relu(tf.matmul(tf.reshape(inp1,[-1,int(inp1.shape[-1])]), params[_p('projection', 'W')]) + params[_p('projection', 'b')])\n inp1=tf.reshape(inp1,tf.concat([s[:2],[-1]],0))\n s=tf.shape(inp2)\n inp2 = tf.nn.relu(tf.matmul(tf.reshape(inp2,[-1,int(inp2.shape[-1])]), params[_p('projection', 'W')]) + params[_p('projection', 'b')])\n inp2=tf.reshape(inp2,tf.concat([s[:2],[-1]],0))\n if options['use_dropout']:\n inp1=tf.cond(use_noise,lambda :tf.nn.dropout(inp1,0.5),lambda :inp1)\n inp2 = tf.cond(use_noise, lambda: tf.nn.dropout(inp2, 0.5), lambda: inp2)\n\n\n for l in range(1):\n #param_init_lstm\n prefix = 'decoder_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim']\n else:\n nin = options['dim']+2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n #param_init_rlstm\n prefix = 'decoder_r_{}'.format(str(l + 1))\n if l==0:\n nin=options['dim']\n else:\n nin = options['dim'] +2*options['dim']\n dim=options['dim']\n\n W = numpy.concatenate([norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim),\n norm_weight(nin, dim)], axis=1)\n params[_p(prefix, 'W')] = tf.Variable(W)\n\n # for the previous hidden activation\n U = numpy.concatenate([ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim),\n ortho_weight(dim)], axis=1)\n params[_p(prefix, 'U')] = tf.Variable(U)\n params[_p(prefix, 'b')] = tf.Variable(numpy.zeros((4 * dim,)).astype('float32'))\n\n\n\n if l==0:\n ctx1=inp1\n ctx2=inp2\n else:\n ctx1=tf.concat([ctx1,inp1],axis=2)\n ctx2=tf.concat([ctx2,inp2],axis=2)\n\n print(ctx1)\n\n ctxr1=tf.reverse(ctx1,axis=[1])\n ctxr2=tf.reverse(ctx2,axis=[1])\n\n proj1=RNN_layer(ctx1,word_x1_mask,options,params,prefix='decoder_{}'.format(str(l+1)))\n projr1=RNN_layer(ctxr1,word_xr1_mask,options,params,prefix='decoder_r_{}'.format(str(l+1)))\n proj2=RNN_layer(ctx2,word_x2_mask,options,params,prefix='decoder_{}'.format(str(l+1)))\n projr2=RNN_layer(ctxr2,word_xr2_mask,options,params,prefix='decoder_r_{}'.format(str(l+1)))\n\n ctx1=tf.concat([proj1[0],projr1[0][::-1]],axis=len(projr1[0].shape)-1)\n ctx2 = tf.concat([proj2[0], projr2[0][::-1]], axis=len(projr2[0].shape) - 1)\n ctx1 = tf.transpose(ctx1, [1, 0, 2])\n ctx2 = tf.transpose(ctx2, [1, 0, 2])\n print(ctx1)\n\n mean_1=tf.reduce_sum(ctx1*word_x1_mask[:,:,None],axis=1)/tf.reduce_sum(word_x1_mask,axis=1)[:,None]\n max_1=tf.reduce_max(ctx1*word_x1_mask[:,:,None],axis=1)\n\n mean_2=tf.reduce_sum(ctx2*word_x2_mask[:,:,None],axis=1)/tf.reduce_sum(word_x2_mask,axis=1)[:,None]\n max_2=tf.reduce_max(ctx2*word_x2_mask[:,:,None],axis=1)\n\n #represention and MLP layer\n logit=tf.concat([mean_1,mean_2,max_1,max_2],axis=1)\n if options['use_dropout']:\n logit=tf.cond(use_noise,lambda :tf.nn.dropout(logit,0.5),lambda :logit)\n\n\n params = param_init_fflayer(options, params, 
prefix='ff_layer_1',\n nin=options['dim'] * 8, nout=options['dim'], ortho=False)\n params = param_init_fflayer(options, params, prefix='ff_layer_output',\n nin=options['dim'], nout=3, ortho=False)\n logit=tf.nn.tanh(tf.matmul(logit,params[_p('ff_layer_1','W')])+params[_p('ff_layer_1','b')])\n if options['use_dropout']:\n logit=tf.cond(use_noise,lambda :tf.nn.dropout(logit,0.5),lambda :logit)\n\n logit=tf.matmul(logit, params[_p('ff_layer_output', 'W')]) + params[_p('ff_layer_output', 'b')]\n probs=tf.nn.softmax(logit)\n pred=tf.argmax(probs,1)\n cost=tf.losses.sparse_softmax_cross_entropy(y,logit)\n return opt_ret,cost,pred,probs", "def rnn_decoder_with_attention(decoder_inputs, initial_state, cell, loop_function,attention_states,scope=None):#3D Tensor [batch_size x attn_length x attn_size]\n with tf.variable_scope(scope or \"rnn_decoder\"):\n print(\"rnn_decoder_with_attention started...\")\n state = initial_state #[batch_size x cell.state_size].\n _, hidden_size = state.get_shape().as_list() #200\n attention_states_original=attention_states\n batch_size,sequence_length,_=attention_states.get_shape().as_list()\n outputs = []\n prev = None\n #################################################\n for i, inp in enumerate(decoder_inputs):#循环解码部分的输入。如sentence_length个[batch_size x input_size]\n # 如果是训练,使用训练数据的输入;如果是test, 将t时刻的输出作为t + 1 时刻的s输入\n if loop_function is not None and prev is not None:#测试的时候:如果loop_function不为空且前一个词的值不为空,那么使用前一个的值作为RNN的输入\n with tf.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n ##ATTENTION#################################################################################################################################################\n # 1.get logits of attention for each encoder input. attention_states:[batch_size x attn_length x attn_size]; query=state:[batch_size x cell.state_size]\n query=state\n W_a = tf.get_variable(\"W_a\", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))\n query=tf.matmul(query, W_a) #[batch_size,hidden_size]\n query=tf.expand_dims(query,axis=1) #[batch_size, 1, hidden_size]\n U_a = tf.get_variable(\"U_a\", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))\n U_aa = tf.get_variable(\"U_aa\", shape=[ hidden_size])\n attention_states=tf.reshape(attention_states,shape=(-1,hidden_size)) #[batch_size*sentence_length,hidden_size]\n attention_states=tf.matmul(attention_states, U_a) #[batch_size*sentence_length,hidden_size]\n #print(\"batch_size\",batch_size,\" ;sequence_length:\",sequence_length,\" ;hidden_size:\",hidden_size) #print(\"attention_states:\", attention_states) #(?, 200)\n attention_states=tf.reshape(attention_states,shape=(-1,sequence_length,hidden_size)) # TODO [batch_size,sentence_length,hidden_size]\n #query_expanded: [batch_size,1, hidden_size]\n #attention_states_reshaped: [batch_size,sentence_length,hidden_size]\n attention_logits=tf.nn.tanh(query+attention_states+U_aa) #[batch_size,sentence_length,hidden_size]. 
additive style\n\n # 2.get possibility of attention\n attention_logits=tf.reshape(attention_logits,shape=(-1,hidden_size)) #batch_size*sequence_length [batch_size*sentence_length,hidden_size]\n V_a = tf.get_variable(\"V_a\", shape=[hidden_size,1],initializer=tf.random_normal_initializer(stddev=0.1)) #[hidden_size,1]\n attention_logits=tf.matmul(attention_logits,V_a) #最终需要的是[batch_size*sentence_length,1]<-----[batch_size*sentence_length,hidden_size],[hidden_size,1]\n attention_logits=tf.reshape(attention_logits,shape=(-1,sequence_length)) #attention_logits:[batch_size,sequence_length]\n ##########################################################################################################################################################\n #attention_logits=tf.reduce_sum(attention_logits,2) #[batch_size x attn_length]\n attention_logits_max=tf.reduce_max(attention_logits,axis=1,keep_dims=True) #[batch_size x 1]\n # possibility distribution for each encoder input.it means how much attention or focus for each encoder input\n p_attention=tf.nn.softmax(attention_logits-attention_logits_max)#[batch_size x attn_length]\n\n # 3.get weighted sum of hidden state for each encoder input as attention state\n p_attention=tf.expand_dims(p_attention,axis=2) #[batch_size x attn_length x 1]\n # attention_states:[batch_size x attn_length x attn_size]; p_attention:[batch_size x attn_length];\n attention_final=tf.multiply(attention_states_original,p_attention) #[batch_size x attn_length x attn_size]\n context_vector=tf.reduce_sum(attention_final,axis=1) #[batch_size x attn_size]\n ############################################################################################################################################################\n #inp:[batch_size x input_size].it is decoder input; attention_final:[batch_size x attn_size]\n output, state = cell(inp, state,context_vector) #attention_final TODO 使用RNN走一步\n outputs.append(output) # 将输出添加到结果列表中\n if loop_function is not None:\n prev = output\n print(\"rnn_decoder_with_attention ended...\")\n return outputs, state", "def dynamic_ensemble_decode(\n decoders,\n encoder_outputs,\n bridges,\n target_modalities,\n helper,\n parallel_iterations=32,\n swap_memory=False):\n var_scope = tf.get_variable_scope()\n # Properly cache variable values inside the while_loop\n if var_scope.caching_device is None:\n var_scope.set_caching_device(lambda op: op.device)\n\n def _create_ta(d):\n return tf.TensorArray(\n dtype=d, clear_after_read=False,\n size=0, dynamic_size=True)\n\n decoder_output_removers = nest.map_structure(lambda dec: DecoderOutputRemover(\n dec.mode, dec.output_dtype._fields, dec.output_ignore_fields), decoders)\n\n # initialize first inputs (start of sentence) with shape [_batch*_beam,]\n initial_finished, initial_input_symbols = helper.init_symbols()\n initial_time = tf.constant(0, dtype=tf.int32)\n initial_input_symbols_embed = nest.map_structure(\n lambda modality: _embed_words(modality, initial_input_symbols, initial_time),\n target_modalities)\n\n inputs_preprocessing_fns = []\n inputs_postprocessing_fns = []\n initial_inputs = []\n initial_decoder_states = []\n decoding_params = []\n for dec, enc_out, bri, inp in zip(decoders, encoder_outputs, bridges, initial_input_symbols_embed):\n with tf.variable_scope(dec.name):\n inputs_preprocessing_fn, inputs_postprocessing_fn = dec.inputs_prepost_processing_fn()\n inputs = inputs_postprocessing_fn(None, inp)\n dec_states, dec_params = dec.prepare(enc_out, bri, helper) # prepare decoder\n dec_states = 
stack_beam_size(dec_states, helper.beam_size)\n dec_params = stack_beam_size(dec_params, helper.beam_size)\n # add to list\n inputs_preprocessing_fns.append(inputs_preprocessing_fn)\n inputs_postprocessing_fns.append(inputs_postprocessing_fn)\n initial_inputs.append(inputs)\n initial_decoder_states.append(dec_states)\n decoding_params.append(dec_params)\n\n initial_outputs_tas = nest.map_structure(\n lambda dec_out_rem, dec: nest.map_structure(\n _create_ta, dec_out_rem.apply(dec.output_dtype)),\n decoder_output_removers, decoders)\n\n def body_infer(time, inputs, decoder_states, outputs_tas, finished,\n log_probs, lengths, infer_status_ta):\n \"\"\"Internal while_loop body.\n\n Args:\n time: Scalar int32 Tensor.\n inputs: A list of inputs Tensors.\n decoder_states: A list of decoder states.\n outputs_tas: A list of TensorArrays.\n finished: A bool tensor (keeping track of what's finished).\n log_probs: The log probability Tensor.\n lengths: The decoding length Tensor.\n infer_status_ta: structure of TensorArray.\n\n Returns:\n `(time + 1, next_inputs, next_decoder_states, next_outputs_tas,\n next_finished, next_log_probs, next_lengths, next_infer_status_ta)`.\n \"\"\"\n # step decoder\n outputs = []\n cur_inputs = []\n next_decoder_states = []\n for dec, inp, pre_fn, stat, dec_params in \\\n zip(decoders, inputs, inputs_preprocessing_fns, decoder_states, decoding_params):\n with tf.variable_scope(dec.name):\n inp = pre_fn(time, inp)\n out, next_stat = dec.step(inp, stat, dec_params)\n cur_inputs.append(inp)\n outputs.append(out)\n next_decoder_states.append(next_stat)\n next_outputs_tas = []\n for out_ta, out, rem in zip(outputs_tas, outputs, decoder_output_removers):\n ta = nest.map_structure(lambda ta, out: ta.write(time, out),\n out_ta, rem.apply(out))\n next_outputs_tas.append(ta)\n logits = []\n for dec, modality, out in zip(decoders, target_modalities, outputs):\n logits.append(_compute_logits(dec, modality, out))\n # sample next symbols\n sample_ids, beam_ids, next_log_probs, next_lengths \\\n = helper.sample_symbols(logits, log_probs, finished, lengths, time=time)\n gathered_states = []\n for next_stat in next_decoder_states:\n gathered_states.append(gather_states(next_stat, beam_ids))\n cur_inputs = nest.map_structure(lambda inp: gather_states(inp, beam_ids),\n cur_inputs)\n infer_status = BeamSearchStateSpec(\n log_probs=next_log_probs,\n predicted_ids=sample_ids,\n beam_ids=beam_ids,\n lengths=next_lengths)\n infer_status_ta = nest.map_structure(lambda ta, out: ta.write(time, out),\n infer_status_ta, infer_status)\n next_finished, next_input_symbols = helper.next_symbols(time=time, sample_ids=sample_ids)\n next_inputs_embed = nest.map_structure(lambda modality: _embed_words(modality, next_input_symbols, time + 1),\n target_modalities)\n next_finished = tf.logical_or(next_finished, finished)\n next_inputs = []\n for dec, cur_inp, next_inp, post_fn in zip(decoders, cur_inputs, next_inputs_embed, inputs_postprocessing_fns):\n with tf.variable_scope(dec.name):\n next_inputs.append(post_fn(cur_inp, next_inp))\n return time + 1, next_inputs, gathered_states, next_outputs_tas, \\\n next_finished, next_log_probs, next_lengths, infer_status_ta\n\n initial_log_probs = tf.zeros_like(initial_input_symbols, dtype=tf.float32)\n initial_lengths = tf.zeros_like(initial_input_symbols, dtype=tf.int32)\n initial_infer_status_ta = nest.map_structure(_create_ta, BeamSearchStateSpec.dtypes())\n loop_vars = [initial_time, initial_inputs, initial_decoder_states,\n initial_outputs_tas, 
initial_finished,\n # infer vars\n initial_log_probs, initial_lengths, initial_infer_status_ta]\n\n res = tf.while_loop(\n lambda *args: tf.logical_not(tf.reduce_all(args[4])),\n body_infer,\n loop_vars=loop_vars,\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n\n final_infer_status = nest.map_structure(lambda ta: ta.stack(), res[-1])\n return final_infer_status", "def extract_features(\n self,\n prev_output_tokens,\n encoder_out: Optional[EncoderOut] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n *args, **kwargs\n ):\n\n # embed positions\n positions = (\n self.embed_positions(\n prev_output_tokens, incremental_state=incremental_state\n )\n if self.embed_positions is not None\n else None\n )\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_tokens(prev_output_tokens)\n\n if positions is not None:\n x += positions\n x = self.positional_dropout(x)\n else:\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # decoder layers\n attn: Optional[Tensor] = None\n inner_states: List[Optional[Tensor]] = [x]\n\n for layer in self.layers:\n x, layer_attn, _ = layer(\n x,\n encoder_out.encoder_out if encoder_out is not None else None,\n encoder_out.encoder_padding_mask if encoder_out is not None else None,\n incremental_state,\n self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,\n )\n inner_states.append(x)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n return x, {\"attn\": [attn], \"inner_states\": inner_states}", "def decode_one(*args, **kwargs):\n decoded_batch, out = decode_batch(*args, **kwargs)\n decoded_doc = decoded_batch[0]\n if out.enc_attn_weights is not None:\n out.enc_attn_weights = out.enc_attn_weights[: len(decoded_doc), 0, :]\n if out.ptr_probs is not None:\n out.ptr_probs = out.ptr_probs[: len(decoded_doc), 0]\n return decoded_doc, out", "def test_decoder(device='/gpu:0'):\n tf.reset_default_graph()\n B = 64\n latent_dim = 8\n input_dim, output_dim, input_channels, output_channels = 4, 500, 1, 4\n with tf.device(device):\n latent_samples = tf.zeros((B, latent_dim))\n color_imgs = decoder(latent_samples, input_dim, output_dim, input_channels, output_channels)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n color_imgs_np = sess.run(color_imgs)\n print('Output shape should be (%d, %d, %d, %d)' % (B, output_dim, output_dim, output_channels))\n print('color_imgs shape: ' + str(color_imgs_np.shape))", "def __init__(self, vocab, embed_size=512, dropout_rate=0.1, max_len=200):\n super(DecoderEmbeddings, self).__init__()\n pad_token_idx = 0 #vocab.tokenizer.ids_to_tokens[0]\n assert vocab.tokenizer.ids_to_tokens[0] == '[PAD]'\n self.embeddings = nn.Embedding(len(vocab.tokenizer.ids_to_tokens), embed_size, padding_idx=pad_token_idx)\n self.positional_encoding = PositionalEncoding(d_model=embed_size, dropout=dropout_rate, max_len=max_len)", "def test_forward_pass(self):\n ex = self._create_example()\n decoder_input_fn = FixedDecoderInputs(\n inputs=tf.convert_to_tensor(\n ex.target, dtype=tf.float32),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n model = self.create_model()\n decoder_output = 
model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n decoder_output_ = sess.run(decoder_output)\n\n max_decode_length = model.params[\"target.max_seq_len\"]\n expected_decode_len = np.minimum(ex.target_len, max_decode_length)\n\n # Assert shapes are correct\n np.testing.assert_array_equal(decoder_output_.logits.shape, [\n self.batch_size, np.max(expected_decode_len),\n model.target_vocab_info.total_size\n ])\n np.testing.assert_array_equal(\n decoder_output_.predictions.shape,\n [self.batch_size, np.max(expected_decode_len)])", "def get_representation(output):\n\n # shape: (seq_len, vocab_size)\n hidden_states = output[1]\n\n token_embeddings = torch.stack(hidden_states, dim=0)\n # remove dimension 1 (batches)\n token_embeddings = torch.squeeze(token_embeddings, dim=1)\n # swap dimension 0 and 1\n token_embeddings = token_embeddings.permute(1, 0, 2)\n # the last hidden layer output (2+seq_len, 768)\n hidden_states = [token[-1] for token in token_embeddings]\n\n return hidden_states", "def call(self, x: Tuple[tf.Tensor, tf.Tensor], training=False):\n batch_size = x[0].shape[0]\n\n encoder_hidden = self.encoder.initialize_hidden_state(batch_size)\n encoder_output, encoder_hidden = self.encoder(x[0], encoder_hidden, training)\n\n decoder_hidden = encoder_hidden\n predictions = None\n\n seq_lenght = x[1].shape[1]\n for t in range(seq_lenght):\n # The decoder input is the word at timestep t\n previous_target_word = x[1][:, t]\n decoder_input = tf.expand_dims(previous_target_word, 1)\n\n # Call the decoder and update the decoder hidden state\n decoder_output, decoder_hidden, _ = self.decoder(\n decoder_input, decoder_hidden, encoder_output, training\n )\n\n # The predictions are concatenated on the time axis\n # The shape is (batch_size, seq_lenght, output_vocab_size)\n if predictions is None:\n predictions = tf.expand_dims(decoder_output, 1)\n else:\n decoder_output = tf.expand_dims(decoder_output, 1)\n predictions = tf.concat([predictions, decoder_output], axis=1)\n\n return predictions", "def decode(self, session, dev_example):\n unzipped_dev_example = list(zip(*dev_example))\n input_feed = self.create_feed_dict(unzipped_dev_example[0:4], dropout = 1)\n output_feed = [self.h_s, self.h_e, self.relevence]\n outputs = session.run(output_feed, input_feed)\n h_s = outputs[0]\n h_e = outputs[1]\n rel = outputs[2]\n return h_s, h_e, rel", "def forward(self, enc_input, enc_input_ext, enc_pad_mask, enc_len,\n dec_input, max_oov_len):\n\n # Build source text representations from encoder\n # [B x L x E]\n enc_emb = self.embedding(enc_input)\n # import pdb\n # pdb.set_trace()\n # TODO enc_len<=0\n enc_hidden, (h, c) = self.encoder(\n enc_emb, enc_len) # [B x L x 2H]\n\n # Outputs required for loss computation\n # 1. cross-entropy (negative log-likelihood) loss - Eq. (6)\n final_dists = []\n\n # 2. coverage loss - Eq. 
(12)\n attn_dists = []\n coverages = []\n\n # Initialize decoder inputs\n # [B x T x E]\n dec_emb = self.embedding(dec_input)\n cov = torch.zeros_like(enc_input).float() # [B x L]\n\n for t in range(self.config.tgt_max_train):\n input_t = dec_emb[:, t, :] # Decoder input at this timestep\n vocab_dist, attn_dist, context_vec, h, c = self.decoder(dec_input=input_t,\n prev_h=h,\n prev_c=c,\n enc_hidden=enc_hidden,\n enc_pad_mask=enc_pad_mask,\n coverage=cov)\n # Eq. (10) - Compute coverage vector;\n # sum of attn dist over all prev decoder timesteps\n cov = cov + attn_dist\n\n # Eq. (8) - Compute generation probability p_gen\n context_feat = self.w_h(context_vec) # [B x 1]\n decoder_feat = self.w_s(h) # [B x 1]\n input_feat = self.w_x(input_t) # [B x 1]\n gen_feat = context_feat + decoder_feat + input_feat\n p_gen = torch.sigmoid(gen_feat) # [B x 1]\n\n # Eq. (9) - Compute prob dist'n over extended vocabulary\n vocab_dist = p_gen * vocab_dist # [B x V]\n weighted_attn_dist = (1.0 - p_gen) * attn_dist # [B x L]\n\n # Concat some zeros to each vocab dist,\n # to hold probs for oov words that appeared in source text\n batch_size = vocab_dist.size(0)\n extra_zeros = torch.zeros((batch_size, max_oov_len),\n device=vocab_dist.device)\n extended_vocab_dist = torch.cat(\n [vocab_dist, extra_zeros], dim=-1) # [B x V_x]\n\n final_dist = extended_vocab_dist.scatter_add(dim=-1,\n index=enc_input_ext,\n src=weighted_attn_dist)\n # Save outputs for loss computation\n final_dists.append(final_dist)\n attn_dists.append(attn_dist)\n coverages.append(cov)\n\n final_dists = torch.stack(final_dists, dim=-1) # [B x V_x x T]\n attn_dists = torch.stack(attn_dists, dim=-1) # [B x L x T]\n coverages = torch.stack(coverages, dim=-1) # [B x L x T]\n\n import pdb\n pdb.set_trace()\n return {\n 'final_dist': final_dists,\n 'attn_dist': attn_dists,\n 'coverage': coverages\n }", "def decode(self, output_dict: Dict[str, Any]) -> Dict[str, Any]:\n\n # If the predictions are already decoded, skip the step\n if 'relex_predictions' in output_dict:\n return output_dict\n\n output_dict: Dict[str, Any] = super().decode(output_dict)\n batch_relex_predictions = self._decode_relex_predictions(output_dict)\n output_dict['relex_predictions'] = batch_relex_predictions\n\n batch_clusters: List[List[List[Tuple[int, int]]]] = output_dict['clusters']\n batch_flat_tokens: List[List[Token]] = output_dict['flat_tokens']\n batch_flat_text: List[str] = output_dict['flat_text']\n\n batch_updates = defaultdict(list)\n for (clusters, relations,\n flat_tokens, flat_text) in zip(batch_clusters, batch_relex_predictions,\n batch_flat_tokens, batch_flat_text):\n updates = self._decode_sample(clusters, relations,\n flat_tokens, flat_text)\n for key, value in updates.items():\n batch_updates[key].append(value)\n\n output_dict.update(batch_updates)\n return output_dict" ]
[ "0.7202692", "0.6928575", "0.6782827", "0.6653837", "0.65899867", "0.6541342", "0.6538928", "0.6499897", "0.6485077", "0.641824", "0.64113784", "0.637471", "0.6374259", "0.6316419", "0.6304639", "0.62677693", "0.62158835", "0.6194001", "0.61918765", "0.61859244", "0.6164222", "0.6153589", "0.6140984", "0.6132919", "0.6104226", "0.6077567", "0.6066122", "0.60258496", "0.60118663", "0.60076827", "0.6005081", "0.5927375", "0.5926551", "0.5909359", "0.5868946", "0.58484095", "0.58410543", "0.58263886", "0.5816463", "0.5810297", "0.58092093", "0.5806049", "0.580015", "0.57750195", "0.57602155", "0.57554275", "0.57454896", "0.57139385", "0.5697729", "0.5689141", "0.56810987", "0.56710935", "0.56579846", "0.56499755", "0.5649604", "0.56336236", "0.561751", "0.5604372", "0.56042033", "0.5600684", "0.5599563", "0.5596677", "0.5587778", "0.5585559", "0.5583127", "0.5572295", "0.5571707", "0.55558556", "0.55552036", "0.55544513", "0.5554387", "0.55437565", "0.55368495", "0.5522489", "0.5520433", "0.55195826", "0.55149776", "0.55134106", "0.5508658", "0.5506689", "0.55003875", "0.5496487", "0.54886043", "0.54879874", "0.5486688", "0.54862297", "0.5485744", "0.5479025", "0.5478642", "0.5476091", "0.5472844", "0.54726577", "0.5471384", "0.54706794", "0.54694384", "0.54598445", "0.54586583", "0.54356927", "0.54345876", "0.54342955", "0.5430718" ]
0.0
-1
Decodes a sequence by feeding each predicted token back into the network and acting according to the output probabilities.
def decode_chain_argmax(self, hid, begin_emb, seq_len, stop_at_token=None): res_logits = [] res_tokens = [] cur_emb = begin_emb for _ in range(seq_len): out_logits, hid = self.decode_one(hid, cur_emb) out_token_v = torch.max(out_logits, dim=1)[1] #uses argmax to go from logits to the decoded token ID out_token = out_token_v.data.cpu().numpy()[0] cur_emb = self.emb(out_token_v) #obtains embeddings for the decoded token to iterate over res_logits.append(out_logits) res_tokens.append(out_token) if stop_at_token is not None and out_token == stop_at_token: break return torch.cat(res_logits), res_tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(self, passage_vectors, question_vectors, init_with_question=True):\n with tf.variable_scope('pn_decoder'):\n fake_inputs = tf.zeros(\n [tf.shape(passage_vectors)[0], 2, 1]) # not used\n sequence_len = tf.tile([2], [tf.shape(passage_vectors)[0]])\n if init_with_question:\n random_attn_vector = tf.Variable(tf.random_normal([1, self.hidden_size]),\n trainable=True, name=\"random_attn_vector\")\n pooled_question_rep = tc.layers.fully_connected(\n attend_pooling(question_vectors,\n random_attn_vector, self.hidden_size),\n num_outputs=self.hidden_size, activation_fn=None\n )\n init_state = tc.rnn.LSTMStateTuple(\n pooled_question_rep, pooled_question_rep)\n else:\n init_state = None\n with tf.variable_scope('fw'):\n fw_cell = PointerNetLSTMCell(self.hidden_size, passage_vectors)\n fw_outputs, _ = custom_dynamic_rnn(\n fw_cell, fake_inputs, sequence_len, init_state)\n with tf.variable_scope('bw'):\n bw_cell = PointerNetLSTMCell(self.hidden_size, passage_vectors)\n bw_outputs, _ = custom_dynamic_rnn(\n bw_cell, fake_inputs, sequence_len, init_state)\n start_prob = (fw_outputs[0:, 0, 0:] + bw_outputs[0:, 1, 0:]) / 2\n end_prob = (fw_outputs[0:, 1, 0:] + bw_outputs[0:, 0, 0:]) / 2\n return start_prob, end_prob", "def decode():\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(True)\n model.batch_size = 1 # We decode one sentence at a time.\n init_model(sess, model)\n\n # Load vocabularies.\n vocab, rev_vocab = data_utils.get_vocabulary(FLAGS.data_dir, FLAGS.words,\n FLAGS.word_embeddings, FLAGS.vocab_size)\n\n # Decode from standard input.\n sys.stdout.write(\"> \")\n sys.stdout.flush()\n sentence = sys.stdin.readline()\n while sentence:\n # Get token-ids for the input sentence.\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), vocab,\n data_utils.basic_word_tokenizer)\n # Which bucket does it belong to?\n bucket_id = min([b for b in xrange(len(buckets))\n if buckets[b][0] > len(token_ids)])\n # Get a 1-element batch to feed the sentence to the model.\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n # Get output logits for the sentence.\n _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True)\n # This is a greedy decoder - outputs are just argmaxes of output_logits.\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]\n # If there is an EOS symbol in outputs, cut them at that point.\n if data_utils.EOS_ID in outputs:\n outputs = outputs[:outputs.index(data_utils.EOS_ID)]\n # Print out the network's response to the input.\n join = \" \" if FLAGS.words else \"\"\n print(join.join([tf.compat.as_str(rev_vocab[output]) for output in outputs]))\n print(\"> \", end=\"\")\n sys.stdout.flush()\n sentence = sys.stdin.readline()", "def _decode(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, 2 * self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], 2 * self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)", "def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):\n # Propagate through decoder\n step_logits, 
memories = decoding_function(next_ids, current_time_step, memories)\n # Calculate log probabilities for token prediction at current time-step\n step_scores = tf.nn.log_softmax(step_logits)\n # Determine next token to be generated, next_ids has shape [batch_size]\n if do_sample:\n next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)\n else:\n # Greedy decoding\n next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)\n # Collect scores associated with the selected tokens\n score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)\n decoded_score += tf.gather_nd(step_scores, score_coordinates)\n # Concatenate newly decoded token ID with the previously decoded ones\n decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)\n # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step\n next_ids = tf.expand_dims(next_ids, time_dim)\n # Check if generation has concluded with <EOS>\n # all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id)\n all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id)\n\n return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories", "def greedy_decode(self, src, src_mask, src_lengths ):\n encoder_hidden, encoder_final = self.encodeSent(src, src_mask, src_lengths)\n prev_y, trg_mask = self.getTokRepresentation(self.sos_index, src, ret_mask=True)\n\n output = []\n attention_scores = []\n hidden = None\n\n for i in range(self.max_len):\n hidden, pre_output = self.decodeSent(encoder_hidden, encoder_final, src_mask,\n prev_y, trg_mask, hidden)\n\n # we predict from the pre-output layer, which is\n # a combination of Decoder state, prev emb, and context\n prob = self.model.callGenerator(pre_output[:, -1])\n\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data.item()\n output.append(next_word)\n prev_y = torch.ones(1, 1).type_as(src).fill_(next_word)\n if self.model.decoder.attention is not None and not self.do_lang_model:\n attention_scores.append(self.model.decoder.attention.alphas.cpu().numpy())\n else:\n #no attention do not store\n attention_scores.append(torch.zeros_like(hidden).cpu().numpy())\n \n output = np.array(output)\n \n # cut off everything starting from </s> \n # (only when eos_index provided)\n if self.eos_index is not None:\n first_eos = np.where(output==self.eos_index)[0]\n if len(first_eos) > 0:\n output = output[:first_eos[0]] \n \n return output, np.concatenate(attention_scores, axis=1)", "def decode(self, *args, **kwargs):\n return self.tokenizer.decode(*args, **kwargs)", "def decode(self, *args, **kwargs):\n return self.tokenizer.decode(*args, **kwargs)", "def decode(self, dec_state, words, **kwargs):\n with tf.name_scope(self.decoder2.name):\n (enc_out, enc_attn_mask, dec1_out, dec1_rdo, dec1_attn_mask,\n attnP, prev_out_seq, rdo) = dec_state\n\n out_seq = tf.concat([prev_out_seq, tf.expand_dims(words, 1)], 1)\n return self._decode_impl((enc_out, enc_attn_mask, dec1_out, dec1_rdo, dec1_attn_mask,\n attnP, out_seq, rdo), **kwargs)", "def predict_seq2seq(net, src_sentence, src_vocab, tgt_vocab, num_steps,\n device, save_attention_weights=False):\n # Set `net` to eval mode for inference\n net.eval()\n src_tokens = src_vocab[src_sentence.lower().split(' ')] + [\n src_vocab['<eos>']]\n enc_valid_len = torch.tensor([len(src_tokens)], device=device)\n src_tokens = truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])\n # Add the batch 
axis\n enc_X = torch.unsqueeze(\n torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)\n enc_outputs = net.encoder(enc_X, enc_valid_len)\n dec_state = net.decoder.init_state(enc_outputs, enc_valid_len)\n # Add the batch axis\n dec_X = torch.unsqueeze(\n torch.tensor([tgt_vocab['<bos>']], dtype=torch.long, device=device),\n dim=0)\n output_seq, attention_weight_seq = [], []\n for _ in range(num_steps):\n Y, dec_state = net.decoder(dec_X, dec_state)\n # We use the token with the highest prediction likelihood as the input\n # of the decoder at the next time step\n dec_X = Y.argmax(dim=2)\n pred = dec_X.squeeze(dim=0).type(torch.int32).item()\n # Save attention weights (to be covered later)\n if save_attention_weights:\n attention_weight_seq.append(net.decoder.attention_weights)\n # Once the end-of-sequence token is predicted, the generation of the\n # output sequence is complete\n if pred == tgt_vocab['<eos>']:\n break\n output_seq.append(pred)\n return ' '.join(tgt_vocab.to_tokens(output_seq)), attention_weight_seq", "def decode(self, seq):\n return [ self.rev_vocab[int(el)] for el in seq ]", "def decode_response(model: Model, test_input: np.array) -> str:\n # Needed variables\n input_tokens, target_tokens = get_tokens(get_questions_and_answers())\n input_features_dict, target_features_dict = get_features_dicts(input_tokens, target_tokens)\n reverse_target_features_dict = dict(\n (i, token) for token, i in target_features_dict.items()\n )\n\n num_decoder_tokens = len(list(target_features_dict.items()))\n max_decoder_seq_length = 48\n test_input = string_to_matrix(test_input, input_features_dict, len(input_tokens))\n\n # We get our encoder & decoder models based on our pretrained model\n encoder_model, decoder_model = get_model(model, len(target_tokens))\n\n # Getting the output states to pass into the decoder\n states_value = encoder_model.predict(test_input)\n\n # Generating empty target sequence of length 1\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n\n # Setting the first token of target sequence with the start token\n target_seq[0, 0, target_features_dict['<INICIO>']] = 1.\n\n decoded_sentence = ''\n \n stop_condition = False\n while not stop_condition:\n # Predicting output tokens with probabilities and states\n output_tokens, hidden_state, cell_state = decoder_model.predict([target_seq] + states_value)\n\n # Choosing the one with highest probability\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_token = reverse_target_features_dict[sampled_token_index]\n decoded_sentence += f' {sampled_token}'\n\n # Stop if hit max length or found the stop token\n stop_condition = sampled_token == '<FINAL>' or len(decoded_sentence) > max_decoder_seq_length\n\n # Update the target sequence\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n target_seq[0, 0, sampled_token_index] = 1.\n\n # Update states\n states_value = [hidden_state, cell_state]\n\n return decoded_sentence", "def _decode_step(self, states: List[ModelState]) -> Tuple[mx.nd.NDArray, mx.nd.NDArray, List[ModelState]]:\n model_probs, model_attention_probs, model_states = [], [], []\n for model, state in zip(self.models, states):\n probs, attention_probs, state = model.run_decoder(state)\n model_probs.append(probs)\n model_attention_probs.append(attention_probs)\n model_states.append(state)\n probs, attention_probs = self._combine_predictions(model_probs, model_attention_probs)\n return probs, attention_probs, model_states", "def _decode_train(self):\n\n # the basic idea is, we use golden sketch 
during train and in order to copy from source\n # we given true mask of decoder to generate right copy weights\n state = {'encoder': self.concated_encoder_output}\n\n def transformer_concated_decoder_internal(inputs, memory, bias, mem_bias, params, state=None, scope=None,\n reuse=False):\n return transformer_decoder(inputs, memory, bias, mem_bias, params, state, scope, reuse)\n\n self.final_logits = self._decode_func(\n self.tgt_seq, self.tgt_len, self.target_embeddings, self.decoder_weights,\n self.final_enc_attn_bias, 'train', state, self.vocab_size, use_copy=True,\n expand_source_ids_oo=self.concat_src_ids_oo,\n max_out_oovs=self.max_out_oovs, src_mask=self.concat_src_mask,\n decoder_fn=transformer_concated_decoder_internal,\n scope='final_decoder')", "def _decode(self, x):\r\n features = Model._get_features(\r\n [x], self.feature_type, no_tqdm=True)[0][0]\r\n labels = self._predict_one(features)\r\n correct_grammar = Model._check_grammar(labels, [0, 3])\r\n word = \"\"\r\n words = []\r\n for token, label in zip(x, labels):\r\n word += token\r\n if label in [1, 3]:\r\n words.append(word)\r\n word = \"\"\r\n return ' '.join(words), correct_grammar", "def decode(self, input):\n S = [s[0] for s in self.ngram_counts[0].keys()] # Set of tags\n _S = [s[0] for s in self.ngram_counts[0].keys()]\n _S.append('*') # _S includes '*' tag\n X = ['*'] # X stores each sentence. X[0] = '*', X[i] = xi\n for l in input:\n x = l.strip()\n if x: # Word\n X.append(x)\n else: # End of a sentence\n n = len(X) - 1 # the length of the sentence\n pi = defaultdict(float) # DP table PI\n bp = {} # back pointer\n\n # Initialize DP table\n for u in _S:\n for v in _S:\n pi[tuple([0, u, v])] = 0\n pi[tuple([0, '*', '*'])] = 1\n\n # Viterbi algorithm\n for k in xrange(1, n + 1):\n for u in _S:\n for v in S: # v will not be '*' \n max_score = 0\n tag = None\n for w in _S:\n if sum([self.emission_counts[tuple([y, X[k]])] for y in S]) < 5: # If the word X[k] is rare word or unseen word in the training corpus,\n x = symbolize(X[k], self.symbolize_option) # use RARE word probability\n else:\n x = X[k]\n try:\n score = pi[tuple([k-1, w, u])] * self.q(v, w, u) * self.e(x, v)\n if max_score < score:\n max_score = score\n tag = w\n except:\n pass\n pi[tuple([k, u, v])] = max_score # Update DP table entry\n bp[tuple([k, u, v])] = tag\n\n # Find tag sequence\n Y = ['*'] # Y stores tag sequence for X. 
Y[0] = '*', Y[i] = yi\n Y.extend(n * [None])\n max_score = None\n tag = None\n for u in _S:\n for v in _S:\n if self.ngram_counts[1][tuple([u, v])]:\n score = pi[tuple([n, u, v])] * self.q('STOP', u, v)\n if max_score is None or max_score < score:\n max_score = score\n tag = [u, v]\n Y[n-1] = tag[0]\n Y[n] = tag[1]\n for k in xrange(n - 2, 0, -1):\n Y[k] = bp[tuple([k + 2, Y[k + 1], Y[k + 2]])]\n\n # Write result\n prev = '*'\n for k in xrange(1, n + 1):\n print X[k], Y[k], log(pi[tuple([k, prev, Y[k]])])\n prev = Y[k]\n print ''\n\n X = ['*'] # set for the next sentence", "def _postprocess(self, preds):\n ntok = preds.pop(\"ntok\")\n ids = preds.pop(\"input_ids\")[:ntok]\n preds[\"tokens\"] = self._detokenize(ids)\n\n # Decode predicted top-k tokens.\n # token_topk_preds will be a List[List[(word, prob)]]\n # Initialize prediction for 0th token as N/A.\n token_topk_preds = [[(\"N/A\", 1.)]]\n pred_ids = preds.pop(\"top_k_indices\")[:ntok] # <int>[num_tokens, k]\n pred_probs = preds.pop(\"top_k_probs\")[:ntok] # <float32>[num_tokens, k]\n for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):\n token_pred_words = self._detokenize(token_pred_ids)\n token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))\n preds[\"pred_tokens\"] = token_topk_preds\n\n # Process attention.\n for key in preds:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n preds[key] = preds[key].copy()\n\n return preds", "def decode_parser(self, classifier, sentence):\n start_config = self.init_config(sentence)\n config = copy.deepcopy(start_config)\n \n while not config.is_terminal():\n if config.sigma == start_config.sigma:\n shift(config)\n else:\n # here the feature extraction will only return one instance instead of instance list\n instance = self.feature_extract([(config, '')])\n transition_name = classifier.label_codebook.get_label(classifier.classify_instance(instance[0]))\n transition_func = self.transition_codebook[transition_name]\n transition_func(config)\n # print config.A\n new_sentence = sentence[:]\n for i, element in enumerate(sentence):\n arc = [(k, v) for k, v in config.A if v == element[0]]\n # print arc\n if len(arc) > 1:\n # raise Exception(\"Error: one word must have one head\\n\")\n print \"Error: one word must have one head\\n\"\n if arc != []:\n header = arc[0][0]\n else:\n header = 0\n new_sentence[i] = tuple((element[0], element[1], element[2], header, element[4]))\n \n return new_sentence", "def decode(self, output, nwords, params):\n raise NotImplementedError()", "def decode(prev_hidden: torch.tensor, source_hiddens: torch.tensor, prev_context: torch.tensor,\n input: int, model: Seq2SeqAttentionModel) -> (\n torch.tensor, torch.tensor, torch.tensor, torch.tensor):\n\n decode_in = torch.cat((model.target_embedding_matrix[input], prev_context))\n hidden_out = model.decoder_gru.forward(decode_in, prev_hidden)\n # passing the top layer of encoder and decoder hidden dims\n attention_weights = model.attention.forward(source_hiddens[:,-1,:], hidden_out[-1])\n context = torch.mm(attention_weights.unsqueeze(dim=0),source_hiddens[:,-1,:]).squeeze()\n log_probs = 
model.output_layer.forward(torch.cat((hidden_out[-1].squeeze(),context)))\n return log_probs, hidden_out, context, attention_weights", "def crf_decode(potentials, transition_params, sequence_length, balance_param = 1):\n \"\"\"Decoding of highest scoring sequence.\"\"\"\n # For simplicity, in shape comments, denote:\n # 'batch_size' by 'B', 'max_seq_len' by 'T' , 'num_tags' by 'O' (output).\n num_tags = tensor_shape.dimension_value(potentials.shape[2])\n \n # Computes forward decoding. Get last score and backpointers.\n crf_fwd_cell = CrfDecodeForwardRnnCell(transition_params, balance_param)\n initial_state = array_ops.slice(potentials, [0, 0, 0], [-1, 1, -1]) # [B,1, O]\n initial_state = array_ops.squeeze(initial_state, axis=[1]) # [B, O]\n inputs = array_ops.slice(potentials, [0, 1, 0], [-1, -1, -1]) # [B, T-1, O]\n \n # Sequence length is not allowed to be less than zero.\n sequence_length_less_one = math_ops.maximum(\n constant_op.constant(0, dtype=sequence_length.dtype),\n sequence_length - 1)\n backpointers, last_score = rnn.dynamic_rnn( # [B, T - 1, O], [B, O]\n crf_fwd_cell,\n inputs=inputs,\n sequence_length=sequence_length_less_one,\n initial_state=initial_state,\n time_major=False,\n dtype=dtypes.int32)\n backpointers = gen_array_ops.reverse_sequence( # [B, T - 1, O]\n backpointers, sequence_length_less_one, seq_dim=1)\n \n # Computes backward decoding. Extract tag indices from backpointers.\n crf_bwd_cell = CrfDecodeBackwardRnnCell(num_tags)\n initial_state = math_ops.cast(math_ops.argmax(last_score, axis=1), # [B]\n dtype=dtypes.int32)\n initial_state = array_ops.expand_dims(initial_state, axis=-1) # [B, 1]\n decode_tags, _ = rnn.dynamic_rnn( # [B, T - 1, 1]\n crf_bwd_cell,\n inputs=backpointers,\n sequence_length=sequence_length_less_one,\n initial_state=initial_state,\n time_major=False,\n dtype=dtypes.int32)\n decode_tags = array_ops.squeeze(decode_tags, axis=[2]) # [B, T - 1]\n decode_tags = array_ops.concat([initial_state, decode_tags], # [B, T]\n axis=1)\n decode_tags = gen_array_ops.reverse_sequence( # [B, T]\n decode_tags, sequence_length, seq_dim=1)\n best_score = math_ops.reduce_max(last_score, axis=1) # [B]\n \n return decode_tags, best_score", "def decode(self, tgt, encoder_out):\n tgt_mask = get_lookahead_mask(tgt)\n tgt = self.emb(tgt)\n tgt = self.phn_lin(tgt)\n if self.attention_type == \"RelPosMHAXL\":\n # we use fixed positional encodings in the decoder\n tgt = tgt + self.positional_encoding_decoder(tgt)\n encoder_out = encoder_out + self.positional_encoding_decoder(\n encoder_out\n )\n elif self.positional_encoding_type == \"fixed_abs_sine\":\n tgt = tgt + self.positional_encoding(tgt) # add the encodings here\n prediction, self_attns, multihead_attns = self.decoder(\n tgt,\n encoder_out,\n tgt_mask=tgt_mask,\n pos_embs_tgt=None,\n pos_embs_src=None,\n )\n attention = multihead_attns[-1]\n return prediction, attention", "def predict(self, seq):\n raise Exception(\"You cannot predict with a base predictor.\")", "def inference(self, inputs):\n # test_2\n memory = self.get_go_frame(inputs)\n memory = self._update_memory(memory)\n\n self._init_states(inputs, mask=None)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments, t = [], [], [], 0\n while True:\n memory = self.prenet(memory)\n decoder_output, alignment, stop_token = self.decode(memory)\n stop_token = torch.sigmoid(stop_token.data)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token]\n alignments += [alignment]\n\n if stop_token > self.stop_threshold and t > 
inputs.shape[0] // 2:\n break\n if len(outputs) == self.max_decoder_steps:\n print(\" | > Decoder stopped with 'max_decoder_steps\")\n break\n\n memory = self._update_memory(decoder_output)\n t += 1\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n\n return outputs, alignments, stop_tokens", "def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.num_mels + self.num_freq\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = tf.concat(\n [state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n weights = []\n for index, attention in enumerate(self.attentions):\n if isinstance(attention, AttentionBlock):\n weights.append(attention.multiheaded_attention.attention_weights)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state", "def decoder(self, tensor, reuse=False):\n\n outputs, predictions = [], []\n\n with tf.variable_scope(\"decoder\", reuse=reuse) as scope:\n\n\n # add gausian noise\n decoder_input = gaussian_noise_layer(tensor, 0.2)\n encoder_dim = 
tensor.get_shape().as_list()[-1]\n W = tf.get_variable(\"decoder_last_weight\", [self.num_units + encoder_dim, self.voca_size])\n b = tf.get_variable(\"decoder_last_bias\", [self.voca_size])\n # time-major: [batch_size, max_len, num_units] --> [max_len, batch_size, num_units]\n # decoder_input = tf.transpose(decoder_input, [1,0,2])\n cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=False)\n # initial_state = state = decoder_input\n initial_state = tf.zeros([self.batch_size, self.num_units])\n initial_state = tf.concat([initial_state, decoder_input], 1)\n\n\n for i in range(self.max_len):\n if i == 0:\n # start of sequence\n input_ = tf.nn.embedding_lookup(self.embedding, tf.ones([self.batch_size], dtype=tf.int32))\n state = initial_state\n\n else:\n scope.reuse_variables()\n input_ = tf.nn.embedding_lookup(self.embedding, prediction)\n\n output, state = cell(input_, state)\n output = tf.concat([output, tensor], -1)\n output = tf.nn.xw_plus_b(output, W, b)\n\n prediction = tf.argmax(output, axis=1)\n\n outputs.append(output)\n predictions.append(prediction)\n\n predictions = tf.transpose(tf.stack(predictions), [1,0])\n outputs = tf.stack(outputs)\n\n return predictions, outputs", "def decode(self, received_sequence):\n return self.decode_node(received_sequence).input_history()", "def TestSeq2Seq(source_seq, target_seq_in, target_seq_out): \r\n loss = 0\r\n pred = []\r\n decoder_length = target_seq_out.shape[1]\r\n # Encode the source.\r\n encoder_outputs = encoder(source_seq)\r\n states = encoder_outputs[1:]\r\n # Decoder predicts the target_seq.\r\n decoder_in = tf.expand_dims(target_seq_in[:, 0], 1)\r\n for t in range(decoder_length):\r\n logit, de_state_h, de_state_c= decoder(decoder_in, states)\r\n decoder_in = tf.expand_dims(logit, 1)\r\n states = de_state_h, de_state_c\r\n # loss function : RSME TODO\r\n loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)# TODO\r\n \r\n loss = tf.reduce_mean(loss) \r\n loss = loss / decoder_length\r\n return loss", "def decode_sequence(self, sequence=list) -> str:\n try:\n out = []\n for word in sequence:\n out.append(self.decode(word))\n return(out)\n except Exception as error:\n print(f\"Error: self.decode_sequence({sequence}) -> {error}\")", "def decode():\n\n with tf.device('/cpu:0'):\n dataset_test = SequenceDataset(\n subset=\"test\",\n config_dir=FLAGS.config_dir,\n data_dir=FLAGS.data_dir,\n batch_size=1,\n input_size=FLAGS.input_dim,\n output_size=FLAGS.output_dim,\n infer=True,\n name=\"dataset_test\")()\n\n model = TfModel(\n rnn_cell=FLAGS.rnn_cell,\n dnn_depth=FLAGS.dnn_depth,\n dnn_num_hidden=FLAGS.dnn_num_hidden,\n rnn_depth=FLAGS.rnn_depth,\n rnn_num_hidden=FLAGS.rnn_num_hidden,\n output_size=FLAGS.output_dim,\n bidirectional=FLAGS.bidirectional,\n rnn_output=FLAGS.rnn_output,\n cnn_output=FLAGS.cnn_output,\n look_ahead=FLAGS.look_ahead,\n mdn_output=FLAGS.mdn_output,\n mix_num=FLAGS.mix_num,\n name=\"tf_model\")\n\n # Build the testing model and get test output sequence.\n test_iterator = dataset_test.batched_dataset.make_one_shot_iterator()\n input_sequence, input_sequence_length = test_iterator.get_next()\n test_output_sequence_logits, test_final_state = model(\n input_sequence, input_sequence_length)\n\n show_all_variables()\n\n saver = tf.train.Saver()\n\n # Decode.\n with tf.Session() as sess:\n # Run init\n sess.run(tf.global_variables_initializer())\n\n if not restore_from_ckpt(sess, saver): sys.exit(-1)\n\n # Read cmvn to do reverse mean variance normalization\n 
cmvn = np.load(os.path.join(FLAGS.data_dir, \"train_cmvn.npz\"))\n\n num_batches = 0\n used_time_sum = frames_sum = 0.0\n while True:\n try:\n time_start = time.time()\n logits = sess.run(test_output_sequence_logits)\n time_end = time.time()\n\n used_time = time_end - time_start\n used_time_sum += used_time\n frame_num = logits.shape[1]\n frames_sum += frame_num\n\n # Squeeze batch dimension.\n logits = logits.squeeze(axis=0)\n\n if FLAGS.mdn_output:\n out_pi = logits[:, : FLAGS.mix_num]\n out_mu = logits[:, FLAGS.mix_num : (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim)]\n out_sigma = logits[:, (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim) :]\n\n max_index_pi = out_pi.argmax(axis=1)\n result_mu = []\n for i in xrange(out_mu.shape[0]):\n beg_index = max_index_pi[i] * FLAGS.output_dim\n end_index = (max_index_pi[i] + 1) * FLAGS.output_dim\n result_mu.append(out_mu[i, beg_index:end_index])\n logits = np.vstack(result_mu)\n\n sequence = logits * cmvn[\"stddev_labels\"] + cmvn[\"mean_labels\"]\n\n out_dir_name = os.path.join(FLAGS.save_dir, \"test\", \"cmp\")\n out_file_name =os.path.basename(\n dataset_test.tfrecords_lst[num_batches]).split('.')[0] + \".cmp\"\n out_path = os.path.join(out_dir_name, out_file_name)\n write_binary_file(sequence, out_path, with_dim=False)\n #np.savetxt(out_path, sequence, fmt=\"%f\")\n\n tf.logging.info(\n \"writing inferred cmp to %s (%d frames in %.4f seconds)\" % (\n out_path, frame_num, used_time))\n num_batches += 1\n except tf.errors.OutOfRangeError:\n break\n\n tf.logging.info(\"Done decoding -- epoch limit reached (%d \"\n \"frames per second)\" % int(frames_sum / used_time_sum))", "def predict_next(self, token: str) -> str:\n return self._cooccurrence_matrix.distribution(token).sample()", "def beam_decode(model, src, src_mask, src_lengths, max_len=100, sos_index=1, eos_index=None, beam_size=5):\n\n with torch.no_grad():\n encoder_hidden, encoder_final = model.encode(src, src_mask, src_lengths)\n\n output = []\n hidden = None\n\n i = 0\n beam_nodes = []\n beam_nodes.append(BeamNode(sos_index, hidden, 0))\n ended = False #Flag raised when EOS token found\n while i<max_len and not ended:\n new_nodes = []\n for node in beam_nodes:\n prev_word = node.prev_input\n prev_y = torch.ones(1, 1).fill_(prev_word).type_as(src)\n trg_mask = torch.ones_like(prev_y)\n hidden = node.prev_h\n with torch.no_grad():\n out, hidden, pre_output = model.decode(\n encoder_hidden, encoder_final, src_mask,\n prev_y, trg_mask, hidden)\n\n # we predict from the pre-output layer, which is\n # a combination of Decoder state, prev emb, and context\n prob = model.generator(pre_output[:, -1])\n\n probs, words = torch.topk(prob, beam_size, dim=1)\n #print(probs, words)\n probs = probs.squeeze().cpu().numpy()\n words = words.squeeze().cpu().numpy()\n #print([lookup_words(x, TRG.vocab) for x in words])\n# print(lookup_words(words, TRG.vocab))\n #print(probs)\n #print(words)\n for j in range(len(probs)):\n #print(j)\n probj = probs[j]\n next_word = words[j]\n #print(probi)\n #print(wordi)\n new_words = node.words.copy() + [next_word]\n new_prob = node.logProb + probj\n new_node = BeamNode(next_word, hidden, new_prob, words=new_words, attention_scores=node.attention_scores.copy())\n new_node.attention_scores.append(model.decoder.attention.alphas.cpu().numpy())\n new_nodes.append(new_node)\n i+=1\n #print(\"first\", len(beam_nodes))\n beam_nodes = sorted(new_nodes, key=lambda node: -node.logProb)[:beam_size] \n #print(lookup_words([n.prev_input for n in beam_nodes], TRG.vocab))\n 
#print([n.logProb for n in beam_nodes])\n #print([n.logProb for n in beam_nodes])\n #print(len(beam_nodes))\n ended = any([True if node.prev_input==eos_index else False for node in beam_nodes])\n #print(ended)\n output = []\n attns = []\n if ended:\n end_node_i = [1 if node.prev_input==eos_index else 0 for node in beam_nodes].index(1)\n end_node = beam_nodes[end_node_i]\n output = np.array(end_node.words[:-1])\n else:\n end_node = beam_nodes[0]\n output = np.array(end_node.words)\n #print(end_node.attention_scores) \n #print(np.array(end_node.attention_scores).shape) \n #print([x.shape for x in end_node.attention_scores])\n #print(output)\n return output, np.concatenate(np.array(end_node.attention_scores), axis=1)", "def predict_tokens(self, tokens):\n return", "def greedy_decode(self, max_len):\n decoded_sentences = []\n self.model.eval()\n with tqdm(total=len(self.dev_iter)) as t:\n with torch.no_grad():\n for idx, batch in enumerate(self.dev_iter):\n src, src_lengths = batch.src\n src_mask = (src != self.params.pad_token).unsqueeze(-2)\n\n if self.params.cuda:\n src = src.cuda()\n\n # run the src language through the Encoder\n encoder_output, encoder_final = self.model.encode(\n src, src_mask, src_lengths)\n\n # TODO: Greedy Decoding for GRU and Transformers are different\n # the GRU greedy decoding just takes in the previous token\n # whereas the Transformer model takes in the whole sequence\n # that has been decoded sofar\n\n # Encoder Final is the final hidden state of the Encoder Model\n # You will only have the Encoder Final if you are using a\n # GRUEncoder and if you are using a Transformer then\n # the encoder_final will be None\n # [num_layers, batch_size, hidden_size]\n encoder_final = encoder_final[:self.model.decoder.num_layers] if self.params.model_type == \"GRU\" else None\n\n decoded_batch = torch.ones(1, 1).fill_(\n self.params.sos_index).type_as(src)\n\n hidden = None\n for _ in range(max_len-1):\n # either use the decoded batch to decode the next word\n # or use the last word decoded to decode the next work\n trg = decoded_batch[:, -1].unsqueeze(\n 1) if self.params.model_type == \"GRU\" else decoded_batch\n\n # create trgt_mask for transformer [batch_size, seq_len, seq_len]\n trg_mask = make_tgt_mask(\n trg, tgt_pad=self.params.pad_token)\n\n # output: [batch_size, seq_len, hidden_size], hidden: [num_layers, batch_size, hidden_size]\n output, hidden = self.model.decode(\n trg, encoder_output, src_mask, trg_mask, encoder_final, hidden)\n\n # pass the output through the generator to get prediction\n # take the last output and pass it through the\n # linear softmax layer to get preidctions\n\n # output[:, -1] => [batch_size, hidden_size]\n # linear [hidden_size, tgt_vocab_size]\n # prob: [batch_size, tgt_vocab_size]\n prob = self.model.generator(output[:, -1])\n prob = F.log_softmax(prob, dim=-1)\n\n # [batch_size, 1]\n next_word = torch.argmax(prob, dim=-1).item()\n\n decoded_batch = torch.cat([decoded_batch,\n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)\n\n # the decoded batch should not include the <s> token\n decoded_batch = decoded_batch[:, 1:]\n tokens = self.batch_reverse_tokenization(decoded_batch)\n decoded_sentences.extend(tokens)\n t.update()\n return decoded_sentences", "def TestAttentionSeq2Seq(source_seq, target_seq_in, target_seq_out): \r\n loss = 0\r\n decoder_length = target_seq_out.shape[1]\r\n # Encode the source.\r\n encoder_outputs = encoder_a(source_seq)\r\n states = encoder_outputs[1:]\r\n history = encoder_outputs[0]\r\n # Decoder 
predicts the target_seq.\r\n decoder_in = tf.expand_dims(target_seq_in[:, 0], 1)\r\n for t in range(decoder_length):\r\n logit, lstm_out, de_state_h, de_state_c, _= decoder_a(decoder_in, states, history)\r\n decoder_in = tf.expand_dims(logit, 1)\r\n history_new = tf.expand_dims(lstm_out, 1)\r\n history = tf.concat([history[:, 1:], history_new], 1)\r\n states = de_state_h, de_state_c\r\n # loss function : RSME \r\n loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)\r\n \r\n loss = tf.reduce_mean(loss) \r\n loss = loss / decoder_length\r\n return loss", "def decode(\n self,\n encoded,\n encoder_input_tokens, # only needed for masks\n decoder_input_tokens,\n decoder_target_tokens,\n encoder_segment_ids=None,\n decoder_segment_ids=None,\n decoder_positions=None,\n enable_dropout=True,\n decode=False,\n max_decode_length=None):\n cfg = self.config\n\n # Make padding attention masks.\n if decode:\n # fast autoregressive decoding uses only a special encoder-decoder mask\n decoder_mask = None\n encoder_decoder_mask = layers.make_attention_mask(\n jnp.ones_like(decoder_target_tokens) > 0,\n encoder_input_tokens > 0,\n dtype=cfg.dtype)\n else:\n decoder_mask = layers.make_decoder_mask(\n decoder_target_tokens=decoder_target_tokens,\n dtype=cfg.dtype,\n decoder_segment_ids=decoder_segment_ids)\n encoder_decoder_mask = layers.make_attention_mask(\n decoder_target_tokens > 0, encoder_input_tokens > 0, dtype=cfg.dtype)\n\n # Add segmentation block-diagonal attention masks if using segmented data.\n if encoder_segment_ids is not None:\n if decode:\n raise ValueError(\n 'During decoding, packing should not be used but '\n '`encoder_segment_ids` was passed to `Transformer.decode`.')\n\n encoder_decoder_mask = layers.combine_masks(\n encoder_decoder_mask,\n layers.make_attention_mask(\n decoder_segment_ids,\n encoder_segment_ids,\n jnp.equal,\n dtype=cfg.dtype))\n\n logits = self.decoder(\n encoded,\n decoder_input_tokens=decoder_input_tokens,\n decoder_positions=decoder_positions,\n decoder_mask=decoder_mask,\n encoder_decoder_mask=encoder_decoder_mask,\n deterministic=not enable_dropout,\n decode=decode,\n max_decode_length=max_decode_length)\n return logits.astype(self.config.dtype)", "def decode():\n with io.open(FLAGS.predict_input_file, encoding='utf-8') as test_file:\n lines = test_file.readlines()\n # Get the largest sentence length to set an upper bound to the decoder.\n max_length = FLAGS.max_sentence_length\n # max_length = max([len(line) for line in lines])\n \n print(\"Building dynamic character-level ALLDATASET data...\", flush=True)\n dataset = ALLDATASET(\n train_input=FLAGS.train_input, train_output=FLAGS.train_output,\n dev_input=FLAGS.dev_input, dev_output=FLAGS.dev_output,\n predict_input_file=FLAGS.predict_input_file, \n parse_repeated=FLAGS.parse_repeated,\n max_input_length=max_length, max_label_length=max_length)\n \n print(\"Building computational graph...\", flush=True)\n graph = tf.Graph()\n with graph.as_default():\n \n tf.set_random_seed(1)\n random.seed(1)\n np.random.seed(1)\n\n m = Seq2Seq(\n num_types=dataset.num_types(),\n max_encoder_length=max_length, max_decoder_length=max_length,\n pad_id=dataset.type_to_ix['_PAD'],\n eos_id=dataset.type_to_ix['_EOS'],\n go_id=dataset.type_to_ix['_GO'],\n space_id=dataset.type_to_ix[(' ',)],\n ix_to_type=dataset.ix_to_type,\n batch_size=1, embedding_size=FLAGS.embedding_size,\n hidden_size=FLAGS.hidden_size, rnn_layers=FLAGS.rnn_layers,\n 
bidirectional_encoder=FLAGS.bidirectional_encoder,\n bidirectional_mode=FLAGS.bidirectional_mode,\n use_lstm=FLAGS.use_lstm, attention=FLAGS.attention,\n beam_size=FLAGS.beam_size, restore=True, model_output_dir=FLAGS.model_output_dir)\n \n with tf.Session(graph=graph) as sess:\n print(\"Restoring model...\", flush=True)\n m.start()\n print(\n \"Restored model (global step {})\".format(m.global_step.eval()),\n flush=True)\n with io.open(FLAGS.output_path, 'w', encoding='utf-8') as output_file:\n for line in lines:\n # if len(line) > max_length:\n # continue\n number_of_chars = len(line)\n completely_divisble = number_of_chars % FLAGS.max_sentence_length == 0\n\n if number_of_chars < FLAGS.max_sentence_length:\n parts = [line]\n else:\n parts = []\n count = 0\n last_word_end_index = 0\n\n line_copy = line\n while len(line_copy) != 0 and count < len(line_copy):\n if count == FLAGS.max_sentence_length:\n if last_word_end_index == 0:\n parts.append(line_copy[: count])\n line_copy = line_copy[count:]\n else:\n parts.append(line_copy[: last_word_end_index])\n line_copy = line_copy[last_word_end_index:]\n \n last_word_end_index = 0\n count = 0\n\n if line_copy[count] == \" \":\n last_word_end_index = count\n\n count += 1\n\n if not completely_divisble:\n parts.append(line_copy)\n \n result = \"\"\n for part in parts:\n ids = dataset.tokenize(part)\n while len(ids) < max_length:\n ids.append(dataset.type_to_ix['_PAD'])\n outputs = sess.run(m.generative_output, feed_dict={m.inputs: [ids]})\n top_line = untokenize_batch(dataset, outputs)[0]\n # Sequences of text will only be repeated up to 5 times.\n top_line = re.sub(r'(.+?)\\1{5,}', lambda m: m.group(1) * 5, top_line)\n result += top_line\n output_file.write(result + '\\n')\n print(\"PREDICTION:\", top_line, flush=True)\n print()", "def ctc_greedy_decoder(probs_seq, vocabulary):\n result = swig_decoders.ctc_greedy_decoder(probs_seq.tolist(), vocabulary)\n return result #remove decode as it's a str .decode('utf-8')", "def decode_and_evaluate(self,\n name,\n sess,\n trans_file,\n ref_file,\n beam_width,\n tgt_eos,\n num_translations_per_input=1,\n decode=True):\n\n # Decode\n if decode:\n self.logger.info(\"Decoding to output {}.\".format(trans_file))\n\n num_sentences = 0\n with open(trans_file, 'w', encoding='utf-8') as trans_f:\n trans_f.write(\"\") # Write empty string to ensure file is created.\n\n num_translations_per_input = max(\n min(num_translations_per_input, beam_width), 1)\n while True:\n try:\n _, _, _, nmt_outputs = self.infer(sess)\n if beam_width == 0:\n nmt_outputs = np.expand_dims(nmt_outputs, 0)\n\n batch_size = nmt_outputs.shape[1]\n num_sentences += batch_size\n\n for sent_id in range(batch_size):\n for beam_id in range(num_translations_per_input):\n translation = utils.get_translation(\n nmt_outputs[beam_id],\n sent_id,\n tgt_eos=tgt_eos)\n trans_f.write(translation + \"\\n\")\n except tf.errors.OutOfRangeError:\n self.logger.info(\n \"Done, num sentences %d, num translations per input %d\" %\n (num_sentences, num_translations_per_input))\n break\n\n # Evaluation\n evaluation_scores = {}\n if ref_file and os.path.exists(trans_file):\n score = evaluation_utils.evaluate(\n ref_file,\n trans_file,\n 'BLEU')\n evaluation_scores['BLEU'] = score\n self.logger.info(\"%s BLEU: %.1f\" % (name, score))\n\n return evaluation_scores", "def predict_next(self, seq):\n context = tuple(seq[-2:]) # last two words\n pc = self.probas[context] # conditional distribution\n words, probs = zip(*pc.items()) # convert to list\n return 
np.random.choice(words, p=probs)", "def greedyDecoder(self, enc_states, hidden, test=False, sentence=None, st='<s>', ed='</s>'):\n\t\tbatch_size = hidden.shape[1]\n\t\t# according to paper\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tword = torch.ones(batch_size, dtype=torch.long, device=self.device) * self.vocab[st]\n\t\t\twords = torch.zeros(batch_size, self.max_trg_len, dtype=torch.long, device=self.device)\n\t\t\tfor i in range(self.max_trg_len-1):\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, word)\n\t\t\t\tprobs = F.softmax(logit, dim=-1)\n\t\t\t\tword = torch.argmax(probs, dim=-1).squeeze()\n\t\t\t\twords[:,i] = word\n\t\t\twords[:,-1] = torch.ones(batch_size, dtype=torch.long, device=self.device) * self.vocab[ed]\n\t\t\treturn words\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len-1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:,i])\n\t\t\t\tlogits[:,i,:] = logit.squeeze()\n\t\t\treturn logits", "def decode_onestep(self, sess, feed):\n\n to_return = {\n \"ids\": self._topk_ids, \"probs\": self._topk_log_probs, \"states\": self.state, \"attn_dists\": self.attn_dists}\n if self.use_coverage:\n to_return['coverage'] = self.coverage\n results = sess.run(to_return, feed_dict=feed) # run the decoder step\n\n # Convert results['states'] (a single LSTMStateTuple) into a list of LSTMStateTuple -- one for each hypothesis\n new_states = [LSTMStateTuple(results['states'].c[i, :], results['states'].h[i, :]) for i in\n range(self.beam_size)]\n # Convert singleton list containing a tensor to a list of k arrays\n assert len(results['attn_dists']) == 1\n attn_dists = results['attn_dists'][0].tolist()\n\n # Convert the coverage tensor to a list length k containing the coverage vector for each hypothesis\n if self.use_coverage:\n new_coverage = results['coverage'].tolist()\n assert len(new_coverage) == self.beam_size\n else:\n new_coverage = [None for _ in range(self.beam_size)]\n\n return results['ids'], results['probs'], new_states, attn_dists, new_coverage", "def predict(net, query, context, graph, tokenizer, sentence_lengths, fb_passes=1):\n\n # (M,2), (1,M), (1,M), (1,3)\n o_sup, o_start, o_end, o_type = net(query, context, graph, fb_passes=fb_passes)\n\n # =========== GET ANSWERS\n answer_start = o_start.argmax() #TODO make sure that these tensors are all only containing one number!\n answer_end = o_end.argmax()\n answer_type = o_type.argmax()\n if answer_type == 0:\n answer = \"yes\"\n elif answer_type == 1:\n answer = \"no\"\n elif answer_type == 2 and answer_end >= answer_start:\n answer = tokenizer.decode(graph.tokens[answer_start: answer_end + 1])\n else:\n answer = \"noanswer\"\n\n # =========== GET SUPPORTING FACTS\n pos = 0\n sup_fact_pairs = []\n for para, s_lens in zip(context, sentence_lengths):\n for j, s_len in enumerate(s_lens):\n #score = round(sum(o_sup.argmax([pos: pos + s_len])) / s_len)\n # take avg of token-wise scores and round to 0 or 1\n try:\n score = round(float(sum([x.argmax() for x in o_sup.T[pos: pos + s_len]]) / float(s_len)))\n except ZeroDivisionError:\n score = 0\n if score == 1:\n sup_fact_pairs.append([para[0], j])\n pos += s_len\n\n return answer, sup_fact_pairs", "def decode(self, x, y):\n y = self.embedding(y)\n b, t, h = y.shape\n start = torch.zeros((b, 1, h))\n if 
self.is_cuda:\n start = start\n y = torch.cat([start, y], dim=1)\n y, _ = self.dec_rnn(y)\n x = x.unsqueeze(dim=2)\n y = y.unsqueeze(dim=1)\n out = self.fc1(x) + self.fc1(y)\n out = nn.functional.relu(out)\n out = self.fc2(out)\n out = nn.functional.log_softmax(out, dim=3)\n return out", "def _decode_back(self):\n with tf.variable_scope('same_question_concat'):\n batch_size = tf.shape(self.start_label)[0]\n concat_passage_encodes = tf.reshape(\n self.fuse_p_encodes,\n [batch_size, -1, self.hidden_size]\n )\n no_dup_question_encodes = tf.reshape(\n self.sep_q_encodes,\n [batch_size, -1, tf.shape(self.sep_q_encodes)[1], self.hidden_size]\n )[0:, 0, 0:, 0:]\n decoder = PointerNetDecoder(self.hidden_size)\n self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,\n no_dup_question_encodes)\n\n outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.start_probs), axis=2),\n tf.expand_dims(tf.nn.softmax(self.end_probs), axis=1))\n outer = tf.matrix_band_part(outer, 0, -1)\n self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)\n self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)", "def __rnnt_decoder_predictions_tensor(tensor, detokenize):\n return [detokenize(pred) for pred in tensor]", "def _dinamic_decode(self):\n raise NotImplementedError", "def decode(self, x, y):\n inputs = self.embedding(y[:, :-1])\n out = []\n aligns = []\n hx = torch.zeros((x.shape[0], x.shape[2]), requires_grad=False)\n if self.is_cuda:\n hx\n ax = None\n sx = None\n for t in range(y.size()[1] - 1):\n sample = out and self.scheduled_sampling\n if sample and random.random() < self.sample_prob:\n ix = torch.max(out[-1], dim=2)[1]\n ix = self.embedding(ix)\n else:\n ix = inputs[:, t:t + 1, :]\n if sx is not None:\n ix = ix + sx\n hx = self.dec_rnn(ix.squeeze(dim=1), hx)\n ox = hx.unsqueeze(dim=1)\n sx, ax = self.attend(x, ox, ax)\n aligns.append(ax)\n out.append(self.fc(ox + sx))\n out = torch.cat(out, dim=1)\n aligns = torch.stack(aligns, dim=1)\n return out, aligns", "def _decoding_step(current_time_step,\n alive_sequences,\n alive_log_probs,\n finished_sequences,\n finished_scores,\n finished_eos_flags,\n alive_memories):\n # 1. Get the top sequences/ scores/ flags for the current time step\n top_sequences, top_log_probs, top_scores, top_eos_flags, top_memories = \\\n _extend_hypotheses(current_time_step,\n alive_sequences,\n alive_log_probs,\n alive_memories)\n\n # 2. Update the alive beam\n alive_sequences, alive_log_probs, alive_eos_flags, alive_memories = \\\n _update_alive(top_sequences,\n top_scores,\n top_log_probs,\n top_eos_flags,\n top_memories)\n\n # 3. 
Update the finished beam\n finished_sequences, finished_scores, finished_eos_flags = \\\n _update_finished(finished_sequences,\n finished_scores,\n finished_eos_flags,\n top_sequences,\n top_scores,\n top_eos_flags)\n\n return current_time_step + 1, alive_sequences, alive_log_probs, finished_sequences, finished_scores, \\\n finished_eos_flags, alive_memories", "def sequence_predict(self,sess,seq_xs,dropout,seqlen,seq_ys = []):\n if len(seq_ys) > 0:\n # since we have 1 sequence\n seq_ys = seq_ys[0,0:seqlen[0]]\n y = np.zeros([seqlen[0],2])\n y[np.arange(0,seqlen[0]),np.array(seq_ys,dtype=np.int32)] = 1\n\n \"\"\"cut spare entries of xs (added by the reader)\"\"\"\n seq_xs = seq_xs[0:seqlen[0],:]\n \n cost = -1\n if len(seq_ys) > 0:\n llogits, predictions,cost = sess.run( [self.logits, self.predictions,self.cost], feed_dict={self.x: seq_xs, self.y: y, \n self.keep_prob: dropout}) \n else:\n llogits,predictions = sess.run( [self.logits, self.predictions], feed_dict={self.x: seq_xs, \n self.keep_prob: dropout}) \n \n \n seq_prediction = np.sum(predictions) >= seqlen[0]/2.\n \n #if seq_ys is provided, then output also correct predictions\n corr_preds = []\n if len(seq_ys) > 0:\n corr_preds = (seq_ys[0] == seq_prediction)\n\n return np.sum(llogits,0),seq_prediction, [corr_preds], cost", "def _unpredict(self, arr: np.ndarray) -> None:\n if self.Predictor.value == 2:\n imagecodecs.delta_decode(arr, out=arr, axis=-1)", "def predict(self,text):\n\n text= \"[CLS] \" + text + \" [SEP]\"\n tokenized_text = self.tokenizer.tokenize(text)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n masked_index = tokenized_text.index('[MASK]') \n\n # Create the segments tensors.\n segments_ids = [0] * len(tokenized_text)\n \n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n \n self.model.eval()\n \n # Predict all tokens\n with torch.no_grad():\n predictions = self.model(tokens_tensor, segments_tensors)\n \n predicted_index = torch.argmax(predictions[0][0][masked_index]).item()\n predicted_token = self.tokenizer.convert_ids_to_tokens([predicted_index])[0]\n \n print(predicted_token)", "def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2b\n ### TODO - Implement the forward pass of the character decoder.\n # print(\"=====input.size\",input.size())\n char_embedded= self.decoderCharEmb(input)\n # print(\"=====char_embedded.size\",char_embedded.size())\n out, dec_hidden = self.charDecoder(char_embedded,dec_hidden)\n # print(\"=====out.size\",out.size()) #dimensions (seq_length, batch, hidden_size)\n \n out_batch_first = out.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n o_proj = self.char_output_projection(out_batch_first)\n scores = o_proj.permute(1, 0, 2) #dimensions (seq_length, batch, hidden_size)\n return scores,dec_hidden\n ### END YOUR CODE ", "def predict(video_generators, audio_generator, subject):\n prediction = [random.randint(0, 4)]\n\n # TODO: implement prediction method\n # video_frame_subject = next(video_generators[int(subject[-1]) - 1])\n # audio_value = next(audio_generator)\n\n return prediction", "def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):\n def xavier_init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n if type(m) == nn.GRU:\n for param in m._flat_weights_names:\n if \"weight\" in param:\n nn.init.xavier_uniform_(m._parameters[param])\n net.apply(xavier_init_weights)\n net.to(device)\n optimizer = 
torch.optim.Adam(net.parameters(), lr=lr)\n loss = MaskedSoftmaxCELoss()\n net.train()\n animator = Animator(xlabel='epoch', ylabel='loss',\n xlim=[10, num_epochs])\n for epoch in range(num_epochs):\n timer = Timer()\n metric = Accumulator(2) # Sum of training loss, no. of tokens\n for batch in data_iter:\n optimizer.zero_grad()\n X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]\n bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],\n device=device).reshape(-1, 1)\n dec_input = torch.cat([bos, Y[:, :-1]], 1) # Teacher forcing\n Y_hat, _ = net(X, dec_input, X_valid_len)\n l = loss(Y_hat, Y, Y_valid_len)\n l.sum().backward() # Make the loss scalar for `backward`\n grad_clipping(net, 1)\n num_tokens = Y_valid_len.sum()\n optimizer.step()\n with torch.no_grad():\n metric.add(l.sum(), num_tokens)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, (metric[0] / metric[1],))\n print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f}' f'tokens/sec on {str(device)}')", "def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):\n def xavier_init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n if type(m) == nn.GRU:\n for param in m._flat_weights_names:\n if \"weight\" in param:\n nn.init.xavier_uniform_(m._parameters[param])\n net.apply(xavier_init_weights)\n net.to(device)\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n loss = MaskedSoftmaxCELoss()\n net.train()\n animator = Animator(xlabel='epoch', ylabel='loss',\n xlim=[10, num_epochs])\n for epoch in range(num_epochs):\n timer = Timer()\n metric = Accumulator(2) # Sum of training loss, no. of tokens\n for batch in data_iter:\n optimizer.zero_grad()\n X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]\n bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],\n device=device).reshape(-1, 1)\n dec_input = torch.cat([bos, Y[:, :-1]], 1) # Teacher forcing\n Y_hat, _ = net(X, dec_input, X_valid_len)\n l = loss(Y_hat, Y, Y_valid_len)\n l.sum().backward() # Make the loss scalar for `backward`\n grad_clipping(net, 1)\n num_tokens = Y_valid_len.sum()\n optimizer.step()\n with torch.no_grad():\n metric.add(l.sum(), num_tokens)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, (metric[0] / metric[1],))\n print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f}' f'tokens/sec on {str(device)}')", "def _decode(self, tgt_token_ids, encoder_outputs, padding_mask):\n tgt_seq_len = tf.shape(tgt_token_ids)[1]\n\n # [batch_size, tgt_seq_len, hidden_size]\n tgt_token_embeddings = self._embedding_logits_layer(\n tgt_token_ids, 'embedding')\n\n # [tgt_seq_len, hidden_size]\n positional_encoding = utils.get_positional_encoding(\n tgt_seq_len, self._hidden_size)\n tgt_token_embeddings += positional_encoding\n tgt_token_embeddings = self._decoder_dropout_layer(\n tgt_token_embeddings, training=True) \n\n look_ahead_mask = utils.get_look_ahead_mask(tgt_seq_len)\n\n # [batch_size, tgt_seq_len, hidden_size]\n decoder_outputs = self._decoder(tgt_token_embeddings, \n encoder_outputs, \n look_ahead_mask, \n padding_mask, \n training=True)\n\n # [batch_size, tgt_seq_len, vocab_size]\n logits = self._embedding_logits_layer(decoder_outputs, 'logits')\n return logits", "def decode(self, tokens: List[str]) -> str:\n return self.bpe.decode([int(token) for token in tokens])", "def create_masked_EHR_predictions(input_seq, masked_lm_prob,max_predictions_per_seq, vocab, rng):\n #print('original_inp_seq',input_seq)\n #orig_seq=input_seq ## added for inp_seq update issue LR 
4/25\n #cand_indexes = []\n #for (i, token) in enumerate(tokens):\n # if token == \"[CLS]\" or token == \"[SEP]\":\n # continue\n # cand_indexes.append(i)\n\n #cand_indexes=list(range(len(input_seq)+1))[1:] ## I can use that for the position but not for the mask index \n #will use the same but exclude the +1 so I don't mask the fist and last code\n \n cand_indexes=list(range(len(input_seq)))### LR 4/29 remove[1:]\n rng.shuffle(cand_indexes)\n output_tokens = input_seq[:] ### added slicing to inhibit original list update\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(input_seq) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index in cand_indexes:\n if len(masked_lms) >= num_to_predict: ### LR 4/29 remove >=\n break\n if index in covered_indexes:\n continue\n covered_indexes.add(index)\n\n #masked_token = None #### need to make sure what I did below is correct\n masked_token=0 ### comment for now LR 4/25\n \n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n #masked_token = \"[MASK]\"\n masked_token=0\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n #masked_token = tokens[index]\n masked_token=input_seq[index] ### LR 4/29 added +1\n # 10% of the time, replace with random word\n else:\n #masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n masked_token=rng.randint(1,max(vocab.values()))\n \n output_tokens[index] = masked_token ### LR 4/29 added +1\n\n masked_lms.append(MaskedLmInstance(index=index, label=input_seq[index])) ### Currently keeping the original code but I need to optimize that later from here till end of function\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n #print (input_seq,orig_seq,output_tokens, masked_lm_positions, masked_lm_labels)\n return (output_tokens, masked_lm_positions, masked_lm_labels)", "def decode(self, probs, mask):\n assert (probs == probs).all(), \"Probs should not contain any nans\"\n\n if self.decode_type == \"greedy\":\n _, selected = probs.max(1)\n assert not mask.gather(\n 1, selected.unsqueeze(-1)\n ).data.any(), \"Decode greedy: infeasible action has maximum probability\"\n\n elif self.decode_type == \"sampling\":\n selected = probs.multinomial(1).squeeze(1)\n\n # Check if sampling went OK, can go wrong due to bug on GPU\n # See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232\n while mask.gather(1, selected.unsqueeze(-1)).data.any():\n print(\"Sampled bad values, resampling!\")\n selected = probs.multinomial(1).squeeze(1)\n\n else:\n assert False, \"Unknown decode type\"\n return selected", "def translate(encoder,decoder,input_text, true_output_text=None):\n\n # Convert the input-text to integer-tokens.\n # Note the sequence of tokens has to be reversed.\n # Padding is probably not necessary.\n input_tokens = tokenizer_src.text_to_tokens(text=input_text,\n reverse=True,\n padding=True)\n #print(input_tokens)\n \n # Get the output of the encoder's GRU which will be\n # used as the initial state in the decoder's GRU.\n # This could also have been the encoder's final state\n # but that is really only necessary if the encoder\n # and decoder use the LSTM instead of GRU because\n # the LSTM has two internal states.\n initial_state = encoder.predict(input_tokens)\n # print(f'inital_state : {initial_state}')\n token_start = tokenizer_dest.word_index[mark_start.strip()]\n 
token_end = tokenizer_dest.word_index[mark_end.strip()]\n\n # Max number of tokens / words in the output sequence.\n max_tokens = tokenizer_dest.max_tokens\n\n # Pre-allocate the 2-dim array used as input to the decoder.\n # This holds just a single sequence of integer-tokens,\n # but the decoder-model expects a batch of sequences.\n shape = (1, max_tokens)\n decoder_input_data = np.zeros(shape=shape, dtype=np.int)\n\n # The first input-token is the special start-token for 'ssss '.\n token_int = token_start\n\n # Initialize an empty output-text.\n output_text = ''\n\n # Initialize the number of tokens we have processed.\n count_tokens = 0\n\n # While we haven't sampled the special end-token for ' eeee'\n # and we haven't processed the max number of tokens.\n while token_int != token_end and count_tokens < max_tokens:\n # Update the input-sequence to the decoder\n # with the last token that was sampled.\n # In the first iteration this will set the\n # first element to the start-token.\n decoder_input_data[0, count_tokens] = token_int\n\n # Wrap the input-data in a dict for clarity and safety,\n # so we are sure we input the data in the right order.\n x_data = \\\n {\n 'decoder_initial_state': initial_state,\n 'decoder_input': decoder_input_data\n }\n\n # Note that we input the entire sequence of tokens\n # to the decoder. This wastes a lot of computation\n # because we are only interested in the last input\n # and output. We could modify the code to return\n # the GRU-states when calling predict() and then\n # feeding these GRU-states as well the next time\n # we call predict(), but it would make the code\n # much more complicated.\n\n # Input this data to the decoder and get the predicted output.\n decoder_output = decoder.predict(x_data)\n# print(decoder_output)\n # Get the last predicted token as a one-hot encoded array.\n token_onehot = decoder_output[0, count_tokens, :]\n #print(token_onehot)\n \n # Convert to an integer-token.\n token_int = np.argmax(token_onehot)\n\n # Lookup the word corresponding to this integer-token.\n sampled_word = tokenizer_dest.token_to_word(token_int)\n\n # Append the word to the output-text.\n output_text += \" \" + sampled_word\n\n # Increment the token-counter.\n count_tokens += 1\n\n print(output_text)\n\n return output_text", "def greedy_decode(model, src, src_mask, src_lengths, max_len=100, sos_index=1, \n eos_index=None):\n\n with torch.no_grad():\n encoder_hidden, encoder_final = model.encode(src, src_mask, \n src_lengths)\n prev_y = torch.ones(1, 1).fill_(sos_index).type_as(src)\n trg_mask = torch.ones_like(prev_y)\n\n output = []\n attention_scores = []\n hidden = None\n\n for i in range(max_len):\n with torch.no_grad():\n out, hidden, pre_output = model.decode(\n encoder_hidden, encoder_final, src_mask,\n prev_y, trg_mask, hidden)\n\n # we predict from the pre-output layer, which is\n # a combination of Decoder state, prev emb, and context\n prob = model.generator(pre_output[:, -1])\n\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data.item()\n output.append(next_word)\n prev_y = torch.ones(1, 1).type_as(src).fill_(next_word)\n attention_scores.append(model.decoder.attention.alphas.cpu().numpy())\n \n output = np.array(output)\n \n # cut off everything starting from </s> \n # (only when eos_index provided)\n if eos_index is not None:\n first_eos = np.where(output == eos_index)[0]\n if len(first_eos) > 0:\n output = output[:first_eos[0]]\n \n return output, np.concatenate(attention_scores, axis=1)", "def 
CNN_sequence_predict(cnn,sess,seq_xs,dropout,seqlen,seq_ys = []):\n if len(seq_ys) > 0:\n y = np.zeros([seqlen[0],2])\n y[np.arange(0,seqlen[0]),np.array(seq_ys,dtype=np.int32)] = 1\n \n cost = -1\n if len(seq_ys) > 0:\n llogits, predictions,cost = sess.run( [cnn.logits, cnn.predictions,cnn.cost], feed_dict={cnn.x: seq_xs, cnn.y: y, \n cnn.keep_prob: dropout}) \n else:\n llogits,predictions = sess.run( [cnn.logits, cnn.predictions], feed_dict={cnn.x: seq_xs, \n cnn.keep_prob: dropout}) \n \n seq_prediction = np.sum(predictions) >= seqlen[0]/2.\n \n #if seq_ys is provided, then output also correct predictions\n corr_preds = []\n if len(seq_ys) > 0:\n corr_preds = (seq_ys[0] == seq_prediction)\n\n return np.sum(llogits,0),seq_prediction, [corr_preds], cost", "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n # TODO: Implement Function\n trainig_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, trainig_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(basic_decoder,maximum_iterations=max_summary_length)\n return f_output", "def decodeLabels(predictions, vocab):\n\n decoded_labels =[]\n for sequence in predictions:\n temp = []\n for label in list(sequence):\n if label in vocab:\n temp.append(vocab[label])\n else:\n temp.append(label)\n decoded_labels.append(temp)\n \n return decoded_labels", "def forward(self,x):\n tokens = x['token'].to(self.device)\n attn_masks = x['attn_mask'].to(self.device)\n if tokens.shape[0] != self.batch_size:\n self.batch_size = tokens.shape[0]\n \n tag_prob = self._bert_encoder(tokens, attn_masks)\n tag_seq = torch.argmax(tag_prob, dim=-1)\n return tag_seq", "def decode_step(self, x, y, state=None, softmax=False):\n if state is None:\n hx = torch.zeros((x.shape[0], x.shape[2]), requires_grad=False)\n if self.is_cuda:\n hx\n ax = None\n sx = None\n else:\n hx, ax, sx = state\n ix = self.embedding(y)\n if sx is not None:\n ix = ix + sx\n hx = self.dec_rnn(ix.squeeze(dim=1), hx=hx)\n ox = hx.unsqueeze(dim=1)\n sx, ax = self.attend(x, ox, ax=ax)\n out = ox + sx\n out = self.fc(out.squeeze(dim=1))\n if softmax:\n out = nn.functional.log_softmax(out, dim=1)\n return out, (hx, ax, sx)", "def forward(self, input, dec_hidden=None):\n ### YOUR CODE HERE for part 2a\n embeddings = self.decoderCharEmb(input)\n out, new_hidden = self.charDecoder(embeddings, dec_hidden)\n scores = self.char_output_projection(out)\n return scores, new_hidden\n\n ### END YOUR CODE", "def decode(self, targets, encoder_outputs, encoder_attn_bias, input_shape,\n training):\n with tf.name_scope('decode'):\n length = tf.shape(targets)[1]\n decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(\n length)\n encoder_outputs = tf.reshape(\n encoder_outputs, [input_shape[0], -1, self._hparams['hidden_size']])\n decoder_inputs = tf.pad(\n targets, [[0, 0], [1, 0]], constant_values=input_utils.START)\n\n # Remove last element.\n decoder_inputs = decoder_inputs[:, :-1]\n decoder_inputs = self._word_embedding_layer(decoder_inputs)\n\n with tf.name_scope('add_pos_encoding'):\n pos_encoding = self._position_embedding_layer(decoder_inputs)\n decoder_inputs += pos_encoding\n\n if training:\n decoder_inputs = tf.nn.dropout(\n decoder_inputs, rate=self._hparams['layer_postprocess_dropout'])\n\n decoder_outputs = self._decoder(\n decoder_inputs,\n encoder_outputs,\n 
decoder_self_attention_bias,\n encoder_attn_bias,\n training=training)\n logits = self._word_layer(decoder_outputs)\n return logits", "def preprocess_seq(self, sequence, word2idx):\n story = []\n for value in sequence:\n #v = [word2idx[word] if word in word2idx else UNK_token for word in value.split()] + [EOS_token]\n story.append(word2idx[value] if value in word2idx else UNK_token)\n story = torch.Tensor(story)\n return story", "def decode(self, X, timesteps):\r\n return Decoder(self._autoencoder, self._latent_space).predict(X, timesteps)", "def decode(self, probs, embedded_inputs, selections, decode_type):\n batch_size = self.shape(probs)[0]\n idxs = mindspore.ops.multinomial(probs, 1)\n squeeze = mindspore.ops.Squeeze(1)\n idxs = squeeze(idxs)\n\n sels = embedded_inputs[idxs.item(), [i for i in range(batch_size)], :]\n return sels, idxs", "def beamSearchDecoder(self, enc_states, hidden, test=False, sentence=None, st=\"<s>\", ed=\"</s>\", k=3):\n\t\tbatch_size = enc_states.shape[0]\n\t\thidden = F.tanh(self.init_decoder_hidden(hidden[1])).view(1, batch_size, self.hid_dim)\n\t\tif test:\n\t\t\tbeams = [Beam(k, self.vocab, hidden[:,i,:], self.device) for i in range(batch_size)]\n\n\t\t\tfor i in range(self.max_trg_len):\n\t\t\t\tfor j in range(batch_size):\n\t\t\t\t\tlogits, hidden = self.decoderStep(enc_states[j].view(1, -1, self.hid_dim).expand(k, -1, -1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_hidden_state(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t beams[j].get_current_word())\n\t\t\t\t\tlogLikelihood = torch.log(F.softmax(logits, dim=-1))\n\t\t\t\t\tbeams[j].advance(logLikelihood, hidden)\n\n\t\t\tallHyp, allScores = [], []\n\t\t\tn_best = 1\n\t\t\tfor b in range(batch_size):\n\t\t\t\tscores, ks = beams[b].sort_best()\n\n\t\t\t\tallScores += [scores[:n_best]]\n\t\t\t\thyps = [beams[b].get_hyp(k) for k in ks[:n_best]]\n\t\t\t\tallHyp.append(hyps)\n\n\t\t\treturn allHyp\n\t\t\t# return sentences\n\t\telse:\n\t\t\tmax_seq_len = sentence.shape[1]\n\t\t\tlogits = torch.zeros(batch_size, max_seq_len - 1, self.vocab_size, device=self.device)\n\t\t\tfor i in range(max_seq_len - 1):\n\t\t\t\t# logit: [batch, 1, vocab_size]\n\t\t\t\tlogit, hidden = self.decoderStep(enc_states, hidden, sentence[:, i])\n\t\t\t\tlogits[:, i, :] = logit.squeeze()\n\t\t\treturn logits", "def lfads_decode_prior(params, lfads_hps, key, z_sample):\n\n ib, g0, ii_txi = decompose_sample(lfads_hps, z_sample)\n ib = np.where(lfads_hps['do_tanh_latents'], np.tanh(ib), ib) \n g0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(g0), g0)\n ii0 = params['ii0']\n ii0 = np.where(lfads_hps['do_tanh_latents'], np.tanh(ii0), ii0)\n # ii tanh'd at the decode loop to keep prior routines similar to inference.\n\n # Since the factors feed back to the controller,\n # factors_{t-1} -> controller_t -> sample_t -> generator_t -> factors_t\n # is really one big loop and therefor one RNN.\n f0 = params['f0'] \n\n # Make all the randomness for all T steps at once, it's more efficient.\n # The random keys get passed into scan along with the input, so the input\n # becomes of a 2-tuple (keys, actual input).\n T = ii_txi.shape[0]\n keys = random.split(key, 2)\n keys_t = random.split(keys[0], T)\n\n state0 = (ii0, ib, g0, f0)\n decoder = partial(lfads_decode_prior_one_step_scan, *(params, lfads_hps))\n _, state_and_returns_t = lax.scan(decoder, state0, (keys_t, ii_txi))\n\n g_t, f_t, ii_t, ib, lograte_t = state_and_returns_t\n return (g_t, f_t, ii_t, ib, lograte_t, g0, ii0)", "def decode_chain_sampling(self, hid, begin_emb, seq_len, stop_at_token=None, 
device='cpu'):\n res_logits = []\n res_actions = []\n cur_emb = begin_emb\n\n for _ in range(seq_len):\n out_logits, hid = self.decode_one(hid, cur_emb)\n out_probs_v = F.softmax(out_logits, dim=1)\n out_probs = out_probs_v.data.cpu().numpy()[0]\n action = int(np.random.choice(out_probs.shape[0], p=out_probs))\n action_v = torch.LongTensor([action]).to(device)\n cur_emb = self.emb(action_v)\n\n res_logits.append(out_logits)\n res_actions.append(action)\n if stop_at_token is not None and action == stop_at_token:\n break\n\n return torch.cat(res_logits), res_actions", "def train_forward(self, char_sequence, dec_hidden=None):\n ### YOUR CODE HERE for part 2c\n ### TODO - Implement training forward pass.\n ###\n ### Hint: - Make sure padding characters do not contribute to the cross-entropy loss.\n ### - char_sequence corresponds to the sequence x_1 ... x_{n+1} from the handout (e.g., <START>,m,u,s,i,c,<END>).\n\n\n # We pass the input sequence x_1,..,x_n (along with the initial states h_0 and c_0 from the combined output vector)\n # into the CharDecoderLSTM, thus obtaining scores s_1,...,s_n\n\n # the input sequence x_1,..,x_n\n input_padded = char_sequence[:-1]\n\n # Apply the forward step to inputs and dec_hidden to acquire scores and dec_state\n scores, _ = self.forward(input_padded, dec_hidden)\n\n # the target sequence x_2,...,x_n+1\n # This has shape: (length, batch) -> needs to be a list\n target_sequence = char_sequence[1:]\n\n # flatten the target matrix to feed into the cross entropy loss -> shape: (batch_size)\n target_sequence = torch.flatten(target_sequence)\n\n # recall that cross entropy loss combines nn.LogSoftmax() and nn.NLLLoss() in one single class\n # scores has shape: (length, batch, self.vocab_size)\n # cross entropoy expects (batch, C) so we want to flatten the first two elements\n scores = scores.view(-1, scores.shape[-1])\n\n # Apply the cross entropy loss\n # Make sure that the padding characters do not contribute to the cross-entropy loss (ignore_index)\n # reducion: `sum`\n ce_loss = nn.CrossEntropyLoss(ignore_index = self.vocab['<pad>'],\n reduction = 'sum')\n\n loss = ce_loss(input = scores, target = target_sequence)\n\n return loss", "def forward(self, inp, state):\n emb = self.drop(self.encoder(inp))\n y, state_next = self.rnn(emb, state)\n y = self.drop(y)\n y = self.decoder(y)\n return y, state_next", "def decode(self, words: torch.Tensor,\n word_seq_lens: torch.Tensor,\n context_emb: torch.Tensor,\n chars: torch.Tensor,\n char_seq_lens: torch.Tensor,\n **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:\n word_rep = self.embedder(words, word_seq_lens, context_emb, chars, char_seq_lens)\n features = self.encoder(word_rep, word_seq_lens.cpu())\n bestScores, decodeIdx = self.inferencer.decode(features, word_seq_lens)\n return bestScores, decodeIdx", "def decode(self, encoded):\n decoded = []\n for codes in encoded:\n tmp = []\n for code in codes:\n try:\n word = self.vocab[code]\n tmp.append(word)\n except:\n tmp.append(self.unk_token)\n decoded.append(tmp)\n return decoded", "def test_decode_token():\n pass", "def decode(self):\n decoder_input = Input(shape=self.input_decoder_shape, batch_shape=self.input_batch_decoder_shape)\n ppg_input = Input(shape=self.input_ppg_shape, batch_shape=self.input_batch_ppg_shape)\n\n if self.hparams.Masking is True:\n mask_decoder_input = Masking(mask_value=0)(decoder_input)\n mask_ppg_input = Masking(mask_value=0)(ppg_input)\n prenet_output = self.PreNet(mask_decoder_input)\n encoder_input = self.Encoder(mask_ppg_input)\n 
decoder_mask = None\n else:\n decoder_mask = Masking(mask_value=0).compute_mask(ppg_input)\n prenet_output = self.PreNet(decoder_input)\n encoder_input = self.Encoder(ppg_input, decoder_mask)\n\n rnn_output = Concatenate(axis=-1)([prenet_output, encoder_input])\n # mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n # diff_mask = Input(shape=(self.hparams.PreNet_hidden_size + self.hparams.Tacotron_encoder_hidden_size))\n for i in range(self.hparams.Tacotron_decoder_layers):\n rnn_output = self.Decoder_LSTM[i](rnn_output, mask=decoder_mask)\n\n # feed by self.states is unhelpful in training, since we don't stop rnn during epochs\n # but it is important in generating since each fit states will be set to zeros.!!!!!!\n rnn_output = Concatenate(axis=-1)([rnn_output, encoder_input])\n decoder_output = self.Linear_projection(rnn_output)\n if self.hparams.Tacotron_postnet is True:\n residual_output = decoder_output\n for i in range(self.hparams.PostNet_layers):\n residual_output = self.PostNet_Conv1D[i](residual_output)\n residual_output = self.PostNet_BatchNorm[i](residual_output)\n residual_output = self.PostNet_dropout_list[i](residual_output)\n decoder_output = Add()([decoder_output, residual_output])\n return Model(inputs=[decoder_input, ppg_input], outputs=decoder_output)", "def train_forward(self, char_sequence, dec_hidden=None):\n ### YOUR CODE HERE for part 2c\n ### TODO - Implement training forward pass.\n ###\n ### Hint: - Make sure padding characters do not contribute to the cross-entropy loss.\n ### - char_sequence corresponds to the sequence x_1 ... x_{n+1} from the handout (e.g., <START>,m,u,s,i,c,<END>).\n \n # the input sequence for the CharDecoderLSTM is [x1, . . . , xn] = [<START>,m,u,s,i,c]\n # the target sequence for the CharDecoderLSTM is [x2, . . . 
, xn+1] = [m,u,s,i,c,<END>].\n inp_char_seq = char_sequence[: -1, :]\n target_out_seq = char_sequence[1:, :]\n\n # shape (seq_length, batch, vocab_size), ((1, batch, hidden_size), (1, batch, hidden_size))\n scores, dec_hidden = self.forward(inp_char_seq, dec_hidden)\n\n # create target mask at padded locations - shape (seq_length, )\n target_masks = (target_out_seq != self.target_vocab.char2id['<pad>']).float()\n\n # calculate loss\n log_softmax_scores = nn.functional.log_softmax(scores, dim=2)\n loss_per_timestep = -1 * torch.gather(log_softmax_scores, index=target_out_seq.unsqueeze(2), dim=2).squeeze(2)\n loss_per_timestep_masked = loss_per_timestep * target_masks\n loss = loss_per_timestep_masked.sum()\n return loss\n ### END YOUR CODE", "def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,\n end_of_sequence_id, max_target_sequence_length,\n vocab_size, output_layer, batch_size, keep_prob):\n # Convert the start_ids to be a vector with batch size (the go id repeated batch size times)\n start_ids = tf.tile([start_of_sequence_id], [batch_size])\n # Create the embedding helper.\n embedding_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n dec_embeddings, start_ids, end_of_sequence_id)\n basic_decoder = tf.contrib.seq2seq.BasicDecoder(\n dec_cell, embedding_helper, encoder_state, output_layer)\n f_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n basic_decoder,maximum_iterations=max_target_sequence_length)\n return f_output", "def dec_step(prev_chars, dec_hidden):\n\n scores, dec_hidden = self.forward(prev_chars, dec_hidden)\n next_chars = scores.argmax(dim=-1)\n return next_chars, dec_hidden", "def __init__(self, num_mels=80, num_freq=513, prenet_hidden_size=512, decoder_hidden_size=512,\n attention_dropout=0.1,\n layer_postprocess_dropout=0.1, prenet_activation_fn=None, conv_layers_num=4,\n mag_conv_layers_num=4, prenet_layers=2,\n prenet_dropout=0.5,\n prenet_use_inference_dropout=False,\n cnn_dropout_prob=0.1,\n bn_momentum=0.95,\n bn_epsilon=-1e8,\n reduction_factor=2,\n attention_layers=4,\n self_attention_conv_params=None,\n attention_heads=1,\n attention_cnn_dropout_prob=0.5,\n window_size=4,\n back_step_size=0, kernel_size=5, regularizer=None,\n force_layers=None, dtype=tf.float32, name=\"centaur_decoder\", is_prediction=False, is_training=False,\n is_validation=False):\n self.kernel_size = kernel_size\n\n if force_layers is None:\n force_layers = [1, 3]\n self.is_validation = is_validation\n self.is_prediction = is_prediction\n self.name = name\n self.is_training = is_training\n self.prenet = None\n self.linear_projection = None\n self.attentions = []\n self.output_normalization = None\n self.conv_layers = []\n self.mag_conv_layers = []\n self.conv_layers_num = conv_layers_num\n self.mag_conv_layers_num = mag_conv_layers_num\n self.stop_token_projection_layer = None\n self.mel_projection_layer = None\n self.mag_projection_layer = None\n self.regularizer = regularizer\n self.num_mels = num_mels\n self.num_freq = num_freq\n self.reduction_factor = reduction_factor\n self.prenet_layers = prenet_layers\n self.prenet_hidden_size = prenet_hidden_size\n self.prenet_activation_fn = prenet_activation_fn if prenet_activation_fn else tf.nn.relu\n self.prenet_use_inference_dropout = prenet_use_inference_dropout\n self.prenet_dropout = prenet_dropout\n self.cnn_dropout_prob = cnn_dropout_prob\n self.dtype = dtype\n self.bn_momentum = bn_momentum\n self.bn_epsilon = bn_epsilon\n self.decoder_hidden_size = decoder_hidden_size\n self.attention_layers = 
attention_layers\n self.force_layers = force_layers\n\n self.window_size = window_size\n self.attention_heads = attention_heads\n self.attention_dropout = attention_dropout\n self.layer_postprocess_dropout = layer_postprocess_dropout\n self.attention_cnn_dropout_prob = attention_cnn_dropout_prob\n self.back_step_size = back_step_size\n if self_attention_conv_params is None:\n self_attention_conv_params = {\n \"kernel_size\": [self.kernel_size],\n \"stride\": [1],\n \"num_channels\": self.decoder_hidden_size,\n \"padding\": \"VALID\",\n \"is_causal\": True,\n \"activation_fn\": tf.nn.relu\n }\n self.self_attention_conv_params = self_attention_conv_params", "def predict(self, input_sequence):\n return self.session.run(self.prediction, feed_dict={self.input_placeholder: input_sequence})", "def dis_encoder_seq2seq(hparams):\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n ## Encoder forward variables.\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n return variable_mapping", "def decode(self, encoded_train, encoded_dev):\n decoded_train_A = self.decoder_A.predict(encoded_train)\n decoded_dev_A = self.decoder_A.predict(encoded_dev)\n self.save_reconstruction(decoded_train_A, decoded_dev_A, modality=True, no_modality=0)\n decoded_train_V = self.decoder_V.predict(encoded_train)\n decoded_dev_V = self.decoder_V.predict(encoded_dev)\n self.save_reconstruction(decoded_train_V, decoded_dev_V, modality=True, no_modality=1)", "def preprocess(self, sequence, word2id, trg=True):\r\n if trg:\r\n story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')] + [EOS_token]\r\n else:\r\n story = []\r\n for i, word_triple in enumerate(sequence):\r\n story.append([])\r\n for ii, word in enumerate(word_triple):\r\n temp = word2id[word] if word in word2id else UNK_token\r\n story[i].append(temp)\r\n try:\r\n story = torch.Tensor(story)\r\n except:\r\n print(sequence)\r\n print(story)\r\n # print('111111111111111111111111')\r\n return story", "def encode_decode_TD(self, n_step, idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ ):\r\n\r\n actor_embedding = embed_seq(input_seq=self.input_, from_=self.dimension, to_= self.input_embed, is_training=self.is_training, BN=True, initializer=self.initializer)\r\n actor_encoding = encode_seq(input_seq=actor_embedding, input_dim=self.input_embed, num_stacks=self.num_stacks, num_heads=self.num_heads, 
num_neurons=self.num_neurons, is_training=self.is_training)\r\n \r\n if self.is_training == False:\r\n actor_encoding = tf.tile(actor_encoding,[self.batch_size,1,1])\r\n \r\n idx_list = copy(idx_list_previous)\r\n log_probs = copy(log_probs_previous)\r\n entropies = copy(entropies_previous)\r\n \r\n\r\n mask = copy(mask_previous)\r\n \r\n n_hidden = actor_encoding.get_shape().as_list()[2] # input_embed\r\n W_ref = tf.get_variable(\"W_ref\",[1, n_hidden, self.num_units],initializer=self.initializer)\r\n W_q = tf.get_variable(\"W_q\",[self.query_dim, self.num_units],initializer=self.initializer)\r\n v = tf.get_variable(\"v\",[self.num_units],initializer=self.initializer)\r\n \r\n encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, \"VALID\") # actor_encoding is the ref for actions [Batch size, seq_length, n_hidden]\r\n \r\n query1 = copy( query1_previous)\r\n query2 = copy( query2_previous)\r\n query3 = copy( query3_previous)\r\n idx_copy = copy(idx_)\r\n \r\n W_1 =tf.get_variable(\"W_1\",[n_hidden, self.query_dim],initializer=self.initializer) # update trajectory (state)\r\n W_2 =tf.get_variable(\"W_2\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n W_3 =tf.get_variable(\"W_3\",[n_hidden, self.query_dim],initializer=self.initializer)\r\n \r\n \r\n \"\"\"\r\n # sample from POINTER from the perspective of the Actor\r\n \"\"\"\r\n for step in range(n_step + 1 ): \r\n query = tf.nn.relu(tf.matmul(query1, W_1) + tf.matmul(query2, W_2) + tf.matmul(query3, W_3))\r\n logits = pointer(encoded_ref=encoded_ref, query=query, mask=mask, W_ref=W_ref, W_q=W_q, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n idx_list_previous.append(idx)\r\n \r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n log_probs_previous.append(prob.log_prob(idx))\r\n \r\n entropies.append(prob.entropy()) # entropies\r\n entropies_previous.append(prob.entropy())\r\n \r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n mask_previous = mask_previous + tf.one_hot(idx, self.max_length)\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, idx_) # update trajectory (state)\r\n \r\n query3_previous = query2_previous\r\n query2_previous = query1_previous\r\n query1_previous = tf.gather_nd(actor_encoding, idx_) # update trajectory (state) \r\n\r\n if (len(idx_list) >= self.max_length): break #leave the loop if reach the end of the episode\r\n\r\n \"\"\"\r\n # sample from POINTER from the perspective of the Critic\r\n make q_t vector = 0\r\n \"\"\"\r\n while(len(idx_list) < self.max_length): \r\n \r\n logits = pointer_critic(encoded_ref=encoded_ref, mask=mask, W_ref=W_ref, v=v, C=config.C, temperature=config.temperature)\r\n prob = distr.Categorical(logits) # logits = masked_scores\r\n idx = prob.sample()\r\n\r\n idx_list.append(idx) # tour index\r\n log_probs.append(prob.log_prob(idx)) # log prob\r\n entropies.append(prob.entropy()) # entropies\r\n mask = mask + tf.one_hot(idx, self.max_length) # mask\r\n\r\n idx_copy = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n #idx_ = tf.stack([tf.range(self.batch_size,dtype=tf.int32), idx],1) # idx with batch \r\n query3 = query2\r\n query2 = query1\r\n query1 = tf.gather_nd(actor_encoding, 
idx_copy) # update trajectory (state)\r\n \r\n idx_list.append(idx_list[0]) # return to start\r\n self.tour =tf.stack(idx_list, axis=1) # permutations\r\n self.log_prob = tf.add_n(log_probs) # corresponding log-probability for backprop\r\n self.entropies = tf.add_n(entropies)\r\n tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))\r\n tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))\r\n \r\n return idx_list_previous, log_probs_previous, entropies_previous, mask_previous, query1_previous, query2_previous, query3_previous, idx_ #returns variables necessary for the next loop\r", "def get_predictions(self, seq, seq_mask, seq_lens, batch, xyzhe, simulator_next_action):\n raise NotImplementedError", "def predict(self, aa_seq, **kwargs):\n raise NotImplementedError", "def predict(self, aa_seq, **kwargs):\n raise NotImplementedError", "def decode(args: Dict[str, str]):\n threshold = 2.0\n test_data = read_corpus(args['TEST_SOURCE_FILE'], source='src')\n\n print(f\"load model from {args['MODEL_PATH_I']}\", file=sys.stderr)\n model_I = NMT.load(args['MODEL_PATH_I'])\n model_I.encoder.dropout = nn.Dropout(0.)\n\n ces_I = []\n with torch.no_grad():\n for sent in tqdm(test_data, desc='Decoding', file=sys.stdout):\n loss = model_I([sent]).item()\n ce = loss / len(sent)\n ces_I.append(ce)\n\n print(f\"load model from {args['MODEL_PATH_N']}\", file=sys.stderr)\n model_N = NMT.load(args['MODEL_PATH_N'])\n model_N.encoder.dropout = nn.Dropout(0.)\n\n ces_N = []\n with torch.no_grad():\n for sent in tqdm(test_data, desc='Decoding', file=sys.stdout):\n loss = model_N([sent]).item()\n ce = loss / len(sent)\n ces_N.append(ce)\n\n ces_diff = []\n for ce_I, ce_N in zip(ces_I, ces_N):\n ces_diff.append(ce_I - ce_N)\n\n selected = 0\n with open(args['OUTPUT_FILE'], 'w') as f:\n for words, ce in zip(test_data, ces_diff):\n if (ce < threshold):\n selected += 1\n words = words[1:-1:1]\n sent = (\"\".join(words)).replace(\"▁\", \" ▁\").strip()\n # f.write(str(ce) + ' ')\n f.write(sent + '\\n')\n\n print(\"%d out of %d sentences selected.\" % (selected, len(test_data)))", "def __call__(self, inputs_tokens: tf.Tensor, outputs_tokens: tf.Tensor,\n training: bool=True):\n if self.max_seq_len is not None:\n # cut to max sequence length if longer\n inputs_tokens = inputs_tokens[..., :self.max_seq_len]\n outputs_tokens = outputs_tokens[..., :self.max_seq_len]\n # embeddings\n inputs = self.input_embedding(inputs_tokens)\n outputs = self.output_embedding(outputs_tokens)\n # first normalization\n inputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n outputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n # positional encoding\n pos_code = self.positional_encoding()\n inputs += pos_code[..., :tf.shape(inputs)[-2], :]\n outputs += pos_code[..., :tf.shape(outputs)[-2], :]\n # masks\n in_pad_mask, out_mask = get_masks(inputs_tokens, outputs_tokens)\n # forward pass\n decoder_outputs, attentions = super(TransformerNLP, self).__call__(\n inputs=inputs, outputs=outputs, out_mask=out_mask,\n in_pad_mask=in_pad_mask, training=training\n )\n # decode embedding\n if self.final_layer is None:\n proba_output = self.output_embedding.reverse(decoder_outputs)\n else:\n proba_output = self.final_layer(decoder_outputs)\n\n return proba_output, attentions", "def predict_step(batch, state, cache, eos_idx, config):\n\n logging.info('predict_step(batch=%s)', batch)\n variables = {'params': state.optimizer.target}\n model = models.Model(config)\n encoded, encoded_mask = model.apply(\n variables, batch, 
method=models.Model.encode)\n\n encoded_inputs = decode.flat_batch_beam_expand(encoded, config.beam_size)\n encoded_mask = decode.flat_batch_beam_expand(encoded_mask, config.beam_size)\n\n def tokens_ids_to_logits(flat_ids, flat_cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # --> [batch * beam, 1, vocab]\n flat_logits, new_vars = model.apply(\n {\n 'params': state.optimizer.target,\n 'cache': flat_cache\n },\n flat_ids,\n encoded_inputs,\n flat_ids > 0,\n encoded_mask,\n mutable=['cache'],\n method=models.Model.decode)\n new_flat_cache = new_vars['cache']\n # Remove singleton sequence-length dimension:\n # [batch * beam, 1, vocab] --> [batch * beam, vocab]\n flat_logits = flat_logits.squeeze(axis=1)\n return flat_logits, new_flat_cache\n\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n beam_seqs, _, = decode.beam_search(\n batch['token'],\n cache,\n tokens_ids_to_logits,\n beam_size=config.beam_size,\n alpha=0.6,\n eos_id=eos_idx,\n max_decode_len=config.max_decode_step)\n # Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension\n # sorted in increasing order of log-probability.\n # Return the highest scoring beam sequence.\n return beam_seqs[:, -1]", "def _decode(self, input_dict):\n encoder_outputs = input_dict['encoder_output']['outputs']\n enc_src_lengths = input_dict['encoder_output']['src_length']\n if self._mode == 'train':\n spec = (\n input_dict['target_tensors'][0]\n if 'target_tensors' in input_dict\n else None\n )\n spec_length = (\n input_dict['target_tensors'][1]\n if 'target_tensors' in input_dict\n else None\n )\n\n _batch_size = tf.shape(encoder_outputs)[0]\n\n training = self._mode == 'train'\n regularizer = self.params.get('regularizer', None)\n\n if self.params.get('enable_postnet', True):\n if 'postnet_conv_layers' not in self.params:\n raise ValueError(\n 'postnet_conv_layers must be passed from config file if postnet is'\n 'enabled'\n )\n\n num_audio_features = self._n_feats\n\n output_projection_layer = tf.layers.Dense(\n name='output_proj', units=num_audio_features, use_bias=True\n )\n stop_token_projection_layer = tf.layers.Dense(\n name='stop_token_proj', units=1, use_bias=True\n )\n\n prenet = None\n if self.params.get('enable_prenet', True):\n prenet = Prenet(\n self.params.get('prenet_units', 256),\n self.params.get('prenet_layers', 2),\n self.params.get('prenet_dropout', 0.5),\n self.params.get('prenet_enable_dropout', True),\n self.params.get('prenet_activation', tf.nn.relu),\n self.params['dtype'],\n )\n\n cell_params = {}\n cell_params['num_units'] = self.params['decoder_cell_units']\n decoder_cells = [\n single_cell(\n cell_class=self.params['decoder_cell_type'],\n cell_params=cell_params,\n zoneout_prob=self.params.get('zoneout_prob', 0.0),\n dp_output_keep_prob=1.0\n - self.params.get('dropout_prob', 0.1),\n training=training,\n )\n for _ in range(self.params['decoder_layers'])\n ]\n\n if self.params['attention_type'] is not None:\n attention_mechanism = self._build_attention(\n encoder_outputs,\n enc_src_lengths,\n self.params.get('attention_bias', False),\n )\n\n attention_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n attentive_cell = AttentionWrapper(\n cell=attention_cell,\n attention_mechanism=attention_mechanism,\n alignment_history=True,\n output_attention='both',\n )\n\n decoder_cell = attentive_cell\n\n if self.params['attention_type'] is None:\n decoder_cell = tf.contrib.rnn.MultiRNNCell(decoder_cells)\n\n if self._mode 
== 'train':\n train_and_not_sampling = True\n helper = TacotronTrainingHelper(\n inputs=spec,\n sequence_length=spec_length,\n prenet=None,\n model_dtype=self.params['dtype'],\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n elif self._mode == 'eval' or self._mode == 'infer':\n train_and_not_sampling = False\n inputs = tf.zeros(\n (_batch_size, 1, num_audio_features),\n dtype=self.params['dtype'],\n )\n helper = TacotronHelper(\n inputs=inputs,\n prenet=None,\n mask_decoder_sequence=self.params.get(\n 'mask_decoder_sequence', True\n ),\n )\n else:\n raise ValueError('Unknown mode for decoder: {}'.format(self._mode))\n decoder = TacotronDecoder(\n decoder_cell=decoder_cell,\n helper=helper,\n initial_decoder_state=decoder_cell.zero_state(\n _batch_size, self.params['dtype']\n ),\n attention_type=self.params['attention_type'],\n spec_layer=output_projection_layer,\n stop_token_layer=stop_token_projection_layer,\n prenet=prenet,\n dtype=self.params['dtype'],\n train=train_and_not_sampling,\n )\n\n if self._mode == 'train':\n maximum_iterations = tf.reduce_max(spec_length)\n else:\n maximum_iterations = tf.reduce_max(enc_src_lengths) * 10\n\n outputs, final_state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n # outputs, final_state, sequence_lengths, final_inputs = dynamic_decode(\n decoder=decoder,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n swap_memory=self.params.get('use_swap_memory', False),\n output_time_major=self.params.get('time_major', False),\n parallel_iterations=self.params.get('parallel_iterations', 32),\n )\n\n decoder_output = outputs.rnn_output\n stop_token_logits = outputs.stop_token_output\n\n with tf.variable_scope('decoder'):\n # If we are in train and doing sampling, we need to do the projections\n if train_and_not_sampling:\n decoder_spec_output = output_projection_layer(decoder_output)\n stop_token_logits = stop_token_projection_layer(\n decoder_spec_output\n )\n decoder_output = decoder_spec_output\n\n ## Add the post net ##\n if self.params.get('enable_postnet', True):\n dropout_keep_prob = self.params.get(\n 'postnet_keep_dropout_prob', 0.5\n )\n\n top_layer = decoder_output\n for i, conv_params in enumerate(self.params['postnet_conv_layers']):\n ch_out = conv_params['num_channels']\n kernel_size = conv_params['kernel_size'] # [time, freq]\n strides = conv_params['stride']\n padding = conv_params['padding']\n activation_fn = conv_params['activation_fn']\n\n if ch_out == -1:\n ch_out = self._n_feats\n\n top_layer = conv_bn_actv(\n layer_type='conv1d',\n name='conv{}'.format(i + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=activation_fn,\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=self.params.get(\n 'postnet_data_format', 'channels_last'\n ),\n bn_momentum=self.params.get('postnet_bn_momentum', 0.1),\n bn_epsilon=self.params.get('postnet_bn_epsilon', 1e-5),\n )\n top_layer = tf.layers.dropout(\n top_layer,\n rate=1.0 - dropout_keep_prob,\n training=training,\n )\n\n else:\n top_layer = tf.zeros(\n [\n _batch_size,\n maximum_iterations,\n outputs.rnn_output.get_shape()[-1],\n ],\n dtype=self.params['dtype'],\n )\n\n if regularizer and training:\n vars_to_regularize = []\n vars_to_regularize += attentive_cell.trainable_variables\n vars_to_regularize += (\n attention_mechanism.memory_layer.trainable_variables\n )\n vars_to_regularize += output_projection_layer.trainable_variables\n vars_to_regularize += (\n 
stop_token_projection_layer.trainable_variables\n )\n\n for weights in vars_to_regularize:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == tf.float16:\n tf.add_to_collection(\n 'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )\n\n if self.params.get('enable_prenet', True):\n prenet.add_regularization(regularizer)\n\n if self.params['attention_type'] is not None:\n alignments = tf.transpose(\n final_state.alignment_history.stack(), [1, 2, 0]\n )\n else:\n alignments = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n spectrogram_prediction = decoder_output + top_layer\n\n mag_spec_prediction = tf.zeros([_batch_size, _batch_size, _batch_size])\n\n stop_token_prediction = tf.sigmoid(stop_token_logits)\n outputs = [\n decoder_output,\n spectrogram_prediction,\n alignments,\n stop_token_prediction,\n sequence_lengths,\n mag_spec_prediction,\n ]\n\n return {'outputs': outputs, 'stop_token_prediction': stop_token_logits}", "def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n\n def _decode_predictions(input_key: str, output_key: str, beam=False):\n if input_key in output_dict:\n if beam:\n all_predicted_tokens = [list(map(self._indices_to_tokens, beams)) \n for beams in sanitize(output_dict[input_key])]\n else:\n all_predicted_tokens = list(map(self._indices_to_tokens, sanitize(output_dict[input_key])))\n output_dict[output_key] = all_predicted_tokens\n\n _decode_predictions(\"predictions\", \"predicted_tokens\", beam=True)\n _decode_predictions(\"ctc_predictions\", \"ctc_predicted_tokens\")\n _decode_predictions(\"rnnt_predictions\", \"rnnt_predicted_tokens\")\n _decode_predictions(\"target_tokens\", \"targets\")\n\n return output_dict", "def _basic_rnn_seq2seq(encoder_inputs,\n decoder_inputs,\n cell,\n feed_previous,\n dtype=dtypes.float32,\n scope=None):\n with variable_scope.variable_scope(scope or \"basic_rnn_seq2seq\"):\n enc_cell = copy.deepcopy(cell)\n _, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)\n if feed_previous:\n return _rnn_decoder(decoder_inputs, enc_state, cell, _loop_function)\n else:\n return _rnn_decoder(decoder_inputs, enc_state, cell)", "def batch_decode(self, article, art_lens, go, eos, max_len):\n batch_size = len(art_lens)\n attention, init_dec_states = self.encode(article, art_lens)\n mask = len_mask(art_lens, attention.device).unsqueeze(-2)\n attention = (attention, mask)\n tok = torch.LongTensor([go]*batch_size).to(article.device)\n outputs = []\n attns = []\n states = init_dec_states\n\n # locked dropout\n m = init_dec_states[0][0].data.new(init_dec_states[0][0].size(0),\n init_dec_states[0][0].size(1),\n self.emb_size * 2\n ).bernoulli_(1 - self.dropouth)\n dropout_mask = Variable(m, requires_grad=False) / (1 - self.dropouth)\n\n for i in range(max_len):\n tok, states, attn_score = self._decoder.decode_step(\n tok, states, attention, dropout_mask)\n outputs.append(tok[:, 0])\n attns.append(attn_score)\n return outputs, attns", "def decode(self, code):\n raise NotImplementedError", "def do_decode(decoder, \n output_handlers, \n src_sentences,\n trgt_sentences=None,\n estimator=None,\n iterations=1,\n num_log=1):\n if not decoder.has_predictor():\n logging.fatal(\"Terminated due to an error in the \"\n \"predictor configuration.\")\n return\n all_hypos = []\n text_output_handler = _get_text_output_handler(output_handlers)\n if 
text_output_handler:\n text_output_handler.open_file()\n score_output_handler = _get_score_output_handler(output_handlers)\n\n start_time = time.time()\n logging.info(\"Start time: %s\" % start_time)\n sen_indices = []\n diversity_metrics = []\n not_full = 0\n num_iterations = iterations if estimator and not decoder.is_deterministic() else 1\n \n estimates = []\n\n for sen_idx in get_sentence_indices(args.range, src_sentences):\n decoder.set_current_sen_id(sen_idx)\n try:\n src = \"0\" if src_sentences is False else src_sentences[sen_idx]\n if len(src.split()) > 1000:\n print(\"Skipping ID\", str(sen_idx), \". Too long...\")\n continue\n src_print = io_utils.src_sentence(src)\n logging.info(\"Next sentence (ID: %d): %s\" % (sen_idx + 1, src_print))\n src = io_utils.encode(src)\n sen_estimates = []\n\n for i in range(num_iterations):\n start_hypo_time = time.time()\n decoder.apply_predictor_count = 0\n decoder.seed=i\n if decoder.name == \"reference\":\n hypos = decoder.decode(src, io_utils.encode_trg(trgt_sentences[sen_idx]))\n else:\n hypos = decoder.decode(src)\n if not hypos:\n logging.error(\"No translation found for ID %d!\" % (sen_idx+1))\n logging.info(\"Stats (ID: %d): score=<not-found> \"\n \"num_expansions=%d \"\n \"time=%.2f\" % (sen_idx+1,\n decoder.apply_predictor_count,\n time.time() - start_hypo_time))\n hypos = [_generate_dummy_hypo()]\n \n hypos = _postprocess_complete_hypos(hypos)\n for logged_hypo in hypos[:num_log]:\n logging.info(\"Decoded (ID: %d): %s\" % (\n sen_idx+1,\n io_utils.decode(logged_hypo.trgt_sentence)))\n logging.info(\"Stats (ID: %d): score=%f \"\n \"num_expansions=%d \"\n \"time=%.2f \" \n \"perplexity=%.2f\"% (sen_idx+1,\n logged_hypo.total_score,\n #logged_hypo.base_score if logged_hypo.base_score else logged_hypo.total_score,\n decoder.apply_predictor_count,\n time.time() - start_hypo_time,\n utils.perplexity(logged_hypo.score_breakdown)))\n if estimator:\n container = []\n kau = min(hypos).total_score if decoder.gumbel else None\n for h in hypos:\n if kau and h.total_score <= kau:\n continue\n inc_prob = decoder.get_inclusion_prob_estimate(src, h, kau=kau)\n model_prob = h.base_score if h.base_score else h.total_score\n val = estimator.add_value(h, model_prob - inc_prob, \n ref=trgt_sentences[sen_idx] if trgt_sentences else None)\n container.append((model_prob - inc_prob, val))\n logging.info(\"Estimator value: %.5f\" % (estimator.estimate()))\n estimator.reset()\n sen_estimates.append(container)\n\n\n if score_output_handler:\n try:\n score_output_handler.write_score(logged_hypo.score_breakdown)\n except IOError as e:\n logging.error(\"I/O error %d occurred when creating output files: %s\"\n % (sys.exc_info()[0], e))\n\n if decoder.nbest > 1:\n diversity_score = utils.ngram_diversity([io_utils.decode(h.trgt_sentence) for h in hypos])\n logging.info(\"Diversity: score=%f \"\n % (diversity_score))\n diversity_metrics.append(diversity_score)\n\n if len(hypos) < decoder.nbest:\n not_full += 1\n\n \n all_hypos.append(hypos)\n sen_indices.append(sen_idx)\n estimates.append(sen_estimates)\n try:\n # Write text output as we go\n if text_output_handler:\n text_output_handler.write_hypos([hypos])\n except IOError as e:\n logging.error(\"I/O error %d occurred when creating output files: %s\"\n % (sys.exc_info()[0], e))\n except ValueError as e:\n logging.error(\"Number format error at sentence id %d: %s, \"\n \"Stack trace: %s\" % (sen_idx+1, \n e,\n traceback.format_exc()))\n except AttributeError as e:\n logging.fatal(\"Attribute error at sentence id %d: 
%s. This often \"\n \"indicates an error in the predictor configuration \"\n \"which could not be detected in initialisation. \"\n \"Stack trace: %s\" \n % (sen_idx+1, e, traceback.format_exc()))\n except Exception as e:\n logging.error(\"An unexpected %s error has occurred at sentence id \"\n \"%d: %s, Stack trace: %s\" % (sys.exc_info()[0],\n sen_idx+1,\n e,\n traceback.format_exc()))\n try:\n # Write text output as we go\n if text_output_handler:\n hypos = [_generate_dummy_hypo()]\n text_output_handler.write_hypos([hypos])\n except IOError as e:\n logging.error(\"I/O error %d occurred when creating output files: %s\"\n % (sys.exc_info()[0], e))\n if estimator:\n file_name = decoder.name + '_' + args.fairseq_lang_pair + '_' + estimator.name + '_' +str(args.range) + '_' + str(args.nbest)\n if hasattr(args, 'inc_prob_estimate_rounds'):\n file_name += '_' + str(args.inc_prob_estimate_rounds)\n file_name += '.out'\n with open(file_name, 'w') as f:\n f.write('\\n'.join([str(x) for x in estimates]))\n\n logging.info(\"Decoding finished. Time: %.2f\" % (time.time() - start_time))\n if decoder.nbest > 1:\n print(diversity_metrics)\n print(\"Total not full:\", str(not_full))\n try:\n for output_handler in output_handlers:\n if output_handler == text_output_handler:\n output_handler.close_file()\n else:\n output_handler.write_hypos(all_hypos, sen_indices)\n except IOError as e:\n logging.error(\"I/O error %s occurred when creating output files: %s\"\n % (sys.exc_info()[0], e))" ]
[ "0.5983819", "0.5868206", "0.5792075", "0.57452136", "0.5722459", "0.569437", "0.569437", "0.56869286", "0.5666454", "0.5658044", "0.56570065", "0.56113213", "0.560299", "0.5571299", "0.5535273", "0.55341107", "0.55276024", "0.5525068", "0.55168015", "0.5509156", "0.5509141", "0.5498574", "0.54877174", "0.54837143", "0.5476871", "0.5465428", "0.5457043", "0.5446976", "0.5435453", "0.5434525", "0.54314905", "0.5427879", "0.5427524", "0.54263616", "0.54119915", "0.5393428", "0.5385052", "0.53831947", "0.53637666", "0.5359821", "0.53521496", "0.5349516", "0.5343814", "0.5334483", "0.5329395", "0.5309482", "0.5304182", "0.5299691", "0.52967066", "0.52946323", "0.52804285", "0.5272009", "0.526791", "0.5261632", "0.5261632", "0.52520883", "0.524695", "0.52416545", "0.5232605", "0.5232573", "0.5228966", "0.5226951", "0.52228206", "0.5221308", "0.52177346", "0.52164227", "0.51973706", "0.519152", "0.5185939", "0.5185542", "0.51783746", "0.5146057", "0.51333255", "0.51312137", "0.5130185", "0.512992", "0.5121941", "0.5120924", "0.51148844", "0.51136225", "0.510383", "0.5093766", "0.5089072", "0.5088668", "0.50881857", "0.5080169", "0.50753355", "0.5073629", "0.50675464", "0.5065992", "0.50636345", "0.50636345", "0.5063495", "0.5062006", "0.50591", "0.5058583", "0.5057021", "0.5056133", "0.50539356", "0.5043306", "0.5041766" ]
0.0
-1
Almost the same as decode_chain_argmax(), but instead of using argmax, it performs random sampling from the returned probability distribution.
def decode_chain_sampling(self, hid, begin_emb, seq_len, stop_at_token=None, device='cpu'):
    res_logits = []
    res_actions = []
    cur_emb = begin_emb

    for _ in range(seq_len):
        out_logits, hid = self.decode_one(hid, cur_emb)
        out_probs_v = F.softmax(out_logits, dim=1)
        out_probs = out_probs_v.data.cpu().numpy()[0]
        action = int(np.random.choice(out_probs.shape[0], p=out_probs))
        action_v = torch.LongTensor([action]).to(device)
        cur_emb = self.emb(action_v)

        res_logits.append(out_logits)
        res_actions.append(action)
        if stop_at_token is not None and action == stop_at_token:
            break

    return torch.cat(res_logits), res_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _random_max_wrap(*args):\n _, opt_pt = random_maximise(*args)\n return opt_pt", "def custom_argmax(arr):\n return np.random.choice(np.flatnonzero(arr == arr.max()))", "def argmax_random_tie(seq, key=identity):\n return argmax(shuffled(seq), key=key)", "def argmax(values):\n\tvalues = np.array(values)\n\tmx = np.max(values)\n\tval = np.where(values==mx)[0]\n\treturn np.random.choice(val)", "def random_argmax(x: np.ndarray) -> int:\n indices = all_argmax(x)\n return np.random.choice(indices)", "def argmax_break_ties(self, probs):\n return np.random.choice(np.where(probs == probs.max())[0])", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def _decode_by_maxprob(self):\n self.best_path = np.zeros(self.N, dtype=np.int)\n P = self.prob.copy()\n \n for i in range(self.N):\n idx = np.unravel_index(np.argmax(P, axis=None), P.shape)\n self.best_path[idx[0]] = idx[1]\n P[idx[0],:] = 0\n P[:,idx[1]] = 0", "def produce_max(self, *args, **kwargs):\n raise NotImplementedError('This interaction has no produce_max method yet!')", "def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])", "def dirichlet_max(sampled_probas):\n\talphas = dirichlet_fit(sampled_probas)\n\treturn alphas.max(1)", "def sample(self, logits, argmax_sampling=False):\n if argmax_sampling:\n return torch.argmax(logits, dim=-1)\n else:\n u = torch.rand_like(logits)\n return torch.argmax(logits - torch.log(-torch.log(u)), dim=-1)", "def rargmax(vector):\n m = np.amax(vector)\n indices = np.nonzero(vector ==m)[0]\n return pr.choice(indices)", "def sample_softmax(x : Union[List[float], np.ndarray], t : float = 1):\n x = np.array(x)\n x = x - np.max(x)\n x = np.exp(x / t)\n x = x / np.sum(x)\n return np.random.choice(range(len(x)), p = x)", "def sample_from_probabilities(probabilities, topn=ALPHASIZE):\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0\n p = p / np.sum(p)\n return np.random.choice(ALPHASIZE, 1, p=p)[0]", "def get_posterior_sample(self):\n total_tries = self.prior_success + self.prior_failure\n prob_success = self.prior_success / total_tries\n # np.random.binomial采样出来的是二项分布的均值, 即正面朝上的次数,所以要除以N\n boot_sample = np.random.binomial(total_tries, prob_success) / total_tries\n return boot_sample", "def posterior_sampler(self, nsamples, seed=0, verbose=True):\n\n import random\n\n random.seed(seed)\n sample = self.get_chain()[-self.get_tune:]\n sample = sample.reshape(-1, sample.shape[-1])\n sample = random.choices(sample, k=nsamples)\n\n return sample", "def puct_choice(node):\n return np.argmax(puct_distribution(node))", "def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n estimates.append(np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id]))\n return self.bandits[np.argmax(estimates)]", "def max(self):\r\n\t\treturn max(self.sample)", "def bprop_argmax(x, axis, out, dout):\n return (zeros_like(x), zeros_like(axis))", "def __call__(self, state):\n if random.random() > self._epsilon:\n return self._max_policy(state)\n return random.choice(np.arange(self._action_size))", "def decode(self, probs, 
mask):\n assert (probs == probs).all(), \"Probs should not contain any nans\"\n\n if self.decode_type == \"greedy\":\n _, selected = probs.max(1)\n assert not mask.gather(\n 1, selected.unsqueeze(-1)\n ).data.any(), \"Decode greedy: infeasible action has maximum probability\"\n\n elif self.decode_type == \"sampling\":\n selected = probs.multinomial(1).squeeze(1)\n\n # Check if sampling went OK, can go wrong due to bug on GPU\n # See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232\n while mask.gather(1, selected.unsqueeze(-1)).data.any():\n print(\"Sampled bad values, resampling!\")\n selected = probs.multinomial(1).squeeze(1)\n\n else:\n assert False, \"Unknown decode type\"\n return selected", "def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )", "def _get_max_bootstrap_genus(self, seq, repeats):\n word_posteriors = self._word_posteriors\n word_idxs = self._word_idxs\n word_size = self._word_size\n\n all_words = list(unique_words(seq, word_size))\n print sorted(map(word_idxs.get, all_words))\n decisions = [] #genera idxs\n for words in bootstrap(all_words, len(seq)//word_size, repeats):\n decisions.append(self._get_max_likelihood_genus(words,\n word_posteriors, word_idxs))\n freqs = calc_freqs(concatenate(decisions))\n sorted_freqs = sorted(freqs.items(), key=itemgetter(1))\n return sorted_freqs[-1] #what if a tie here?", "def predict(probs):\n return np.argmax(probs, axis=0)", "def argmax(seq, fn):\n return argmin(seq, lambda x: -fn(x))", "def get_bprop_argmaxwithvalue(self):\n axis = self.axis\n keep_dims = self.keep_dims\n op = P.ArgMaxWithValue(axis)\n\n def bprop(x, out, dout):\n dx = _argmin_or_argmax_grad(x, axis, keep_dims, op, out, dout)\n return (dx,)\n return bprop", "def decode_chain_argmax(self, hid, begin_emb, seq_len, stop_at_token=None):\n res_logits = []\n res_tokens = []\n cur_emb = begin_emb\n\n for _ in range(seq_len):\n out_logits, hid = self.decode_one(hid, cur_emb)\n out_token_v = torch.max(out_logits, dim=1)[1] #uses argmax to go from logits to the decoded token ID\n out_token = out_token_v.data.cpu().numpy()[0]\n\n cur_emb = self.emb(out_token_v) #obtains embeddings for the decoded token to iterate over\n\n res_logits.append(out_logits)\n res_tokens.append(out_token)\n if stop_at_token is not None and out_token == stop_at_token:\n break\n \n return torch.cat(res_logits), res_tokens", "def argmax(seq, fn):\n return argmin(seq, lambda x: -fn(x))", "def predict(probs):\n # Your code here.\n return np.argmax(probs, axis=1)", "def p_logl_max(self):\n sampler = self.__sampler\n if sampler == \"EnsembleSampler\":\n chain = self.sampler.chain\n lnlike = self.sampler.lnprobability\n elif sampler == \"PTSampler\":\n chain = self.sampler.chain[0, ...]\n lnlike = self.sampler.lnlikelihood[0, ...]\n else:\n raise ValueError(\"[linfit]: The sampler type ({0}) is unrecognised!\".format(sampler))\n idx = lnlike.ravel().argmax()\n p = chain.reshape(-1, self.ndim)[idx]\n return p", "def argmax(d):\n return max(d.iteritems(), key=operator.itemgetter(1))", "def argmax_with_random_tiebreaker(self, action_value_estimates):\r\n return np.random.choice(\r\n np.where( action_value_estimates == action_value_estimates.max())[0]\r\n )", "def choice(some_list, probabilities, max_probability=1):\n x = random.uniform(0, max_probability)\n cumulative_probability = 0.0\n\n for item, item_probability in 
zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability: break\n\n return item", "def argmax(func, seq):\n def compare(a1, b1):\n if a1[0] > b1[0]:\n return a1\n return b1\n # using a generator expression here should save memory\n objs = ((func(val), val) for val in seq)\n return reduce(compare, objs)[1]", "def _compute_q_argmax(self):\n self.cur_head = self._sess.run(self.ucb_net._P_argmax,\n {self.state_ph: self.state,\n self.ucb_A_ph: self.ucb_A,\n self.ucb_b_ph: self.ucb_b})[0]\n x = self._sess.run(self._net_outputs.q_heads,\n {self.state_ph: self.state})\n return np.argmax(x[:,:,self.cur_head], axis=1)[0]", "def compute_token_probabilities(probabilities: Union[list, tuple, np.ndarray]) ->np.ndarray:\n if isinstance(probabilities, (list, tuple)):\n max_probs = []\n for timestep_probs in probabilities:\n max_probs.append(np.max(timestep_probs))\n max_probs = np.array(max_probs)\n elif isinstance(probabilities, np.ndarray):\n max_probs = np.max(probabilities, axis=-1)\n else:\n raise ValueError(f'probabilities type must be in [list, tuple, np.ndarray]. Got {type(probabilities)}')\n return max_probs", "def predict_max(self, x):\n y_ = self.predict(x)\n amax = torch.argmax(y_, dim=1).detach()\n t = torch.zeros_like(y_)\n t[torch.arange(y_.shape[0]),amax] = 1\n return t", "def get_target_distribution(\n next_states, rewards, mask, gamma, target_estimator, support\n):\n bsz = rewards.shape[0]\n bsz_ = next_states.shape[0]\n bin_no = support.shape[0]\n v_min, v_max = support[0].item(), support[-1].item()\n delta_z = (v_max - v_min) / (bin_no - 1)\n\n probs = target_estimator(next_states, probs=True)\n qs = torch.mul(probs, support.expand_as(probs))\n argmax_a = qs.sum(2).max(1)[1].unsqueeze(1).unsqueeze(1)\n action_mask = argmax_a.expand(bsz_, 1, bin_no)\n _qa_probs = probs.gather(1, action_mask).squeeze()\n\n # Next-states batch can be smaller so we scatter qa_probs in\n # a tensor the size of the full batch with each row summing to 1\n qa_probs = torch.eye(bsz, bin_no, device=_qa_probs.device)\n qa_probs.masked_scatter_(mask.expand_as(qa_probs), _qa_probs)\n\n # Mask gamma and reshape it torgether with rewards to fit p(x,a).\n rewards = rewards.expand_as(qa_probs)\n gamma = (mask.float() * gamma).expand_as(qa_probs)\n\n # Compute projection of the application of the Bellman operator.\n bellman_op = rewards + gamma * support.unsqueeze(0).expand_as(rewards)\n bellman_op = torch.clamp(bellman_op, v_min, v_max)\n\n # Compute categorical indices for distributing the probability\n m = torch.zeros(bsz, bin_no, device=qa_probs.device)\n b = (bellman_op - v_min) / delta_z\n l = b.floor().long()\n u = b.ceil().long()\n\n # Fix disappearing probability mass when l = b = u (b is int)\n l[(u > 0) * (l == u)] -= 1\n u[(l < (bin_no - 1)) * (l == u)] += 1\n\n # Distribute probability\n \"\"\"\n for i in range(bsz):\n for j in range(self.bin_no):\n uidx = u[i][j]\n lidx = l[i][j]\n m[i][lidx] = m[i][lidx] + qa_probs[i][j] * (uidx - b[i][j])\n m[i][uidx] = m[i][uidx] + qa_probs[i][j] * (b[i][j] - lidx)\n for i in range(bsz):\n m[i].index_add_(0, l[i], qa_probs[i] * (u[i].float() - b[i]))\n m[i].index_add_(0, u[i], qa_probs[i] * (b[i] - l[i].float()))\n \"\"\"\n # Optimized by https://github.com/tudor-berariu\n offset = (\n torch.linspace(0, ((bsz - 1) * bin_no), bsz, device=qa_probs.device)\n .long()\n .unsqueeze(1)\n .expand(bsz, bin_no)\n )\n\n m.view(-1).index_add_(\n 0, (l + offset).view(-1), (qa_probs * (u.float() - b)).view(-1)\n )\n 
m.view(-1).index_add_(\n 0, (u + offset).view(-1), (qa_probs * (b - l.float())).view(-1)\n )\n return m, probs", "def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]", "def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]", "def argmax(tensor):\n raise NotImplementedError", "def get_bernoulli_sample(probs):\n return tf.ceil(probs - tf.random_uniform(tf.shape(probs)))", "def _graph_fn_get_deterministic_action_wo_distribution(self, logits):\n if get_backend() == \"tf\":\n return tf.argmax(logits, axis=-1, output_type=tf.int32)\n elif get_backend() == \"pytorch\":\n return torch.argmax(logits, dim=-1).int()", "def get_most_probable_bit_with_thres(filtered_capture, prob_threshold):\n # Enumerate challenge/responses to build a list of\n # (probabilities, challenge, value)\n sorted_prob = []\n for challenge, response in filtered_capture.items():\n n0, n1 = response\n n = n0 + n1\n if n < MIN_COUNT_FOR_MEANING:\n # Ignore challenges with not enough measures\n continue\n # q0 = ncr(n, n0) * pow(1 - PROB, n0) * pow(PROB, n1)\n # q1 = ncr(n, n0) * pow(PROB, n0) * pow(1 - PROB, n1)\n # unnormalize_q0 = pow(1 - PROB, n0) * pow(PROB, n1)\n # unnormalize_q1 = pow(PROB, n0) * pow(1 - PROB, n1)\n # p_b0 = unnormalized_q0 / (unnormalized_q1 + unnormalized_q0)\n # p_b1 = unnormalized_q1 / (unnormalized_q1 + unnormalized_q0)\n if n >= 500:\n # Prevent \"OverflowError: (34, 'Numerical result out of range')\"\n p_b0 = n0 / (n0 + n1)\n p_b1 = n1 / (n1 + n0)\n else:\n p_b0 = 1. / (1 + pow(PROB / (1 - PROB), n0) * pow((1 - PROB) / PROB, n1))\n p_b1 = 1. / (1. 
+ pow(PROB / (1 - PROB), n1) * pow((1 - PROB) / PROB, n0))\n # print(f\"[{n0:2}+{n1:2}={n:2}] pb0={p_b0:.3}, pb1={p_b1:.3}\")\n if p_b1 > prob_threshold and p_b1 > p_b0:\n sorted_prob.append((p_b1, challenge, 1))\n elif p_b0 > prob_threshold and p_b0 > p_b1:\n sorted_prob.append((p_b0, challenge, 0))\n\n sorted_prob.sort()\n # print(f\"Trying to find a bit out of {len(sorted_prob)} challenges with p>={prob_threshold}...\")\n while sorted_prob:\n best_prob, best_chall, best_val = sorted_prob.pop()\n best_count = count_ones(best_chall)\n if best_count == 1:\n print(f\"Found a bit: {best_chall:#x} = {best_val} (proba {best_prob})\")\n return (best_chall, best_val)\n\n # Combine the probabilities\n for prob, chall, val in sorted_prob.copy():\n if chall == best_chall:\n continue\n # Reduce the masking with XOR\n if chall & ~best_chall == 0:\n # Ensure that the masking is reasonable\n count_xor = count_ones(best_chall ^ chall)\n if count_xor <= 2 and count_xor < best_count:\n new_prob = prob * best_prob\n if new_prob > prob_threshold:\n sorted_prob.append((new_prob, chall ^ best_chall, val ^ best_val))\n sorted_prob.sort()\n\n print(f\"Unable to found a bit with threshold={prob_threshold}\")\n return None", "def decode(model, image, dictionary):\n\n prediction = model(image)\n sm = torch.nn.Softmax(dim=1)\n probabilities = sm(prediction) \n\n idx = torch.argmax(probabilities)\n label = dictionary[str(idx.item())]\n prob = np.round(probabilities[0][idx].item(), 2)\n \n return prob, idx, label", "def compute_sequence_probability(sequence_probabilities: np.ndarray, max_sequence_length: Optional[int]=None, return_log_prob: bool=True) ->float:\n if max_sequence_length is None:\n max_sequence_length = sequence_probabilities.shape[0]\n sequence_probabilities = sequence_probabilities[:max_sequence_length]\n if return_log_prob:\n return np.sum(np.log(sequence_probabilities))\n else:\n return np.prod(sequence_probabilities)", "def argmax(vec):\n _, idx = torch.max(vec, -1)\n return to_scalar(idx)", "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def max_(*args, **kwargs):\n ...", "def sample_propensities(mutated_params: torch.Tensor) -> torch.Tensor:\n return torch.softmax(mutated_params, -1)", "def probabilityTransitionRule(self, unvisitedHeuristics, totalUnvisited):\n argMaxProbValue = 0\n argMaxToken = None\n for unvisitedToken in unvisitedHeuristics:\n probValue = unvisitedHeuristics[unvisitedToken] / totalUnvisited\n\n # In case obtained value is higher than previous maximum, substitute\n if probValue > argMaxProbValue:\n argMaxProbValue = probValue\n argMaxToken = unvisitedToken\n\n return argMaxToken", "def multinomial_class(\n distribution_or_probs: Union[tfd.Distribution, jnp.DeviceArray]\n) -> jnp.DeviceArray:\n if isinstance(distribution_or_probs, tfd.Distribution):\n return jnp.argmax(distribution_or_probs.logits_parameter(), axis=1)\n return jnp.argmax(distribution_or_probs, axis=1)", "def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def _predict_and_return_argmax_label(self, example):\n model_out = self._model.predict([example])\n softmax = list(model_out)[0]['preds']\n argmax = np.argmax(softmax)\n return self._model.output_spec()['preds'].vocab[argmax]", "def maxfit(self, *args, **kwargs):\n return 
_image.image_maxfit(self, *args, **kwargs)", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def greedy_proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = np.argmax(freqs)\n return CHOICES[(prediction_for_them + 1) % 3]", "def getHighestRank_Naive(self):\n\n # filter out low confidences\n maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]\n \n if len(p) == 1:\n # only one predictor has high confidence\n chosenPredictor = p[0]\n elif len(p) > 1:\n # many predictors has high confidence. look for highest wins\n maxScore = max(p, key=operator.attrgetter('scoreWins'))\n \n# maxScore = 0\n# for pred in p:\n# maxScore = max(maxScore, pred.scoreWins - pred.scoreLosts) \n \n predictors = p\n p = [p for p in predictors if p.scoreWins >= maxScore.scoreWins]\n \n if len(p) == 1:\n chosenPredictor = p[0]\n elif len(p) > 1:\n # there are ties. look for lowest losts\n maxScore = min(p, key=operator.attrgetter('scoreLosts'))\n predictors = p\n p = [p for p in predictors if p.scoreLosts == maxScore]\n \n if len(p) == 1:\n chosenPredictor = p[-1]\n elif len(p) > 1:\n # choose at random\n random = rps.random() % len(p)\n chosenPredictor = p[random]\n \n if len(p) == 0:\n maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]\n \n random = rps.random() % len(p)\n chosenPredictor = p[random]\n else:\n # confidences are low. 
look for highest wins\n maxScore = max(self.Predictors, key=operator.attrgetter('scoreWins'))\n p = [p for p in self.Predictors if p.scoreWins == maxScore]\n \n if len(p) == 1:\n chosenPredictor = p[0]\n elif len(p) > 1:\n # choose at random\n random = rps.random() % len(p)\n chosenPredictor = p[random]\n else:\n # choose at random\n random = rps.random() % len(self.Predictors)\n chosenPredictor = self.Predictors[random]\n \n if Debug:\n maxScore = max([p.scoreWins for p in self.Predictors]) \n print(\"max score: %f \" % (maxScore), end=\"\") \n maxScore = max([p.confidence for p in self.Predictors]) \n print(\"max confidence: %f \" % (maxScore), end=\"\") \n print(\"chosen predictor: %s\" % (chosenPredictor.name))\n #input()\n\n \n rankConfidence = chosenPredictor.confidence\n return chosenPredictor, rankConfidence", "def get_majority_vote_for_sequence(sequence, nb_classes):\n votes_per_class = np.zeros((nb_classes, 1))\n for i in range(len(sequence)):\n class_vote = np.argmax(sequence[i])\n votes_per_class[class_vote] += 1\n # Return random choice of the max if there's a tie.\n return np.random.choice(np.flatnonzero(votes_per_class == votes_per_class.max()))", "def compute_sequence_probability(\n sequence_probabilities: np.ndarray,\n max_sequence_length: Optional[int] = None,\n return_log_prob: bool = True,\n) -> float:\n if max_sequence_length is None:\n max_sequence_length = sequence_probabilities.shape[0]\n\n sequence_probabilities = sequence_probabilities[:max_sequence_length]\n\n if return_log_prob:\n return np.sum(np.log(sequence_probabilities))\n else:\n return np.prod(sequence_probabilities)", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def choice(Y):\n m,n = Y.shape\n max_entropy = - m*math.log(1.0/n)\n log_Y = np.ma.log(Y)\n return - np.sum(Y * log_Y) / max_entropy", "def compute_token_probabilities(\n probabilities: Union[list, tuple, np.ndarray],\n) -> np.ndarray:\n if isinstance(probabilities, (list, tuple)):\n if not hasattr(probabilities[0], \"__len__\"):\n raise ValueError(\n \"Received token probabilities as a flat 1D list. Expected list of list of probabilities \"\n \"(sequence_length, vocab_size).\"\n )\n max_probs = []\n for timestep_probs in probabilities:\n max_probs.append(np.max(timestep_probs))\n max_probs = np.array(max_probs)\n elif isinstance(probabilities, np.ndarray):\n if len(probabilities.shape) != 2:\n raise ValueError(\n f\"Received token probabilities with non 2D shape: {probabilities.shape}. Expected shape: \"\n \"(sequence_length, vocab_size).\"\n )\n max_probs = np.max(probabilities, axis=-1)\n else:\n raise ValueError(f\"probabilities type must be in [list, tuple, np.ndarray]. 
Got {type(probabilities)}\")\n return max_probs", "def argmax(X):\n\tN,K,_ = X.shape\n\tg0 = X[0,0]\n\tg = X[1:]\n\n\tB = ones((N,K), dtype=int32) * -1\n\t# compute max-marginals and backtrace matrix\n\tV = g0\n\tfor t in xrange(1,N):\n\t\tU = empty(K)\n\t\tfor y in xrange(K):\n\t\t\tw = V + g[t-1,:,y]\n\t\t\tB[t,y] = b = w.argmax()\n\t\t\tU[y] = w[b]\n\t\tV = U\n\t# extract the best path by brack-tracking\n\ty = V.argmax()\n\ttrace = []\n\tfor t in reversed(xrange(N)):\n\t\ttrace.append(y)\n\t\ty = B[t, y]\n\ttrace.reverse()\n\treturn trace", "def softmax(x: np.array) -> Tuple[int, List[float]]:\n dist = np.exp(x) / np.sum(np.exp(x))\n y = np.argmax(dist)\n return int(y), dist", "def _select_from(arr, probs=None):\n if probs is None:\n return arr[randint(0, len(arr) - 1)]\n else:\n r = random()\n s = 0\n for i in range(len(probs)):\n s += probs[i]\n if s > r:\n return arr[i]\n return arr[len(arr) - 1]", "def softmax(dist, return_numpy=True):\n\n dist = utils.to_numpy(dist)\n\n output = []\n if utils.is_arr_of_arr(dist):\n for i in range(len(dist.values)):\n output.append(softmax(dist[i]), return_numpy=True)\n\n output = dist - dist.max(axis=0)\n output = np.exp(output)\n output = output / np.sum(output, axis=0)\n if return_numpy:\n return output\n else:\n return utils.to_categorical(output)", "def get_most_probable_bit(filtered_capture):\n for prob_threshold in (.9, .8, .7, .6):\n result = get_most_probable_bit_with_thres(filtered_capture, prob_threshold)\n if result is not None:\n return result\n return None", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def argmax(x1, axis=None, out=None):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n if x1_desc:\n if axis is not None:\n pass\n elif out is not None:\n pass\n else:\n result_obj = dpnp_argmax(x1_desc).get_pyobj()\n result = dpnp.convert_single_elem_array_to_scalar(result_obj)\n\n return result\n\n return call_origin(numpy.argmax, x1, axis, out)", "def arm_selection(exp_reward_array):\n #print('sampled exp reward = {}'.format(exp_reward_array))\n return np.argmax(exp_reward_array)", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "async def infer_argmax(\n self, engine, input: lib.AbstractArray, dim: lib.u64tup_typecheck\n):\n shp = ()\n shp_inp = input.xshape()\n dim = tuple(\n self.require_constant(e, argnum=f'\"1:dim[{edx}]\"')\n for edx, e in enumerate(dim.elements)\n )\n shp = list(shp_inp)\n for d in dim:\n shp[d] = 1\n shp = tuple(shp)\n return type(input)(\n AbstractScalar({VALUE: ANYTHING, TYPE: xtype.Int[64]}),\n {SHAPE: shp, TYPE: input.xtype()},\n )", "def action(self, obs, deterministic = False):\n\t\tdist = self.forward(obs)\n\t\tif 
deterministic:\n\t\t\tact = np.argmax(dist)\n\t\telse:\n\t\t\tact = np.random.choice(dist.shape[0], p=dist)\n\n\t\treturn act", "def set_generator(random, args):\n representation = args.get('representation')\n indices = list(range(len(representation)))\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size and max_size > 1:\n size = random.randint(1, max_size)\n else:\n size = max_size\n candidate = random.sample(indices, size)\n return sorted(candidate)", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def max_discrete(func: Callable[[Tuple], np.ndarray], over: Iterable[Tuple],\\\n state: Tuple[Union[int, float]]) -> Tuple[float, Tuple[Union[int, float]], None]:\n vals = [func(np.asarray((*state, *action)).reshape(1, -1))[0, 0] for action in over]\n maximum = max(vals)\n return (maximum, over[vals.index(maximum)], None)", "def viterbi_decode(score, transition_params):\n trellis = np.zeros_like(score)\n backpointers = np.zeros_like(score, dtype=np.int32)\n trellis[0] = score[0]\n for t in range(1, score.shape[0]):\n v = np.expand_dims(trellis[t - 1], 1) + transition_params\n trellis[t] = score[t] + np.max(v, 0)\n backpointers[t] = np.argmax(v, 0)\n\n viterbi = [np.argmax(trellis[-1])]\n for bp in reversed(backpointers[1:]):\n viterbi.append(bp[viterbi[-1]])\n viterbi.reverse()\n\n viterbi_score = np.max(trellis[-1])\n return viterbi, viterbi_score", "def argmax(self, values):\n return self.aggregate(values, \"argmax\")", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def get_numerical_max_sr(mu, sigma, num_trials, n_iter):\n max_sr, count = [], 0\n while count < n_iter:\n count += 1\n series = np.random.normal(mu, sigma, num_trials)\n max_sr.append(max(series))\n return np.mean(max_sr), np.std(max_sr)", "def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index", "def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 
'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = 
self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list", "def argmax(a, *args, **kwargs):\n warn('The function argmax is deprecated from JAMS. Use module pyjams.',\n category=DeprecationWarning)\n if isinstance(a, np.ma.MaskedArray):\n return np.ma.argmax(a, *args, **kwargs)\n elif isinstance(a, np.ndarray):\n return np.argmax(a, *args, **kwargs)\n else:\n return _argmax(a)", "def estimate_max_dn(exposure, gain=1):\n return np.random.randint(100*exposure, 500*exposure)", "def example_A():\n d = dit.example_dists.Xor()\n\n # Calculate marginal maximum entropy distributions up to order 3.\n maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)\n\n print_output(d, maxent_dists)\n\n return maxent_dists", "def act(self,observation):\n maximum_actions = np.argwhere(self.q_table[observation] == np.amax(self.q_table[observation])).flatten()\n return(np.random.choice(maximum_actions))", "def max_apply(x): \n if len(x) == 1:\n return x[0]\n else:\n return x[1]", "def produce(self, key=lambda x: 1.0):\n return max(self.data[0], key=key)", "def choose_arm_via_thompson_sampling(self):\n samples = {\n content_id: random.betavariate(\n self.content_stats[content_id]['successes'] + 1,\n self.content_stats[content_id]['failures'] + 1,\n )\n for content_id in self.content_stats\n }\n return max(\n samples,\n key=samples.get,\n )", "def max_value(policy_lookup, state, player):\n\taction_values = list(get_policy_actions(policy_lookup, state, player).values())\n\tif action_values:\n\t\treturn np.max(action_values)\n\treturn 0", "def probchoice(V, d, obs=[]):\n\n #d = 0.01\n #obs = []\n #V = array([0., 0., 0.2, 0.2, 0.2, 0.4])\n\n #top = [exp(d*v) for v in V]\n top = exp(V * (1./d))\n\n #print top\n #print dummy\n\n # set the value of any prior observations to zero\n for i in range(len(obs)): top[obs[i][0]] = 0.\n\n bottom = sum(top)\n cp = [t/bottom for t in top]\n\n r = random()\n #print r\n #print cumsum(cp)\n\n return where((1*(r < cumsum(cp)))==1)[0][0]\n\n #return sum(1*(random() < cumsum(cp)))-1", "def optimal_agent(bandit, iterations):\n\n for i in range(iterations):\n a = bandit.pay_offs.index(max(bandit.pay_offs))\n r = bandit.sample(a)\n yield a, r", "def randInt(max):\n return int(max * random.random())", "def 
random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]" ]
[ "0.68480635", "0.6822088", "0.67059606", "0.65348536", "0.63732535", "0.63186467", "0.6183038", "0.59274244", "0.58021593", "0.57685935", "0.5748391", "0.5623231", "0.56098354", "0.5572641", "0.5567708", "0.55628234", "0.55474985", "0.55280584", "0.55236834", "0.54596233", "0.5455208", "0.5423757", "0.54161495", "0.53973925", "0.5389262", "0.53644836", "0.53311443", "0.5330921", "0.5314725", "0.5293461", "0.528756", "0.52664274", "0.52645844", "0.5261135", "0.52575815", "0.52574193", "0.52415746", "0.52332133", "0.520183", "0.52001595", "0.519159", "0.51737005", "0.51530486", "0.51496154", "0.513933", "0.5138629", "0.5136384", "0.5115447", "0.5113051", "0.51104033", "0.5094092", "0.5094049", "0.50719124", "0.5071356", "0.50654125", "0.5055429", "0.50525886", "0.5027187", "0.5027187", "0.50034714", "0.50031996", "0.4998324", "0.49969485", "0.49962255", "0.49873117", "0.4983769", "0.49782327", "0.49729478", "0.49630472", "0.49554226", "0.49513966", "0.49489462", "0.49359065", "0.49357083", "0.49342793", "0.49338618", "0.49317166", "0.49233568", "0.49199706", "0.49193412", "0.49154344", "0.4902814", "0.4901729", "0.4898715", "0.4898715", "0.48968515", "0.48876008", "0.488294", "0.4880561", "0.48784992", "0.48701632", "0.48663026", "0.4864599", "0.48601055", "0.48585182", "0.4857824", "0.48441648", "0.48423672", "0.4837014", "0.4833169", "0.4833169" ]
0.0
-1
Returns a free socket port. It works by creating an empty socket, binding it to port 0 so that the OS automatically assigns a free port to it, obtaining the port using `getsockname` and then immediately closing it. The application intending to use this port should bind to it immediately so that no other application binds to it before us.
def get_free_port():
    sock = socket.socket()
    # bind to a random port (so that the OS automatically assigns us a free port)
    sock.bind(('', 0))
    # obtain the random port value
    port = sock.getsockname()[1]
    # close the socket so that the port gets free
    sock.close()
    return port
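A minimal usage sketch, added for illustration and not part of the original record: the caller re-binds to the returned port immediately, as the description above recommends, so the window in which another process could claim it stays small. The helper name serve_on_free_port is hypothetical.

    import socket

    def serve_on_free_port():
        port = get_free_port()      # helper from the snippet above
        server = socket.socket()
        server.bind(('', port))     # claim the port right away
        server.listen(1)
        return server, port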
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_free_port():\n s = socket.socket()\n s.bind(('', 0))\n _, port = s.getsockname()\n s.close()\n return port", "def get_free_port():\n s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)\n s.bind(('127.0.0.1', 0))\n _, port = s.getsockname()\n s.close()\n return port", "def free_port():\n\n with socket.socket() as sock:\n sock.bind(('', 0))\n return sock.getsockname()[1]", "def free_port():\n free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n free_socket.bind(('0.0.0.0', 0))\n free_socket.listen(5)\n port = free_socket.getsockname()[1]\n free_socket.close()\n return port", "def _get_free_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n with closing(s):\n s.bind((\"localhost\", 0))\n return s.getsockname()[1]", "def free_port():\n free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n free_socket.bind(('0.0.0.0', 0))\n free_socket.listen(5)\n port = free_socket.getsockname()[1]\n free_socket.close()\n return port", "def get_free_port():\n s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)\n s.bind((\"localhost\", 0))\n address, port = s.getsockname()\n s.close()\n return port", "def _find_free_port():\n import socket\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Binding to port 0 will cause the OS to find an available port for us\n sock.bind((\"\", 0))\n port = sock.getsockname()[1]\n sock.close()\n # NOTE: there is still a chance the port could be taken by other processes.\n return port", "def _find_free_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('localhost', 0))\n _, port = sock.getsockname()\n sock.close()\n\n return port", "def _find_free_port():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind((\"localhost\", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]", "def GetUnreservedAvailableLocalPort():\n tmp = socket.socket()\n tmp.bind(('', 0))\n port = tmp.getsockname()[1]\n tmp.close()\n\n return port", "def get_free_port():\n max_tries = 0\n while max_tries < MITM_MAX_TRIES:\n max_tries += 1\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', 0))\n port = s.getsockname()[1]\n except Exception:\n sleep(1)\n else:\n return port\n return None", "def find_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n sock.bind(('127.0.0.1', 0))\n sock.listen(socket.SOMAXCONN)\n ipaddr, port = sock.getsockname()\n sock.close()\n return port", "def get_free_port(address=\"\"):\n\n s = socket(AF_INET, SOCK_STREAM)\n s.bind((address, 0)) # lgtm [py/bind-socket-all-network-interfaces]\n port = s.getsockname()[1]\n s.close()\n return port", "def select_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('127.0.0.1', 0))\n _, port = sock.getsockname()\n sock.close()\n return port", "def get_free_local_port():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(('localhost', 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]", "def get_available_port() -> int:\n with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.bind(('', 0))\n _, port = sock.getsockname()\n return int(port)", "def get_free_port() -> int:\n not_free = True\n while not_free:\n port = np.random.randint(7000, 7999)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n res = sock.connect_ex((\"localhost\", port))\n if res != 0:\n not_free = False\n 
return port", "def find_first_available_port():\n skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n skt.bind((\"0.0.0.0\", 0))\n _, port = skt.getsockname()\n skt.close()\n return port", "def get_open_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((\"\", 0))\n o_port = sock.getsockname()[1]\n sock.close()\n return o_port", "def get_safe_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((LOCALHOST, 0))\n port = sock.getsockname()[1]\n sock.close()\n return port", "def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):\n tempsock = socket.socket(family, socktype)\n port = bind_port(tempsock)\n tempsock.close()\n del tempsock\n return port", "def get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = s.getsockname()[1]\n s.close()\n return port", "def get_unused_port(port):\n if port is None or port < 1024 or port > 49151:\n port = random.randint(1024, 49151)\n while True:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind(('', port)) # Try to open port\n except socket.error as e:\n if e.errno is 98: # Errorno 98 means address already bound\n port += 1\n continue\n raise e\n s.close()\n return port", "def get_unused_port():\n port, s = get_unused_port_and_socket()\n s.close()\n return port", "def _pick_unused_port_without_server(bind_timeout=0):\n # Next, try a few times to get an OS-assigned port.\n # Ambrose discovered that on the 2.6 kernel, calling Bind() on UDP socket\n # returns the same port over and over. So always try TCP first.\n port = None\n bound_sockets = [] if bind_timeout > 0 else None\n for _ in range(10):\n # Ask the OS for an unused port.\n port = _bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP, bound_sockets)\n # Check if this port is unused on the other protocol.\n if (port and port not in _random_ports and\n _bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP, bound_sockets)):\n _random_ports.add(port)\n _spawn_bound_port_holding_daemon(port, bound_sockets, bind_timeout)\n return port\n if bound_sockets:\n for held_socket in bound_sockets:\n held_socket.close()\n del bound_sockets[:]\n\n # Try random ports as a last resort.\n rng = random.Random()\n for _ in range(10):\n port = int(rng.randrange(15000, 25000))\n if port not in _random_ports:\n if _is_port_free(port, bound_sockets):\n _random_ports.add(port)\n _spawn_bound_port_holding_daemon(\n port, bound_sockets, bind_timeout)\n return port\n if bound_sockets:\n for held_socket in bound_sockets:\n held_socket.close()\n del bound_sockets[:]\n\n # Give up.\n raise NoFreePortFoundError()", "def test_get_unused_port() -> None:\n available_port = get_unused_port()\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.bind((\"\", available_port))\n assert int(sock.getsockname()[1]) == available_port", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def get_open_port(host=\"localhost\"):\n temp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n temp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n temp_sock.bind((host, 0))\n port = temp_sock.getsockname()[1]\n temp_sock.close()\n del temp_sock\n return port", "def find_available_local_port():\n infos = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)\n family, proto, _, _, addr = next(iter(infos))\n sock = socket.socket(family, proto)\n sock.bind(addr)\n addr, 
port = sock.getsockname()[:2]\n sock.close()\n return port", "def new_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n for i in range(12042, 16042):\n try:\n s.bind(('127.0.0.1', i))\n s.close()\n return i\n except socket.error, e:\n pass\n raise Exception('No local port available')", "def _get_unused_udp_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n return port", "def find_unbound_port():\n while True:\n port = random.randint(*PORT_RANGE)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.bind((\"127.0.0.1\", port))\n return port\n except socket.error:\n print(\"randomly generated port %d is bound. Trying again.\" % port)", "def get_free_socket(self):\n try:\n peer_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n peer_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n peer_socket.bind(('', 0))\n free_socket = peer_socket.getsockname()[1]\n peer_socket.close()\n return free_socket\n except Exception as e:\n print \"Obtaining free sockets failed: %s\" % e\n sys.exit(1)", "def get_unused_port_and_socket():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n addr, port = s.getsockname()\n return (port, s)", "def _reserve_port():\n sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 0:\n raise RuntimeError(\"Failed to set SO_REUSEPORT.\")\n sock.bind(('', int(SERVICE_PORT)))\n try:\n yield sock.getsockname()[1]\n finally:\n sock.close()", "def reserve(ip=LOCALHOST, port=0):\n port = int(port)\n with contextlib.closing(socket()) as s:\n s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n try:\n s.bind((ip, port))\n except SocketError as e:\n # socket.error: EADDRINUSE Address already in use\n if e.errno == errno.EADDRINUSE and port != 0:\n s.bind((ip, 0))\n else:\n raise\n\n # the connect below deadlocks on kernel >= 4.4.0 unless this arg is greater than zero\n s.listen(1)\n\n sockname = s.getsockname()\n\n # these three are necessary just to get the port into a TIME_WAIT state\n with contextlib.closing(socket()) as s2:\n s2.connect(sockname)\n sock, _ = s.accept()\n with contextlib.closing(sock):\n return sockname[1]", "def find_free_port(ports_socket, name):\n request_name = \"-\".join((name, str(os.getpid())))\n while True:\n port = test_server_request(ports_socket, request_name, GETPORT)\n if not tcp_listening(port):\n return port\n error(\"port %u is busy, try another\" % port)", "def port_connection(self, sock):\n sock.bind(('', 0)) # Bind to OS-assigned available & random port.\n sock.listen(1)", "def bind_port(sock, host=HOST):\n if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:\n if hasattr(socket, 'SO_REUSEADDR'):\n if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:\n raise TestFailed(\"tests should never set the SO_REUSEADDR \" \\\n \"socket option on TCP/IP sockets!\")\n if hasattr(socket, 'SO_REUSEPORT'):\n if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:\n raise TestFailed(\"tests should never set the SO_REUSEPORT \" \\\n \"socket option on TCP/IP sockets!\")\n if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)\n\n sock.bind((host, 0))\n port = sock.getsockname()[1]\n return port", "def findFreePort(start=8000, end=1<<16):\n for port in range(start, end+1):\n 
try:\n sock = socket.socket()\n sock.bind(('', port))\n return port\n finally:\n sock.close()\n raise ValueError('Impossible to find a free port in %s-%s' % (start, end))", "def find_free_port(self):\n return utils.find_free_port()", "def port(self):\n if self._server_thread is None:\n raise RuntimeError('Server not started.')\n return self._port", "def _getusableport():\r\n port_found = False\r\n port_min = 63000\r\n port_max = 63150\r\n port_iter = port_min\r\n local_addr = getmyip()\r\n\r\n while not port_found:\r\n if port_iter > port_max:\r\n raise Exception(\"Network restriction error! Unable to find a free port!\")\r\n try:\r\n udp_test_socket = recvmess(local_addr, port_iter, _dummy_function)\r\n stopcomm(udp_test_socket)\r\n port_found = True\r\n except Exception, e:\r\n port_iter += 1\r\n\r\n return port_iter", "def select_socket(self, ip: str = '') -> socket:\n sock = socket(AF_INET, SOCK_STREAM)\n found_port = False\n retries = 0\n while not found_port:\n try:\n sock.bind((ip, self._get_candidate_port()))\n found_port = True\n except Exception:\n retries = retries + 1\n if retries > max_port_range_retries:\n self.log_and_raise(RuntimeError(f\"Failed to locate port within range {self.port_range} \"\n f\"after {max_port_range_retries} retries!\"))\n return sock", "def get_port(self) -> int:\n return int(self.socket.getsockname()[1])", "def _pick_unused_port(pid=None, portserver_address=None,\n noserver_bind_timeout=0):\n try: # Instead of `if _free_ports:` to handle the race condition.\n port = _free_ports.pop()\n except KeyError:\n pass\n else:\n _owned_ports.add(port)\n return port\n # Provide access to the portserver on an opt-in basis.\n if portserver_address:\n port = get_port_from_port_server(portserver_address, pid=pid)\n if port:\n return port\n if 'PORTSERVER_ADDRESS' in os.environ:\n port = get_port_from_port_server(os.environ['PORTSERVER_ADDRESS'],\n pid=pid)\n if port:\n return port\n return _pick_unused_port_without_server(bind_timeout=noserver_bind_timeout)", "def _create_server_socket(server_ip: str, server_port: int) -> socket.socket:\r\n\ttry:\r\n\t\tserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\tserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\tserver_socket.bind((server_ip, server_port))\r\n\t\tserver_socket.listen(MAX_CONNECTION)\r\n\texcept Exception as e:\r\n\t\t_logger.error(\"Exception occured while creating server socket: \" \\\r\n\t\t\t+ str(e))\r\n\t\treturn None\r\n\telse:\r\n\t\treturn server_socket", "def _get_unused_port(hostname):\n for port in range(8000, 9001):\n if _check_port_available(hostname, port):\n return port", "def port_in_use(port_num):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', port_num))\n except OSError:\n return True\n else:\n return False", "def server_socket(port):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n sock.bind((\"localhost\", port))\r\n sock.listen(LQUEUE_SIZE)\r\n return sock", "def make_data_port(self):\n err = None\n sock = None\n for res in socket.getaddrinfo(None, 0, socket.AF_INET, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):\n af, socktype, proto, canonname, sa = res\n try:\n sock = socket.socket(af, socktype, proto)\n sock.bind(sa)\n except OSError as _:\n err = _\n if sock:\n sock.close()\n sock = None\n continue\n break\n if sock is None:\n if err is not None:\n raise err\n else:\n raise OSError(\"getaddrinfo returns an empty list\")\n 
sock.listen(1)\n port = sock.getsockname()[1]\n host = self.sock.getsockname()[0]\n response = self._send_port_command(host, port)\n return sock, response", "def pick_port(*ports):\n sockets = []\n\n def find_free_port(port):\n if port:\n return port\n else:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except OSError as e:\n # [Errno 97] Address family not supported by protocol\n # Likely indicates we are in an IPv6-only environment (BEAM-10618). Try\n # again with AF_INET6.\n if e.errno == 97:\n s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n else:\n raise e\n\n sockets.append(s)\n s.bind(('localhost', 0))\n return s.getsockname()[1]\n\n ports = list(map(find_free_port, ports))\n # Close sockets only now to avoid the same port to be chosen twice\n for s in sockets:\n s.close()\n return ports", "def createListeningSocket(host):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((host, 0))\n return sock", "def get_port(self) -> int:\n return self._port", "def _create_listener(port, backlog=256):\n sock, sockaddr = inet.create_external_address(port)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(sockaddr)\n sock.listen(backlog)\n sock.setblocking(0)\n return sock", "def __getServerSocket(self, port, backlog=10):\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\tsock.bind(('', port))\n\t\tsock.listen(backlog)\n\t\treturn sock", "def _is_port_free(port, return_sockets=None):\n return (_bind(port, *_PROTOS[0], return_socket=return_sockets) and\n _bind(port, *_PROTOS[1], return_socket=return_sockets))", "def _create_new_socket(self):\n sock = socket()\n sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)\n return sock", "def _find_open_port(worker_ip: str, local_listen_port: int, ports_to_skip: Iterable[int]) -> int:\n max_tries = 1000\n found_port = False\n for i in range(max_tries):\n out_port = local_listen_port + i\n if out_port in ports_to_skip:\n continue\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((worker_ip, out_port))\n found_port = True\n break\n # if unavailable, you'll get OSError: Address already in use\n except OSError:\n continue\n if not found_port:\n msg = \"LightGBM tried %s:%d-%d and could not create a connection. 
Try setting local_listen_port to a different value.\"\n raise RuntimeError(msg % (worker_ip, local_listen_port, out_port))\n return out_port", "def pick_unused_port(pid=None, portserver_address=None):\n return _pick_unused_port(pid, portserver_address)", "def _socket(addr, port, family):\n sock = socket.socket(family, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n except (AttributeError, OSError):\n pass\n if family == socket.AF_INET6:\n sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)\n try:\n sock.bind((addr, port))\n except OSError:\n msg = \"Cannot bind to {0}:{1}.\".format(addr, port)\n logger.critical(msg)\n sock.close()\n raise BlackholeRuntimeException(msg)\n os.set_inheritable(sock.fileno(), True)\n sock.listen(1024)\n sock.setblocking(False)\n return sock", "def get_ip_freebind(self):\n if hasattr(socket, \"IP_FREEBIND\"):\n # Valid distribution\n return socket.IP_FREEBIND\n if sys.platform == \"linux2\":\n return 15\n return None", "def check_free_port(host, port):\n import socket\n from contextlib import closing\n\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n if sock.connect_ex((host, port)) == 0:\n # Port is open, so not free\n return False\n else:\n # Port is not open, so free\n return True", "def get_port(self):\n \n return self._port", "def get_port_number():\n try:\n return os.environ[\"PORT\"]\n except Exception:\n return None", "def _bind(port, socket_type, socket_proto, return_socket=None,\n return_family=socket.AF_INET6):\n # Our return family must come last when returning a bound socket\n # as we cannot keep it bound while testing a bind on the other\n # family with many network stack configurations.\n if return_socket is None or return_family == socket.AF_INET:\n socket_families = (socket.AF_INET6, socket.AF_INET)\n elif return_family == socket.AF_INET6:\n socket_families = (socket.AF_INET, socket.AF_INET6)\n else:\n raise ValueError('unknown return_family %s' % return_family)\n got_socket = False\n for family in socket_families:\n try:\n sock = socket.socket(family, socket_type, socket_proto)\n got_socket = True\n except socket.error:\n continue\n try:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(('', port))\n if socket_type == socket.SOCK_STREAM:\n sock.listen(1)\n port = sock.getsockname()[1]\n except socket.error:\n return None\n finally:\n if return_socket is None or family != return_family:\n try:\n # Adding this resolved 1 in ~500 flakiness that we were\n # seeing from an integration test framework managing a set\n # of ports with is_port_free(). 
close() doesn't move the\n # TCP state machine along quickly.\n sock.shutdown(socket.SHUT_RDWR)\n except OSError:\n pass\n sock.close()\n if return_socket is not None and family == return_family:\n return_socket.append(sock)\n break # Final iteration due to pre-loop logic; don't close.\n return port if got_socket else None", "def return_free_ports(ports_socket, name):\n return test_server_request(ports_socket, name, PUTPORTS)", "def findFreePort(interface=\"127.0.0.1\", family=socket.AF_INET, type=socket.SOCK_STREAM):\n addr = socket.getaddrinfo(interface, 0)[0][4]\n probe = socket.socket(family, type)\n try:\n probe.bind(addr)\n if family == socket.AF_INET6:\n sockname = probe.getsockname()\n hostname = socket.getnameinfo(\n sockname, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV\n )[0]\n return (hostname, sockname[1])\n else:\n return probe.getsockname()\n finally:\n probe.close()", "def createPort():\n\n starting_interval = 0\n ending_interval = 65535\n port = randint(starting_interval, ending_interval)\n return port", "def internal_port(self):\r\n return self._internal_port", "def _create_socket():\n sock = socket.socket()\n return sock", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def _setupSocket(self):\n oldUmask = None\n if type(self._bindAddress) is str:\n # Unix socket\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n os.unlink(self._bindAddress)\n except OSError:\n pass\n if self._umask is not None:\n oldUmask = os.umask(self._umask)\n else:\n # INET socket\n assert type(self._bindAddress) is tuple\n assert len(self._bindAddress) == 2\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n sock.bind(self._bindAddress)\n sock.listen(socket.SOMAXCONN)\n\n if oldUmask is not None:\n os.umask(oldUmask)\n\n return sock", "def get_port(self):\n return self.__port", "def get_logical_port(self):\n return None", "def _get_port(self):\n return self.__port", "def localport(self) :\n\t\ttry :\n\t\t\treturn self._localport\n\t\texcept Exception as e:\n\t\t\traise e", "def start_socket(ip, port):\n sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n # the server binds itself to a certain socket\n sock.bind((ip, port))\n # listening to the socket\n sock.listen(LISTEN)\n return sock", "def start_socket(ip, port):\n sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n # the server binds itself to a certain socket\n sock.bind((ip, port))\n # listening to the socket\n sock.listen(LISTEN)\n return sock", "def get_socket():\n return socket.create_connection((HOST, PORT))", "def get_available_portoffset(target=\"localhost\"):\n target_ip = socket.gethostbyname(target)\n for portoffset in range(10000, 61000, 1000):\n i = portoffset + 873\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((target_ip, i))\n sock.close()\n if result != 0:\n logger.debug(\"port open {0}\".format(portoffset))\n return portoffset\n return None", "def get_port(self):\n return self.port", "def assert_port_available(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind((\"\", port))\n except socket.error:\n raise exceptions.SpotifyError(\n \"Port {} is not available. 
If you are currently running a server, \" \"please halt it for a min.\".format(port)\n )\n finally:\n s.close()", "def port(self):\n # This property is not 100% needed, but is included instead of making the raw variable public to prevent people from accidentally overwriting the port and screwing up this representative value\n return self._port", "def get(self, oid=None, name=None, mac_address=None):\n if oid is not None:\n path = '%s/ports/%s' % (self.ver, oid)\n elif name is not None:\n path = '%s/ports?display_name=%s' % (self.ver, name)\n elif mac_address is not None:\n path = '%s/ports?mac_address=%s' % (self.ver, mac_address) \n else:\n raise OpenstackError('Specify at least port id or name')\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Get openstack port: %s' % truncate(res))\n if oid is not None:\n server = res[0]['port']\n elif name is not None:\n server = res[0]['ports'][0]\n \n return server", "def remote_getPort(self):\r\n return int(self._fwdPort)", "def port(self):\r\n _, port = self.server_address\r\n return port", "def getPort(self):\n return self._port", "def getPort(self):\n return self._port", "def is_port_available(port):\n port = int(port)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n available = sock.connect_ex(('localhost', port))\n sock.close()\n return available", "def port(self):\n _, port = self.server_address\n return port", "def get_port(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_get_port(self)", "def port_factory_method(self):\n if self.is_secure:\n return HTTPSSecurePort()\n return HTTPPort()", "def __init__(self, port: int, backlog: int = 0):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.bind((\"localhost\", port))\n self.socket.listen(backlog)", "def get_port_from_port_server(portserver_address, pid=None):\n if not portserver_address:\n return None\n\n if pid is None:\n pid = os.getpid()\n\n if _winapi:\n buf = _windows_get_port_from_port_server(portserver_address, pid)\n else:\n buf = _posix_get_port_from_port_server(portserver_address, pid)\n\n if buf is None:\n return None\n\n try:\n port = int(buf.split(b'\\n')[0])\n except ValueError:\n print('Portserver failed to find a port.', file=sys.stderr)\n return None\n _owned_ports.add(port)\n return port", "def port(self) -> int:\n return self._port", "def server_port(self):\n return self._server_port", "def server_port(self):\n return self._server_port" ]
[ "0.89505553", "0.8727646", "0.87067574", "0.8690947", "0.8689836", "0.8675498", "0.8668015", "0.86642563", "0.86574286", "0.85628986", "0.8248419", "0.8209171", "0.81788385", "0.81242055", "0.81235737", "0.8103176", "0.80116427", "0.79655474", "0.79650843", "0.7938987", "0.7877765", "0.78774416", "0.78359056", "0.7779933", "0.7679333", "0.7599862", "0.7547505", "0.754664", "0.75330067", "0.75124353", "0.7363204", "0.725584", "0.7191373", "0.710728", "0.7047069", "0.6975989", "0.69697446", "0.6940277", "0.6859218", "0.6850206", "0.68455243", "0.6759772", "0.6654283", "0.66443205", "0.6458079", "0.63791966", "0.6362394", "0.63064724", "0.6292145", "0.62751824", "0.62717664", "0.6214455", "0.62096596", "0.6170764", "0.6105576", "0.6079785", "0.6063802", "0.60472023", "0.6040161", "0.60387135", "0.6033473", "0.60297287", "0.59934324", "0.59901804", "0.5984638", "0.5970342", "0.5962248", "0.59615654", "0.59501547", "0.59432864", "0.59173596", "0.5901934", "0.5901243", "0.5882632", "0.5880092", "0.5877065", "0.5874586", "0.5857693", "0.584278", "0.5825183", "0.5825183", "0.57980824", "0.5784056", "0.5778652", "0.57487684", "0.57288086", "0.5727246", "0.572579", "0.57126534", "0.5701922", "0.5697287", "0.56893885", "0.56844604", "0.56592774", "0.56526047", "0.5631839", "0.5625798", "0.5620527", "0.56121874", "0.56121874" ]
0.85797644
9
Start a web app at the given port for serving the jigna view for the given template and context.
def start_web_app(template, context, port=8000):
    from tornado.ioloop import IOLoop
    from jigna.web_app import WebApp

    ioloop = IOLoop.instance()

    app = WebApp(template=template, context=context)
    app.listen(port)

    print 'Starting the web app on port %s ...' % port
    ioloop.start()
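An illustrative call sketch, added here and not part of the original record. It assumes jigna exposes Template via jigna.api and that context objects are Traits models (HasTraits), which is how jigna's data binding is normally driven; Person and its name trait are hypothetical placeholders.

    from traits.api import HasTraits, Str   # assumed: jigna binds to Traits models
    from jigna.api import Template          # assumed import path for Template

    class Person(HasTraits):
        name = Str

    template = Template(body_html='<div>Name: {{person.name}}</div>')
    person = Person(name='Ada')

    # Blocks inside Tornado's IOLoop; open http://localhost:8000 to see the view.
    start_web_app(template=template, context={'person': person}, port=8000)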
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serve(port):\n app.run(host='0.0.0.0', port=port, debug=True)", "def run():\n return render_template('index.html')", "def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )", "def serve(ctx, host, port):\n pass", "def serve(ssl='n', port='5000'):\n if not 'project_path' in env:\n _setup_env()\n\n opts = ' -p '+port\n if do(ssl):\n opts += ' -s'\n\n with lcd(join(env.project_path)):\n if exists(join(env.project_path, 'manage.py')):\n local('python manage.py runserver')\n elif _config and 'deploy' in _config:\n if int(port) < 1024:\n local('sudo python website/app.py'+opts)\n else:\n local('python website/app.py'+opts)\n else:\n if int(port) < 1024:\n local('sudo python api.py'+opts)\n else:\n local('python api.py'+opts)", "def start() -> None:\n from app import app\n app.run(debug = True, host = HOST, port = PORT)", "def startserver(path):\n global urlpath\n urlpath = path\n app.run(debug=True, host='0.0.0.0', port=4444)", "def run(port: int = 8080):\n current_ioloop = ioloop.IOLoop.current()\n\n # Start the web server.\n app = create_app()\n logging.info('starting server: localhost:{}'.format(port))\n server = httpserver.HTTPServer(app)\n\n try:\n server.listen(port)\n current_ioloop.start()\n except KeyboardInterrupt:\n pass\n finally:\n current_ioloop.stop()", "def run(self, host=\"0.0.0.0\", port=8080):\n self.app.run(host=host, port=port, debug=True, use_reloader=False,\n use_evalex=False)", "def serve(kwargs, host, port, verbose):\n setup_logging(verbose)\n from werkzeug.serving import run_simple\n from . import create_app\n\n app = create_app(kwargs['db_path'], kwargs['plugins'])\n run_simple(host, port, app)", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def run(port):\n print \"========= SAND conformance server =============\"\n print \"-----------------------------------------------\"\n import os\n if os.environ.get('PORT') is not None:\n port = int(os.environ['PORT'])\n APP.run(port=port)", "def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))", "def start_web(appdir, webport, apiport, debug=False):\n infoapp = None\n\n while infoapp is None:\n infoapp = App.get_running_app()\n if getattr(infoapp, \"base\", None) is None:\n infoapp = None\n sleep(1)\n\n ws = InfoScreenWebServer(infoapp, appdir, apiport)\n\n ws.run(host=\"0.0.0.0\", port=webport, debug=debug)", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])", "def run(self, host: str = '0.0.0.0', port: int = 8080):\n self._loop.run_until_complete(self._configure_plugins())\n web.run_app(self._app, host=host, port=port) # pragma: no cover", "def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)", "def entry_point():\n return render_template(\"index.html\")", "def main(self, options):\n import sys\n import getopt\n import errno\n from os import makedirs\n from os.path import dirname\n\n try:\n opts, args = 
getopt.getopt(options,\n \"lt:gsp:\", [\n \"port=\",\n \"view=\",\n \"list\",\n \"static=\",\n \"generate\",\n \"server\"])\n except getopt.GetoptError, err:\n print str(err)\n sys.exit(2)\n \n generate = False\n serve = False\n listfiles = False\n statichtml = 'static'\n view = False\n port = 8080\n \n for option, arg in opts:\n if option in (\"-g\",\"--generate\"):\n generate = True\n elif option in (\"-t\",\"--static\"):\n statichtml = arg\n elif option in (\"--view\"):\n view = arg\n elif option in (\"-s\",\"--serve\"):\n serve = True\n elif option in (\"-l\",\"--list\"):\n listfiles = True\n elif option in (\"-p\",\"--port\"):\n port = int(arg)\n else:\n assert False, \"unhandle option\"\n \n if generate or listfiles:\n for path, filename in self.genlist():\n \n path = path.replace('?','')\n \n if view and view != path:\n continue\n \n if listfiles:\n print path + \" <= \" + \\\n join(path[1:],'index.html') + ' <= ' + \\\n filename\n \n if generate:\n path = join(statichtml , path[1:], 'index.html')\n try:\n makedirs(dirname(path))\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n pass\n else: raise\n print \"Generating \" + path\n static_file = open(path,'w')\n static_file.write(self.generatehtml(path))\n static_file.close()\n \n if serve:\n print \"Starting wsgi web server on port \" + str(port)\n from wsgiref.simple_server import make_server\n server = make_server('', port, self.wsgiapp())\n server.serve_forever()", "async def serve_web(self):\n interface = \"0.0.0.0\" if settings.PUBLIC_ACCESS else \"127.0.0.1\"\n port = settings.WEB_PORT\n self.logger.info(f\"web: starting the server on {interface}:{port}...\")\n await self.runner.setup()\n site = aioweb.TCPSite(self.runner, interface, port)\n await site.start()\n self.preparing_task = None", "def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)", "def run_server(host='localhost', port=0xdead, **args):\n application = wsgi_application(**args)\n debug = args.get('debug', False)\n serving.run_simple(host, port, application, use_reloader=debug)", "def main():\n return render_template('index.html')", "def main():\n return render_template('index.html')", "def webserver_start():\n run(_webserver_command())", "def run():\n app.run(debug=True, port=5001)", "def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)", "def start(name, path):\n app.start(name, path)", "def run(self, hostname, port, server='wsgiref'):\r\n self._hostname = hostname\r\n self._port = port\r\n self._app.run(host=hostname, port=port, server=server)", "def server(port, wsgi_app):\n try:\n httpd = wsgiref.simple_server.make_server(self._host, port, wsgi_app)\n except socket.error:\n # Try IPv6\n httpd = wsgiref.simple_server.make_server(\n self._host, port, wsgi_app, server_class=WsgiServerIpv6)\n started.set()\n httpd.timeout = 30\n while not stopping.is_set():\n httpd.handle_request()\n stopped.set()", "def run_webserver():\n\tglobal hostname, portnum\n\t#bottle.debug(True)\t# While in development, we want the data\n\tbottle.run(host=hostname, port=portnum) \n\tlogging.info(\"Exiting server.\")", "def serve_vue_app():\n return(render_template('index.html'))", "def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n 
error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass", "def run_server(app: Flask, ip: str, port: int) -> Flask:\n app.run(ip, port)\n\n return app", "def startapp():", "def run(debug, threaded, host, port):\n \n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def web(connection):\n from .web import create_application\n app = create_application(connection=connection)\n app.run(host='0.0.0.0', port=5000)", "def webserver(\n *,\n host=None, port=None, nodebug=False,\n reload=False, load_dotenv=True\n):\n app = web.create_app()\n app.run(\n host=host, port=port,\n use_reloader=True,\n debug=(not nodebug),\n load_dotenv=load_dotenv)", "def web(host: str, port: str, loglevel: str) -> None:\n uvicorn.run(\"source.apps.web:App\", host=host, port=port, log_level=loglevel)", "def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex", "def run(*port):\n print(port)\n if port:\n port = port[0]\n else:\n port = 8000\n external_ip = '0.0.0.0:{}'.format(port)\n _manage('runserver %s' % external_ip)", "def index(path):\n return render_template(\"main.jinja2.html\")", "def run_html():\n if __name__ != \"__main__\":\n app.run(debug=True)", "def serve(self):\n if self._server_thread is not None:\n return\n if self._port is None:\n self._port = portpicker.pick_unused_port()\n started = threading.Event()\n self._stopped = threading.Event()\n self._stopping = threading.Event()\n\n def build_server(started, stopped, stopping):\n \"\"\"Closure to build the server function to be passed to the thread.\n\n Args:\n started: Threading event to notify when started.\n stopped: Threading event to notify when stopped.\n stopping: Threading event to notify when stopping.\n Returns:\n A function that function that takes a port and WSGI app and notifies\n about its status via the threading events provided.\n \"\"\"\n\n def server(port, wsgi_app):\n \"\"\"Serve a WSGI application until stopped.\n\n Args:\n port: Port number to serve on.\n wsgi_app: WSGI application to serve.\n \"\"\"\n try:\n httpd = wsgiref.simple_server.make_server(self._host, port, wsgi_app)\n except socket.error:\n # Try IPv6\n httpd = wsgiref.simple_server.make_server(\n self._host, port, wsgi_app, server_class=WsgiServerIpv6)\n started.set()\n httpd.timeout = 30\n while not stopping.is_set():\n httpd.handle_request()\n stopped.set()\n\n return server\n\n server = build_server(started, self._stopped, self._stopping)\n server_thread = threading.Thread(\n target=server, args=(self._port, self._app))\n self._server_thread = server_thread\n\n server_thread.start()\n started.wait()", "def start():\n app.run()", "def serve() -> None: # pragma: no cover-behave\n logging.getLogger().setLevel(logging.INFO)\n database = init_database()\n init_bottle(database)\n server_port = os.environ.get(\"SERVER_PORT\", \"5001\")\n bottle.run(server=\"gevent\", host=\"0.0.0.0\", port=server_port, reloader=True, log=logging.getLogger()) # nosec", "def run(self):\n self.app.run(host=\"0.0.0.0\")", "def main():\r\n run_wsgi_app(app)", 
"def run_server(kit, local, port, debug):\n host = '127.0.0.1' if local else '0.0.0.0'\n apps = len(kit.flasks)\n if not apps:\n print 'No Flask app found!'\n return\n elif apps == 1:\n app = kit.flasks[0]\n else:\n app_number = getenv('KIT_FLASK_APP', None)\n if not app_number:\n s = '%s Flask applications found:\\n\\n # Name\\n' % (apps, )\n s += '\\n'.join(\n '%04s %s' % (index, flask_app.name)\n for index, flask_app in enumerate(kit.flasks)\n )\n s += '\\n\\nWhich # would you like to run? '\n app_number = raw_input(s)\n environ['KIT_FLASK_APP'] = app_number\n app = kit.flasks[int(app_number)]\n app.run(host=host, port=port, debug=debug, extra_files=[kit.path])", "def start_flask_app(port: int) -> None:\n logging.info('Starting flask app')\n start_app(port=port)", "def main(methods=[\"GET\"]):\n validate_auth()\n ## issue with path resolution after build\n return send_from_directory(\n #todo: remove templates directory reference; index.html isn't a jinja template\n safe_join(current_app.static_folder, 'templates'),\n 'index.html',\n cache_timeout=-1\n )", "def runserver():\n local_addr = \"0.0.0.0:8000\"\n local(\"{} exec web python3 manage.py runserver {} {}\".format(\n dc, local_addr, settings))", "def index():\n return render_template('home.jinja2')", "def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)", "def run(debug, threaded, host, port):\r\n\r\n HOST, PORT = host, port\r\n print(\"running on %s:%d\" % (HOST, PORT))\r\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\r\n\r\n HOST, PORT = host, port\r\n print(\"running on %s:%d\" % (HOST, PORT))\r\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def index():\n with app.app_context():\n return render_template(\"public/index.html\")", "def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! 
(use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def app(environ: t.Dict, start_response):\n # Print the request object details in environ.items()\n for k, v in environ.items():\n print(k, v)\n\n # Let's capture the request path\n path = environ.get(\"PATH_INFO\")\n\n # Handle our different routes. Render different templates.\n # Allow user to add \"/\" or not to URL string\n # NOTE: Don't use elif statement! It skips 'data' assignment!\n if path.endswith(\"/\"):\n path = path[:-1] # remove the trailing \"/\"\n if path == \"\": # the root / index\n data = home(environ)\n elif path == \"/contact\":\n data = contact_us(environ)\n elif path == \"/box-office\":\n data = read_box_office_data(environ)\n else:\n data = render_template(template_name=\"404.html\", context={\"path\": path})\n\n # Encode data to BYTE string\n data = data.encode(\"utf-8\")\n\n # Gunicorn's start_response to get a response going\n start_response(\n f\"200 OK\",\n [(\"Content-Type\", \"text/html\"), (\"Content-Length\", str(len(data)))],\n # You can remove these headers and the browser will still parse it.\n # Modern browsers are smart enough to infer how to parse the request\n )\n # Where does this print to? Server logs I bet... 
YES!\n # print(f\"{data=}\\n{iter([data])}\")\n return iter([data]) # <list_iterator object at 0x10f9f1340>", "def main():\n args = utils.parse_arguments()\n logging.basicConfig(level=logging.INFO)\n coloredlogs.install(level=0,\n fmt=\"[%(asctime)s][%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n isatty=True)\n if args.debug:\n l_level = logging.DEBUG\n else:\n l_level = logging.INFO\n\n logging.getLogger(__package__).setLevel(l_level)\n\n LOG.info('RUNNING TAMAGO WEB')\n serve(app, port=8080, host='0.0.0.0')", "def run(debug, threaded, host, port):\r\n\r\n HOST, PORT = host, port\r\n print(\"running on %s:%d\" % (HOST, PORT))\r\n app.run(host=HOST, port=PORT, debug=True, threaded=threaded)", "def serve_paste(app, global_conf, **kw):\n # Convert the values from the .ini file to something bjoern can work with\n host = kw.get('host', '')\n port = int(kw.get('port', '0')) or False\n if not host and not port and kw.get('listen'):\n listen = kw.get('listen')\n if ':' in listen:\n host = listen.split(':')[0]\n port = int(listen.split(':')[1])\n else:\n host = ''\n port = int(listen)\n if kw.get('reuse_port', '').lower() in ('1', 'true', 'on'):\n reuse_port = True\n else:\n reuse_port = False\n\n run(app, host, port=port, reuse_port=reuse_port)\n return 0", "def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)", "def run_server(args):\n from .app import app # noqa\n\n app.run_server(host=args.host, port=args.port, debug=args.debug)", "def start(parse_opts):\n global opts\n opts = parse_opts\n app.run(host='0.0.0.0')", "def home(environ):\n return render_template(template_name=\"index.html\", context={})", "def web():\n env['remote_port'] = env['port_map']['8000']\n\n sys.stdout.write('Launching browser on remote port %(remote_port)s\\n' % env)\n\n run('open http://%(relay_server)s:%(remote_port)s' % env)", "def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print (\"running on %s:%d\" % (HOST, PORT))\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def index():\n return render_template(\"main.html\")", "def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0", "def run(self, **kwargs):\n app = self.create_app()\n\n app.run(host=self.host, port=self.port, **kwargs)", "def web():\n from mephisto.client.server import app\n\n app.run(debug=False)", "def run(port):\n run(host=config.HOST, port=port)", "def home():\n logging.info('Entering route: HOME')\n\n logging.info('Rendering template: main.html')\n return render_template('main.html')", "def index():\n return render_template('main.html')", "def index():\n return render_template('main.html')", "def hello():\n\n return render_template(\"index.html\")", "def start(port, table_size, update_size, update_rate):\n app = make_app(table_size, update_size, update_rate)\n app.listen(port)\n logging.critical(\"Listening on http://localhost:{}\".format(port))\n loop = tornado.ioloop.IOLoop.current()\n loop.start()", "def run():\n app.run()", "def run(selector):\n\n print selector\n exec \"from \" + selector + \" import urls, port\"\n\n if os.environ.get(\"REQUEST_METHOD\", \"\"):\n from wsgiref.handlers import BaseCGIHandler\n BaseCGIHandler(sys.stdin, sys.stdout, 
sys.stderr, os.environ) \\\n .run(urls)\n else:\n from wsgiref.simple_server import WSGIServer, WSGIRequestHandler\n# with the code like this we are binding to no particular interface, matter?\n httpd = WSGIServer(('', port), WSGIRequestHandler)\n httpd.set_app(urls)\n print \"Serving HTTP on %s port %s ...\" % httpd.socket.getsockname()\n httpd.serve_forever()", "def web():\n import web\n web.app.run(host='0.0.0.0', port=5000, debug=True)", "def web():\n import web\n web.app.run(host='0.0.0.0', port=5000, debug=True)", "def main(port):\n ps = PathologicalServer(\"localhost\", port, _responses)\n ps.start()", "def simple_serve(self, host='0.0.0.0', port=8000):\n from wsgiref.simple_server import make_server\n srv = make_server(host, port, self)\n print \"Development server is running at http://%s:%d/\" % (\n host, port\n )\n print \"Quit the server with CONTROL-C\"\n srv.serve_forever()", "def run_dev_server(port: int=5000) -> None:\n app.run(port=port)", "def run_server(port, settings, keyfile=None, certfile=None):\n create_server(port, settings, keyfile, certfile).serve_forever()", "def local_main():\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)", "def start(self):\n\n self.app = Application()\n self.app._loop = self.loop\n self.add_routes()\n self.app.run(port=int(self.port),\n worker_num=None,\n reload=False,\n debug=False)\n # GZip support\n # Compress(self.app)\n # self.app.config['COMPRESS_MIMETYPES'] = {'text/html',\n # 'application/json'}\n # self.app.config['COMPRESS_LEVEL'] = 4\n # self.app.config['COMPRESS_MIN_SIZE'] = 300\n # Session support\n # self.session_interface = InMemorySessionInterface()\n # self.app.response_middleware.appendleft(self.save_session)\n # self.app.request_middleware.append(self.add_session_to_request)\n\n # self.add_routes()\n # return await self.app.create_server(loop=self.loop,\n # host='0.0.0.0',\n # port=self.port,\n # debug=False)", "def run_dev_appserver(self, args, open_ports=False):\n return self.spawn_dev_appserver(args, open_ports).wait()", "def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)", "def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler", "def main():\n access_token = get_access_token()\n\n return render_template('index.html', ACCESS_TOKEN=access_token)" ]
[ "0.6824245", "0.6593554", "0.65677285", "0.65660465", "0.6564316", "0.6545133", "0.65020066", "0.6479876", "0.6479806", "0.6458077", "0.64405406", "0.64051074", "0.6399282", "0.6394062", "0.63920164", "0.63898516", "0.6380909", "0.6349641", "0.62751275", "0.62736714", "0.6271152", "0.6265836", "0.6259486", "0.6259314", "0.6259314", "0.6249816", "0.6223423", "0.6214609", "0.62118906", "0.62103444", "0.6200573", "0.6197262", "0.619629", "0.6192825", "0.6184496", "0.6184423", "0.61771095", "0.6162563", "0.61611605", "0.61484766", "0.61461884", "0.613655", "0.6135172", "0.6130813", "0.6130779", "0.6116299", "0.6103134", "0.6095625", "0.60954493", "0.6095353", "0.6085512", "0.60854995", "0.6081631", "0.6069497", "0.6054083", "0.6048064", "0.6048064", "0.6042722", "0.6030522", "0.6029352", "0.6029352", "0.6029352", "0.6029352", "0.6029352", "0.6029352", "0.6029352", "0.6028816", "0.6022032", "0.60210675", "0.60145825", "0.6014033", "0.6012795", "0.60092205", "0.60048217", "0.600016", "0.599474", "0.59575886", "0.5954213", "0.5941928", "0.593358", "0.5929267", "0.5924908", "0.5921452", "0.5921452", "0.59170717", "0.58997726", "0.589397", "0.58870506", "0.5885822", "0.5885822", "0.58826524", "0.5877662", "0.58695304", "0.58595884", "0.5850166", "0.58386445", "0.5816205", "0.5813694", "0.5808439", "0.5805991" ]
0.8381516
0
Builds a differentiable augmentation pipeline based on its class type.
def build_aug(aug_type, **kwargs):
    if aug_type not in _AUGMENTATIONS:
        raise ValueError(f'Invalid augmentation type: `{aug_type}`!\n'
                         f'Types allowed: {list(_AUGMENTATIONS)}.')
    return _AUGMENTATIONS[aug_type](**kwargs)
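A short self-contained sketch of the registry dispatch that build_aug relies on, assuming it sits in the same module as the _AUGMENTATIONS dict; the 'noise' key and NoiseAug class are hypothetical stand-ins, not names from the original code.

    class NoiseAug(object):
        # hypothetical augmentation used only to illustrate the lookup
        def __init__(self, std=0.05):
            self.std = std

    _AUGMENTATIONS = {'noise': NoiseAug}

    aug = build_aug('noise', std=0.1)   # instantiates NoiseAug with std=0.1
    # build_aug('cutout')               # would raise ValueError listing ['noise']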
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def xray_augmentationFactory(augmentation, height, width):\n downsample = (260,260)\n\n if augmentation == 'autoaugment':\n transform = [\n transforms.RandomCrop((height, width)),\n transforms.RandomHorizontalFlip(),\n AutoAugment(),\n Cutout()\n ]\n elif augmentation == 'original-cifar':\n transform = [\n transforms.Resize(downsample),\n transforms.RandomCrop(size=(height, width)),\n transforms.RandomHorizontalFlip(),\n ]\n elif augmentation == 'noaugment':\n transform = [\n transforms.Resize(downsample),\n transforms.CenterCrop((height, width)),\n ]\n\n elif augmentation == 'glico':\n NotImplemented(f\"augment parameter {augmentation} not implemented\")\n else: \n NotImplemented(f\"augment parameter {augmentation} not implemented\")\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n #normalize = transforms.Normalize(mean=[0.5888, 0.5888, 0.5889],\n #std=[0.1882, 0.1882, 0.1882])\n\n return transforms.Compose(transform + [transforms.ToTensor(), normalize])", "def build_augmentation_pipeline(aug_list):\n AUGMENTATIONS = {\n 'leadlag': LeadLag(),\n 'penoff': PenOff(),\n 'addtime': AddTime(),\n 'cumsum': CumulativeSum(),\n 'basepoint': Basepoint()\n }\n\n pipeline = Pipeline([\n (tfm_str, AUGMENTATIONS[tfm_str]) for tfm_str in aug_list\n ])\n\n return pipeline", "def _build(self,\n model_type: str,\n **kwargs) -> Predictor:\n if model_type == 'classifier':\n modelcls = sklearn.gaussian_process.GaussianProcessClassifier\n elif model_type == 'regressor':\n modelcls = sklearn.gaussian_process.GaussianProcessRegressor\n else:\n raise ValueError(\n '`model_type` should be \"classifier\" or \"regressor\"')\n model = modelcls(**kwargs)\n return model", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def make_pipeline(slam, settings):\n\n pipeline_name = \"pipeline_source[inversion]\"\n\n \"\"\"\n This pipeline is tagged according to whether:\n\n 1) Hyper-fitting settings (galaxies, sky, background noise) are used.\n 2) The lens galaxy mass model includes an `ExternalShear`.\n 3) The `Pixelization` and `Regularization` scheme of the pipeline (fitted in phases 3 & 4).\n \"\"\"\n\n path_prefix = f\"{slam.path_prefix}/{pipeline_name}/{slam.source_inversion_tag}\"\n\n \"\"\"\n Phase 1: Fit the `Pixelization` and `Regularization`, where we:\n\n 1) Fix the lens mass model to the `MassProfile`'s inferred by the previous pipeline.\n \"\"\"\n\n phase1 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[1]_mass[fixed]_source[inversion_magnification_initialization]\",\n n_live_points=30,\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last.instance.galaxies.lens.mass,\n shear=af.last.instance.galaxies.lens.shear,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.lens.hyper_galaxy,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=al.pix.VoronoiMagnification,\n regularization=al.reg.Constant,\n hyper_galaxy=af.last.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n 
hyper_image_sky=af.last.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=af.last.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase1 = phase1.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 2: Fit the lens`s mass and source galaxy using the magnification `Inversion`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 1.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of the previous pipeline.\n \"\"\"\n\n phase2 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[2]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=af.last[-1].model.galaxies.lens.mass,\n shear=af.last[-1].model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase1.result.instance.galaxies.source.pixelization,\n regularization=phase1.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase1.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase1.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase1.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase2 = phase2.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 3: fit the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the lens `MassProfile` to the result of phase 2.\n \"\"\"\n\n phase3 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[3]_mass[fixed]_source[inversion_initialization]\",\n n_live_points=30,\n evidence_tolerance=slam.setup_hyper.evidence_tolerance,\n sample=\"rstagger\",\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=phase2.result.instance.galaxies.lens.mass,\n shear=phase2.result.instance.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=slam.pipeline_source_inversion.setup_source.pixelization_prior_model,\n regularization=slam.pipeline_source_inversion.setup_source.regularization_prior_model,\n hyper_galaxy=phase2.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase2.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase2.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase3 = phase3.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=False\n )\n\n \"\"\"\n Phase 4: fit the lens`s mass using the input pipeline `Pixelization` & `Regularization`, where we:\n\n 1) Fix the source `Inversion` parameters to the results of phase 3.\n 2) Set priors on the lens galaxy `MassProfile`'s using the results of phase 2.\n \"\"\"\n\n mass = slam.pipeline_source_parametric.setup_mass.mass_prior_model_with_updated_priors(\n index=-1, unfix_mass_centre=True\n )\n\n phase4 = al.PhaseImaging(\n search=af.DynestyStatic(\n name=\"phase[4]_mass[total]_source[fixed]\", n_live_points=50\n ),\n galaxies=dict(\n lens=al.GalaxyModel(\n redshift=slam.redshift_lens,\n mass=mass,\n shear=phase2.result.model.galaxies.lens.shear,\n ),\n source=al.GalaxyModel(\n redshift=slam.redshift_source,\n pixelization=phase3.result.instance.galaxies.source.pixelization,\n 
regularization=phase3.result.instance.galaxies.source.regularization,\n hyper_galaxy=phase3.result.hyper_combined.instance.optional.galaxies.source.hyper_galaxy,\n ),\n ),\n hyper_image_sky=phase3.result.hyper_combined.instance.optional.hyper_image_sky,\n hyper_background_noise=phase3.result.hyper_combined.instance.optional.hyper_background_noise,\n settings=settings,\n )\n\n phase4 = phase4.extend_with_multiple_hyper_phases(\n setup_hyper=slam.setup_hyper, include_inversion=True\n )\n\n return al.PipelineDataset(\n pipeline_name, path_prefix, phase1, phase2, phase3, phase4\n )", "def pipeline(self):\n\n transformers = []\n\n custom = self.CustomFeature()\n #transformers.append(('custom', custom))\n n_features = int(self.n_features/2)\n\n #kbest = SelectKBest(score_func=chi2, k=n_features)\n #transformers.append(('kbest', kbest))\n\n # pca = PCA(n_components=n_features, svd_solver='randomized', whiten=True)\n # transformers.append(('pca', pca))\n\n if self.definer.problem_type == 'classification':\n extraTC = SelectFromModel(ExtraTreesClassifier(criterion='entropy'))\n else:\n extraTC = SelectFromModel(ExtraTreesRegressor())\n\n transformers.append(('extraTC', extraTC))\n\n #scaler = StandardScaler()\n #transformers.append(('scaler', scaler))\n #binarizer = Binarizer()\n return FeatureUnion(transformers)", "def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5.\n # But we only blur about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n # Strengthen or weaken the contrast in each image.\n iaa.LinearContrast((0.75, 1.5)),\n # Add gaussian noise.\n # For 50% of all images, we sample the noise once per pixel.\n # For the other 50% of all images, we sample the noise per pixel AND\n # channel. 
This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Make some images brighter and some darker.\n # In 20% of all cases, we sample the multiplier once per channel,\n # which can end up changing the color of the images.\n iaa.Multiply((0.8, 1.2), per_channel=0.2),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale={\"x\": (0.80, 1.2), \"y\": (0.80, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-6, 6)\n )\n], random_order=True) # apply augmenters in random order\n\n return augmenter", "def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)", "def _create_chain(class_type_list, kwargs_list):\n chain = None # module with preprocessing chain\n modules = [] # list of modules (not connected via preprocessing)\n for i, pre_id in enumerate(class_type_list):\n chain = CModule.create(\n pre_id, preprocess=chain, **kwargs_list[i])\n modules.append(CModule.create(pre_id, **kwargs_list[i]))\n return chain, modules", "def build_categorical_pipeline(self) -> Pipeline:\n pipeline = Pipeline([\n ('extract_data', FunctionTransformer(self.get_categorical_features)),\n ('impute', SimpleImputer(missing_values=np.nan, strategy='median')),\n ('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False))\n ])\n return pipeline", "def attach(self,\n preprocessor_type: PreprocessorsTypes,\n parent_extractor_type: ExtractorsTypes = None) -> None:\n # Check what arguments are needed for the current preprocessor\n arguments = {}\n if preprocessor_type == PreprocessorsTypes.N_GRAMS:\n charset = Charset[self._preprocessors_config[\"ngrams\"]\n [\"valid_charset\"]]\n arguments = {\n \"n\":\n self._preprocessors_config[\"ngrams\"][\"n\"],\n \"to_lowercase\":\n self._preprocessors_config[\"ngrams\"][\"to_lowercase\"],\n \"valid_charset\":\n charset\n }\n elif preprocessor_type == PreprocessorsTypes.GROUP_COUNTER:\n if (parent_extractor_type in [\n ExtractorsTypes.STATIC_OPCODES,\n ExtractorsTypes.DYNAMIC_OPCODES\n ]):\n arguments = {\n \"categories\":\n self._extractors_config[\"opcodes\"][\"categories\"],\n \"allow_multiple_categories\":\n self._extractors_config[\"opcodes\"]\n [\"allow_multiple_categories\"],\n \"verbose\":\n self._extractors_config[\"opcodes\"][\"verbose\"],\n \"min_ignored_percent\":\n self._extractors_config[\"opcodes\"][\"min_ignored_percent\"]\n }\n elif (parent_extractor_type in [\n ExtractorsTypes.STATIC_APIS, ExtractorsTypes.DYNAMIC_APIS\n ]):\n arguments = {\n \"categories\":\n self._extractors_config[\"apis\"][\"categories\"],\n \"allow_multiple_categories\":\n self._extractors_config[\"apis\"]\n [\"allow_multiple_categories\"],\n \"verbose\":\n self._extractors_config[\"apis\"][\"verbose\"],\n \"min_ignored_percent\":\n self._extractors_config[\"apis\"][\"min_ignored_percent\"]\n }\n\n # Create the preprocessor\n preprocessor = None\n if preprocessor_type == PreprocessorsTypes.IDENTITY:\n preprocessor = Identity()\n elif preprocessor_type == PreprocessorsTypes.BINARIZER:\n preprocessor = Binarizer()\n elif preprocessor_type == PreprocessorsTypes.K_BINS_DISCRETIZER:\n preprocessor = KBinsDiscretizer()\n\n # Save this column in case of imputation needs\n self._columns_to_be_filled.append(len(self._preprocessors))\n\n elif preprocessor_type == PreprocessorsTypes.COUNTER:\n preprocessor = Counter()\n elif preprocessor_type == 
PreprocessorsTypes.COUNT_VECTORIZER:\n preprocessor = CountVectorizer()\n elif preprocessor_type == PreprocessorsTypes.N_GRAMS:\n preprocessor = NGrams(**arguments)\n elif preprocessor_type == PreprocessorsTypes.GROUP_COUNTER:\n preprocessor = GroupCounter(**arguments)\n elif preprocessor_type == PreprocessorsTypes.SAME_LENGTH_IMPUTER:\n preprocessor = SameLengthImputer()\n\n self._preprocessors.append(preprocessor)", "def cifar_augmentationFactory(augmentation):\n\n if augmentation == 'autoaugment':\n transform = [\n transforms.RandomCrop(32, 4),\n transforms.RandomHorizontalFlip(),\n AutoAugment(),\n Cutout()\n ]\n elif augmentation == 'original-cifar':\n transform = [\n transforms.RandomCrop(32, 4),\n transforms.RandomHorizontalFlip(),\n ]\n elif augmentation == 'noaugment':\n transform = []\n elif augmentation == 'glico':\n NotImplemented(f\"augment parameter {augmentation} not implemented\")\n else: \n NotImplemented(f\"augment parameter {augmentation} not implemented\")\n\n normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],\n std=[x/255.0 for x in [63.0, 62.1, 66.7]])\n\n # more precise cifar normalization thanks to:\n # https://github.com/tomgoldstein/loss-landscape/blob/master/cifar10/dataloader.py#L16\n\n return transforms.Compose(transform + [transforms.ToTensor(), normalize])", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def albu_builder(self, cfg):\n\n assert isinstance(cfg, dict) and 'type' in cfg\n args = cfg.copy()\n\n obj_type = args.pop('type')\n if general_ocr.is_str(obj_type):\n if albumentations is None:\n raise RuntimeError('albumentations is not installed')\n obj_cls = getattr(albumentations, obj_type)\n elif inspect.isclass(obj_type):\n obj_cls = obj_type\n else:\n raise TypeError(\n f'type must be a str or valid type, but got {type(obj_type)}')\n\n if 'transforms' in args:\n args['transforms'] = [\n self.albu_builder(transform)\n for transform in args['transforms']\n ]\n\n return obj_cls(**args)", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def _augment_pipeline_cfg(self):", "def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"", "def _replicate_class(self, **kwargs):\n return Posterior(**kwargs)", "def create_effect_classes(self):\r\n effects.polulate(self.effect_packages)", "def 
set_pipeline(self):\n feateng_steps = self.kwargs.get('feateng', ['runtime', 'country', 'language',\n 'genre', 'age', 'rated', 'released',\n 'writer', 'director', 'actors', 'production'])\n \n pipe_runtime_features = Pipeline([\n ('runtime', SimpleImputer(strategy='constant', fill_value=\"0\")),\n ('runtime_encoder', CleanRuntimeEncoder()),\n ('runtime_scaler', StandardScaler())])\n \n pipe_country_features = Pipeline([\n ('country', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('country_encoder', CleanCountryEncoder())])\n \n pipe_language_features = Pipeline([\n ('language', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('language_encoder', CleanLanguageEncoder())])\n \n pipe_genre_features = Pipeline([\n ('genre', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('genre_transformer', FunctionTransformer(np.reshape, kw_args={'newshape':-1})), \n ('genre_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_age_features = Pipeline([\n ('age', SimpleImputer(strategy='median')),\n ('age_enconder', CleanAgeEncoder())])\n \n pipe_rated_features = Pipeline([\n ('rated', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('rated_encoder', CleanRatedEncoder()),\n ('rated_ohe', OneHotEncoder(handle_unknown='ignore'))])\n \n pipe_released_features = Pipeline([\n ('released', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('released_encoder', CleanReleasedEncoder()),\n ('released_ohe', OneHotEncoder(handle_unknown='ignore'))])\n\n pipe_writer_features = Pipeline([\n ('writer', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('writer_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('writer_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_director_features = Pipeline([\n ('director', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('director_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('director_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_actors_features = Pipeline([\n ('actors', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('actors_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('actors_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_production_features = Pipeline([\n ('production', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('production_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('production_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n # define default feature engineering blocks\n feateng_blocks = [\n ('runtime', pipe_runtime_features, ['Runtime']),\n ('country', pipe_country_features, ['Country']),\n ('genre', pipe_genre_features, ['Genre']),\n ('age', pipe_age_features, ['Year']),\n ('rated', pipe_rated_features, ['Rated']),\n ('released', pipe_released_features, ['Released']),\n ('writer', pipe_writer_features, ['Writer']),\n ('director', pipe_director_features, ['Director']),\n ('actors', pipe_actors_features, ['Actors']),\n ('language', pipe_language_features, ['Language']),\n ('production', pipe_production_features, ['Production'])]\n \n # filter out some blocks according to input parameters\n for block in feateng_blocks:\n if block[0] not in feateng_steps:\n feateng_blocks.remove(block)\n\n features_encoder = 
ColumnTransformer(feateng_blocks,\n n_jobs=None,\n remainder='drop')\n\n self.pipeline = Pipeline(steps=[\n ('features', features_encoder),\n ('rgs', self.get_estimator())])", "def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline", "def __build_ml_pipeline(self, clf: MultiOutputClassifier) -> Pipeline:\r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n #('vect', CountVectorizer(tokenizer=clean_text)),\r\n #('tfidf', TfidfTransformer())\r\n ('tfidf', TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.25, \r\n ngram_range=(1,2)))\r\n ])),\r\n \r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n \r\n ('clf', clf)\r\n ])\r\n \r\n return pipeline", "def _get_augmentor(self, augmentor_type: str, params: Dict):\n params[\"rng\"] = self._rng\n if augmentor_type not in _AUGMENT_TYPE_ALIAS:\n raise ValueError(\"Unknown augumentor type [%s]\" % augmentor_type)\n return _AUGMENT_TYPE_ALIAS[augmentor_type](**params)", "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )", "def build_train_augmentor(cfg: CfgNode, keep_uncropped: bool = False, keep_non_smoothed: bool = False):\n aug_list = []\n \n names = cfg.AUGMENTOR.ADDITIONAL_TARGETS_NAME\n types = cfg.AUGMENTOR.ADDITIONAL_TARGETS_TYPE\n if names is None:\n additional_targets = None\n else:\n assert len(names) == len(types)\n additional_targets = {}\n for i in range(len(names)):\n additional_targets[names[i]] = types[i]\n\n #1. rotate\n if cfg.AUGMENTOR.ROTATE.ENABLED:\n aug_list.append(\n Rotate(rot90=cfg.AUGMENTOR.ROTATE.ROT90,\n p=cfg.AUGMENTOR.ROTATE.P,\n additional_targets=additional_targets))\n\n #2. rescale\n if cfg.AUGMENTOR.RESCALE.ENABLED:\n aug_list.append(\n Rescale(p=cfg.AUGMENTOR.RESCALE.P,\n additional_targets=additional_targets))\n\n #3. flip\n if cfg.AUGMENTOR.FLIP.ENABLED:\n aug_list.append(\n Flip(do_ztrans=cfg.AUGMENTOR.FLIP.DO_ZTRANS,\n p=cfg.AUGMENTOR.FLIP.P, \n additional_targets=additional_targets))\n\n #4. 
elastic\n if cfg.AUGMENTOR.ELASTIC.ENABLED:\n aug_list.append(\n Elastic(alpha=cfg.AUGMENTOR.ELASTIC.ALPHA, \n sigma=cfg.AUGMENTOR.ELASTIC.SIGMA, \n p=cfg.AUGMENTOR.ELASTIC.P,\n additional_targets=additional_targets))\n\n #5. grayscale\n if cfg.AUGMENTOR.GRAYSCALE.ENABLED:\n aug_list.append(\n Grayscale(p=cfg.AUGMENTOR.GRAYSCALE.P,\n additional_targets=additional_targets))\n\n #6. missingparts\n if cfg.AUGMENTOR.MISSINGPARTS.ENABLED:\n aug_list.append(\n MissingParts(iterations=cfg.AUGMENTOR.MISSINGPARTS.ITER,\n p=cfg.AUGMENTOR.MISSINGPARTS.P,\n additional_targets=additional_targets))\n\n #7. missingsection\n if cfg.AUGMENTOR.MISSINGSECTION.ENABLED and not cfg.DATASET.DO_2D:\n aug_list.append(\n MissingSection(\n num_sections=cfg.AUGMENTOR.MISSINGSECTION.NUM_SECTION,\n p=cfg.AUGMENTOR.MISSINGSECTION.P, \n additional_targets=additional_targets))\n\n #8. misalignment\n if cfg.AUGMENTOR.MISALIGNMENT.ENABLED and not cfg.DATASET.DO_2D:\n aug_list.append(\n MisAlignment( \n displacement=cfg.AUGMENTOR.MISALIGNMENT.DISPLACEMENT,\n rotate_ratio=cfg.AUGMENTOR.MISALIGNMENT.ROTATE_RATIO,\n p=cfg.AUGMENTOR.MISALIGNMENT.P,\n additional_targets=additional_targets))\n\n #9. motion-blur\n if cfg.AUGMENTOR.MOTIONBLUR.ENABLED:\n aug_list.append(\n MotionBlur( \n sections=cfg.AUGMENTOR.MOTIONBLUR.SECTIONS, \n kernel_size=cfg.AUGMENTOR.MOTIONBLUR.KERNEL_SIZE,\n p=cfg.AUGMENTOR.MOTIONBLUR.P,\n additional_targets=additional_targets))\n\n #10. cut-blur\n if cfg.AUGMENTOR.CUTBLUR.ENABLED:\n aug_list.append(\n CutBlur(length_ratio=cfg.AUGMENTOR.CUTBLUR.LENGTH_RATIO, \n down_ratio_min=cfg.AUGMENTOR.CUTBLUR.DOWN_RATIO_MIN,\n down_ratio_max=cfg.AUGMENTOR.CUTBLUR.DOWN_RATIO_MAX,\n downsample_z=cfg.AUGMENTOR.CUTBLUR.DOWNSAMPLE_Z,\n p=cfg.AUGMENTOR.CUTBLUR.P,\n additional_targets=additional_targets))\n\n #11. 
cut-noise\n if cfg.AUGMENTOR.CUTNOISE.ENABLED:\n aug_list.append(\n CutNoise(length_ratio=cfg.AUGMENTOR.CUTNOISE.LENGTH_RATIO, \n scale=cfg.AUGMENTOR.CUTNOISE.SCALE,\n p=cfg.AUGMENTOR.CUTNOISE.P, \n additional_targets=additional_targets))\n\n # compose the list of transforms\n augmentor = Compose(transforms=aug_list, \n input_size=cfg.MODEL.INPUT_SIZE, \n smooth=cfg.AUGMENTOR.SMOOTH,\n keep_uncropped=keep_uncropped, \n keep_non_smoothed=keep_non_smoothed,\n additional_targets=additional_targets)\n\n return augmentor", "def set_augmentor():\n config = {'blur': {'values': ('gaussian', 0.7, 1.0), 'prob': 0.3},\n 'brightness': {'values': (0.6, 1.0), 'prob': 0.1},\n 'brightness1': {'values': (1.0, 1.5), 'prob': 0.1},\n 'flip': {'values': ('hor',), 'prob': 0.5},\n 'grid_mask': {'values': (0, 0.2, 0, 0.2, 0.01, 0.1, 0.01, 0.1, 0.1, 0.2, 0.1, 0.2), 'prob': 0.4},\n 'illumination': {'values': ('blob_negative', 0.1, 0.2, 100, 150), 'prob': 0.2},\n 'noise': {'values': (2, 10), 'use_gray_noise': True, 'prob': 1},\n 'rotate': {'values': (-45, 45), 'prob': 0.4},\n 'translate': {'values': ('RANDOM', -0.2, 0.2), 'prob': 0.2, 'use_replication': True},\n 'zoom': {'values': (0.5, 1.5), 'prob': 0.9, 'use_replication': True}}\n\n augmentor = Augmentor(config, no_repetition=True)\n\n return augmentor", "def __new__(cls, *args, **kwargs):\n if cls.__name__ != 'Codec':\n return super().__new__(cls)\n if kwargs.get('type'):\n t_cls = ClassFactory.get_cls(ClassType.CODEC, kwargs.pop('type'))\n else:\n t_cls = ClassFactory.get_cls(ClassType.CODEC)\n return super().__new__(t_cls)", "def build(self, input_image, num_class):\n x = build_resnet(101)\n # add classifier\n x = Conv2D(num_class, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)", "def get_augmentation_sequence():\n # Macro to apply something with 50% chance\n sometimes = lambda aug: iaa.Sometimes(0.5, aug) # 50%\n rarely = lambda aug: iaa.Sometimes(0.1, aug) # 10%\n\n # Augmentation applied to every image\n # Augmentors sampled one value per channel\n aug_sequence = iaa.Sequential(\n [\n # apply the following augmenters to most images\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n iaa.Flipud(0.5), # vertically flip 50% of all images\n\n # crop images by -0.25% to 0.25% of their height/width\n # positive values crop the image, negative pad\n sometimes(iaa.CropAndPad(\n percent=(-0.25, 0.25),\n pad_mode=['constant', 'edge'], # pad with constant value of the edge value\n pad_cval=(0, 0) # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 0), # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n mode='constant' # ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n # rarely(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),\n iaa.GaussianBlur((0, 3.0)),\n iaa.Add((-10, 10), per_channel=0.7), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)),\n # 
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))\n ],\n random_order=True\n )\n\n return aug_sequence", "def __init__(self, phase, hyper_phase_classes: (type,) = tuple()):\n super().__init__(phase=phase, hyper_name=\"hyper_combined\")\n self.hyper_phases = list(map(lambda cls: cls(phase), hyper_phase_classes))", "def _create_pipeline(self):\n # Add boxes to the pipeline\n if self.box_tag not in self.proto:\n raise Exception(\n \"Box defined in '{0}' has no '<{1}>' declared.\".format(\n self._xmlfile, self.box_tag))\n switch_descs = []\n for box_item in self.proto[self.box_tag]:\n for box_type in box_item.keys():\n\n # Create processing boxes (can be iterative)\n if box_type == self.box_names[0]:\n for boxdesc in box_item[box_type]:\n self._add_box(boxdesc)\n # Create switch boxes\n elif box_type == self.box_names[1]:\n for switchdesc in box_item[box_type]:\n switch_descs.append(switchdesc)\n # Unrecognize box type\n else:\n raise ValueError(\n \"Box structure: '{0}' defined in '{1}' is not \"\n \"supported. Supported boxes are '{2}'.\".format(\n json.dumps(box_item, indent=2), self._xmlfile,\n self.box_names))\n\n # Add switch to the pipeline\n for switchdesc in switch_descs:\n self._add_switch(switchdesc)\n\n # Add links between boxes\n if self.link_tag not in self.proto:\n raise Exception(\n \"Box defined in '{0}' has no '<{1}>' declared.\".format(\n self._xmlfile, self.link_tag))\n for link_item in self.proto[self.link_tag]:\n inner_tag = self.link_tag[:-1]\n for linkdesc in link_item[inner_tag]:\n if is_io_control(linkdesc[self.link_attributes[0]]):\n linktype = \"input\"\n elif is_io_control(linkdesc[self.link_attributes[1]]):\n linktype = \"output\"\n else:\n linktype = \"link\"\n self._add_link(linkdesc, linktype)", "def _build_image_processing(self, shift_ratio=0):\n with tf.device(self.cpu_device):\n subset = 'train'\n image_producer_ops = []\n image_producer_stages = []\n images_splits, labels_splits = self.image_preprocessor.minibatch(\n self.dataset,\n subset=subset,\n use_datasets=self.params.use_datasets,\n cache_data=self.params.cache_data,\n shift_ratio=shift_ratio)\n images_shape = images_splits[0].get_shape()\n labels_shape = labels_splits[0].get_shape()\n for device_num in range(len(self.devices)):\n image_producer_stages.append(\n data_flow_ops.StagingArea(\n [images_splits[0].dtype, labels_splits[0].dtype],\n shapes=[images_shape, labels_shape]))\n return (image_producer_ops, image_producer_stages)", "def __init__(self, pipeline, **kwargs):\n self.pipeline = pipeline\n\n self.batch_size = pipeline.batch_size\n self.X_train = None\n self.y_train = None\n self.X_valid = None\n self.y_valid = None\n\n # Child classes should have init signature:\n # (self, batch_size, **kwargs), should call this __init__ and then\n # define self.X_train, self.y_train, self.X_valid, and self.y_valid", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def sample_custom_augmentations_constructor(num_features: int, window_radius: int) -> albumentations.Compose:\n max_kernel = int(round(0.1 * window_radius))\n max_hole_size = int(round(0.1 * window_radius))\n additional_targets = [ADDITIONAL_TARGETS_KEY.format(idx) for idx in range(1, num_features)]\n\n return albumentations.Compose(\n [\n # The augmentations assume an image is RGB between 0 and 1\n albumentations.ToFloat(max_value=255, always_apply=True, p=1.0),\n # These augmentations should be order independent, toss 'em up front\n albumentations.Flip(p=0.5),\n 
albumentations.Transpose(p=0.5),\n albumentations.Rotate(limit=90, p=0.5),\n # Fogging as it's quite similar to top-down cloud effects, seems reasonable to apply up front\n albumentations.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.8, alpha_coef=0.08, p=0.5),\n # Color modifications\n albumentations.OneOf(\n [\n albumentations.RandomBrightnessContrast(\n brightness_limit=0.2, contrast_limit=0.6, brightness_by_max=True, p=1.0\n ),\n albumentations.RGBShift(r_shift_limit=0.2, g_shift_limit=0.2, b_shift_limit=0.2, p=1.0),\n ],\n p=0.25,\n ),\n # Distortions\n albumentations.OneOf(\n [\n albumentations.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, p=1.0),\n albumentations.GridDistortion(num_steps=5, distort_limit=0.4, p=1.0),\n albumentations.OpticalDistortion(distort_limit=0.1, shift_limit=0.1, p=1.0),\n ],\n p=0.25,\n ),\n albumentations.GaussianBlur(blur_limit=max_kernel, p=0.25),\n # Noise\n albumentations.OneOf(\n [\n albumentations.CoarseDropout(\n max_holes=8, max_height=max_hole_size, max_width=max_hole_size, fill_value=np.nan, p=1.0\n ),\n albumentations.GaussNoise(var_limit=0.05, mean=0, p=1.0),\n ],\n p=0.25,\n ),\n # Scaling, adding last so that other augmentations are applied at a consistent resolution\n albumentations.RandomScale(scale_limit=0.05, p=0.25),\n # Augmentations may not return images of the same size, images can be both smaller and larger than expected, so\n # these two augmentations are added to keep things consistent\n albumentations.PadIfNeeded(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),\n albumentations.CenterCrop(2 * window_radius, 2 * window_radius, always_apply=True, p=1.0),\n # Return the data to its original scale\n albumentations.FromFloat(max_value=255, always_apply=True, p=1.0),\n ],\n p=1.0,\n additional_targets={target: \"image\" for target in additional_targets},\n )", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def create():\n with torch.set_grad_enabled(False):\n model = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"vgg11\", pretrained=True).eval()\n\n with_cuda = torch.cuda.is_available()\n if with_cuda:\n model.to(\"cuda\")\n else:\n logging.warn(\"Running on CPU, no CUDA detected.\")\n\n def call(features):\n images = features[\"image\"].numpy()\n # Normalize according to the documentation. 
Note that the pro-processing\n # will already have the range normalized to [0, 1].\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n images_normalized = (images - mean) / std\n # Reshape from [batch, h, w, c] -> [batch, c, h, w]\n images_normalized_bchw = np.transpose(\n images_normalized, [0, 3, 1, 2]).astype(np.float32).copy()\n with torch.no_grad():\n images_torch = torch.from_numpy(images_normalized_bchw)\n if with_cuda:\n images_torch = images_torch.to(\"cuda\")\n logits = model(images_torch)\n return torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()\n\n preprocess_config = \"resize_small(256)|central_crop(224)|value_range(0,1)\"\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n return call, preprocess_fn", "def __init__(\n self,\n num_classes: int,\n decoder_input_levels: Optional[List[str]] = None,\n decoder_stage_merge_styles: Optional[List[str]] = None,\n decoder_filters: Optional[List[int]] = None,\n decoder_projected_filters: Optional[List[int]] = None,\n encoder_end_level: Optional[int] = 4,\n use_additional_classifier_layer: bool = False,\n classifier_kernel_size: int = 1,\n activation: str = 'relu',\n use_sync_bn: bool = False,\n batchnorm_momentum: float = 0.99,\n batchnorm_epsilon: float = 0.001,\n kernel_initializer: str = 'GlorotUniform',\n kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,\n interpolation: str = 'bilinear',\n bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,\n **kwargs):\n super().__init__(\n num_classes=num_classes,\n decoder_input_levels=decoder_input_levels,\n decoder_stage_merge_styles=decoder_stage_merge_styles,\n decoder_filters=decoder_filters,\n decoder_projected_filters=decoder_projected_filters,\n encoder_end_level=encoder_end_level,\n use_additional_classifier_layer=use_additional_classifier_layer,\n classifier_kernel_size=classifier_kernel_size,\n activation=activation,\n use_sync_bn=use_sync_bn,\n batchnorm_momentum=batchnorm_momentum,\n batchnorm_epsilon=batchnorm_epsilon,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n interpolation=interpolation,\n bias_regularizer=bias_regularizer,\n **kwargs)\n\n # Assuming decoder_input_levels and the following lists are sorted and\n # follow the same order.\n if decoder_input_levels is None:\n decoder_input_levels = ['3', '2']\n if decoder_stage_merge_styles is None:\n decoder_stage_merge_styles = ['concat_merge', 'sum_merge']\n if decoder_filters is None:\n decoder_filters = [64, 64]\n if decoder_projected_filters is None:\n decoder_projected_filters = [32, 32]\n self._decoder_input_levels = decoder_input_levels\n self._decoder_stage_merge_styles = decoder_stage_merge_styles\n self._decoder_filters = decoder_filters\n self._decoder_projected_filters = decoder_projected_filters\n if (len(decoder_input_levels) != len(decoder_stage_merge_styles) or\n len(decoder_input_levels) != len(decoder_filters) or\n len(decoder_input_levels) != len(decoder_projected_filters)):\n raise ValueError('The number of Decoder inputs and settings must match.')\n self._merge_stages = []\n for (stage_merge_style, decoder_filter,\n decoder_projected_filter) in zip(decoder_stage_merge_styles,\n decoder_filters,\n decoder_projected_filters):\n if stage_merge_style == 'concat_merge':\n concat_merge_stage = nn_blocks.DecoderConcatMergeBlockQuantized(\n decoder_internal_depth=decoder_filter,\n decoder_projected_depth=decoder_projected_filter,\n output_size=(0, 0),\n 
use_sync_bn=use_sync_bn,\n batchnorm_momentum=batchnorm_momentum,\n batchnorm_epsilon=batchnorm_epsilon,\n activation=activation,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n interpolation=interpolation)\n self._merge_stages.append(concat_merge_stage)\n elif stage_merge_style == 'sum_merge':\n sum_merge_stage = nn_blocks.DecoderSumMergeBlockQuantized(\n decoder_projected_depth=decoder_projected_filter,\n output_size=(0, 0),\n use_sync_bn=use_sync_bn,\n batchnorm_momentum=batchnorm_momentum,\n batchnorm_epsilon=batchnorm_epsilon,\n activation=activation,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n interpolation=interpolation)\n self._merge_stages.append(sum_merge_stage)\n else:\n raise ValueError(\n 'A stage merge style in MOSAIC Decoder can only be concat_merge '\n 'or sum_merge.')\n\n # Concat merge or sum merge does not require an additional classifer layer\n # unless the final decoder projected filter does not match num_classes.\n final_decoder_projected_filter = decoder_projected_filters[-1]\n if (final_decoder_projected_filter != num_classes and\n not use_additional_classifier_layer):\n raise ValueError('Additional classifier layer is needed if final decoder '\n 'projected filters does not match num_classes!')\n self._use_additional_classifier_layer = use_additional_classifier_layer\n if use_additional_classifier_layer:\n # This additional classification layer uses different kernel\n # initializers and bias compared to earlier blocks.\n self._pixelwise_classifier = helper.Conv2DQuantized(\n name='pixelwise_classifier',\n filters=num_classes,\n kernel_size=classifier_kernel_size,\n padding='same',\n bias_initializer=tf.zeros_initializer(),\n kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activation=helper.NoOpActivation(),\n use_bias=True)\n\n self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(\n tf_utils.get_activation(activation, use_keras_layer=True),\n configs.Default8BitActivationQuantizeConfig())\n\n self._config_dict = {\n 'num_classes': num_classes,\n 'decoder_input_levels': decoder_input_levels,\n 'decoder_stage_merge_styles': decoder_stage_merge_styles,\n 'decoder_filters': decoder_filters,\n 'decoder_projected_filters': decoder_projected_filters,\n 'encoder_end_level': encoder_end_level,\n 'use_additional_classifier_layer': use_additional_classifier_layer,\n 'classifier_kernel_size': classifier_kernel_size,\n 'activation': activation,\n 'use_sync_bn': use_sync_bn,\n 'batchnorm_momentum': batchnorm_momentum,\n 'batchnorm_epsilon': batchnorm_epsilon,\n 'kernel_initializer': kernel_initializer,\n 'kernel_regularizer': kernel_regularizer,\n 'interpolation': interpolation,\n 'bias_regularizer': bias_regularizer\n }", "def mixin(base, *mixs):\n def mixin__internal(base, addition):\n \"\"\"Internal closure\"\"\"\n class NewClass(base, addition):\n \"\"\"Created class mixings the params\"\"\"\n def __init__(self, *args):\n addition.__init__(self, *args)\n base.__init__(self, *args)\n return NewClass\n\n new_class = base\n for mix in mixs:\n new_class = mixin__internal(new_class, mix)\n return new_class", "def setup_pipeline(self, estimator=None, biclass=True):\n if biclass:\n self.pipeline = Pipeline(estimator)\n else:\n self.pipeline = OneVsOneClassifier(Pipeline(estimator))", "def albu_builder(self, cfg):\n assert isinstance(cfg, dict) and 'type' in cfg\n args = cfg.copy()\n obj_type = 
args.pop('type')\n if mmcv.is_str(obj_type):\n obj_cls = getattr(albumentations, obj_type)\n else:\n raise TypeError(f'type must be str, but got {type(obj_type)}')\n if 'transforms' in args:\n args['transforms'] = [\n self.albu_builder(transform)\n for transform in args['transforms']\n ]\n return obj_cls(**args)", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def _build_preprocessing(self):\n\n # For now, do nothing\n pass", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def customize(cls, **kwargs):\n\n store_as = apply_pssm(kwargs.get('store_as', None), PSSM_VALUES)\n if store_as is not None:\n kwargs['store_as'] = store_as\n\n cls_name, cls_bases, cls_dict = cls._s_customize(cls, **kwargs)\n cls_dict['__module__'] = cls.__module__\n\n retval = type(cls_name, cls_bases, cls_dict)\n retval._type_info = TypeInfo(cls._type_info)\n retval.__type_name__ = cls.__type_name__\n retval.__namespace__ = cls.__namespace__\n retval.Attributes.parent_variant = cls\n\n dca = retval.Attributes._delayed_child_attrs\n if retval.Attributes._delayed_child_attrs is None:\n retval.Attributes._delayed_child_attrs = {}\n else:\n retval.Attributes._delayed_child_attrs = dict(dca.items())\n\n child_attrs = kwargs.get('child_attrs', None)\n if child_attrs is not None:\n ti = retval._type_info\n for k, v in child_attrs.items():\n if k in ti:\n ti[k] = ti[k].customize(**v)\n else:\n retval.Attributes._delayed_child_attrs[k] = v\n\n tn = kwargs.get(\"type_name\", None)\n if tn is not None:\n retval.__type_name__ = tn\n\n ns = kwargs.get(\"namespace\", None)\n if ns is not None:\n retval.__namespace__ = ns\n\n if not cls is ComplexModel:\n cls._process_variants(retval)\n\n # we could be smarter, but customize is supposed to be called only while\n # daemon initialization, so it's not really necessary.\n ComplexModelBase.get_subclasses.memo.clear()\n ComplexModelBase.get_flat_type_info.memo.clear()\n ComplexModelBase.get_simple_type_info.memo.clear()\n\n return retval", "def create_pipeline(numeric_attribs, categoric_attribs, remove_ratios, remove_needs):\n # get a list of ratio and need column titles, independently, from numeric_attribs.\n ratio_attribs = []\n need_attribs = []\n for attrib in numeric_attribs:\n if attrib.endswith('_chance'):\n ratio_attribs.append(attrib)\n elif attrib.endswith('_need'):\n need_attribs.append(attrib)\n\n cats = [['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']] * len(categoric_attribs)\n\n transformation_pipeline = ColumnTransformer([\n ('numeric', NumericColumnsRemover(remove_ratios=remove_ratios, remove_needs=remove_needs, ratios=ratio_attribs,\n needs=need_attribs), numeric_attribs),\n ('categoric', OneHotEncoder(categories=cats), categoric_attribs)\n ])\n\n return transformation_pipeline", "def _create_baselining_processor(self):\n\n baselining_processor = SageMakerClarifyProcessor(\n role=self.role,\n instance_count=self.instance_count,\n instance_type=self.instance_type,\n volume_size_in_gb=self.volume_size_in_gb,\n volume_kms_key=self.volume_kms_key,\n output_kms_key=self.output_kms_key,\n max_runtime_in_seconds=self.max_runtime_in_seconds,\n sagemaker_session=self.sagemaker_session,\n env=self.env,\n 
tags=self.tags,\n network_config=self.network_config,\n )\n baselining_processor.image_uri = self.image_uri\n baselining_processor.base_job_name = self.base_job_name\n return baselining_processor", "def get_validation_augmentation():\r\n test_transform = [\r\n albu.RandomCrop(height=256, width=256, always_apply=True),\r\n ]\r\n return albu.Compose(test_transform)", "def build_svm_pipeline():\n svm_pipeline = None\n\n svm_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', SGDClassifier()),\n ])\n\n return svm_pipeline", "def get_SrcClass(args):\n return Reactome(args)", "def build_classify(self, num_classes: int = 10) -> KM.Model:\n input_tensor = KL.Input(shape=(32, 32, 3))\n # if self.train_mode == \"classifier\":\n # # Use the pretrained encoder for classifier only training\n # self.encoder_model = KM.load_model(\"ae_model/ae_model.h5\").get_layer(\n # \"encoder\"\n # )\n if self.train_mode != \"both\":\n # build the encoder model\n self.encoder_model = self.encoder(\n features=self.encoder_features, name=\"encoder\"\n )\n encoded_features = self.encoder_model(input_tensor)\n contrastive_features = KL.Lambda(lambda x: K.mean(x, [1, 2]), name=\"contrast\")(\n encoded_features[-1]\n )\n # Calculate class probs from multiple latent representations\n probs = classifier_block(encoded_features, num_classes=num_classes)\n\n return KM.Model(\n inputs=input_tensor,\n outputs=[probs, contrastive_features],\n name=\"classifier\",\n )", "def build_combined(self, num_classes: int = 10) -> KM.Model:\n\n # For decoder number of features in opposite order of encoder\n decoder_features = self.encoder_features.copy()\n decoder_features.reverse()\n\n # build the encoder model\n self.encoder_model = self.encoder(\n features=self.encoder_features, name=\"encoder\"\n )\n\n # build the decoder model\n decoder = self.decoder(features=decoder_features, name=\"decoder\")\n input_tensor = KL.Input(\n shape=(32, 32, 3)\n ) # shape of images for cifar10 dataset\n\n # Encode the images\n encoded_features = self.encoder_model(input_tensor)\n # Decode the image from the final layer features of Auto-encoder\n decoded = decoder(encoded_features[-1])\n contrastive_features = KL.Lambda(lambda x: K.mean(x, [1, 2]), name=\"contrast\")(\n encoded_features[-1]\n )\n # Calculate class probs from multiple latent representations\n probs = classifier_block(encoded_features, num_classes=num_classes)\n\n return KM.Model(\n inputs=input_tensor,\n outputs=[decoded, probs, contrastive_features],\n name=\"combined\",\n )", "def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline", "def __init__(\n self,\n *,\n demonstrations: base.AnyTransitions,\n demo_batch_size: int,\n venv: vec_env.VecEnv,\n gen_algo: base_class.BaseAlgorithm,\n reward_net: reward_nets.RewardNet,\n **kwargs,\n ):\n super().__init__(\n demonstrations=demonstrations,\n demo_batch_size=demo_batch_size,\n venv=venv,\n gen_algo=gen_algo,\n reward_net=reward_net,\n **kwargs,\n )\n # AIRL needs a policy from STOCHASTIC_POLICIES to compute discriminator output.\n if not isinstance(self.gen_algo.policy, STOCHASTIC_POLICIES):\n raise TypeError(\n \"AIRL needs a stochastic policy to compute the discriminator output.\",\n )", "def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return 
self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline", "def make_runtime_class(runtime_class=None, mixins=None):\n if runtime_class is None:\n # The import is put here because of circular depencencies\n # between viff.runtime and viff.passive.\n from viff.passive import PassiveRuntime\n runtime_class = PassiveRuntime\n if mixins is None:\n return runtime_class\n else:\n # The order is important: we want the most specific classes to\n # go first so that they can override methods from later\n # classes. We must also include at least one new-style class\n # in bases -- we include it last to avoid overriding __init__\n # from the other base classes.\n bases = tuple(mixins) + (runtime_class, object)\n return type(\"ExtendedRuntime\", bases, {})", "def build_logistic_regr():\n logistic_pipeline = None\n ##### Write code here #######\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression())\n ])\n ##### End of your work ######\n return logistic_pipeline", "def create_effect_instances(self):\r\n raise NotImplementedError()", "def get_combiner_class(combiner_type: str):\n return get_from_registry(combiner_type, combiner_registry)", "def build(self):\n input_shape_img = (None, None, 3)\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n \n output_region_proposal = self.region_proposal_net(shared_layers, num_anchors)\n output_classifier = self.classifier(shared_layers,\n self.cnn_model.classifier_layers, \n roi_input, self.C.num_roi, \n num_class=len(self.class_count), trainable=True)\n \n self.model_region_proposal = Model(img_input, output_region_proposal[:2])\n self.model_classifier = Model([img_input, roi_input], output_classifier)\n self.model_all = Model([img_input, roi_input], output_region_proposal[:2] + output_classifier)\n\n optimizer = Adam(lr=1e-5)\n self.model_region_proposal.compile(optimizer=optimizer, \n loss=[losses.rpn_loss_cls(num_anchors), \n losses.rpn_loss_regr(num_anchors)])\n self.model_classifier.compile(optimizer=optimizer, \n loss=[losses.class_loss_cls, \n losses.class_loss_regr(len(self.class_count)-1)], \n metrics={'dense_class_{}'.format(len(self.class_count)): 'accuracy'})\n self.model_all.compile(optimizer='sgd', loss='mae')\n\n # print(self.model_all.summary())\n plot_model(self.model_region_proposal, show_shapes=True, to_file='./frcnn/images/region_proposal.png')\n plot_model(self.model_classifier, show_shapes=True, to_file='./frcnn/images/classifier.png')\n plot_model(self.model_all, show_shapes=True, to_file='./frcnn/images/model_all.png')", "def mix_labellers(labellers, class_name=\"MixtureLabeller\"):\n return type(class_name, labellers, {})", "def createAugmentor(self):\n rotation_range = [-15, 15]\n shear_range = [-0.3 * 180 / math.pi, 0.3 * 180 / math.pi]\n zoom_range = [0.8, 2]\n shift_range = [5, 5]\n\n return ImageAugmentor(0.5, shear_range, 
rotation_range, shift_range, zoom_range)", "def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p", "def setup(self, args):\n for key, ags in self._mapp.items():\n arg = args.get(key)\n\n if arg: #if exist, turn aggregator actived and create a new instance a new aggregator class\n self.active = True\n return ags(arg)", "def get_image_preprocessor(self):\n image_size = self.model.get_image_size()\n input_data_type = get_data_type(self.params)\n\n shift_ratio = 0\n\n processor_class = self.dataset.get_image_preprocessor()\n assert processor_class\n return processor_class(\n image_size,\n image_size,\n self.batch_size * self.batch_group_size,\n len(self.devices) * self.batch_group_size,\n dtype=input_data_type,\n train=(not self.params.eval),\n distortions=self.params.distortions,\n resize_method=self.resize_method,\n shift_ratio=shift_ratio,\n summary_verbosity=self.params.summary_verbosity,\n distort_color_in_yiq=self.params.distort_color_in_yiq,\n fuse_decode_and_crop=self.params.fuse_decode_and_crop)", "def build(*args, **kwargs):\n\n\treturn Spider(*args, **kwargs)", "def build(self):\n # add ops for generator (A->B) to graph\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,\n init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='G')\n\n if self.training:\n # add ops for other generator (B->A) and discriminators to graph\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')\n\n # generate fake images\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n\n # generate reconstructed images\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n\n # generate identity mapping images\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))\n\n # add loss ops to graph\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,\n reconstructedB, identA, identB)\n\n # add optimizer ops to graph\n optimizers = 
self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else: # only need generator from A->B during testing\n fakeB = self.G(self.realA)\n return fakeB", "def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError", "def build(classes):\n # data input\n data = mx.sym.Variable(\"data\")\n\n # Block #1: first CONV => RELU => POOL layer set\n conv1_1 = mx.sym.Convolution(data=data, kernel=(11, 11), stride=(4, 4), num_filter=96)\n act1_1 = mx.sym.LeakyReLU(data=conv1_1, act_type=\"elu\")\n bn1_1 = mx.sym.BatchNorm(data=act1_1)\n pool1 = mx.sym.Pooling(data=bn1_1, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do1 = mx.sym.Dropout(data=pool1, p=0.25)\n\n # Block #2: second CONV => RELU => POOL layer set\n conv2_1 = mx.sym.Convolution(data=do1, kernel=(5, 5), pad=(2, 2), num_filter=256)\n act2_1 = mx.sym.LeakyReLU(data=conv2_1, act_type=\"elu\")\n bn2_1 = mx.sym.BatchNorm(data=act2_1)\n pool2 = mx.sym.Pooling(data=bn2_1, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do2 = mx.sym.Dropout(data=pool2, p=0.25)\n\n # Block #3: (CONV => RELU) * 3 => POOL\n conv3_1 = mx.sym.Convolution(data=do2, kernel=(3, 3), pad=(1, 1), num_filter=384)\n act3_1 = mx.sym.LeakyReLU(data=conv3_1, act_type=\"elu\")\n bn3_1 = mx.sym.BatchNorm(data=act3_1)\n conv3_2 = mx.sym.Convolution(data=bn3_1, kernel=(3, 3), pad=(1, 1), num_filter=384)\n act3_2 = mx.sym.LeakyReLU(data=conv3_2, act_type=\"elu\")\n bn3_2 = mx.sym.BatchNorm(data=act3_2)\n conv3_3 = mx.sym.Convolution(data=bn3_2, kernel=(3, 3), pad=(1, 1), num_filter=256)\n act3_3 = mx.sym.LeakyReLU(data=conv3_3, act_type=\"elu\")\n bn3_3 = mx.sym.BatchNorm(data=act3_3)\n pool3 = mx.sym.Pooling(data=bn3_3, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do3 = mx.sym.Dropout(data=pool3, p=0.25)\n\n # Block #4: first set of FC => RELU layers\n flatten = mx.sym.Flatten(data=do3)\n fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=4096)\n act4_1 = mx.sym.LeakyReLU(data=fc1, act_type=\"elu\")\n bn4_1 = mx.sym.BatchNorm(data=act4_1)\n do4 = mx.sym.Dropout(data=bn4_1, p=0.5)\n\n # Block #5: second set of FC => RELU layers\n fc2 = mx.sym.FullyConnected(data=do4, num_hidden=4096)\n act5_1 = mx.sym.LeakyReLU(data=fc2, act_type=\"elu\")\n bn5_1 = mx.sym.BatchNorm(data=act5_1)\n do5 = mx.sym.Dropout(data=bn5_1, p=0.5)\n\n # softmax classifier\n fc3 = mx.sym.FullyConnected(data=do5, num_hidden=classes)\n model = mx.sym.SoftmaxOutput(data=fc3, name=\"softmax\")\n\n # return the network architecture\n return model", "def build_model(self):\n pipeline = Pipeline([\n ('vec', CountVectorizer(tokenizer=self.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n self.model = pipeline\n return pipeline", "def preparePipelines(self):\n\n # Construct the differnent states making up the pipeline\n\n # Input assembly state describes how primitives are assembled\n # This pipeline will assemble vertex data as a triangle lists (though we only use one triangle)\n inputAssemblyState = vk.VkPipelineInputAssemblyStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,\n topology = vk.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST\n )\n # Rasterization state\n rasterizationState = vk.VkPipelineRasterizationStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,\n polygonMode = vk.VK_POLYGON_MODE_FILL,\n cullMode = vk.VK_CULL_MODE_NONE,\n frontFace = 
vk.VK_FRONT_FACE_COUNTER_CLOCKWISE,\n depthClampEnable = vk.VK_FALSE,\n rasterizerDiscardEnable = vk.VK_FALSE,\n depthBiasEnable = vk.VK_FALSE,\n lineWidth = 1.0\n )\n # Color blend state describes how blend factors are calculated (if used)\n # We need one blend attachment state per color attachment (even if blending is not used\n blendAttachmentState = vk.VkPipelineColorBlendAttachmentState(\n colorWriteMask = 0xf,\n blendEnable = vk.VK_FALSE\n )\n colorBlendState = vk.VkPipelineColorBlendStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,\n attachmentCount = 1,\n pAttachments = [blendAttachmentState]\n )\n # Viewport state sets the number of viewports and scissor used in this pipeline\n # Note: This is actually overriden by the dynamic states (see below)\n viewportState = vk.VkPipelineViewportStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,\n viewportCount = 1,\n scissorCount = 1\n )\n # Enable dynamic states\n # Most states are baked into the pipeline, but there are still a few dynamic states that can be changed within a command buffer\n #To be able to change these we need do specify which dynamic states will be changed using this pipeline. Their actual states are set later on in the command buffer.\n # For this example we will set the viewport and scissor using dynamic states\n dynamicStateEnables = [vk.VK_DYNAMIC_STATE_VIEWPORT, vk.VK_DYNAMIC_STATE_SCISSOR]\n dynamicState = vk.VkPipelineDynamicStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,\n dynamicStateCount = len(dynamicStateEnables),\n pDynamicStates = dynamicStateEnables\n )\n\n # Depth and stencil state containing depth and stencil compare and test operations\n # We only use depth tests and want depth tests and writes to be enabled and compare with less or equal\n opState = vk.VkStencilOpState(\n failOp = vk.VK_STENCIL_OP_KEEP,\n passOp = vk.VK_STENCIL_OP_KEEP,\n compareOp = vk.VK_COMPARE_OP_ALWAYS\n )\n depthStencilState = vk.VkPipelineDepthStencilStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,\n depthTestEnable = vk.VK_TRUE,\n depthWriteEnable = vk.VK_TRUE,\n depthCompareOp = vk.VK_COMPARE_OP_LESS_OR_EQUAL,\n depthBoundsTestEnable = vk.VK_FALSE,\n stencilTestEnable = vk.VK_FALSE,\n front = opState,\n back = opState\n )\n # Multi sampling state\n # This example does not make use fo multi sampling (for anti-aliasing), the state must still be set and passed to the pipeline\n multisampleState = vk.VkPipelineMultisampleStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,\n rasterizationSamples = vk.VK_SAMPLE_COUNT_1_BIT,\n pSampleMask = None\n )\n # Vertex input descriptions\n # Specifies the vertex input parameters for a pipeline\n #Vertex input binding\n # This example uses a single vertex input binding at binding point 0 (see vkCmdBindVertexBuffers)\n vertexInputBinding = vk.VkVertexInputBindingDescription(\n binding = 0,\n stride = self.vertexShape.size * self.vertexShape.itemsize,\n inputRate = vk.VK_VERTEX_INPUT_RATE_VERTEX\n )\n # Input attribute bindings describe shader attribute locations and memory layouts\n vertexInputAttributs = []\n # These match the following shader layout (see triangle.vert):\n # layout (location = 0) in vec3 inPos;\n # layout (location = 1) in vec3 inColor;\n # Attribute location 0: Position\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 0,\n # Position attribute is three 32 
bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = 0 # offsetof(vertexShape, position)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 1,\n # Color attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = self.vertexShape[0].size * self.vertexShape.itemsize # offsetof(vertexShape, color)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n\n # Vertex input state used for pipeline creation\n vertexInputState = vk.VkPipelineVertexInputStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,\n vertexBindingDescriptionCount = 1,\n pVertexBindingDescriptions = [vertexInputBinding],\n vertexAttributeDescriptionCount = len(vertexInputAttributs),\n pVertexAttributeDescriptions = vertexInputAttributs\n )\n # Shaders\n shaderStages = []\n # Vertex shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_VERTEX_BIT,\n # Load binary SPIR-V shader\n module = vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.vert.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n # Fragment shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_FRAGMENT_BIT,\n # Load binary SPIR-V shader\n module = vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.frag.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n\n # Assign the pipeline states to the pipeline creation info structure\n pipelineCreateInfo = vk.VkGraphicsPipelineCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,\n # The layout used for this pipeline (can be shared among multiple pipelines using the same layout)\n layout = self.pipelineLayout,\n # Renderpass this pipeline is attached to\n renderPass = self.renderPass,\n pVertexInputState = vertexInputState,\n pInputAssemblyState = inputAssemblyState,\n pRasterizationState = rasterizationState,\n pColorBlendState = colorBlendState,\n pMultisampleState = multisampleState,\n pViewportState = viewportState,\n pDepthStencilState = depthStencilState,\n pDynamicState = dynamicState,\n stageCount = len(shaderStages),\n pStages = shaderStages\n )\n # Create rendering pipeline using the specified states\n self.pipelines = vk.vkCreateGraphicsPipelines(self.device, self.pipelineCache, 1, [pipelineCreateInfo], None)\n try:\n self.pipeline = self.pipelines[0]\n except TypeError:\n self.pipeline = self.pipelines\n # Shader modules are no longer needed once the graphics pipeline has been created\n vk.vkDestroyShaderModule(self.device, shaderStages[0].module, None)\n vk.vkDestroyShaderModule(self.device, shaderStages[1].module, None)", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! 
So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize((256, 768)),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def build_layers(self):\n raise NotImplementedError", "def get_refinement_pipeline():\n node_scaling = PrimaryNode('scaling')\n node_logit = SecondaryNode('logit', nodes_from=[node_scaling])\n node_decompose = SecondaryNode('class_decompose', nodes_from=[node_logit, node_scaling])\n node_rfr = SecondaryNode('rfr', nodes_from=[node_decompose])\n node_xgboost = SecondaryNode('xgboost', nodes_from=[node_rfr, node_logit])\n\n pipeline = Pipeline(node_xgboost)\n return pipeline", "def __init__(self, *pipeline: Iterable[WarningDiscriminator]):\n if not pipeline:\n pipeline = []\n\n self.pipeline = list(pipeline)", "def buildProcessor( self ):\n from vrml.vrml97 import parseprocessor\n return parseprocessor.ParseProcessor()", "def _build_pcollection(self, pipeline, data_dir, meta_dir, split):\n beam = tfds.core.lazy_imports.apache_beam\n\n with tf.io.gfile.GFile(meta_dir) as meta_file:\n meta_lines = meta_file.readlines()\n header = meta_lines[0].split(',')\n examples_descriptions = meta_lines[1:]\n\n total_examples = len(examples_descriptions)\n examples_descriptions = enumerate(examples_descriptions)\n split_index = header.index('split')\n date_index = header.index('timestamp')\n\n def _process_example(example_description):\n (idx, features) = example_description\n (unused_split, unused_img_filename, unused_img_path,\n unused_spatial_reference, unused_epsg, category, unused_visible,\n unused_img_width, unused_img_height, unused_country_code,\n unused_cloud_cover, unused_timestamp, unused_lat,\n unused_lon) = features.split(',')\n chunk_size = total_examples // 100\n batch_indx = int(idx) // chunk_size\n img_indx = int(idx) % chunk_size\n image = onp.load(\n os.path.join(data_dir, f'rgb_all_imgs_{batch_indx}.npy'),\n mmap_mode='r')[img_indx]\n return idx, {'image': image, 'label': category}\n\n def _filter_example(example_description):\n time_condition = self._DOMAIN_FILTERS[split](\n pd.to_datetime(example_description[1].split(',')[date_index]))\n split_condition = (\n example_description[1].split(',')[split_index] == split.split('_')[0])\n return time_condition and split_condition\n\n return pipeline | beam.Create(\n (examples_descriptions\n )) | beam.Filter(_filter_example) | beam.Map(_process_example)", "def get_validation_augmentation():\n test_transform = [\n A.PadIfNeeded(min_height=512, min_width=512, always_apply=True, border_mode=cv2.BORDER_REPLICATE),\n A.Resize(height = SHAPE, width = SHAPE, interpolation=1, always_apply=True, p=1)\n ]\n return A.Compose(test_transform)", "def get_cb_pipeline(train):\n from src.features import alchemy_feat, counting_feat, nltk_feat\n features = [\n ('sentiment', alchemy_feat.Sentiment()),\n ('sent_len', counting_feat.SentenceLength()),\n ('tfidf', counting_feat.BagOfTfIDF(train)),\n ('ner', alchemy_feat.NER()),\n ('pos', nltk_feat.POS())\n ]\n return get_pipeline(features)", "def build_posterior(self):\n raise NotImplementedError('Abstract Method')", "def build_model(self) -> Pipeline:\r\n clf = 
RandomForestClassifier(\r\n n_estimators=200,\r\n max_features='auto',\r\n min_samples_leaf=1,\r\n min_samples_split=3,\r\n random_state=42, \r\n n_jobs=-1)\r\n model = MultiOutputClassifier(clf)\r\n \r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n ('tfidf', \r\n TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.5, ngram_range=(1,2)))\r\n ])),\r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n ('clf', model)\r\n ])\r\n \r\n return pipeline", "def __init_subclass__(cls, type_: CalibrationTargetType):\n cls._types[type_] = cls", "def build_extensions(self):\n # TODO: move build customization here?\n build_ext.build_extensions(self)", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def build_gan(self):\n # make weights in the discriminator not trainable\n self.d_model.trainable = False\n # get noise and label inputs from generator model\n gen_noise, gen_label = self.g_model.input\n # get image output from the generator model\n gen_output = self.g_model.output\n # connect image output and label input from generator as inputs to discriminator\n gan_output = self.d_model([gen_output, gen_label])\n # define gan model as taking noise and label and outputting a classification\n self.gan_model = Model([gen_noise, gen_label], gan_output)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! 
So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def _make_im_modules(self):\n im_support = {\n 'icm': IntrinsicCuriosityModule,\n 'rnd': RandomNetworkDistillation\n }\n if self.im_type.lower() not in im_support:\n raise ValueError('Intrinsic Motivation type {} not recognized. Options are:\\n{}'.format(\n self.im_type.lower(),\n '\\n'.join([k for k in im_support.keys()])\n ))\n im_class = im_support[self.im_type.lower()]\n return im_class(env=self._dummy_env, hidden_size=128)", "def prepare(self, class_map=None):\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)", "def vgg_based_model(num_cls, input_shape=(150, 150, 3), weights=None):\n # Create a base model\n base_model = VGG16(include_top=False, weights='imagenet', input_shape=input_shape)\n\n # Add new classifier\n last = base_model.output\n x = Flatten()(last)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.5)(x)\n predictions = Dense(num_cls, activation='softmax')(x)\n model = Model(input=base_model.input, output=predictions)\n\n if weights:\n model.load_weights(weights)\n\n return model, base_model", "def build_classifier(self, X, y, validation_data=(None, None)):\n\n if self.augmented:\n self.augmented_training(X, y, *validation_data)\n \n else:\n self.conventional_training(X, y, *validation_data)", "def make_pipeline(model):\n\n steps = [\n (\"imp\", 
SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline", "def build_numerical_pipeline(self) -> Pipeline:\n pipeline = Pipeline([\n ('extract_data', FunctionTransformer(self.get_numerical_features)),\n ('impute', SimpleImputer(missing_values=np.nan)),\n ('standard_scaler', CustomStandardScaler())\n ])\n return pipeline", "def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)", "def __init__(self, embed_size):\n super(ImgAttentionEncoder, self).__init__()\n vggnet_feat = models.vgg19(pretrained=True).features\n modules = list(vggnet_feat.children())[:-2]\n self.cnn = nn.Sequential(*modules)\n self.fc = nn.Sequential(nn.Linear(self.cnn[-3].out_channels, embed_size),\n nn.Tanh()) # feature vector of image", "def __init__(self, type, embedding_size: tuple): \n super(AbstractClassifier, self).__init__()\n \n # CNN TODO: shape checks\n self.layer1 = nn.Sequential(\n nn.Conv1d(1, 20, kernel_size=2, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv1d(20, 50, kernel_size=2, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2, stride=2))\n self.dropout = nn.Dropout()\n self.fc1 = nn.Linear(50*44, 1024)\n self.fc2 = nn.Linear(1024, 2)", "def get_validation_augmentation():\n test_transform = [\n A.PadIfNeeded(min_height=512, min_width=512, always_apply=True, border_mode=0),\n A.Resize(height = RESIZE, width = RESIZE, interpolation=1, always_apply=True, p=1)\n ]\n return A.Compose(test_transform)" ]
[ "0.618366", "0.57090163", "0.554121", "0.55004793", "0.5495945", "0.54675126", "0.54427373", "0.5428508", "0.53878695", "0.5384956", "0.5383808", "0.53803486", "0.53243446", "0.5311842", "0.5292812", "0.5282065", "0.5245183", "0.52103126", "0.5209436", "0.5195009", "0.5174649", "0.5138732", "0.5131807", "0.51283926", "0.5127597", "0.5117759", "0.5104618", "0.5076926", "0.5025444", "0.49954697", "0.4966985", "0.49483633", "0.4943874", "0.49434453", "0.49404693", "0.49392104", "0.49389777", "0.49321628", "0.49255803", "0.49235013", "0.4920738", "0.49116337", "0.49103475", "0.49027523", "0.48813787", "0.48657647", "0.48532778", "0.48447546", "0.48440808", "0.48408738", "0.48332852", "0.48275802", "0.4823294", "0.48230585", "0.48162964", "0.4808073", "0.48054367", "0.48005095", "0.4795876", "0.47929713", "0.47882715", "0.47609717", "0.47604498", "0.47600594", "0.47576886", "0.47451413", "0.47437814", "0.47429696", "0.47391096", "0.47381172", "0.4737331", "0.47366863", "0.47304574", "0.4729478", "0.47234637", "0.472202", "0.47209755", "0.47016802", "0.46947613", "0.46932736", "0.46839216", "0.4681269", "0.46803236", "0.46800238", "0.467438", "0.4673878", "0.4672284", "0.4670644", "0.4670644", "0.4670644", "0.46675995", "0.4661835", "0.4659998", "0.46594146", "0.46574524", "0.46573573", "0.46563995", "0.46457282", "0.4642725", "0.4642381" ]
0.5901364
1
Test case based on fashion mnist tutorial
def test_kafka_output_sequence():
    fashion_mnist = tf.keras.datasets.fashion_mnist
    ((train_images, train_labels), (test_images, _)) = fashion_mnist.load_data()

    class_names = [
        "T-shirt/top",
        "Trouser",
        "Pullover",
        "Dress",
        "Coat",
        "Sandal",
        "Shirt",
        "Sneaker",
        "Bag",
        "Ankle boot",
    ]

    train_images = train_images / 255.0
    test_images = test_images / 255.0

    model = tf.keras.Sequential(
        [
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(128, activation=tf.nn.relu),
            tf.keras.layers.Dense(10, activation=tf.nn.softmax),
        ]
    )
    model.compile(
        optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
    )
    model.fit(train_images, train_labels, epochs=5)

    class OutputCallback(tf.keras.callbacks.Callback):
        """KafkaOutputCallback"""

        def __init__(
            self, batch_size, topic, servers
        ):  # pylint: disable=super-init-not-called
            self._sequence = kafka_ops.KafkaOutputSequence(topic=topic, servers=servers)
            self._batch_size = batch_size

        def on_predict_batch_end(self, batch, logs=None):
            index = batch * self._batch_size
            for outputs in logs["outputs"]:
                for output in outputs:
                    self._sequence.setitem(index, class_names[np.argmax(output)])
                    index += 1

        def flush(self):
            self._sequence.flush()

    channel = f"e{time.time()}e"
    topic = "test_" + channel

    # By default batch size is 32
    output = OutputCallback(32, topic, "localhost")
    predictions = model.predict(test_images, callbacks=[output])
    output.flush()

    predictions = [class_names[v] for v in np.argmax(predictions, axis=1)]

    # Reading from `test_e(time)e` we should get the same result
    dataset = tfio.kafka.KafkaDataset(topics=[topic], group="test", eof=True)
    for entry, prediction in zip(dataset, predictions):
        assert entry.numpy() == prediction.encode()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_machine_learning():", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) 
is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! 
don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def test_train():\n set_seed(42) # Noqa\n transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n mnist_train = MNIST(\"./\", download=True, train=False, transform=transform)\n model = SimpleNet()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)\n criterion = nn.CrossEntropyLoss()\n\n train_loader = DataLoader(mnist_train, batch_size=64, shuffle=True,\n num_workers=0)\n loss, accuracy = train(model, optimizer, criterion, train_loader,\n imshape=(-1, 28*28))\n\n assert type(loss) == torch.Tensor\n assert type(accuracy) == np.float64\n assert len(loss.shape) == 0", "def __init__(self):\n\n TEST_RATIO = 0.05\n mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)\n idxs = np.arange(mnist_trainset.train_data.size(0))\n np.random.shuffle(idxs)\n\n #print(torch.min(mnist_trainset.train_labels), torch.max(mnist_trainset.train_labels))\n #print(mnist_trainset.train_labels.size())\n \n # reshape input data to (1, 784) and normalize to range [0., 1.]\n self.train_data = torch.reshape(\n mnist_trainset.train_data[idxs].float(), (-1,1,28,28))/255.\n self.data_size = self.train_data.size(0)\n self.train_len = self.train_data.size(0)\n self.train_label = torch.Tensor([1]).float() # since there is only one class - 'real' image\n\n print('Train images -- {}'.format(self.train_data.size()))", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. 
TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. 
Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_training(self):\n\t\tpass", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def run_mnist_test():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_x, train_y = mnist.train.images, mnist.train.labels,\n test_x, test_y = mnist.test.images, mnist.test.labels\n # Reshape right off the bat to save some time.\n train_x = train_x.reshape(-1, 28, 28, 1)\n test_x = test_x.reshape(-1, 28, 28, 1)\n\n conv1 = LeNetClassifier.ConvLayer(kernel_width=5, kernel_height=5,\n feature_maps=1)\n conv2 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=32)\n conv3 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=64)\n network = LeNetClassifier((28, 28, 1), [conv1, conv2, conv3],\n [4 * 4 * 128, 625], 10, batch_size=128)\n\n saver = tf.train.Saver()\n\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n\n writer = tf.train.SummaryWriter(\"mnist_logs\", sess.graph_def)\n\n print(\"Tensorflow: Starting MNIST test...\")\n\n accuracy = 0\n start_time = time.time()\n iterations = 0\n while iterations < 2000:\n if iterations % 500 == 0:\n test_batch = mnist.test.next_batch(128)\n result = sess.run(network.predict(),\n feed_dict={network.inputs(): test_batch[0],\n network.expected_outputs(): test_batch[1]})\n argmax = np.argmax(test_batch[1], axis=1)\n accuracy = np.mean(argmax == result)\n print(\"Tensorflow: step %d, testing accuracy %s\" % \\\n (iterations, accuracy))\n\n batch = mnist.train.next_batch(128)\n sess.run(network.train(), feed_dict={network.inputs(): batch[0],\n network.expected_outputs(): batch[1]})\n iterations += 1\n\n # Save the network at the end.\n #saver.save(sess, \"Variables/test.ckpt\")\n\n elapsed = time.time() - start_time\n speed = iterations / elapsed\n print(\"Tensorflow: Ran %d training iterations. 
(%f iter/s)\" % \\\n (iterations, speed))\n print(\"Tensorflow: MNIST test completed in %f seconds.\" % (elapsed))\n return (elapsed, speed)", "def get_mnist():\n from keras.datasets import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def test_predictor():", "def get_mnist_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # Input image dimensions\n img_rows, img_cols = 28, 28\n\n # Get the data.\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n \n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n #x_train = x_train.reshape(60000, 784)\n #x_test = x_test.reshape(10000, 784)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n # convert class vectors to binary class matrices\n #y_train = keras.utils.to_categorical(y_train, nb_classes)\n #y_test = keras.utils.to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def test_n_iris(self):\r\n n = NeuronNetwork(1,\r\n [3],\r\n [[[0.2,0.2,0.2,0.2]]*3],\r\n [[-1.0,-1.0,-1.0]],learningRate=0.3)\r\n print(n)\r\n \r\n data = load_iris()\r\n\r\n inputs = data.data\r\n target = []\r\n for x in data.target:\r\n empty = [0,0,0]\r\n empty[x] = 1\r\n target.append(empty)\r\n \r\n n.train(inputs, target, 2000, 10*60)\r\n print(n)\r\n\r\n total = 0\r\n error = 0\r\n for i, x in enumerate(target, 0):\r\n out = n.feed_forward(inputs[i])\r\n if i < 50:\r\n error += self.mse(out, [1,0,0])\r\n if np.argmax(out) == 0:\r\n total +=1\r\n print(i, out, 1)\r\n elif i >= 50 and i < 100:\r\n error += self.mse(out, [0,1,0])\r\n if np.argmax(out) == 1:\r\n total +=1\r\n print(i, out, 2)\r\n elif i >= 100 and i < 150:\r\n error += self.mse(out, [0,0,1])\r\n if np.argmax(out) == 2:\r\n total +=1\r\n print(i, out, 3)\r\n\r\n print(f'MSE: {error/150}, RMSE:{math.sqrt(error/150)}')\r\n print(f'Accuracy:{total/len(target)}')", "def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 
't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, 
source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # print('data_dir > %s' % data_dir)\n\n X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n ## you may want to plot one example\n # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0])) # for lasagne\n # print('X_train[0] >', X_train[0].shape, type(X_train[0])) # for tensorflow\n # # exit()\n # # [[..],[..]] (28, 28) numpy.ndarray\n # # plt.imshow 只支持 (28, 28)格式,不支持 (1, 28, 28),所以用 [0][0]\n # fig = plt.figure()\n # #plotwindow = fig.add_subplot(111)\n # # plt.imshow(X_train[0][0], cmap='gray') # for lasagne (-1, 1, 28, 28)\n # plt.imshow(X_train[0].reshape(28,28), cmap='gray') # for tensorflow (-1, 28, 28, 1)\n # plt.title('A training image')\n # plt.show()\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test", "def main():\n training_data, validation_data, test_data = mnist.load()\n\n model = nn.NeuralNetwork([784, 100, 10], learning_rate=0.01, batch_size=50)\n\n model_training = training.EarlyStoppingRegularization(model,\n training_data,\n validation_data,\n test_data,\n max_steps_without_progression=2)\n result = model_training.train()\n\n result.save('models/mnist')", "def mnist_v1(batch_size=128, epochs=20, kernel_size=3):\n (X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n\n # Data preparation\n X_train = prepare(X_train)\n X_test = prepare(X_test)\n Y_train = np_utils.to_categorical(Y_train, 10) # 0..9\n Y_test = np_utils.to_categorical(Y_test, 10) # 0..9\n\n # Fitting the 
data to the augmentation data generator\n datagen = augmentedData(X_train)\n\n # --------------------\n # NEURAL NETWORK MODEL\n # --------------------\n\n # Model architecture\n model = Sequential()\n\n model.add(Conv2D(32, (kernel_size, kernel_size), activation='relu', input_shape=(1, 28, 28)))\n model.add(Conv2D(32, (kernel_size, kernel_size), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(10, activation='softmax'))\n\n # Model compilation\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n #Tensor board saves\n now = datetime.datetime.now()\n tensorboard = TensorBoard(log_dir=\"logs_first/kernel_size:{}\".format(kernel_size))\n\n model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, verbose=1, callbacks=[tensorboard])\n\n # Model saves\n now = datetime.datetime.now()\n model.save(\"sirr_HYPERPARAMETERS_mnist_first_\" + str(now.hour) + \"h\" + str(now.minute) + \".h5\")\n\n # Model evaluation\n return model.evaluate(X_test, Y_test, verbose=1)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def main():\n # construct the argument parse and parse the arguments\n args = argparse.ArgumentParser()\n args.add_argument(\"-o\", \"--output\", required=True, help=\"path to the output loss/accuracy plot\")\n args = vars(args.parse_args())\n\n # grab the MNIST dataset (if this is your first time using this\n # dataset then the 11MB download may take a minute)\n print(\"[INFO] accessing MNIST...\")\n ((train_x, train_y), (test_x, test_y)) = mnist.load_data()\n\n # each image in the MNIST dataset is represented as a 28x28x1\n # image, but in order to apply a standard neural network we must\n # first \"flatten\" the image to be simple list of 28x28=784 pixels\n train_x = train_x.reshape((train_x.shape[0], 28 * 28 * 1))\n test_x = test_x.reshape((test_x.shape[0], 28 * 28 * 1))\n # scale data to the range of [0, 1]\n train_x = train_x.astype(\"float32\") / 255.0\n test_x = test_x.astype(\"float32\") / 255.0\n\n # convert the labels from integers to vectors\n label_binarizer = LabelBinarizer()\n train_y = label_binarizer.fit_transform(train_y)\n test_y = label_binarizer.transform(test_y)\n\n # define the 784-256-128-10 architecture using Keras\n model = Sequential()\n model.add(Dense(256, input_shape=(784,), activation=\"sigmoid\"))\n model.add(Dense(128, activation=\"sigmoid\"))\n model.add(Dense(10, activation=\"softmax\"))\n\n # train the model using SGD\n print(\"[INFO] training network...\")\n sgd = SGD(0.01)\n model.compile(loss=\"categorical_crossentropy\", optimizer=sgd, metrics=[\"accuracy\"])\n model_fit = model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=100, batch_size=128)\n\n # evaluate the network\n print(\"[INFO] evaluating network...\")\n predictions = model.predict(test_x, batch_size=128)\n print(\n classification_report(\n test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=[str(x) for x in label_binarizer.classes_]\n )\n )\n\n # plot the training loss and accuracy\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(np.arange(0, 100), model_fit.history[\"loss\"], label=\"train_loss\")\n 
plt.plot(np.arange(0, 100), model_fit.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, 100), model_fit.history[\"acc\"], label=\"train_acc\")\n plt.plot(np.arange(0, 100), model_fit.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.savefig(args[\"output\"])", "def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def test_on_all(self) -> None:\n x_test, y_test = self.mnist.test.images, self.mnist.test.labels\n N = self.mnist.test.num_examples\n\n # I have replaced all -1 with self.mb_size to be sure about exact shapes of all layers.\n assert N % self.mb_size == 0,\\\n \"Sorry, mb_size must divide the number of images in test set\"\n\n results = np.array([0., 0.])\n for batch_no in range(N // self.mb_size):\n beg = batch_no * self.mb_size\n end = min(N, (batch_no + 1) * self.mb_size)\n len_batch = end - beg\n batch_results = np.array(self.test_on_batch(x_test[beg:end], y_test[beg:end]))\n results += batch_results * len_batch\n results /= N\n self.logger.info(\"(Test(final): Loss: {0[0]}, accuracy: {0[1]}\".format(results))", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.0] * 10\n label[int(data[0])] = 1.0 \n\n # The data are images of 28x28 pixels\n image_array = np.asfarray(data[1:]).reshape((28, 28))\n # Normalize the pictures \n image_array = image_array / 255.0\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):\n\tif not fashion:\n\t\t(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\t\tx_train, x_test = x_train / 255.0, x_test / 255.0\n\telse:\n\t\t(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n\t\tx_train, x_test = x_train / 255.0, x_test / 255.0 \n \n\tdef crop(X, crop_size):\n\t\tassert crop_x < X.shape[1]/2\n\t\tassert crop_x < X.shape[2]/2\n\t\treturn X[:,crop_size:-crop_size,crop_size:-crop_size]\n\n\tif crop_x > 0:\n\t\tx_train = crop(x_train, crop_x)\n\t\tx_test = crop(x_test, crop_x)\n\n\t# Flatten to 2d arrays (each example 1d)\n\tdef flatten_image(X):\n\t return X.reshape(X.shape[0], X.shape[1]*X.shape[1])\n\tif flatten_x:\n\t\tx_train = flatten_image(x_train)\n\t\tx_test = flatten_image(x_test)\n\n\tif onehot_encode:\n\t\ty_train = onehot_encode_labels(y_train)\n\t\ty_test = onehot_encode_labels(y_test)\n\n\tif classes is not None:\n\t\tassert len(classes) == 2\n\t\tc0, c1 = classes\n\t\ttrain_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)\n\t\tx_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]\n\t\ttest_idxs_to_keep 
= np.logical_or(y_test==c0, y_test==c1)\n\t\tx_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]\n\n\t\ty_train = (y_train==c1).astype(int)[:,np.newaxis]\n\t\ty_test = (y_test==c1).astype(int)[:,np.newaxis]\n\n\treturn x_train, y_train, x_test, y_test", "def test_custom_relu_mnist():\n loss1 = mnist()\n loss2 = custom_mnist()\n assert np.allclose(loss1, loss2, equal_nan=True)", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.01] * 10\n label[int(data[0])] = 0.99\n\n # The data are images of 28x28 pixels\n #image_array = np.asfarray(data[1:]).reshape((28, 28))\n image_array = np.asfarray(data[1:])\n # Normalize all values between [0.01, 1.0]\n image_array = ((image_array) / 255.0 * 0.99) + 0.01\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def test(neuralnet, dataloader):\n neuralnet.eval()\n batch_transform = data.BatchTransform()\n\n idx = 0\n for iteration, batch in enumerate(dataloader):\n with torch.no_grad():\n im = batch[0].requires_grad_(False).to(DEVICE)\n keypts = batch[1].requires_grad_(False).to(DEVICE)\n\n deformed_batch = batch_transform.exe(im, landmarks=keypts)\n im, future_im, mask = deformed_batch['image'], deformed_batch['future_image'], deformed_batch['mask']\n\n future_im_pred, gauss_mu, _ = neuralnet(im, future_im)\n\n predict = future_im_pred.data.cpu().numpy().transpose(0, 2, 3, 1)\n gauss_mu = gauss_mu.data.cpu().numpy()\n # gauss_map = gauss_map.data.cpu().numpy()\n future_im = future_im.data.cpu().numpy().transpose(0, 2, 3, 1)\n\n os.makedirs('testcheck', exist_ok=True)\n fig_path = path.join('testcheck', 'fig_{}.png'.format(iteration))\n utils.savegrid(fig_path, future_im, predict, gauss_mu=gauss_mu, name='deform')\n\n idx += im.shape[0]\n\n neuralnet.train()\n return idx", "def load_mnist(dataset_name='mnist', **kwargs):\n dataset_name = dataset_name.strip().lower().replace('minist', 'mnist')\n\n if dataset_name.lower() not in ['mnist', 'fashion-mnist']:\n raise ValueError('Only mnist or fashion-mnist are valid dataset_name.')\n\n base = 'http://yann.lecun.com/exdb/mnist/'\n if dataset_name == 'fashion-mnist':\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n\n dirname = os.path.join(_trident_dir, dataset_name)\n make_dir_if_need(dirname)\n\n \"\"\"Load MNIST data from `path`\"\"\"\n trainData = None\n testData = None\n for kind in ['train', 'test']:\n labels_file = '{0}-labels-idx1-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n images_file = '{0}-images-idx3-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n # if dataset_name == 'emnist' :\n # labels_file='emnist-balanced-'+labels_file\n # images_file = 'emnist-balanced-' + images_file\n\n is_data_download = download_file(base + labels_file, dirname, labels_file, dataset_name + '_labels_{0}'.format(kind))\n is_label_download = download_file(base + images_file, dirname, images_file, dataset_name + '_images_{0}'.format(kind))\n if is_data_download and is_label_download:\n labels_path = os.path.join(dirname, labels_file)\n images_path = os.path.join(dirname, images_file)\n labeldata = None\n imagedata = None\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, 
offset=8)\n labels = np.squeeze(labels).astype(np.int64)\n labeldata = LabelDataset(labels.tolist(),object_type=ObjectType.classification_label)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16)\n images = np.reshape(images, (len(labels), 784)).astype(dtype=_session.floatx)\n images = np.reshape(images, (-1, 28, 28))\n imagedata = ImageDataset(images, object_type=ObjectType.gray)\n if kind == 'train':\n trainData = Iterator(data=imagedata, label=labeldata)\n else:\n testData = Iterator(data=imagedata, label=labeldata)\n\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if dataset_name == 'mnist' else ['T-shirt/top', 'Trouser', 'Pullover',\n 'Dress', 'Coat', 'Sandal', 'Shirt',\n 'Sneaker', 'Bag', 'Ankle boot'],\n 'en-US')\n\n return dataset\n return None", "def main():\n # Initializing learning rate\n learning_rate = 0.0005\n # Initializing stopping criteria\n stopping_criteria = 0.01\n # load the data training data from a csv file with an url\n training_x,testing_x, training_y, testing_y,mean,sd= ai.store_data(\"https://github.com/santiagocantu98/K-Nearest-Neightbours/raw/master/diabetes.csv\",\"training\")\n normal_testing = np.copy(testing_x)\n\n # scalates the features of the testing data\n testing_data_scaled,mean,sd = ai.scale_features(testing_x,mean,sd)\n ai.print_scaled_data(testing_data_scaled,\"testing\")\n ai.calculate_euclidean_distance(training_x, training_y , testing_data_scaled, testing_y,normal_testing)", "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(config)\n hidden_builder = FFHiddenBuilder()\n _ = network_builder.build_network(hidden_builder)\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n trainer = Trainer(network_builder, train_config)\n trainer.train(dataset)", "def train():\n pass", "def test_deep_learning_models():\n atom = ATOMClassifier(*mnist, n_rows=0.1, random_state=1)\n pytest.raises(PermissionError, atom.clean)\n atom.run(KerasClassifier(neural_network, epochs=1, batch_size=512, verbose=0))", "def test_model_evaluation(model, mnist, idx, label):\n expected_probabilities = np.zeros((10,))\n expected_probabilities[label] = 1.0\n assert_array_almost_equal(\n model.classify(mnist.get_test_image(idx)),\n expected_probabilities\n )", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = 
train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )", "def mnist_noniid(dataset, num_users):\n # num_shards, num_imgs = 2*num_users, int(dataset.data.size()[0]/2/num_users) # choose two number from a set with num_shards, each client has 2*num_imgs images\n # idx_shard = [i for i in range(num_shards)]\n # dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n # idxs = np.arange(dataset.data.size()[0])\n # labels = dataset.train_labels.numpy()\n #\n # # sort labels\n # idxs_labels = np.vstack((idxs, labels))\n # idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]\n # idxs = idxs_labels[0,:]\n #\n # # divide and assign\n # for i in range(num_users):\n # rand_set = set(np.random.choice(idx_shard, 2, replace=False))\n # idx_shard = list(set(idx_shard) - rand_set)\n # for rand in rand_set:\n # dict_users[i] = np.concatenate((dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)\n # return dict_users\n\n label_list = dataset.targets.numpy()\n minLabel = min(label_list)\n numLabels = len(dataset.classes)\n\n dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n for i in range(0, len(label_list)):\n tmp_target_node = int((label_list[i] - minLabel) % num_users)\n if num_users > numLabels:\n tmpMinIndex = 0\n tmpMinVal = math.inf\n for n in range(0, num_users):\n if (n) % numLabels == tmp_target_node and len(dict_users[n]) < tmpMinVal:\n tmpMinVal = len(dict_users[n])\n tmpMinIndex = n\n tmp_target_node = tmpMinIndex\n dict_users[tmp_target_node] = np.concatenate((dict_users[tmp_target_node], [i]), axis=0)\n return dict_users", "def test_mnist_valid():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_valid_fast'\n else:\n yaml_file = 'mnist_valid'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_metric_learning(smote_class):\n nn_params = {'metric': 'precomputed',\n 'metric_learning_method': 'ITML'}\n X, y = smote_class(nn_params=nn_params).sample(dataset['data'],\n dataset['target'])\n\n assert np.unique(y).shape[0] == 2\n assert X.shape[0] > 0", "def demonstrate(self, train_path):\n if not os.path.exists(train_path):\n print(\"training json file not exists, program quit\")\n sys.exit()\n with open(train_path) as f:\n json_data = json.load(f)\n self.train_time_stamp_list = json_data['time']\n self.train_image_path_list = json_data['image_path']\n self.train_position_list = json_data['position']\n self.train_angle_list = json_data['angle']\n self.train_semantic_tag_list = json_data['semantic_tag']\n num_images = len(self.train_image_path_list)\n\n # create 
nodes\n print(\"start demonstrating, totally {} images in demonstration set\".format(num_images))\n self.node_id_list = []\n self.node_semantic_tag_list = []\n self.node_metric_feature_list = []\n self.node_conv_feature_list = []\n last_node_position = np.array([float('inf'), float('inf'), float('inf')])\n for train_index in range(num_images):\n train_position = np.array(self.train_position_list[train_index])\n if np.sqrt(np.sum(np.square(train_position - last_node_position))) > self.min_node_distance:\n last_node_position = train_position\n self.node_id_list.append(train_index)\n train_semantic_tag = self.train_semantic_tag_list[train_index]\n self.node_semantic_tag_list.append(train_semantic_tag)\n node_image_path = self.train_image_path_list[train_index]\n node_image = cv2.imread(node_image_path)\n image_batch = self.process_batch([node_image])\n node_conv_feature, node_metric_feature = self.sess.run([self.conv_features,\n self.metric_features], feed_dict = {self.images_placeholder: image_batch})\n self.node_conv_feature_list.append(node_conv_feature[0])\n self.node_metric_feature_list.append(node_metric_feature[0])\n print(\"{}/{} demonstration image shown\".format(train_index+1, num_images))\n self.node_number = len(self.node_id_list)\n print(\"all nodes created, totally {} of nodes\".format(len(self.node_id_list)))", "def test_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(10000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.test.x[:10000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))", "def main():\n # Load database\n (images_train, targets_train), (images_test, targets_test) = tf.keras.datasets.mnist.load_data()\n\n # Normalization\n images_train = images_train.reshape(-1, 784).astype(float)\n scaler = StandardScaler()\n images_train = scaler.fit_transform(images_train)\n images_test = images_test.reshape(-1, 784).astype(float)\n images_test = scaler.transform(images_test)\n\n images_train = images_train.reshape(-1, 28, 28, 1).astype(float)\n images_test = images_test.reshape(-1, 28, 28, 1).astype(float)\n\n # One hot encoding\n targets_train = tf.keras.utils.to_categorical(targets_train)\n targets_test = tf.keras.utils.to_categorical(targets_test)\n\n # Network architecture\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(30, (5, 5), input_shape=(28, 28, 1), \\\n activation=\"relu\", padding='same'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Conv2D(15, (3, 3), activation=\"relu\", padding='same'))\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(50, activation=\"relu\"))\n model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))\n\n # Learn\n optimizer = tf.keras.optimizers.SGD()\n\n @tf.function\n def train_step(images, targets):\n \"\"\"\n Define the training step by step\n \"\"\"\n # Save all operations\n with tf.GradientTape() as tape:\n # Make prediction\n predictions = model(images)\n # Compute loss\n loss = tf.keras.losses.categorical_crossentropy(targets, predictions)\n # Compute gradients\n gradients = tape.gradient(loss, model.trainable_variables)\n # Update model\n 
optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n batch_size = 32\n epochs = 10\n images_per_epoch = len(images_train) // batch_size\n for _ in range(epochs):\n for i in range(images_per_epoch):\n start = i*batch_size\n train_step(images_train[start:start+batch_size], targets_train[start:start+batch_size])\n\n # Compile must be defined to use evaluate method\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"sgd\",\n metrics=[\"accuracy\"])\n\n # Evaluate on the test database\n scores = model.evaluate(images_test, targets_test, verbose=0)\n print(scores)", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = 
normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n workspace=workspace_dir\n )", "def test_gens():\n dataset_path = \"/home/kateryna/Documents\"\n train_gen, test_gen = generate_embeddings_gen(dataset_path)\n img, feature, labels = next(train_gen)\n print(len(img), len(feature), labels)", "def test_net(network, model, mnist_path):\n print(\"============== Starting Testing ==============\")\n # load the saved model for evaluation\n param_dict = load_checkpoint(\"checkpoint_lenet-1_1875.ckpt\")\n # load parameter to the network\n load_param_into_net(network, param_dict)\n # load testing dataset\n ds_eval = create_dataset(os.path.join(mnist_path, \"test\"))\n acc = model.eval(ds_eval, dataset_sink_mode=False)\n print(\"============== Accuracy:{} ==============\".format(acc))", "def __init__(self,\n dataset='mnist.pkl.gz',\n nkerns=[20, 50], batch_size=500, update_rule = 'regular', config=None, dropout=0, activation='tanh'):\n if activation=='tanh':\n activation_fn = T.tanh\n # Set activation function to none because with PreLU additional alpha variables have to be initialized\n # by setting the activation function to None the linear activation will be retrieved which then can be\n # activated by my PreLU implementation\n elif activation=='PreLU':\n activation_fn = None\n\n rng = numpy.random.RandomState(23455)\n\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0]\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]\n n_test_batches = test_set_x.get_value(borrow=True).shape[0]\n n_train_batches //= batch_size\n n_valid_batches //= batch_size\n n_test_batches //= batch_size\n self.n_train_batches = n_train_batches\n self.n_valid_batches = n_valid_batches\n self.n_test_batches = n_test_batches\n self.loss_history = []\n self.val_error_history = []\n self.train_error_history = []\n # allocate symbolic variables for the data\n\n index = T.lscalar() # index to a [mini]batch\n mode = T.lscalar() # 1 = training (dropout enabled), 0 = testing (dropout disabled)\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print('... 
building the model')\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 28, 28))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 28, 28),\n filter_shape=(nkerns[0], 1, 5, 5),\n poolsize=(2, 2),\n activation = activation_fn\n )\n if(activation=='PreLU'):\n ########################\n # PreLU Implementation #\n ########################\n # if the activation function is PreLU alpha has to be initialized with the same shape as the bias\n # alpha will be initialized at 0.25 as suggested in the article that introduced PreLU\n # Reference: Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification\n # (Kaiming He; Xiangyu Zhang; Shaoqing Ren; Jian Sun, Microsoft, 2015)\n alpha0 = theano.shared(numpy.ones(layer0.b.get_value().shape,dtype=theano.config.floatX)*0.25, borrow=True)\n layer1_input = self.PreLU(layer0.output, alpha0.dimshuffle('x', 0, 'x', 'x'))\n else:\n layer1_input = layer0.output\n\n # Construct the second convolutional pooling layer\n # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)\n # maxpooling reduces this further to (8/2, 8/2) = (4, 4)\n # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)\n layer1 = LeNetConvPoolLayer(\n rng,\n input=layer1_input,\n image_shape=(batch_size, nkerns[0], 12, 12),\n filter_shape=(nkerns[1], nkerns[0], 5, 5),\n poolsize=(2, 2),\n activation = activation_fn\n )\n if (activation == 'PreLU'):\n alpha1 = theano.shared(numpy.ones(layer1.b.get_value().shape, dtype=theano.config.floatX) * 0.25,\n borrow=True)\n layer1_output = self.PreLU(layer1.output, alpha1.dimshuffle('x', 0, 'x', 'x'))\n else:\n layer1_output = layer1.output\n\n # the HiddenLayer being fully-connected, it operates on 2D matrices of\n # shape (batch_size, num_pixels) (i.e matrix of rasterized images).\n # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),\n # or (500, 50 * 4 * 4) = (500, 800) with the default values.\n layer2_input = layer1_output.flatten(2)\n\n # Add dropout if dropout value is higher than 0 and in training mode\n if(dropout>0):\n layer2_input = theano.ifelse.ifelse(theano.tensor.eq(mode, 1), self.Dropout(layer2_input, dropout, rng), layer2_input)\n\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=nkerns[1] * 4 * 4,\n n_out=500,\n activation=activation_fn\n )\n if (activation == 'PreLU'):\n alpha2 = theano.shared(numpy.ones(layer2.b.get_value().shape, dtype=theano.config.floatX) * 0.25,\n borrow=True)\n layer2_output = self.PreLU(layer2.output, alpha2)\n else:\n layer2_output = layer2.output\n\n # Add dropout if dropout value is higher than 0 and in training mode\n if (dropout > 0):\n layer3_input = theano.ifelse.ifelse(theano.tensor.eq(mode, 1), self.Dropout(layer2_output, dropout, rng),\n layer2_output)\n else:\n layer3_input = layer2_output\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer3_input, n_in=500, n_out=10)\n\n # the cost we minimize during training is the NLL of the model\n cost = layer3.negative_log_likelihood(y)\n #self.print_output = 
theano.function(\n # [index],\n # [alpha0.dimshuffle('x',0,'x','x'), layer0.b, layer0.output],\n # givens={\n # x: test_set_x[index * batch_size: (index + 1) * batch_size],\n # },\n # on_unused_input='ignore'\n #)\n self.print_layer2 = theano.function(\n [index],\n layer2_input,\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n mode: 1\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n # create a function to compute the mistakes that are made by the model\n self.test_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 0\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n\n self.validate_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 0\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n self.train_error_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 0\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n\n # create a list of all model parameters to be fit by gradient descent\n params = layer3.params + layer2.params + layer1.params + layer0.params\n\n if activation == 'PreLU':\n alpha = [alpha0, alpha1, alpha2]\n params += alpha\n # create a list of gradients for all model parameters\n grads = T.grad(cost, params)\n\n # train_model is a function that updates the model parameters by\n # SGD Since this model has many parameters, it would be tedious to\n # manually create an update rule for each model parameter. We thus\n # create the updates list by automatically looping over all\n # (paras[i], grads[i]) pairs.\n if update_rule=='regular':\n if(config is None) : config = {}\n config.setdefault('learning_rate', 0.1)\n updates = [\n (param, param - 0.1 * grad)\n for param, grad in zip(params, grads)\n ]\n ###########################\n # AdaDelta implementation #\n ###########################\n # Implementing the adaDelta update rule as described in AdaDelta: An adaptive learning rate method\n # (Matthew D. 
Zeiler, Google, 2012)\n elif update_rule=='adaDelta':\n\n if(config is None): config = {}\n config.setdefault('decay_rate',0.95)\n config.setdefault('epsilon',1e-6)\n config.setdefault('learning_rate', 1.)\n \n # E(g^2) is a Theano variable to store the moving average of the squared gradient\n Egrads = [\n theano.shared(numpy.zeros_like(param.get_value(),dtype=theano.config.floatX),borrow=True)\n for param in params\n ]\n # E(dx^2) is a Theano variable to store the moving average of the squared updates to the parameters\n Edxs = [\n theano.shared(numpy.zeros_like(param.get_value(),dtype=theano.config.floatX),borrow=True)\n for param in params\n ]\n # The updated E(g^2) value is calculated and will be added to the parameter updates\n Egrads_new = [\n config['decay_rate'] * Egrad + (1 - config['decay_rate']) * (grad ** 2)\n for (Egrad, grad) in zip(Egrads, grads)\n ]\n # The parameter update is calculated using the AdaDelta update rule\n dxs = [\n -(T.sqrt(Edx + config['epsilon']) / T.sqrt(Egrad_new + config['epsilon'])) * grad\n for (Edx, Egrad_new, grad) in zip(Edxs, Egrads_new, grads)\n ]\n # The updated E(dx^2) value is calculated and will be added to the parameter updates\n Edxs_new = [\n config['decay_rate']*Edx + (1-config['decay_rate']) * (dx ** 2)\n for (Edx, dx) in zip(Edxs, dxs)\n ]\n Egrads_updates = zip(Egrads, Egrads_new)\n Edxs_updates = zip(Edxs, Edxs_new)\n param_updates = [\n (param, param+dx)\n for (param, dx) in zip(params, dxs)\n ]\n # The new E(g^2) and E(dx^2) are added to the parameter updates so they will be updated at the same time\n # as the model parameters.\n updates = param_updates + Egrads_updates + Edxs_updates\n\n else:\n raise ValueError('Unrecognized update rule %s' % update_rule)\n self.train_model = theano.function(\n [index],\n cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 1 # in training mode dropout is enabled (if dropout>0)\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n\n # end-snippet-1\n\n ###############\n # TRAIN MODEL #\n ###############", "def prepare_n_mnist(filename, is_filter, num_spikes, step_factor=1):\n td = ev.read_dataset(filename)\n # td.show_td(100)\n td.data = stabilize(td)\n td.data = td.extract_roi([3, 3], [28, 28], True)\n images = make_td_images(td, num_spikes, step_factor)\n\n if is_filter:\n images = ndimage.median_filter(images, 3)\n\n # for image in images:\n # cv2.imshow('img', image)\n # cv2.waitKey(70)\n return images", "def create_mnist_model():\n\n input_shape = (160, 320, 3)\n \n m = Sequential()\n \n new_size = [64,64]\n def mylambda(x):\n import tensorflow as tf\n return tf.image.resize_images(x, size=(64,64))\n \n # 1. Resize Input to 64x64\n m.add(Lambda(mylambda,\n input_shape=input_shape,\n ))\n \n # 2. Normalize\n m.add(Lambda(lambda x: x/255.0 - 0.5,\n ))\n \n # 3. Add 2 Conv layers with max pooling, and dropouts\n m.add(Convolution2D(5, 3, 3, subsample=(1,1), activation='relu'))\n m.add(Convolution2D(10, 3, 3, subsample=(1,1), activation='relu'))\n m.add(MaxPooling2D(pool_size=(2, 2)))\n m.add(Dropout(0.25))\n \n # 4. 
Flatten and use Fully Connected module\n m.add(Flatten())\n m.add(Dense(50, activation='relu'))\n m.add(Dropout(0.5))\n m.add(Dense(1, activation=None))\n \n return m", "def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def trainNet():", "def test_n_and_train(self):\r\n\r\n n = NeuronNetwork(1,\r\n [1],\r\n [[[0.0,0.0]]],\r\n [[0.0]])\r\n\r\n inputs = [[0,0], [0,1], [1,0], [1,1]]\r\n targets = [[0], [0], [0], [1]]\r\n\r\n n.train(inputs,targets,1000,180)\r\n\r\n print(n)\r\n self.assertLess(n.feed_forward([0,0]), [0.001])\r\n self.assertGreater(n.feed_forward([1,0]), [0.001])\r\n self.assertGreater(n.feed_forward([0,1]), [0.001])\r\n self.assertGreater(n.feed_forward([1,1]), [0.9])", "def test_intent_classifier_add_training_samples(self):\n pass", "def run_mnist(flags_obj):\n model_function = model_fn\n config = tf.estimator.RunConfig(protocol='grpc+verbs',\n save_checkpoints_secs=300,\n save_summary_steps=200,\n log_step_count_steps=200)\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n mnist_classifier = tf.estimator.Estimator(\n model_fn=model_function,\n model_dir=flags_obj.model_dir,\n config=config,\n params={\n 'data_format': data_format,\n })\n\n # Set up training and evaluation input functions.\n def train_input_fn():\n \"\"\"Prepare data for training.\"\"\"\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes use less memory. 
MNIST is a small\n # enough dataset that we can easily shuffle the full epoch.\n ds = dtrain(flags_obj.data_dir)\n ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)\n\n # Iterate through the dataset a set number (`epochs_between_evals`) of times\n # during each training session.\n ds = ds.repeat()\n return ds\n\n def eval_input_fn():\n return dtest(flags_obj.data_dir).batch(\n 100).make_one_shot_iterator().get_next()\n\n \n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=flags_obj.train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,throttle_secs=300)\n tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)\n \n '''# Train and evaluate model.\n for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):\n mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print('\\nEvaluation results:\\n\\t%s\\n' % eval_results)\n\n if model_helpers.past_stop_threshold(flags_obj.stop_threshold,\n eval_results['accuracy']):\n break\n '''\n # Export the model\n if flags_obj.export_dir is not None:\n image = tf.placeholder(tf.float32, [None, 28, 28])\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'image': image,\n })\n mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn)", "def setUp(self):\n self.X_train, self.y_train = load_data(\"../data/traindata.mat.tar.gz\")\n self.nn = NN_hwr([len(self.X_train[0]), 50, 10])", "def testing_featurizer_build():\n f = ImageFeaturizer()\n compare_featurizer_class(f, (0, 0), np.zeros((1)), 0, '', False, '', {}, 1)", "def test_init_net_simple(self):\n net = ecn.NeuralNet(2, (2,), 1)\n self.assertEqual(2, len(net.weights.keys()))\n self.assertEqual((2, 3), np.shape(net.weights['h0']))\n self.assertEqual((1, 3), np.shape(net.weights['y']))\n print('Finished testing simple neural net init\\n')", "def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n 
image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)", "def __init__(self,\n method='random_int',\n flatten=True,\n train_len=60000,\n test_len=10000,\n train_order_seed=None):\n if method == 'random_int':\n x_train = np.random.randint(low=0, high=256, size=(train_len, 28, 28))\n x_test = np.random.randint(low=0, high=256, size=(test_len, 28, 28))\n y_train = np.random.randint(low=0, high=10, size=(train_len, 1))\n y_test = np.random.randint(low=0, high=10, size=(test_len, 1))\n else:\n raise NotImplementedError\n\n\n # Flatten x_train and x_test.\n if flatten:\n x_train = x_train.reshape((x_train.shape[0], -1))\n x_test = x_test.reshape((x_test.shape[0], -1))\n\n # Normalize x_train and x_test.\n x_train = keras.utils.normalize(x_train).astype(np.float32)\n x_test = keras.utils.normalize(x_test).astype(np.float32)\n\n # Convert y_train and y_test to one-hot.\n y_train = keras.utils.to_categorical(y_train)\n y_test = keras.utils.to_categorical(y_test)\n\n # Prepare the dataset.\n super(ConstructedDatasetMnist, self).__init__(\n (x_train, y_train),\n 64, (x_test, y_test),\n train_order_seed=train_order_seed)", "def gen_train_val_test_images(data_dir, seed=131):\n np.random.seed(seed)\n\n # Load SVHN Dataset (single digits)\n train_data = scipy_io.loadmat(data_dir + '/train_32x32.mat')\n test_data = scipy_io.loadmat(data_dir + '/test_32x32.mat')\n extra_data = scipy_io.loadmat(data_dir + '/extra_32x32.mat')\n\n train_X, train_y = train_data['X'], train_data['y']\n test_X, test_y = test_data['X'], test_data['y']\n extra_X, extra_y = extra_data['X'], extra_data['y']\n\n train_y = train_y.squeeze()\n test_y = test_y.squeeze()\n extra_y = extra_y.squeeze()\n\n # Change labels for '0' digit from 10 to 0\n train_y[train_y == 10] = 0\n test_y[test_y == 10] = 0\n extra_y[extra_y == 10] = 0\n\n del extra_data\n\n num_classes = 10\n\n train_val_sample_idxs = np.array([], int)\n for i in range(num_classes):\n class_idxs = np.arange(len(train_y))[train_y == i]\n sel_class_idxs = np.random.choice(class_idxs, size=400)\n train_val_sample_idxs = np.concatenate((train_val_sample_idxs,\n sel_class_idxs))\n not_train_val_sample_idxs = np.setdiff1d(np.arange(len(train_y)),\n train_val_sample_idxs)\n\n val_X = train_X[:, :, :, train_val_sample_idxs]\n val_y = train_y[train_val_sample_idxs]\n\n extra_val_sample_idxs = np.array([], int)\n for i in range(num_classes):\n class_idxs = np.arange(len(extra_y))[extra_y == i]\n sel_class_idxs = np.random.choice(class_idxs, size=200)\n extra_val_sample_idxs = np.concatenate((extra_val_sample_idxs,\n sel_class_idxs))\n not_extra_val_sample_idxs = np.setdiff1d(np.arange(len(extra_y)),\n extra_val_sample_idxs)\n\n val_X = np.concatenate((val_X, extra_X[:, :, :, extra_val_sample_idxs]), axis=3)\n val_y = np.hstack((val_y, extra_y[extra_val_sample_idxs]))\n\n train_X = np.concatenate((train_X[:, :, :, not_train_val_sample_idxs],\n extra_X[:, :, :, not_extra_val_sample_idxs]), axis=3)\n train_y = np.hstack((train_y[not_train_val_sample_idxs],\n extra_y[not_extra_val_sample_idxs]))\n\n # Create directories and save images\n train_dir = data_dir + '/imgs/train'\n test_dir = data_dir + '/imgs/test'\n validation_dir = data_dir + '/imgs/validation'\n\n if not os.path.exists(train_dir):\n 
os.makedirs(train_dir)\n\n if not os.path.exists(validation_dir):\n os.makedirs(validation_dir)\n\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n for i in range(num_classes):\n if not os.path.exists(train_dir + '/' + str(i)):\n os.makedirs(train_dir + '/' + str(i))\n\n if not os.path.exists(validation_dir + '/' + str(i)):\n os.makedirs(validation_dir + '/' + str(i))\n\n if not os.path.exists(test_dir + '/' + str(i)):\n os.makedirs(test_dir + '/' + str(i))\n\n print \"Creating train images ... \"\n for i in range(len(train_y)):\n filename = train_dir + '/' + str(train_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, train_X[:, :, :, i])\n\n print \"Creating validation images ... \"\n for i in range(len(val_y)):\n filename = validation_dir + '/' + str(val_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, val_X[:, :, :, i])\n\n print \"Creating test images ... \"\n for i in range(len(test_y)):\n filename = test_dir + '/' + str(test_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, test_X[:, :, :, i])", "def test_fish():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fish(test_path)\n try:\n assert x_train.shape == (97, 20)\n except:\n shutil.rmtree(test_path)\n raise()", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def codeepneat_mnist_example(_):\n # Set standard configuration specific to TFNE but not the neuroevolution process\n logging_level = logging.INFO\n config_file_path = './codeepneat_mnist_example_config.cfg'\n backup_dir_path = './tfne_state_backups/'\n max_generations = 20\n max_fitness = None\n\n # Read in optionally supplied flags, changing the just set standard configuration\n if flags.FLAGS.logging_level is not None:\n logging_level = flags.FLAGS.logging_level\n if flags.FLAGS.config_file is not None:\n config_file_path = flags.FLAGS.config_file\n if flags.FLAGS.backup_dir is not None:\n backup_dir_path = flags.FLAGS.backup_dir\n if flags.FLAGS.max_generations is not None:\n max_generations = flags.FLAGS.max_generations\n if flags.FLAGS.max_fitness is not None:\n max_fitness = flags.FLAGS.max_fitness\n\n # Set logging, parse config\n logging.set_verbosity(logging_level)\n config = tfne.parse_configuration(config_file_path)\n\n # Initialize the environment and the specific NE algorithm\n environment = tfne.environments.MNISTEnvironment(weight_training=True, config=config, verbosity=logging_level)\n ne_algorithm = tfne.algorithms.CoDeepNEAT(config)\n\n # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.\n engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,\n environment=environment,\n backup_dir_path=backup_dir_path,\n max_generations=max_generations,\n max_fitness=max_fitness)\n\n # Start training process, returning the best genome when training ends\n best_genome = engine.train()\n print(\"Best genome returned by evolution:\\n\")\n print(best_genome)\n\n # Increase epoch count in environment for a final training of the best genome. 
Train the genome and then replay it.\n print(\"Training best genome for 200 epochs...\\n\")\n environment.epochs = 20\n environment.eval_genome_fitness(best_genome)\n environment.replay_genome(best_genome)\n\n # Serialize and save genotype and Tensorflow model to demonstrate serialization\n best_genome.save_genotype(save_dir_path='./best_genome_genotype/')\n best_genome.save_model(file_path='./best_genome_model/')", "def fetch_mnist():\n data_path = check_fetch_mnist()\n f = gzip.open(data_path, 'rb')\n try:\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")\n except TypeError:\n train_set, valid_set, test_set = pickle.load(f)\n f.close()\n train_indices = np.arange(0, len(train_set[0]))\n valid_indices = np.arange(0, len(valid_set[0])) + train_indices[-1] + 1\n test_indices = np.arange(0, len(test_set[0])) + valid_indices[-1] + 1\n return {\"data\": np.concatenate((train_set[0], valid_set[0], test_set[0]),\n axis=0).astype(theano.config.floatX),\n \"target\": np.concatenate((train_set[1], valid_set[1], test_set[1]),\n axis=0).astype(np.int32),\n \"train_indices\": train_indices.astype(np.int32),\n \"valid_indices\": valid_indices.astype(np.int32),\n \"test_indices\": test_indices.astype(np.int32)}", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def load_mnist(path='mnist/mnist.npz'):\n\n with np.load(path) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n x_train = x_train.astype(np.float32) / 255.\n y_train = y_train.astype(np.int32)\n x_test = x_test.astype(np.float32) / 255.\n y_test = y_test.astype(np.int32)\n \n return (x_train, y_train), (x_test, y_test)", "def train_vae_on_mnist(z_dim=2, kernel_initializer='glorot_uniform', optimizer = 'adam', learning_rate=0.001, n_epochs=4000,\n test_every=100, minibatch_size=100, encoder_hidden_sizes=[200, 200], decoder_hidden_sizes=[200, 200],\n hidden_activation='relu', plot_grid_size=10, plot_n_samples = 20):\n\n # Get Data\n x_train, x_test = load_mnist_images(binarize=True)\n train_iterator = tf.data.Dataset.from_tensor_slices(x_train).repeat().batch(minibatch_size).make_initializable_iterator()\n n_samples, n_dims = x_train.shape\n x_minibatch = train_iterator.get_next() # Get symbolic data, target tensors\n\n # Build Model\n raise NotImplementedError(\"Build the model here\")\n\n\n with tf.Session() as sess:\n sess.run(train_iterator.initializer) # Initialize the variables of the data-loader.\n sess.run(tf.global_variables_initializer()) # Initialize the model parameters.\n n_steps = (n_epochs * n_samples)/minibatch_size\n for i in xrange(n_steps):\n if i%test_every==0:\n raise NotImplementedError('INSERT CODE TO RUN TEST AND RECORD LOG-PROB PERIODICALLY')\n\n raise NotImplementedError('CALL TRAINING FUNCTION HERE')", "def test(ndigit, elambda, showSamples, showConfusion):\n Data, Label = getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n eigenvectors = getEigenVectors(trainX_new, elambda)\n trainX_eigen = trainX_new.dot(eigenvectors)\n testX_new = testX - trainX_mean\n testX_eigen = testX_new.dot(eigenvectors)\n testO = []\n if showSamples:\n correct_samples = []\n correct_samples_nearest = []\n correct_samples_eigen = []\n correct_samples_nearest_eigen = []\n correct_samples_labels = []\n correct_samples_predictions = []\n 
wrong_samples = []\n wrong_samples_nearest = []\n wrong_samples_eigen = []\n wrong_samples_nearest_eigen = []\n wrong_samples_labels = []\n wrong_samples_predictions = []\n if showConfusion:\n conf = np.zeros((ndigit, ndigit))\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n p = int(trainY[j])\n y = int(testY[i])\n if showConfusion:\n conf[p, y] += 1\n if showSamples:\n if p == y:\n if len(correct_samples) < y + 1:\n correct_samples.append(testX[i])\n correct_samples_nearest.append(trainX[j])\n correct_samples_eigen.append(testX_eigen[i])\n correct_samples_nearest_eigen.append(trainX_eigen[j])\n correct_samples_labels.append(y)\n correct_samples_predictions.append(p)\n else:\n if len(wrong_samples) < y + 1:\n wrong_samples.append(testX[i])\n wrong_samples_nearest.append(trainX[j])\n wrong_samples_eigen.append(testX_eigen[i])\n wrong_samples_nearest_eigen.append(trainX_eigen[j])\n wrong_samples_labels.append(y)\n wrong_samples_predictions.append(p)\n testO.append(p)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = trainX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d lambda = %.2f train = %.6f test = %.6f \" % (\n ndigit, elambda, (train0 == trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')", "def train(self, nsamples = 1, verbose = False, random = True):\n imgs, skels = self.images.get_batch(nimages = nsamples, random = random);\n self.trainer.run(session = self.session, feed_dict={self.input : imgs, self.skeleton : skels})\n if verbose:\n self.plot_results(imgs);", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def readmnist(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = 
os.path.join(path, 't10k-labels.idx1-ubyte')\n # else:\n # raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "def test_im_model(name, xtrain_short=None, ytrain_short=None, nodisplay=False, \n summary=False, do_test=False):\n print(\"Loading model...\")\n objs = {\"accuracy\": fret_accuracy()}\n model = keras.models.load_model(\"models/\"+name+\".hdf5\", custom_objects=objs)\n\n if summary:\n # if we are just loading and have not trained\n model.summary()\n\n # if (batchsize, guitarstringindex, probabilities) then categorical, else \n # (batchsize, stringpred) is regression-type\n shape = model.get_output_shape_at(-1)\n if len(shape) > 2:\n categorical = True\n else:\n categorical = False\n\n \"\"\"\n testing\n \"\"\"\n data = load_all_data(\"data/inference_model_train\", num_splits=0, \n display=(not nodisplay), do_test=do_test)\n xtest, _, _, ytest, _, _ = data\n\n print(\"Evaluating on test set w/ no transitions\")\n print(len(xtest), \"testing images\")\n results = model.evaluate(xtest, ytest, verbose=1)\n with open(\"stats/\"+name+\"/stats.txt\", \"a\") as f:\n f.write(\"\\nTest results (no transitions):\\n\")\n for i,metric in enumerate(model.metrics_names):\n print(\" \", metric+\":\", results[i])\n f.write(metric+\": \"+str(results[i])+\"\\n\")\n\n # free memory\n del data, xtest, ytest\n import gc; gc.collect()\n\n data = load_all_data(\"data/inference_model_train\", num_splits=0, \n display=(not nodisplay), do_test=do_test, no_transitions=False)\n xtest, _, _, ytest, _, _ = data\n\n print(\"Evaluating on test set w/ transitions\")\n print(len(xtest), \"testing images\")\n results = model.evaluate(xtest, ytest, verbose=1)\n with open(\"stats/\"+name+\"/stats.txt\", \"a\") as f:\n f.write(\"\\nTest results (with transitions):\\n\")\n for i,metric in enumerate(model.metrics_names):\n print(\" \", metric+\":\", results[i])\n f.write(metric+\": \"+str(results[i])+\"\\n\")\n\n scaleup = 2.0\n\n # on training set, if available\n if xtrain_short is not None:\n print(\"Generating video on train set predictions\")\n trainpreds = model.predict(xtrain_short, verbose=1)\n\n vid = [cv.resize(i, dsize=(0,0), fx=scaleup, fy=scaleup, \\\n interpolation=cv.INTER_LINEAR) for i in xtrain_short]\n\n annotate_vid(vid, trainpreds, ytrain_short, categorical)\n if not nodisplay:\n showvid(vid, name=\"train ims\", ms=300)\n writevid(vid, \"stats/\"+name+\"/results_visualization_trainset\")\n\n # on test set\n print(\"Generating video on test set predictions\")\n numframes = 1000\n testpreds = model.predict(xtest[:numframes], verbose=1)\n\n vid = [cv.resize(i, dsize=(0,0), fx=scaleup, fy=scaleup, \\\n interpolation=cv.INTER_LINEAR) for i in xtest[:numframes]]\n\n annotate_vid(vid, testpreds, ytest[:numframes], categorical)\n if not nodisplay:\n showvid(vid, name=\"test set\", ms=35)\n writevid(vid, \"stats/\"+name+\"/results_visualization_testset\")", "def test_star():\n test_path = tempfile.mkdtemp()\n x_train, metadata = star(test_path)\n try:\n assert x_train.shape == (5748, 8)\n except:\n 
shutil.rmtree(test_path)\n raise()", "def test_train_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.train.equals(atom.mnb.train)\n assert check_scaling(atom.lr.train)", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def get_mnist(one_hot_enc, normalized, flatten):", "def cg_optimization_mnist(n_epochs=50, mnist_pkl_gz='mnist.pkl.gz'):\r\n #############\r\n # LOAD DATA #\r\n #############\r\n datasets = load_data(mnist_pkl_gz)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n batch_size = 600 # size of the minibatch\r\n\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ishape = (28, 28) # this is the size of MNIST images\r\n n_in = 28 * 28 # number of input units\r\n n_out = 10 # number of output units\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n print '... 
building the model'\r\n\r\n # allocate symbolic variables for the data\r\n minibatch_offset = T.lscalar() # offset to the start of a [mini]batch\r\n x = T.matrix() # the data is presented as rasterized images\r\n y = T.ivector() # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n # construct the logistic regression class\r\n classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model in symbolic format\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compile a theano function that computes the mistakes that are made by\r\n # the model on a minibatch\r\n test_model = theano.function([minibatch_offset], classifier.errors(y),\r\n givens={\r\n x: test_set_x[minibatch_offset:minibatch_offset + batch_size],\r\n y: test_set_y[minibatch_offset:minibatch_offset + batch_size]},\r\n name=\"test\")\r\n\r\n validate_model = theano.function([minibatch_offset], classifier.errors(y),\r\n givens={\r\n x: valid_set_x[minibatch_offset:\r\n minibatch_offset + batch_size],\r\n y: valid_set_y[minibatch_offset:\r\n minibatch_offset + batch_size]},\r\n name=\"validate\")\r\n\r\n # compile a thenao function that returns the cost of a minibatch\r\n batch_cost = theano.function([minibatch_offset], cost,\r\n givens={\r\n x: train_set_x[minibatch_offset:\r\n minibatch_offset + batch_size],\r\n y: train_set_y[minibatch_offset:\r\n minibatch_offset + batch_size]},\r\n name=\"batch_cost\")\r\n\r\n # compile a theano function that returns the gradient of the minibatch\r\n # with respect to theta\r\n batch_grad = theano.function([minibatch_offset],\r\n T.grad(cost, classifier.theta),\r\n givens={\r\n x: train_set_x[minibatch_offset:\r\n minibatch_offset + batch_size],\r\n y: train_set_y[minibatch_offset:\r\n minibatch_offset + batch_size]},\r\n name=\"batch_grad\")\r\n\r\n # creates a function that computes the average cost on the training set\r\n def train_fn(theta_value):\r\n classifier.theta.set_value(theta_value, borrow=True)\r\n train_losses = [batch_cost(i * batch_size)\r\n for i in xrange(n_train_batches)]\r\n return numpy.mean(train_losses)\r\n\r\n # creates a function that computes the average gradient of cost with\r\n # respect to theta\r\n def train_fn_grad(theta_value):\r\n classifier.theta.set_value(theta_value, borrow=True)\r\n grad = batch_grad(0)\r\n for i in xrange(1, n_train_batches):\r\n grad += batch_grad(i * batch_size)\r\n return grad / n_train_batches\r\n\r\n validation_scores = [numpy.inf, 0]\r\n\r\n # creates the validation function\r\n def callback(theta_value):\r\n classifier.theta.set_value(theta_value, borrow=True)\r\n #compute the validation loss\r\n validation_losses = [validate_model(i * batch_size)\r\n for i in xrange(n_valid_batches)]\r\n this_validation_loss = numpy.mean(validation_losses)\r\n print('validation error %f %%' % (this_validation_loss * 100.,))\r\n\r\n # check if it is better then best validation score got until now\r\n if this_validation_loss < validation_scores[0]:\r\n # if so, replace the old one, and compute the score on the\r\n # testing dataset\r\n validation_scores[0] = this_validation_loss\r\n test_losses = [test_model(i * batch_size)\r\n for i in xrange(n_test_batches)]\r\n validation_scores[1] = numpy.mean(test_losses)\r\n\r\n ###############\r\n # TRAIN MODEL #\r\n ###############\r\n\r\n # using scipy conjugate gradient optimizer\r\n import scipy.optimize\r\n print (\"Optimizing using scipy.optimize.fmin_cg...\")\r\n start_time = 
time.clock()\r\n best_w_b = scipy.optimize.fmin_cg(\r\n f=train_fn,\r\n x0=numpy.zeros((n_in + 1) * n_out, dtype=x.dtype),\r\n fprime=train_fn_grad,\r\n callback=callback,\r\n disp=0,\r\n maxiter=n_epochs)\r\n end_time = time.clock()\r\n print(('Optimization complete with best validation score of %f %%, with '\r\n 'test performance %f %%') %\r\n (validation_scores[0] * 100., validation_scores[1] * 100.))\r\n\r\n print >> sys.stderr, ('The code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.1fs' % ((end_time - start_time)))", "def test_text_classifier_add_training_samples(self):\n pass", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def main():\r\n # LOading the Test images & labels\r\n params, test_images, test_labels = get_data()\r\n\r\n # Accuracy on Test Data\r\n accuracy = model_score(params, test_images, test_labels, act='sig')\r\n print ('\\nAccuracy : ' + str(accuracy) + ' %\\n')", "def get_mnist_data(batch=128):\n \n def transformer(data, label):\n data = data.flatten().expand_dims(0).astype(np.float32)/255\n data = data-0.13/0.31\n label = label.astype(np.float32)\n return data, label\n\n train_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=True, transform=transformer)\n validation_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=False, transform=transformer)\n train_dataloader = gluon.data.DataLoader(train_dataset, batch_size=batch, last_batch='keep',shuffle=True)\n validation_dataloader = gluon.data.DataLoader(validation_dataset, batch_size=batch, last_batch='keep')\n \n return train_dataloader, validation_dataloader" ]
[ "0.7775563", "0.7588313", "0.74669766", "0.7369658", "0.7182041", "0.7171548", "0.7122935", "0.7103584", "0.7040259", "0.70040536", "0.69778866", "0.69447285", "0.69191986", "0.69013685", "0.68820626", "0.68727684", "0.68639225", "0.68560404", "0.6851476", "0.68482596", "0.68447804", "0.6837611", "0.67977846", "0.6794617", "0.67881024", "0.6785603", "0.678502", "0.6750209", "0.67396903", "0.6736315", "0.6630636", "0.6624444", "0.66024995", "0.6600424", "0.6580954", "0.65451145", "0.65428257", "0.6512272", "0.6494354", "0.6487676", "0.64865124", "0.6457865", "0.645253", "0.6379204", "0.63652915", "0.6340968", "0.6335792", "0.6332652", "0.6324311", "0.62980783", "0.6297579", "0.62968373", "0.62960106", "0.629274", "0.6291611", "0.6289607", "0.627441", "0.6252526", "0.62449753", "0.62332565", "0.6231386", "0.6224754", "0.62207645", "0.6212368", "0.6211482", "0.6200164", "0.61899906", "0.6186814", "0.61864465", "0.61838657", "0.61595875", "0.6154557", "0.61542046", "0.61534274", "0.61403555", "0.61384636", "0.61381626", "0.6134656", "0.6130655", "0.6127851", "0.6118519", "0.6111469", "0.6110999", "0.61091864", "0.6105572", "0.61037004", "0.60965943", "0.6094991", "0.60936815", "0.6085426", "0.608372", "0.6080913", "0.6067853", "0.60636246", "0.60621524", "0.6057668", "0.6048525", "0.6046182", "0.60425043", "0.60402113", "0.60398793" ]
0.0
-1
Test the functionality of the KafkaGroupIODataset when the consumer group is being newly created.
def test_kafka_group_io_dataset_primary_cg():
    dataset = tfio.experimental.streaming.KafkaGroupIODataset(
        topics=["key-partition-test"],
        group_id="cgtestprimary",
        servers="localhost:9092",
        configuration=[
            "session.timeout.ms=7000",
            "max.poll.interval.ms=8000",
            "auto.offset.reset=earliest",
        ],
    )
    assert np.all(
        sorted(k.numpy() for (k, _) in dataset)
        == sorted(("D" + str(i)).encode() for i in range(10))
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def test_create_device_group(self):\n pass", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def 
test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def test_create_new_discussion_topic_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_add_group(self):\n pass", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_create_device_group1(self):\n pass", "def test_read_group(self):\n pass", "def test_create_resource_group(self):\n pass", "def test_user_group_controller_create(self):\n pass", "def test_verify_that_you_can_create_a_new_group():", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_update_group(self):\n pass", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_create_scalar(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', ())\n assert dset.shape == ()\n assert dset.data == 0", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_create_scalar_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n 
group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def test_string(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', data=\"string\")\n assert dset.data == \"string\"", "def test_create_fillval(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (10,), fillvalue=4.0)\n assert dset[0] == 4.0\n assert dset[7] == 4.0", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def test_add(self):\n # add a new group\n self.assertTrue(self.run_function(\"group.add\", [self._group], gid=self._gid))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"gid\"], self._gid)\n self.assertEqual(group_info[\"name\"], self._group)\n # try adding the group again\n self.assertFalse(self.run_function(\"group.add\", [self._group], gid=self._gid))", "def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n 
assert np.array_equal(x, y)", "def test_dataset_intermediate_group(setup_teardown_file):\n f = setup_teardown_file[3]\n\n # Trying to create intermediate groups that are absolute should fail just\n # like when creating them on groups.\n with pytest.raises(NotImplementedError):\n f.create_dataset(\"/foo/bar/baz\", shape=(10, 10), dtype='<i4')\n\n ds = f.create_dataset(\"foo/bar/baz\", shape=(10, 10), dtype='<i4')\n assert isinstance(ds, Dataset)\n assert \"/foo/bar/baz\" in f", "def __on_group_created(self, logger, *args):", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def persist_test_group(self, obj: object, group: str) -> str:", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", 
"def test_type_confict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_group('foo')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'f')", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def test_get_group(self):\n pass", "def test_reshape(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f')\n dset = grp.create_dataset('foo', shape=(10, 3), data=data)\n assert dset.shape == (10, 3)\n assert np.array_equal(dset.data, data.reshape((10, 3)))", "def setUpClass(cls):\n super().setUpClass()\n cls.group = Group.objects.create(\n title=TEST_GROUP_NAME,\n slug=TEST_GROUP_SLUG\n )", "def test_exc(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n with pytest.raises(TypeError):\n grp.create_dataset('foo', (10,), dtype=\"float32\", fillvalue={\"a\": 2})", "def test_post_entry_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def test_createGroup(self):\n tabGroup = widgets.TabGroup(u'group1', u'Group', tabs=[\n widgets.Tab(u'id4', u'Title 4', self.contentFactory)])\n tabs = self.tabs + [\n tabGroup,\n widgets.Tab(u'id5', u'Title 5', self.contentFactory)]\n tabView = widgets.TabView(tabs)\n self.assertEquals(\n tabView.getTabIDs(),\n [u'id1', u'id2', u'id3', u'id4', u'id5'])\n self.assertEquals(\n tabView._tabGroups,\n {u'group1': tabGroup})", "def test_update_device_group_by_id(self):\n pass", "def test_trivial(self):\n group = Group()", "def test_update_resource_group(self):\n pass", "def test_test_group_parameters(self):\n pass", "def test_create_cloudwatch_log_group(cloudwatch_logs):\n cloudwatch_logs.create_log_group(\n logGroupName='/ecs/mongo/change-stream',\n tags={'env': 'test'}\n )\n\n result = cloudwatch_logs.describe_log_groups()\n assert len(result['logGroups']) == 1\n assert result['logGroups'][0]['logGroupName'] == '/ecs/mongo/change-stream'", "def test_groups_post(self):\n pass", "def test_message_group():", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def test_update_device_group_by_id1(self):\n pass", "def test_groups_group_id_state_put(self):\n pass", "def test_user_group_controller_update(self):\n pass", "def test_groups_group_ref_put(self):\n pass", "def test_delete_collection_group(self):\n pass", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def test_delete_groups(self):\n pass", "def test_create_and_delete_rdf_group(self):\n local_array = self.conn.array_id\n local_port_list, remote_port_list = self.get_online_rdf_ports()\n if not remote_port_list:\n self.skipTest('Skipping test_create_and_delete_rdf_group -'\n 'No remote port list.')\n\n self.conn.set_array_id(local_array)\n rdf_group = self.get_next_free_srdf_group()\n self.replication.create_rdf_group(\n local_director_port_list=local_port_list,\n remote_array_id=self.conn.remote_array,\n remote_director_port_list=remote_port_list,\n array_id=local_array, local_rdfg_number=rdf_group,\n remote_rdfg_number=rdf_group, label='pyu4v_' + str(rdf_group))\n rdf_group_list = self.replication.get_rdf_group_list()\n rdfg_list = list()\n for group in rdf_group_list:\n rdfg_list.append(group['rdfgNumber'])\n self.assertIn(rdf_group, rdfg_list)\n self.replication.delete_rdf_group(srdf_group_number=rdf_group)\n rdf_group_list = self.replication.get_rdf_group_list()\n rdfg_list = list()\n for group in rdf_group_list:\n 
rdfg_list.append(group['rdfgNumber'])\n self.assertNotIn(rdf_group, rdfg_list)", "def test_update_entry_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_delete(self):\n self.assertTrue(self.run_function(\"group.add\", [self._group]))\n\n # correct functionality\n self.assertTrue(self.run_function(\"group.delete\", [self._group]))\n\n # group does not exist\n self.assertFalse(self.run_function(\"group.delete\", [self._no_group]))", "def test_one_group(self, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_keys = []\n for i in range(9):\n group_key = Mock()\n group_key.urlsafe.return_value = i + 100\n group_keys.append(group_key)\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups(group_keys, event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def test_create_device_data(self):\n pass", "def test_creator_in_group_can_update(self):\n\n self.client.login(username='notlogged', password='notlogged')\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_can_access(self, self.url,\n post_redirect_url=expected_url,\n data=self.data)\n\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, self.data['name'])\n self.assertEqual(updated_group.description, self.data['description'])\n self.assertIsNotNone(updated_group.last_edit_date)", "def _get_new_group_id():\n new_group = data_types.TestcaseGroup()\n new_group.put()\n return new_group.key.id()", "def test_bulk_group(self):\n for fn, sep in [(self.filename_actg, '\\t'),\n (self.filename_actg_csv, ';')]:\n file_path_ag = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n fn)\n data = {\n 'bulk_upload' : open(file_path_ag, 'rb'),\n }\n\n existing_ags = ActivityGroup.objects.filter(keyflow=self.kic)\n existing_codes = list(existing_ags.values_list('code', flat=True))\n\n encoding = 'utf8'\n df_file_ags = pd.read_csv(file_path_ag, sep=sep)\n df_file_ags = df_file_ags.rename(\n columns={c: c.lower() for c in df_file_ags.columns})\n file_codes = df_file_ags['code']\n new_codes = [c for c in file_codes if c not in existing_codes]\n\n res = self.client.post(self.ag_url, data)\n res_json = res.json()\n assert res.status_code == status.HTTP_201_CREATED\n assert res_json['count'] == len(file_codes)\n assert len(res_json['created']) == len(new_codes)\n\n # assert that the number of activities matches\n all_ag = ActivityGroup.objects.filter(keyflow_id=self.kic.id)\n assert len(all_ag) == len(existing_codes) + len(new_codes)\n\n # assert that the Name matches in all values\n for row in df_file_ags.itertuples(index=False):\n ag = ActivityGroup.objects.get(keyflow=self.keyflow,\n code=row.code)\n assert ag.name == row.name", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_groups_group_users_put(self):\n pass", "def test_groups_group_users_put(self):\n pass", "def test_api_v1_groups_post(self):\n pass", "def test_save(self, name='test'):\n group = Group(name=name)\n group.save()\n return group", "def test_logged_can_create_group(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url,\n post_redirect_url=reverse('my_groups_view'),\n data=self.data)\n\n groups = Group.objects.all()\n 
self.assertEqual(len(groups), 1)\n self.assertEqual(groups[0].name, self.data['name'])\n self.assertEqual(groups[0].description, self.data['description'])\n self.assertEqual(groups[0].creator, logged_user)\n self.assertEqual(len(groups[0].users.all()), 1)", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": [\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def test_dtype(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (5,), '|S10')\n assert dset.dtype == np.dtype('|S10')", "def test_new_dataset_button_with_new_datasets(self):\n self.study_version.i_is_deprecated = True\n self.study_version.save()\n new_version = factories.SourceStudyVersionFactory.create(\n study=self.study, i_version=self.study_version.i_version + 1, i_date_added=timezone.now())\n new_dataset = factories.SourceDatasetFactory.create(source_study_version=new_version)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertIn('show_new_dataset_button', context)\n self.assertTrue(context['show_new_dataset_button'])\n self.assertContains(response, reverse('trait_browser:source:studies:pk:datasets:new', args=[self.study.pk]))", "def test_chgid(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.assertTrue(self.run_function(\"group.chgid\", [self._group, self._new_gid]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"gid\"], self._new_gid)", "def test_create_group_409(self):\n request = {\n 'name': self.test_group1_groupid\n }\n # First create a group indirectly by making a user with a group\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now create a group that is already there\n resp = self.app.post('/groups', data=json.dumps(request))\n assert resp.status_code == 409", "def data_group():\n ..." ]
[ "0.71414036", "0.7126442", "0.7073285", "0.6968703", "0.6968703", "0.6883464", "0.68237174", "0.6809277", "0.680548", "0.68034434", "0.67761457", "0.67453897", "0.6516205", "0.65071094", "0.6487364", "0.6473713", "0.6469515", "0.63676554", "0.63668543", "0.6361959", "0.63047993", "0.6277567", "0.62379813", "0.6229682", "0.6200205", "0.613886", "0.61073667", "0.6099471", "0.6084858", "0.6074848", "0.60692847", "0.60504186", "0.6037816", "0.6031068", "0.60299873", "0.6028903", "0.60271466", "0.6025733", "0.6015461", "0.60087365", "0.5975709", "0.5952861", "0.5943513", "0.59262574", "0.58734053", "0.5869751", "0.5869751", "0.58643585", "0.5851675", "0.5841088", "0.5818015", "0.58174956", "0.5806257", "0.5797404", "0.57888955", "0.57791984", "0.5763505", "0.57412434", "0.5716259", "0.56979674", "0.56970763", "0.5691928", "0.5682173", "0.56677085", "0.56664574", "0.56621766", "0.5655401", "0.5643303", "0.5640615", "0.5640615", "0.563696", "0.5636056", "0.5626912", "0.56189823", "0.56163424", "0.56158596", "0.56112975", "0.5604794", "0.56017864", "0.55979717", "0.5576111", "0.5558146", "0.5558146", "0.5557658", "0.5555751", "0.5552828", "0.5551497", "0.55480826", "0.55480826", "0.5537415", "0.5537415", "0.55373514", "0.55317503", "0.5509262", "0.5501171", "0.5495332", "0.54949456", "0.54790276", "0.54737055", "0.54709977" ]
0.65959835
12
Test the functionality of the KafkaGroupIODataset when the consumer group has read all the messages and committed the offsets.
def test_kafka_group_io_dataset_primary_cg_no_lag():
    dataset = tfio.experimental.streaming.KafkaGroupIODataset(
        topics=["key-partition-test"],
        group_id="cgtestprimary",
        servers="localhost:9092",
        configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"],
    )
    assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 
'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def _single_group_offsets_callback(self, consumer_group, response):\n single_group_offsets = self.kafka_client._list_consumer_group_offsets_process_response(response)\n self.log.debug(\"Single group offsets: %s\", single_group_offsets)\n for (topic, partition), (offset, _metadata) in single_group_offsets.items():\n # If the OffsetFetchRequest explicitly specified partitions, the offset could returned as -1, meaning there\n # is no recorded offset for that partition... 
for example, if the partition doesn't exist in the cluster.\n # So ignore it.\n if offset == -1:\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n continue\n key = (consumer_group, topic, partition)\n self._consumer_offsets[key] = offset", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... 
checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_read_group(self):\n pass", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if 
(msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_mark_all_entries_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def send_offset_commit_request(self, group, payloads=None,\n fail_on_error=True, callback=None,\n group_generation_id=-1,\n consumer_id=''):\n group = _coerce_consumer_group(group)\n encoder = partial(KafkaCodec.encode_offset_commit_request,\n group=group, group_generation_id=group_generation_id,\n consumer_id=consumer_id)\n decoder = KafkaCodec.decode_offset_commit_response\n resps = yield self._send_broker_aware_request(\n payloads, encoder, decoder, consumer_group=group)\n\n returnValue(self._handle_responses(\n resps, fail_on_error, callback, group))", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def _find_coordinator_callback(self, consumer_group, response):\n coordinator_id = self.kafka_client._find_coordinator_id_process_response(response)\n topics = self._consumer_groups[consumer_group]\n if not topics:\n topic_partitions = None # None signals to fetch all known offsets for the consumer group\n else:\n # transform [(\"t1\", [1, 2])] into [TopicPartition(\"t1\", 1), TopicPartition(\"t1\", 2)]\n topic_partitions = []\n for topic, partitions in topics.items():\n if not partitions: # If partitions aren't specified, fetch all partitions in the topic\n partitions = self.kafka_client._client.cluster.partitions_for_topic(topic)\n topic_partitions.extend([TopicPartition(topic, p) for p in partitions])\n single_group_offsets_future = self._list_consumer_group_offsets_send_request(\n group_id=consumer_group, group_coordinator_id=coordinator_id, partitions=topic_partitions\n )\n single_group_offsets_future.add_callback(self._single_group_offsets_callback, consumer_group)\n self._consumer_futures.append(single_group_offsets_future)", "def send_offset_fetch_request(self, group, payloads=None,\n fail_on_error=True, callback=None):\n encoder = partial(KafkaCodec.encode_offset_fetch_request,\n group=group)\n decoder = KafkaCodec.decode_offset_fetch_response\n resps = yield self._send_broker_aware_request(\n payloads, encoder, decoder, consumer_group=group)\n\n returnValue(self._handle_responses(\n resps, fail_on_error, callback, group))", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def _list_groups_callback(self, broker_id, response):\n for consumer_group, group_type in self.kafka_client._list_consumer_groups_process_response(response):\n # consumer groups from Kafka < 0.9 that store their offset in Kafka don't use Kafka for group-coordination\n # so their group_type 
is empty\n if group_type in ('consumer', ''):\n single_group_offsets_future = self._list_consumer_group_offsets_send_request(\n group_id=consumer_group, group_coordinator_id=broker_id\n )\n single_group_offsets_future.add_callback(self._single_group_offsets_callback, consumer_group)\n self._consumer_futures.append(single_group_offsets_future)", "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n assert np.array_equal(x, y)", "def 
check(self):\n self._consumer_offsets = {} # Expected format: {(consumer_group, topic, partition): offset}\n self._highwater_offsets = {} # Expected format: {(topic, partition): offset}\n\n # For calculating consumer lag, we have to fetch both the consumer offset and the broker highwater offset.\n # There's a potential race condition because whichever one we check first may be outdated by the time we check\n # the other. Better to check consumer offsets before checking broker offsets because worst case is that\n # overstates consumer lag a little. Doing it the other way can understate consumer lag to the point of having\n # negative consumer lag, which just creates confusion because it's theoretically impossible.\n\n # Fetch Kafka consumer offsets\n try:\n self._get_consumer_offsets()\n except Exception:\n self.log.exception(\"There was a problem collecting consumer offsets from Kafka.\")\n # don't raise because we might get valid broker offsets\n\n if self._data_streams_enabled:\n self._load_broker_timestamps()\n # Fetch the broker highwater offsets\n try:\n if len(self._consumer_offsets) < self._context_limit:\n self._get_highwater_offsets()\n else:\n self.warning(\"Context limit reached. Skipping highwater offset collection.\")\n except Exception:\n self.log.exception(\"There was a problem collecting the highwater mark offsets.\")\n # Unlike consumer offsets, fail immediately because we can't calculate consumer lag w/o highwater_offsets\n raise\n\n total_contexts = len(self._consumer_offsets) + len(self._highwater_offsets)\n if total_contexts >= self._context_limit:\n self.warning(\n \"\"\"Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the\n check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics\n and partitions you wish to monitor.\"\"\",\n total_contexts,\n self._context_limit,\n )\n\n if self._data_streams_enabled:\n self._save_broker_timestamps()\n\n # Report the metrics\n self._report_highwater_offsets(self._context_limit)\n self._report_consumer_offsets_and_lag(self._context_limit - len(self._highwater_offsets))\n\n self._collect_broker_metadata()", "def _get_consumer_offsets(self):\n # Store the list of futures on the object because some of the callbacks create/store additional futures and they\n # don't have access to variables scoped to this method, only to the object scope\n self._consumer_futures = []\n\n if self._monitor_unlisted_consumer_groups:\n for broker in self.kafka_client._client.cluster.brokers():\n # FIXME: This is using a workaround to skip socket wakeup, which causes blocking\n # (see https://github.com/dpkp/kafka-python/issues/2286).\n # Once https://github.com/dpkp/kafka-python/pull/2335 is merged in, we can use the official\n # implementation for this function instead.\n list_groups_future = self._list_consumer_groups_send_request(broker.nodeId)\n list_groups_future.add_callback(self._list_groups_callback, broker.nodeId)\n self._consumer_futures.append(list_groups_future)\n elif self._consumer_groups:\n self.validate_consumer_groups()\n for consumer_group in self._consumer_groups:\n find_coordinator_future = self._find_coordinator_id_send_request(consumer_group)\n find_coordinator_future.add_callback(self._find_coordinator_callback, consumer_group)\n self._consumer_futures.append(find_coordinator_future)\n else:\n raise ConfigurationError(\n \"Cannot fetch consumer offsets because no consumer_groups are specified and \"\n \"monitor_unlisted_consumer_groups is %s.\" % 
self._monitor_unlisted_consumer_groups\n )\n\n # Loop until all futures resolved.\n self.kafka_client._wait_for_futures(self._consumer_futures)\n del self._consumer_futures # since it's reset on every check run, no sense holding the reference between runs", "def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def load_consumer_metadata_for_group(self, group):\n group = _coerce_consumer_group(group)\n log.debug(\"%r: load_consumer_metadata_for_group: %r\", self, group)\n\n # If we are already loading the metadata for this group, then\n # just return the outstanding deferred\n if group in self.coordinator_fetches:\n return self.coordinator_fetches[group]\n\n # No outstanding request, create a new one\n requestId = self._next_id()\n request = KafkaCodec.encode_consumermetadata_request(\n self._clientIdBytes, requestId, group)\n\n # Callbacks for the request deferred...\n def _handleConsumerMetadataResponse(response, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n # Decode the response (returns ConsumerMetadataResponse)\n c_m_resp = KafkaCodec.decode_consumermetadata_response(response)\n log.debug(\"%r: c_m_resp: %r\", self, c_m_resp)\n if c_m_resp.error:\n # Raise the appropriate error\n resp_err = kafka_errors.get(\n c_m_resp.error, UnknownError)(c_m_resp)\n raise resp_err\n\n self.consumer_group_to_brokers[group] = \\\n BrokerMetadata(c_m_resp.node_id, c_m_resp.host,\n c_m_resp.port)\n return True\n\n def _handleConsumerMetadataErr(err, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n log.error(\"Failed to retrieve consumer metadata \"\n \"for group: %s Error:%r\", group, err)\n # Clear any stored value for the group's coordinator\n self.reset_consumer_group_metadata(group)\n raise ConsumerCoordinatorNotAvailableError(\n \"Coordinator for group: %s not available\" % (group))\n\n # Send the request, add the handlers\n d = self._send_broker_unaware_request(requestId, request)\n # Save the deferred under the fetches for this group\n self.coordinator_fetches[group] = d\n d.addCallback(_handleConsumerMetadataResponse, group)\n d.addErrback(_handleConsumerMetadataErr, group)\n return d", "def 
test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def _list_consumer_group_offsets_send_request(self, group_id, group_coordinator_id, partitions=None):\n version = self.kafka_client._matching_api_version(OffsetFetchRequest)\n if version <= 3:\n if partitions is None:\n if version <= 1:\n raise ValueError(\n \"\"\"OffsetFetchRequest_v{} requires specifying the\n partitions for which to fetch offsets. 
Omitting the\n partitions is only supported on brokers >= 0.10.2.\n For details, see KIP-88.\"\"\".format(\n version\n )\n )\n topics_partitions = None\n else:\n # transform from [TopicPartition(\"t1\", 1), TopicPartition(\"t1\", 2)] to [(\"t1\", [1, 2])]\n topics_partitions_dict = defaultdict(set)\n for topic, partition in partitions:\n topics_partitions_dict[topic].add(partition)\n topics_partitions = list(six.iteritems(topics_partitions_dict))\n request = OffsetFetchRequest[version](group_id, topics_partitions)\n else:\n raise NotImplementedError(\n \"Support for OffsetFetchRequest_v{} has not yet been added to KafkaAdminClient.\".format(version)\n )\n return self._send_request_to_node(group_coordinator_id, request, wakeup=False)", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def _highwater_offsets_callback(self, response):\n if type(response) not in OffsetResponse:\n raise RuntimeError(\"response type should be OffsetResponse, but instead was %s.\" % type(response))\n for topic, partitions_data in response.topics:\n for partition, error_code, offsets in partitions_data:\n error_type = kafka_errors.for_code(error_code)\n if error_type is kafka_errors.NoError:\n self._highwater_offsets[(topic, partition)] = offsets[0]\n if self._data_streams_enabled:\n timestamps = self._broker_timestamps[\"{}_{}\".format(topic, partition)]\n timestamps[offsets[0]] = time()\n # If there's too many timestamps, we delete the oldest\n if len(timestamps) > MAX_TIMESTAMPS:\n del timestamps[min(timestamps)]\n elif error_type is kafka_errors.NotLeaderForPartitionError:\n self.log.warning(\n \"Kafka broker returned %s (error_code %s) for topic %s, partition: %s. 
This should only happen \"\n \"if the broker that was the partition leader when kafka_admin_client last fetched metadata is \"\n \"no longer the leader.\",\n error_type.message,\n error_type.errno,\n topic,\n partition,\n )\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n elif error_type is kafka_errors.UnknownTopicOrPartitionError:\n self.log.warning(\n \"Kafka broker returned %s (error_code %s) for topic: %s, partition: %s. This should only \"\n \"happen if the topic is currently being deleted or the check configuration lists non-existent \"\n \"topic partitions.\",\n error_type.message,\n error_type.errno,\n topic,\n partition,\n )\n else:\n raise error_type(\n \"Unexpected error encountered while attempting to fetch the highwater offsets for topic: %s, \"\n \"partition: %s.\" % (topic, partition)\n )", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def _check(self, instance):\n\n zk_connect_str = self.read_config(instance, 'zk_connect_str')\n kafka_host_ports = self.read_config(instance, 'kafka_connect_str')\n cluster_name = self.read_config(instance, \"name\")\n\n # Connect to Kafka\n kafka_conn = KafkaClient(kafka_host_ports, timeout=self.kafka_timeout)\n\n # Construct the Zookeeper path pattern\n zk_prefix = instance.get('zk_prefix', '')\n consumers = self.read_config(instance, 'topics', cast=self._validate_consumer_groups)\n\n consumer_lags = []\n for cm in consumers:\n if cm.get(\"zk_enabled\", True):\n pcm = self._get_consumer_offset_by_zookeeper(zk_connect_str, kafka_conn, cm, zk_prefix)\n else:\n pcm = self._get_consumer_offset_by_api(kafka_conn, cm, kafka_host_ports)\n consumer_lags.append(pcm)\n \n self.handle_collector(consumer_lags, self.collector, cluster_name)", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_bob_read(self):\n messages = list(self.bob_storage.read)\n self.assertEqual(1, len(messages))\n self.assertNotIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_alice_read(self):\n messages = list(self.alice_storage.read)\n self.assertEqual(1, len(messages))\n self.assertNotIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_multi_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(5)", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def _report_consumer_offsets_and_lag(self, contexts_limit):\n reported_contexts = 0\n self.log.debug(\"Reporting consumer offsets and lag metrics\")\n for (consumer_group, topic, partition), consumer_offset in self._consumer_offsets.items():\n if reported_contexts >= contexts_limit:\n self.log.debug(\n \"Reported contexts number %s greater than or equal to contexts limit of %s, returning\",\n str(reported_contexts),\n str(contexts_limit),\n )\n return\n consumer_group_tags = ['topic:%s' % topic, 'partition:%s' % partition, 'consumer_group:%s' % consumer_group]\n consumer_group_tags.extend(self._custom_tags)\n\n partitions = self.kafka_client._client.cluster.partitions_for_topic(topic)\n self.log.debug(\"Received partitions %s for topic %s\", partitions, topic)\n if partitions is not None and partition in partitions:\n # report consumer offset if the partition is valid because even if leaderless the consumer offset will\n # be valid once the leader failover completes\n self.gauge('consumer_offset', consumer_offset, tags=consumer_group_tags)\n reported_contexts += 1\n\n if (topic, partition) not in self._highwater_offsets:\n self.log.warning(\n \"Consumer group: %s has offsets for topic: %s partition: %s, but no stored highwater offset \"\n \"(likely the partition is in the middle of leader failover) so cannot calculate consumer lag.\",\n consumer_group,\n topic,\n partition,\n )\n continue\n producer_offset = self._highwater_offsets[(topic, partition)]\n consumer_lag = producer_offset - consumer_offset\n if reported_contexts < contexts_limit:\n self.gauge('consumer_lag', consumer_lag, tags=consumer_group_tags)\n reported_contexts += 1\n\n if consumer_lag < 0:\n # this will effectively result in data loss, so emit an event for max visibility\n title = \"Negative consumer lag for group: {}.\".format(consumer_group)\n message = (\n \"Consumer group: {}, topic: {}, partition: {} has negative consumer lag. 
This should never \"\n \"happen and will result in the consumer skipping new messages until the lag turns \"\n \"positive.\".format(consumer_group, topic, partition)\n )\n key = \"{}:{}:{}\".format(consumer_group, topic, partition)\n self.send_event(title, message, consumer_group_tags, 'consumer_lag', key, severity=\"error\")\n self.log.debug(message)\n\n if reported_contexts >= contexts_limit:\n continue\n if not self._data_streams_enabled:\n continue\n timestamps = self._broker_timestamps[\"{}_{}\".format(topic, partition)]\n # The producer timestamp can be not set if there was an error fetching broker offsets.\n producer_timestamp = timestamps.get(producer_offset, None)\n consumer_timestamp = self._get_interpolated_timestamp(timestamps, consumer_offset)\n if consumer_timestamp is None or producer_timestamp is None:\n continue\n lag = producer_timestamp - consumer_timestamp\n self.gauge('consumer_lag_seconds', lag, tags=consumer_group_tags)\n reported_contexts += 1\n else:\n if partitions is None:\n msg = (\n \"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic has no partitions \"\n \"in the cluster, so skipping reporting these offsets.\"\n )\n else:\n msg = (\n \"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic partition isn't \"\n \"included in the cluster partitions, so skipping reporting these offsets.\"\n )\n self.log.warning(msg, consumer_group, topic, partition)\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx):\n for callback in self.callbacks:\n callback.on_test_batch_end(self, self.get_model(), outputs, batch, batch_idx, dataloader_idx)", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = 
consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def test_bob_all(self):\n messages = list(self.bob_storage.all)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def verify_consumer_seek(c, seek_to_msg):\n\n tp = confluent_kafka.TopicPartition(seek_to_msg.topic(),\n seek_to_msg.partition(),\n seek_to_msg.offset())\n print('seek: Seeking to %s' % tp)\n c.seek(tp)\n\n while True:\n msg = c.poll()\n assert msg is not None\n if msg.error():\n print('seek: Ignoring non-message: %s' % msg)\n continue\n\n if msg.topic() != seek_to_msg.topic() or msg.partition() != seek_to_msg.partition():\n continue\n\n print('seek: message at offset %d' % msg.offset())\n assert msg.offset() == seek_to_msg.offset(), \\\n 'expected message at offset %d, not %d' % (seek_to_msg.offset(), msg.offset())\n break", "def test_update_group(self):\n pass", "def _get_coordinator_for_group(self, consumer_group):\n if self.consumer_group_to_brokers.get(consumer_group) is None:\n yield self.load_consumer_metadata_for_group(consumer_group)\n\n returnValue(self.consumer_group_to_brokers.get(consumer_group))", "def test_alice_all(self):\n messages = list(self.alice_storage.all)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_bob_unread(self):\n messages = list(self.bob_storage.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_update_groups_Passes_correct_data_to_modify_groups_file(\n self, mock_modify_groups_file , mock_read_groups_file\n ):\n mock_modify_groups_file.return_value = True\n mock_read_groups_file.return_value = self.groups_data\n\n crud = CRUD()\n crud.update_groups(\"1\", \"name\", \"new_data\")\n mock_modify_groups_file.assert_called_once_with(self.groups_data)", "def testGetOffset(self):\n self.ports.get_offset(file_name = 'get_offset.xml', port_ids = portsDict['port_ids'], offsets = portsDict['offset'])", "def on_test_batch_end(self, batch, logs=None):", "async def test(self):\n await self._persist_data()\n self.assertIsNotNone(self._added_location)\n with self.assertRaises(FileNotFoundError):\n os.path.getsize(self._path + '/data00000.dat')\n with self.assertRaises(SystemExit):\n self.sut.on_stop_signal()\n self.sut.on_stop_signal() # fire it twice, it must be executed once\n self.assertEqual(\n self.sut._file_header_size + 4,\n 
os.path.getsize(self._path + '/data00000.dat')\n )\n self._setup_sut()\n asyncio.get_event_loop().create_task(self.sut.run())\n while not self.sut.running:\n await asyncio.sleep(1)\n data = await self.sut.read(self._added_location)\n self.assertEqual(b'data', data)", "def test_all_groups(self, group_query_mock, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n from sosbeacon.group import ADMIN_GROUPS_ID\n from sosbeacon.group import Group\n\n group_order_mock = group_query_mock.return_value.order\n group_iter_mock = group_order_mock.return_value.iter\n group_iter_mock.return_value = []\n\n group_key = Mock()\n group_key.id.return_value = ADMIN_GROUPS_ID\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups([group_key], event_key, message_key, '')\n\n group_query_mock.assert_called_once_with()\n group_order_mock.assert_called_once_with(Group.key)\n group_iter_mock.assert_called_once_with(keys_only=True)", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def kafka_commit(self):\n self.kf_producer.flush()", "def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n 
self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)", "def test_resume_offset(sdc_builder, sdc_executor, azure):\n container_name = get_random_string(string.ascii_lowercase, 10)\n event_hub_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n\n azure_iot_event_hub_consumer = builder.add_stage(name=AZURE_IOT_EVENT_HUB_STAGE_NAME).set_attributes(\n container_name=container_name,\n data_format='JSON',\n event_hub_name=event_hub_name)\n\n wiretap = builder.add_wiretap()\n\n azure_iot_event_hub_consumer >> wiretap.destination\n\n consumer_origin_pipeline = builder.build().configure_for_environment(azure)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n create_blob_container(azure, container_name)\n\n try:\n eh_service_bus = azure.event_hubs.service_bus\n\n logger.info('Creating event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n assert eh_service_bus.create_event_hub(event_hub_name)\n\n send_records = [{'Body': f'Event {msg}'} for msg in range(10)]\n eh_service_bus.send_event(event_hub_name, json.dumps(send_records))\n\n sdc_executor.start_pipeline(consumer_origin_pipeline)\n sdc_executor.wait_for_pipeline_metric(consumer_origin_pipeline, 'input_record_count', 1, timeout_sec=120)\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n first_iteration_records = wiretap.output_records[0].field\n\n assert len(first_iteration_records) != 0\n results = [{key: value for key, value in record.items()} for record in first_iteration_records]\n assert results == send_records\n\n wiretap.reset()\n\n # Try adding more data and resuming from the offset\n send_records2 = [{'Body': f'Event {msg}'} for msg in range(10, 20)]\n eh_service_bus.send_event(event_hub_name, json.dumps(send_records2))\n\n sdc_executor.start_pipeline(consumer_origin_pipeline)\n sdc_executor.wait_for_pipeline_metric(consumer_origin_pipeline, 'input_record_count', 1, timeout_sec=120)\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n second_iteration_records = wiretap.output_records[0].field\n assert len(second_iteration_records) != 0\n results = [{key: value for key, value in record.items()} for record in second_iteration_records]\n assert results == send_records2\n\n assert len(second_iteration_records) + len(first_iteration_records) == len(send_records) + len(send_records2)\n\n finally:\n try:\n if sdc_executor.get_pipeline_status(consumer_origin_pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n except Exception as err:\n 
logger.error('Could not stop pipeline. Reason found: %s', err)\n\n try:\n logger.info('Deleting event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n event_hub_exists = True\n while event_hub_exists:\n eh_service_bus.delete_event_hub(event_hub_name)\n try:\n eh_service_bus.get_event_hub(event_hub_name)\n except Exception:\n event_hub_exists = False\n except Exception as err:\n logger.error('Failure deleting event hub %s. Reason found: %s', event_hub_name, err)\n\n try:\n logger.info('Deleting container %s on storage account %s', container_name, azure.storage.account_name)\n azure.storage.delete_blob_container(container_name)\n except Exception as err:\n logger.error('Failure deleting container %s. Reason found: %s', container_name, err)", "def test_mark_topic_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_topic_as_unread_groups(group_id, topic_id)", "def test_read_records_all(self, mocker, api):\n job = mocker.Mock(spec=InsightAsyncJob)\n job.get_result.return_value = [mocker.Mock(), mocker.Mock(), mocker.Mock()]\n job.interval = pendulum.Period(pendulum.date(2010, 1, 1), pendulum.date(2010, 1, 1))\n stream = AdsInsights(\n api=api,\n start_date=datetime(2010, 1, 1),\n end_date=datetime(2011, 1, 1),\n insights_lookback_window=28,\n )\n\n records = list(\n stream.read_records(\n sync_mode=SyncMode.incremental,\n stream_slice={\"insight_job\": job},\n )\n )\n\n assert len(records) == 3", "def test_all(self):\n opds1 = self._collection(protocol=ExternalIntegration.OPDS_IMPORT)\n opds2 = self._collection(protocol=ExternalIntegration.OPDS_IMPORT)\n overdrive = self._collection(protocol=ExternalIntegration.OVERDRIVE)\n providers = list(\n AlwaysSuccessfulCollectionCoverageProvider.all(self._db, batch_size=34)\n )\n\n # The providers were returned in a random order, but there's one\n # for each collection that supports the 'OPDS Import' protocol.\n assert 2 == len(providers)\n collections = set([x.collection for x in providers])\n assert set([opds1, opds2]) == collections\n\n # The providers are of the appropriate type and the keyword arguments\n # passed into all() were propagated to the constructor.\n for provider in providers:\n assert isinstance(provider, AlwaysSuccessfulCollectionCoverageProvider)\n assert 34 == provider.batch_size", "def test_process_data(self):\n pass", "def test_mmap(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (10**3, 10**3), fillvalue=2)\n dset[1, 1] = 100\n\n tmp_file = np.load(str(setup_teardown_file[1] / \"test\" / \"foo\" / \"data.npy\"))\n\n assert dset.data[1, 1] == 100\n assert tmp_file[1, 1] == 100", "def test_bulk_group(self):\n for fn, sep in [(self.filename_actg, '\\t'),\n (self.filename_actg_csv, ';')]:\n file_path_ag = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n fn)\n data = {\n 'bulk_upload' : open(file_path_ag, 'rb'),\n }\n\n existing_ags = ActivityGroup.objects.filter(keyflow=self.kic)\n existing_codes = list(existing_ags.values_list('code', flat=True))\n\n encoding = 'utf8'\n df_file_ags = pd.read_csv(file_path_ag, sep=sep)\n df_file_ags = df_file_ags.rename(\n columns={c: c.lower() for c in df_file_ags.columns})\n file_codes = df_file_ags['code']\n new_codes = [c for c in file_codes if c not in existing_codes]\n\n res = self.client.post(self.ag_url, data)\n res_json = res.json()\n assert res.status_code == status.HTTP_201_CREATED\n assert 
res_json['count'] == len(file_codes)\n assert len(res_json['created']) == len(new_codes)\n\n # assert that the number of activities matches\n all_ag = ActivityGroup.objects.filter(keyflow_id=self.kic.id)\n assert len(all_ag) == len(existing_codes) + len(new_codes)\n\n # assert that the Name matches in all values\n for row in df_file_ags.itertuples(index=False):\n ag = ActivityGroup.objects.get(keyflow=self.keyflow,\n code=row.code)\n assert ag.name == row.name", "def test_mark_all_entries_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_all_entries_as_unread_groups(group_id, topic_id, forced_read_state=None)", "def test_post_process_forwarder(\n dispatch_post_process_group_task, kafka_message_without_transaction_header\n):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n future = forwarder.process_message(kafka_message_without_transaction_header)\n\n forwarder.flush_batch([future])\n\n dispatch_post_process_group_task.assert_called_once_with(\n event_id=\"fe0ee9a2bc3b415497bad68aaf70dc7f\",\n project_id=1,\n group_id=43,\n primary_hash=\"311ee66a5b8e697929804ceb1c456ffe\",\n is_new=False,\n is_regression=None,\n is_new_group_environment=False,\n queue=\"post_process_errors\",\n group_states=[\n {\"id\": 43, \"is_new\": False, \"is_regression\": None, \"is_new_group_environment\": False}\n ],\n )\n\n forwarder.shutdown()", "def testInternalExportEvents(self):\n knowledge_base_object = knowledge_base.KnowledgeBase()\n\n output_mediator_object = output_mediator.OutputMediator(\n knowledge_base_object, data_location=shared_test_lib.TEST_DATA_PATH)\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator_object.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestOutputModule(output_mediator_object)\n\n test_engine = psort.PsortMultiProcessEngine()\n\n with shared_test_lib.TempDirectory() as temp_directory:\n temp_file = os.path.join(temp_directory, 'storage.plaso')\n self._CreateTestStorageFile(temp_file)\n self._ReadSessionConfiguration(temp_file, knowledge_base_object)\n\n storage_reader = (\n storage_factory.StorageFactory.CreateStorageReaderForFile(temp_file))\n storage_reader.ReadSystemConfiguration(knowledge_base_object)\n\n test_engine._ExportEvents(\n storage_reader, output_module, deduplicate_events=False)\n\n self.assertEqual(len(output_module.events), 17)\n self.assertEqual(len(output_module.macb_groups), 3)", "def test_batch_read_all_bins_pos(self):\n\n b = br.Read((\"test\", \"demo\", 1), ops=None, read_all_bins=True)\n\n assert b.read_all_bins", "def test_get_groupings_within_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=True), [])", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def test_seek_tell(self):\n self.default_kwargs['seek_callback'] = self._seek_callback\n self.default_kwargs['tell_callback'] = self._tell_callback\n self.encoder = StreamEncoder(**self.default_kwargs)\n test_samples = np.random.rand(DEFAULT_BLOCKSIZE, 1).astype('int16')\n self.encoder.process(test_samples)\n self.encoder.finish()\n self.assertTrue(self.write_callback_called)\n self.assertTrue(self.seek_callback_called)\n self.assertTrue(self.tell_callback_called)", "def test_written_data(demo_data, written_data):\n\n openeeg = 
openEDF(demo_data)\n openeeg2 = openEDF(written_data)\n\n #read data in steps of 5 million samples\n starts = np.arange(0, openeeg.shape[-1], int(5e6))\n stops = starts + int(5e3)\n for start, stop in zip(starts, stops):\n arr = openeeg.read(start, stop)\n other = openeeg2.read(start, stop)\n assert np.allclose(arr[[0, 3],:], other)", "def test_batch(self):\n pass", "def get_offset_end(brokers, run_id, num_end_sigils, topic=mjolnir.kafka.TOPIC_COMPLETE):\n consumer = kafka.KafkaConsumer(bootstrap_servers=brokers,\n # The topic we are reading from is very low volume,\n # containing only reflected end run sigils. To make\n # sure we don't miss one start at the beginning.\n auto_offset_reset='earliest',\n value_deserializer=json.loads,\n api_version=mjolnir.kafka.BROKER_VERSION)\n parts = consumer.partitions_for_topic(topic=mjolnir.kafka.TOPIC_COMPLETE)\n if parts is None:\n raise RuntimeError(\"topic %s missing\" % topic)\n\n partitions = [kafka.TopicPartition(topic, p) for p in consumer.partitions_for_topic(topic)]\n consumer.assign(partitions)\n # Tracks the maximum reported offset in the response topic\n offsets_end = [-1] * num_end_sigils\n # Tracks the sigils that have been seen for the request topics\n # Uses a set incase duplicate messages are sent somehow, to ensure\n # we see a message for all expected partitions\n seen_sigils = set()\n for message in consumer:\n if 'run_id' in message.value and message.value['run_id'] == run_id and 'complete' in message.value:\n print 'found sigil for run %s and partition %d' % (message.value['run_id'], message.value['partition'])\n for partition, offset in enumerate(message.value['offsets']):\n offsets_end[partition] = max(offsets_end[partition], offset)\n seen_sigils.add(message.value['partition'])\n # Keep reading until all sigils have been reflected.\n if len(seen_sigils) >= num_end_sigils:\n consumer.close()\n return offsets_end\n consumer.close()\n raise RuntimeError(\"Finished consuming, but %d partitions remain\" % (len(partitions) - len(seen_sigils)))", "def test_read(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])", "def test_bob_read(self):\n messages = list(self.bob_inbox.read)\n self.assertEqual(1, len(messages))\n self.assertNotIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_message_group():", "def test_bob_sent(self):\n messages = list(self.bob_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def _get_zk_consumer_offsets(self, zk_hosts_ports, consumer_groups=None, zk_prefix=''):\n zk_consumer_offsets = {}\n\n # Construct the Zookeeper path pattern\n # /consumers/[groupId]/offsets/[topic]/[partitionId]\n zk_path_consumer = zk_prefix + '/consumers/'\n zk_path_topic_tmpl = zk_path_consumer + '{group}/offsets/'\n zk_path_partition_tmpl = zk_path_topic_tmpl + '{topic}/'\n\n zk_conn = KazooClient(zk_hosts_ports, timeout=self.zk_timeout)\n zk_conn.start()\n try:\n if consumer_groups is None:\n # If consumer groups aren't specified, fetch them from ZK\n consumer_groups = {consumer_group: None for consumer_group 
in\n self._get_zk_path_children(zk_conn, zk_path_consumer, 'consumer groups')}\n\n for consumer_group, topics in consumer_groups.iteritems():\n if topics is None:\n # If topics are't specified, fetch them from ZK\n zk_path_topics = zk_path_topic_tmpl.format(group=consumer_group)\n topics = {topic: None for topic in\n self._get_zk_path_children(zk_conn, zk_path_topics, 'topics')}\n\n for topic, partitions in topics.iteritems():\n if partitions is not None:\n partitions = set(partitions) # defend against bad user input\n else:\n # If partitions aren't specified, fetch them from ZK\n zk_path_partitions = zk_path_partition_tmpl.format(\n group=consumer_group, topic=topic)\n # Zookeeper returns the partition IDs as strings because\n # they are extracted from the node path\n partitions = [int(x) for x in self._get_zk_path_children(\n zk_conn, zk_path_partitions, 'partitions')]\n\n # Fetch consumer offsets for each partition from ZK\n for partition in partitions:\n zk_path = (zk_path_partition_tmpl + '{partition}/').format(\n group=consumer_group, topic=topic, partition=partition)\n try:\n consumer_offset = int(zk_conn.get(zk_path)[0])\n key = (consumer_group, topic, partition)\n zk_consumer_offsets[key] = consumer_offset\n except NoNodeError:\n self.log.info('No zookeeper node at %s', zk_path)\n except Exception:\n self.log.exception('Could not read consumer offset from %s', zk_path)\n finally:\n try:\n zk_conn.stop()\n zk_conn.close()\n except Exception:\n self.log.exception('Error cleaning up Zookeeper connection')\n return zk_consumer_offsets", "def test_single_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(1)", "async def test_datasets_access_call_controlled(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, ([], [], ['mock:controlled:id']))", "def test_dataset_iter(train_dataset):\n for i, ex in enumerate(train_dataset):\n assert np.array_equal(ex, train_dataset[i])", "def test_update_discovery_map__failure_commit(self) -> None:\n self._config.discovery_map_exec = self._get_runnable_cmd(0, None, {})\n self._config.data_store_exec = self._get_runnable_cmd(6, None, {})\n gen = generate.GenerateDataImpl(self._config)\n res = gen.update_discovery_map()\n self.assertEqual(1, res)", "def test_alice_read(self):\n messages = list(self.alice_inbox.read)\n self.assertEqual(1, len(messages))\n self.assertNotIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def _get_kafka_consumer_offsets(self, instance, consumer_groups):\n consumer_offsets = {}\n topics = defaultdict(set)\n\n cli = self._get_kafka_client(instance)\n\n for consumer_group, topic_partitions in consumer_groups.iteritems():\n try:\n coordinator_id = self._get_group_coordinator(cli, consumer_group)\n if coordinator_id:\n offsets = self._get_consumer_offsets(cli, consumer_group, topic_partitions, coordinator_id)\n else:\n offsets = self._get_consumer_offsets(cli, consumer_group, topic_partitions)\n self.log.info(\"unable to find group coordinator for %s\", consumer_group)\n\n for (topic, partition), offset in 
offsets.iteritems():\n topics[topic].update([partition])\n key = (consumer_group, topic, partition)\n consumer_offsets[key] = offset\n except Exception:\n self.log.exception('Could not read consumer offsets from kafka.')\n\n return consumer_offsets, topics", "def testMarkAllReadForConversationAndUser(self):\n\n # Add three new messages to the first conversation, each one minute apart,\n # starting one minute after the last message.\n for x in range(3):\n self.conv_utils.addMessage(\n conversation=self.conv_a.key,\n time=(self.conv_a.last_message_on + timedelta(minutes=x+1)))\n\n # First user should have four unread messages\n expected = 4\n actual = gciconversation_logic.numUnreadMessagesForConversationAndUser(\n conversation=self.conv_a.key, user=self.user_keys[0])\n self.assertEqual(expected, actual)\n\n # Mark as read for first user\n gciconversation_logic.markAllReadForConversationAndUser(\n conversation=self.conv_a.key, user=self.user_keys[0])\n\n # First user should have zero unread messages\n expected = 0\n actual = gciconversation_logic.numUnreadMessagesForConversationAndUser(\n conversation=self.conv_a.key, user=self.user_keys[0])\n self.assertEqual(expected, actual)\n\n # Add two new messages to the first conversation, each one minute apart,\n # starting one minute after the last message.\n for x in range(2):\n self.conv_utils.addMessage(\n conversation=self.conv_a.key,\n time=(self.conv_a.last_message_on + timedelta(minutes=x+1)))\n\n # First user should have two unread messages\n expected = 2\n actual = gciconversation_logic.numUnreadMessagesForConversationAndUser(\n conversation=self.conv_a.key, user=self.user_keys[0])\n self.assertEqual(expected, actual)\n\n # Second user should have six unread messages\n expected = 6\n actual = gciconversation_logic.numUnreadMessagesForConversationAndUser(\n conversation=self.conv_a.key, user=self.user_keys[1])\n self.assertEqual(expected, actual)\n\n # An exception should be raised if the user is not involved in the\n # conversation. 
The first user is not involved in the second conversation.\n with self.assertRaises(Exception):\n gciconversation_logic.markAllReadForConversationAndUser(\n conversation=self.conv_b.key, user=self.user_keys[0])", "def test_batches_are_accessible(\n monkeypatch,\n multibatch_generic_csv_generator,\n multibatch_generic_csv_generator_context,\n):\n\n context: DataContext = multibatch_generic_csv_generator_context\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n\n datasource = context.datasources[datasource_name]\n\n data_connector = datasource.data_connectors[data_connector_name]\n\n total_batches: int = 20\n file_list = multibatch_generic_csv_generator(\n data_path=data_path, num_event_batches=total_batches\n )\n\n assert (\n data_connector._get_data_reference_list_from_cache_by_data_asset_name(\n data_asset_name=asset_name\n )\n == file_list\n )\n\n batch_request_1 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -1,\n },\n )\n # Should give most recent batch\n validator_1 = context.get_validator(\n batch_request=batch_request_1,\n create_expectation_suite_with_name=\"my_expectation_suite_name_1\",\n )\n metric_max = validator_1.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches\n metric_value_set = validator_1.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n batch_request_2 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -2,\n },\n )\n validator_2 = context.get_validator(\n batch_request=batch_request_2,\n create_expectation_suite_with_name=\"my_expectation_suite_name_2\",\n )\n metric_max = validator_2.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches - 1\n metric_value_set = validator_2.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n for batch_num in range(1, total_batches + 1):\n batch_request = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -batch_num,\n },\n )\n validator = context.get_validator(\n batch_request=batch_request,\n create_expectation_suite_with_name=f\"my_expectation_suite_name__{batch_num}\",\n )\n metric_max = validator.get_metric(\n MetricConfiguration(\n \"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"}\n )\n )\n assert metric_max == (total_batches + 1) - batch_num\n metric_value_set = validator.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}", "def test_read_count(self):\n self.assertEqual(1, 
self.alice_inbox.read_count)\n self.assertEqual(1, self.bob_inbox.read_count)\n self.assertEqual(0, self.carol_inbox.read_count)", "def test_alice_unread(self):\n messages = list(self.alice_storage.unread)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def _get_zk_consumer_offsets(self, zk_hosts_ports, consumer_groups=None, zk_prefix=''):\n zk_consumer_offsets = {}\n\n # Construct the Zookeeper path pattern\n # /consumers/[groupId]/offsets/[topic]/[partitionId]\n zk_path_consumer = zk_prefix + '/consumers/'\n zk_path_topic_tmpl = zk_path_consumer + '{group}/offsets/'\n zk_path_partition_tmpl = zk_path_topic_tmpl + '{topic}/'\n\n zk_conn = KazooClient(zk_hosts_ports, timeout=self._zk_timeout)\n zk_conn.start()\n try:\n if consumer_groups is None:\n # If consumer groups aren't specified, fetch them from ZK\n consumer_groups = {consumer_group: None for consumer_group in\n self._get_zk_path_children(zk_conn, zk_path_consumer, 'consumer groups')}\n\n for consumer_group, topics in consumer_groups.iteritems():\n if topics is None:\n # If topics are't specified, fetch them from ZK\n zk_path_topics = zk_path_topic_tmpl.format(group=consumer_group)\n topics = {topic: None for topic in\n self._get_zk_path_children(zk_conn, zk_path_topics, 'topics')}\n consumer_groups[consumer_group] = topics\n\n for topic, partitions in topics.iteritems():\n if partitions is not None:\n partitions = set(partitions) # defend against bad user input\n else:\n # If partitions aren't specified, fetch them from ZK\n zk_path_partitions = zk_path_partition_tmpl.format(\n group=consumer_group, topic=topic)\n # Zookeeper returns the partition IDs as strings because\n # they are extracted from the node path\n partitions = [int(x) for x in self._get_zk_path_children(\n zk_conn, zk_path_partitions, 'partitions')]\n consumer_groups[consumer_group][topic] = partitions\n\n # Fetch consumer offsets for each partition from ZK\n for partition in partitions:\n zk_path = (zk_path_partition_tmpl + '{partition}/').format(\n group=consumer_group, topic=topic, partition=partition)\n try:\n consumer_offset = int(zk_conn.get(zk_path)[0])\n key = (consumer_group, topic, partition)\n zk_consumer_offsets[key] = consumer_offset\n except NoNodeError:\n self.log.info('No zookeeper node at %s', zk_path)\n except Exception:\n self.log.exception('Could not read consumer offset from %s', zk_path)\n finally:\n try:\n zk_conn.stop()\n zk_conn.close()\n except Exception:\n self.log.exception('Error cleaning up Zookeeper connection')\n return zk_consumer_offsets, consumer_groups", "def test_recv(self):\n # Required to get useful test names\n super(TestCisPandasInput_local, self).test_recv()" ]
[ "0.6880712", "0.67094135", "0.66918844", "0.64915144", "0.63925254", "0.6284364", "0.61970186", "0.6131777", "0.61313844", "0.60980684", "0.60387677", "0.59368175", "0.59367937", "0.5835078", "0.56977046", "0.5690426", "0.56897986", "0.56894445", "0.56839097", "0.567647", "0.5580424", "0.5564435", "0.5545255", "0.55215347", "0.5519889", "0.5508493", "0.5504098", "0.5501962", "0.54194397", "0.53631544", "0.53262526", "0.53240424", "0.53128517", "0.52732056", "0.5266957", "0.5258156", "0.5247882", "0.52201134", "0.52164185", "0.52044713", "0.5181043", "0.51752913", "0.5173471", "0.517028", "0.5154864", "0.5151681", "0.5136585", "0.5120753", "0.5118308", "0.5111412", "0.51038706", "0.5103458", "0.5097667", "0.5072948", "0.50714666", "0.50553197", "0.5053093", "0.5044382", "0.50316864", "0.50287086", "0.50281507", "0.50267243", "0.5025893", "0.50214756", "0.5018396", "0.5012059", "0.49997735", "0.4985612", "0.49814373", "0.49742424", "0.49695063", "0.49666595", "0.4958154", "0.49572456", "0.49548078", "0.4948304", "0.4945478", "0.49413484", "0.49364176", "0.49329546", "0.49315834", "0.49292052", "0.49279308", "0.4926061", "0.49248403", "0.49155837", "0.49140045", "0.4906222", "0.49052703", "0.48989874", "0.48986185", "0.48948795", "0.48914042", "0.48823404", "0.48740935", "0.4872624", "0.4862882", "0.48582017", "0.48534924", "0.48500866" ]
0.62748325
6
Test the functionality of the KafkaGroupIODataset when the existing consumer group reads data from a new topic.
def test_kafka_group_io_dataset_primary_cg_new_topic():
    dataset = tfio.experimental.streaming.KafkaGroupIODataset(
        topics=["key-test"],
        group_id="cgtestprimary",
        servers="localhost:9092",
        configuration=[
            "session.timeout.ms=7000",
            "max.poll.interval.ms=8000",
            "auto.offset.reset=earliest",
        ],
    )
    assert np.all(
        sorted(k.numpy() for (k, _) in dataset)
        == sorted(("D" + str(i)).encode() for i in range(10))
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + 
'_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_read_group(self):\n pass", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_update_group(self):\n pass", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n 
servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def test_create_new_discussion_topic_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_includes_two_new_datasets(self):\n new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for new_dataset in new_datasets:\n self.assertIn(new_dataset, table.data)", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n assert np.array_equal(x, y)", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def test_update_device_group_by_id1(self):\n pass", "def test_update_device_group_by_id(self):\n pass", "def test_mark_topic_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_topic_as_unread_groups(group_id, topic_id)", "def test_partially_update_device_group_by_id(self):\n pass", "def test_mark_all_entries_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_get_single_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_groups(group_id, topic_id)", "def test_object_names_topic(sdc_builder, sdc_executor, cluster, test_name, topic_name):\n raw_data = {'key': 'value'}\n\n # Build the Kafka destination pipeline.\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=True\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic_name,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(title=f'Kafka Destination Topic Names {test_name}').configure_for_environment(cluster)\n pipeline.configuration['rateLimit'] = 1\n\n sdc_executor.add_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic_name])\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # There should be no messages in Kafka\n msgs_received = [json.loads(msg.value.decode()) for msg in consumer]\n assert 1 == 
len(msgs_received)\n assert raw_data == msgs_received[0]", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_includes_one_new_dataset(self):\n new_dataset = factories.SourceDatasetFactory.create(source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertIn(new_dataset, table.data)", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_dataset_intermediate_group(setup_teardown_file):\n f = setup_teardown_file[3]\n\n # Trying to create intermediate groups that are absolute should fail just\n # like when creating them on groups.\n with pytest.raises(NotImplementedError):\n f.create_dataset(\"/foo/bar/baz\", shape=(10, 10), dtype='<i4')\n\n ds = f.create_dataset(\"foo/bar/baz\", shape=(10, 10), dtype='<i4')\n assert isinstance(ds, Dataset)\n assert \"/foo/bar/baz\" in f", "def test_reshape(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f')\n dset = grp.create_dataset('foo', shape=(10, 3), data=data)\n assert dset.shape == (10, 3)\n assert np.array_equal(dset.data, data.reshape((10, 3)))", "def test_string(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', data=\"string\")\n assert dset.data == \"string\"", "def test_update_entry_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a 
timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 
123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close 
consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_update_groups_Passes_correct_data_to_modify_groups_file(\n self, mock_modify_groups_file , mock_read_groups_file\n ):\n mock_modify_groups_file.return_value = True\n mock_read_groups_file.return_value = self.groups_data\n\n crud = CRUD()\n crud.update_groups(\"1\", \"name\", \"new_data\")\n mock_modify_groups_file.assert_called_once_with(self.groups_data)", "def test_reorder_pinned_topics_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_creator_in_group_can_update(self):\n\n self.client.login(username='notlogged', password='notlogged')\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_can_access(self, self.url,\n post_redirect_url=expected_url,\n data=self.data)\n\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, self.data['name'])\n self.assertEqual(updated_group.description, self.data['description'])\n self.assertIsNotNone(updated_group.last_edit_date)", "def test_get_full_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_groups(group_id, topic_id)", "def load_consumer_metadata_for_group(self, group):\n group = _coerce_consumer_group(group)\n log.debug(\"%r: load_consumer_metadata_for_group: %r\", self, group)\n\n # If we are already loading the metadata for this group, then\n # just return the outstanding deferred\n if group in self.coordinator_fetches:\n return self.coordinator_fetches[group]\n\n # No outstanding request, create a new one\n requestId = self._next_id()\n request = KafkaCodec.encode_consumermetadata_request(\n self._clientIdBytes, requestId, group)\n\n # Callbacks for the request deferred...\n def _handleConsumerMetadataResponse(response, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n # Decode the response (returns ConsumerMetadataResponse)\n c_m_resp = KafkaCodec.decode_consumermetadata_response(response)\n log.debug(\"%r: c_m_resp: %r\", self, c_m_resp)\n if c_m_resp.error:\n # Raise the appropriate error\n resp_err = kafka_errors.get(\n c_m_resp.error, UnknownError)(c_m_resp)\n raise resp_err\n\n self.consumer_group_to_brokers[group] = \\\n BrokerMetadata(c_m_resp.node_id, c_m_resp.host,\n c_m_resp.port)\n return True\n\n def _handleConsumerMetadataErr(err, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n log.error(\"Failed to retrieve consumer metadata \"\n \"for group: %s Error:%r\", group, err)\n # Clear any stored value for the group's coordinator\n self.reset_consumer_group_metadata(group)\n raise ConsumerCoordinatorNotAvailableError(\n \"Coordinator for group: %s not available\" % (group))\n\n # Send the request, add the handlers\n d = self._send_broker_unaware_request(requestId, request)\n # Save the deferred under the fetches for this group\n self.coordinator_fetches[group] = d\n d.addCallback(_handleConsumerMetadataResponse, group)\n d.addErrback(_handleConsumerMetadataErr, group)\n return d", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed 
with error: {}\\n\\n\".format(error))\n assert(False)", "def tests_ti_document_get(self):\n super().group_get()", "def test_update_resource_group(self):\n pass", "def test_create_device_group(self):\n pass", "def test_user_group_controller_update(self):\n pass", "def test_update_topic_config(self):\n test_topic_config = {\n 'test.topic': {\n 'schema_name': 'test.schema'\n }\n }\n local_topic_config = eventlogging.topic.get_topic_config()\n local_topic_config.update(test_topic_config)\n\n # append the new test topic config to the global topic config\n eventlogging.topic.update_topic_config(test_topic_config)\n\n # test that the global topic config is what it should be\n self.assertEqual(\n eventlogging.topic.get_topic_config(),\n local_topic_config\n )", "def test_dtype_conflict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (10, 3), 'f')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'S10')", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()", "def test_add_group(self):\n pass", "def 
test_if_user_can_update_data_added(self):\n drink_data = self.test_data[\"drinks\"][0]\n # save a drink\n drink = Drink(**drink_data)\n drink.save()\n\n record_data = self.test_data[\"data\"][0]\n data = Data(\n favorite_drink=drink,\n consumer_name=record_data[\"consumer_name\"],\n location=record_data[\"location\"],\n collector=self.user,\n location_longitude=record_data[\"location_longitude\"],\n location_latitude=record_data[\"location_latitude\"]\n )\n # save a data record\n data.save()\n\n # retrieve the added data record\n url = \"/data/record/%s/\" % data._id\n get_response = self.client.get(url)\n\n self.assertEqual(get_response.status_code,\n status.HTTP_200_OK)\n recieved_data = get_response.json()\n self.assertEqual(recieved_data[\"consumer_name\"],\n \"dirk nowitzki\")\n\n # update the data record\n update_payload = {\n \"drink_id\": str(drink._id),\n \"consumer_name\": \"erick omondi\",\n \"location\": \"buruburu\",\n \"location_longitude\": \"55.255\",\n \"location_latitude\": \"74.2245\"\n }\n\n put_response = self.client.put(url, update_payload, format=\"json\")\n self.assertEqual(put_response.status_code,\n status.HTTP_200_OK)\n\n # retrieve the updated record\n updated_data = Data.objects.all()[0]\n # assert it has been updated\n self.assertNotEqual(updated_data.consumer_name,\n recieved_data[\"consumer_name\"])\n\n # delete the record\n delete_response = self.client.delete(url)\n # assert the status code is 204 no content\n self.assertEqual(delete_response.status_code,\n status.HTTP_204_NO_CONTENT)\n # assert the record was actually deleted from the database\n data_count = Data.objects.count()\n self.assertEqual(data_count, 0)", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def test_new_dataset_button_with_new_datasets(self):\n self.study_version.i_is_deprecated = True\n self.study_version.save()\n new_version = factories.SourceStudyVersionFactory.create(\n study=self.study, i_version=self.study_version.i_version + 1, i_date_added=timezone.now())\n new_dataset = factories.SourceDatasetFactory.create(source_study_version=new_version)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertIn('show_new_dataset_button', context)\n self.assertTrue(context['show_new_dataset_button'])\n self.assertContains(response, 
reverse('trait_browser:source:studies:pk:datasets:new', args=[self.study.pk]))", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_modify_group(self):\n # Add users\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n resp = self.app.post('/users', data=json.dumps(self.test_user2_data))\n assert resp.status_code == 200\n\n # Modify group 1 to add user 2\n resp = self.app.put('/groups/{}'.format(self.test_group1_groupid),\n data=json.dumps(self.test_group1_modify))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert self.test_user1_userid in data\n assert self.test_user2_userid in data\n\n # Check user2 to see if it has group1 listed\n resp = self.app.get('/users/{}'.format(self.test_user2_userid))\n assert resp.status_code == 200\n\n data = json.loads(resp.data)\n assert 'groups' in data\n assert self.test_group1_groupid in data['groups']", "def test_get_group(self):\n pass", "def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def test_create_scalar_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def test_topic_update_read(database, user, topic):\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n with current_app.test_request_context():\n # Test with logged in user\n login_user(user)\n assert current_user.is_authenticated\n\n # Update the tracker\n assert topic.update_read(current_user, topic.forum, forumsread)\n # Because the tracker is already up-to-date, it shouldn't update it\n # again.\n assert not 
topic.update_read(current_user, topic.forum, forumsread)\n\n # Adding a new post - now the tracker shouldn't be up-to-date anymore.\n post = Post(content=\"Test Content\")\n post.save(topic=topic, user=user)\n\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n # Test tracker length\n flaskbb_config[\"TRACKER_LENGTH\"] = 0\n assert not topic.update_read(current_user, topic.forum, forumsread)\n flaskbb_config[\"TRACKER_LENGTH\"] = 1\n assert topic.update_read(current_user, topic.forum, forumsread)\n\n # Test with logged out user\n logout_user()\n assert not current_user.is_authenticated\n assert not topic.update_read(current_user, topic.forum, forumsread)", "def tests_ti_document_update(self, request: FixtureRequest):\n super().group_update(request)", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def test_update_port_sub_group(self):\n pass", "def test_create_device_group1(self):\n pass", "def test_topic_reduction_edge_cases(base_bertopic):\n\n nr_topics = 5\n base_bertopic.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents, topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents, c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def test_mark_topic_as_read_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_create_fillval(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (10,), fillvalue=4.0)\n assert dset[0] == 4.0\n assert dset[7] == 4.0", "def test_type_confict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_group('foo')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'f')", "def test_consumer_cancel_during_shutdown(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n consumer_instance = mock_consumer.return_value\n consumer_start_d = defer.Deferred()\n consumer_instance.start.return_value = consumer_start_d\n consumer_instance._start_d = consumer_start_d\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(consumer_instance.start.called, True)\n\n def stop():\n consumer_start_d.errback(defer.CancelledError())\n\n consumer_instance.stop.side_effect = stop\n group.rejoin_after_error(Failure(RequestTimedOutError()))\n\n self.assertEqual(consumer_instance.stop.called, True)\n self.successResultOf(consumer_start_d)\n self.assertNoResult(start_d)", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def test_message_group():", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def test_create_and_delete_rdf_group(self):\n local_array = self.conn.array_id\n local_port_list, remote_port_list = self.get_online_rdf_ports()\n if not remote_port_list:\n self.skipTest('Skipping test_create_and_delete_rdf_group -'\n 'No remote port list.')\n\n self.conn.set_array_id(local_array)\n rdf_group = self.get_next_free_srdf_group()\n self.replication.create_rdf_group(\n local_director_port_list=local_port_list,\n remote_array_id=self.conn.remote_array,\n remote_director_port_list=remote_port_list,\n array_id=local_array, local_rdfg_number=rdf_group,\n remote_rdfg_number=rdf_group, label='pyu4v_' + str(rdf_group))\n rdf_group_list = self.replication.get_rdf_group_list()\n rdfg_list = list()\n for group in rdf_group_list:\n rdfg_list.append(group['rdfgNumber'])\n self.assertIn(rdf_group, rdfg_list)\n self.replication.delete_rdf_group(srdf_group_number=rdf_group)\n rdf_group_list = self.replication.get_rdf_group_list()\n rdfg_list = list()\n for group in rdf_group_list:\n rdfg_list.append(group['rdfgNumber'])\n self.assertNotIn(rdf_group, rdfg_list)", "def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": 
range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def test_list_topic_entries_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_topic_entries_groups(group_id, topic_id)", "def test_shape_conflict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_dataset('foo', (10, 3), 'f')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 4), 'f')", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))" ]
[ "0.7862941", "0.73936385", "0.70595884", "0.68379927", "0.68281376", "0.6740333", "0.6714614", "0.6704045", "0.6651035", "0.6603814", "0.64650786", "0.6370465", "0.6314179", "0.60142636", "0.5972484", "0.5948897", "0.5936028", "0.5844628", "0.5811538", "0.58090895", "0.5807865", "0.58016247", "0.57826865", "0.57560956", "0.5730864", "0.57151455", "0.5706338", "0.5702994", "0.568886", "0.56751835", "0.56701726", "0.56172186", "0.5605977", "0.55956537", "0.5589209", "0.55856323", "0.5578022", "0.5574444", "0.55677736", "0.5547686", "0.55453575", "0.5498216", "0.54711723", "0.5470025", "0.5462488", "0.5420044", "0.53946215", "0.53924376", "0.53650975", "0.53528386", "0.5342445", "0.53357714", "0.5331543", "0.5330883", "0.53293043", "0.5292252", "0.5289586", "0.52882266", "0.5281338", "0.5277468", "0.5272135", "0.5257617", "0.5256363", "0.5253673", "0.5226806", "0.52001166", "0.5196078", "0.5190575", "0.5189822", "0.5188796", "0.51871914", "0.51860917", "0.5180832", "0.5176638", "0.51739794", "0.5170192", "0.51695687", "0.51695687", "0.5157474", "0.51513696", "0.5145899", "0.514162", "0.514162", "0.5141277", "0.5141277", "0.51209587", "0.5119462", "0.51117444", "0.51097846", "0.5101473", "0.5095227", "0.508627", "0.5085716", "0.5084712", "0.5064364", "0.50577873", "0.5056068", "0.50471365", "0.5045056", "0.50427985" ]
0.76525635
1
Test the functionality of the KafkaGroupIODataset when the consumer group is yet to catch up with the newly added messages only (Instead of reading from the beginning).
def test_kafka_group_io_dataset_resume_primary_cg(): import tensorflow_io.kafka as kafka_io # Write new messages to the topic for i in range(10, 100): message = f"D{i}" kafka_io.write_kafka(message=message, topic="key-partition-test") # Read only the newly sent 90 messages dataset = tfio.experimental.streaming.KafkaGroupIODataset( topics=["key-partition-test"], group_id="cgtestprimary", servers="localhost:9092", configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"], ) assert np.all( sorted(k.numpy() for (k, _) in dataset) == sorted(("D" + str(i)).encode() for i in range(10, 100)) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n 
group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... 
checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_read_group(self):\n pass", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should 
expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # 
Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_mark_all_entries_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_message_group():", "def test_mark_topic_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_topic_as_unread_groups(group_id, topic_id)", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n 
self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_update_group(self):\n pass", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def test_flush_groups(self):\n channel_layer.send(\"fl_test\", {\"value\": \"blue\"})\n channel_layer.flush()\n channel, message = channel_layer.receive_many([\"fl_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_google_storage_no_more_data(sdc_builder, sdc_executor, gcp):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n bucket_name = get_random_string(ascii_lowercase, 10)\n\n storage_client = gcp.storage_client\n\n google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin')\n\n google_cloud_storage.set_attributes(bucket=bucket_name,\n common_prefix='gcs-test',\n prefix_pattern='**/*.txt',\n data_format='TEXT')\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n pipeline_finisher_executor.set_attributes(preconditions=['${record:eventType() == \\'no-more-data\\'}'])\n\n wiretap = pipeline_builder.add_wiretap()\n events_wiretap = pipeline_builder.add_wiretap()\n\n google_cloud_storage >> wiretap.destination\n google_cloud_storage >= [pipeline_finisher_executor, events_wiretap.destination]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n 
sdc_executor.add_pipeline(pipeline)\n\n created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name)\n try:\n logger.info('Starting GCS Origin with no data ...')\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert 0 == len(wiretap.output_records)\n event_record = events_wiretap.output_records[0]\n event_type = event_record.header.values['sdc.event.type']\n assert event_type == 'no-more-data', 'Received %s as event type (expected no-more-data)' % event_type\n finally:\n logger.info('Deleting bucket %s ...', created_bucket.name)\n gcp.retry_429(created_bucket.delete)(force=True)", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def test_by_group_no_messages_for_another_group(self):\n thread = self.create_thread()\n other_group = mommy.make('groups.Group')\n result = Thread.public.by_group(thread.group)\n self.assertNotIn(other_group, result)", "def test_missing_group_collection(self):\n EXPLAIN_CONNECT = 'sqlite:///:memory:'\n FORSETI_CONNECT = 'sqlite:///{}'.format(\n get_db_file_path('forseti_1_missing_groups.db'))\n\n self.service_config = ServiceConfig(EXPLAIN_CONNECT,\n FORSETI_CONNECT)\n self.source = 'FORSETI'\n self.model_manager = self.service_config.model_manager\n self.model_name = self.model_manager.create(name=self.source)\n\n scoped_session, data_access = self.model_manager.get(self.model_name)\n with scoped_session as session:\n\n importer_cls = importer.by_source(self.source)\n import_runner = importer_cls(\n session,\n self.model_manager.model(self.model_name, expunge=False),\n data_access,\n self.service_config)\n import_runner.run()\n\n model = self.model_manager.model(self.model_name)\n self.assertEqual(model.state, 'BROKEN', 'Model state should be BROKEN')\n\n error_msg = 'Did you enable Forseti group collection?'\n self.assertTrue(error_msg in model.message)", "def _single_group_offsets_callback(self, consumer_group, response):\n single_group_offsets = self.kafka_client._list_consumer_group_offsets_process_response(response)\n self.log.debug(\"Single group 
offsets: %s\", single_group_offsets)\n for (topic, partition), (offset, _metadata) in single_group_offsets.items():\n # If the OffsetFetchRequest explicitly specified partitions, the offset could returned as -1, meaning there\n # is no recorded offset for that partition... for example, if the partition doesn't exist in the cluster.\n # So ignore it.\n if offset == -1:\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n continue\n key = (consumer_group, topic, partition)\n self._consumer_offsets[key] = offset", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n assert np.array_equal(x, y)", "def test_create_new_discussion_topic_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_bob_sent(self):\n messages = list(self.bob_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)", "def test_partially_update_device_group_by_id(self):\n pass", "def test_reorder_pinned_topics_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_mark_all_entries_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_all_entries_as_unread_groups(group_id, topic_id, forced_read_state=None)", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def test_post_process_forwarder(\n dispatch_post_process_group_task, kafka_message_without_transaction_header\n):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n future = forwarder.process_message(kafka_message_without_transaction_header)\n\n forwarder.flush_batch([future])\n\n dispatch_post_process_group_task.assert_called_once_with(\n event_id=\"fe0ee9a2bc3b415497bad68aaf70dc7f\",\n project_id=1,\n group_id=43,\n primary_hash=\"311ee66a5b8e697929804ceb1c456ffe\",\n is_new=False,\n is_regression=None,\n is_new_group_environment=False,\n queue=\"post_process_errors\",\n group_states=[\n {\"id\": 43, \"is_new\": False, \"is_regression\": None, \"is_new_group_environment\": False}\n ],\n )\n\n forwarder.shutdown()", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def test_get_full_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_groups(group_id, topic_id)", "def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 2 blocks, start with this and get both since we used them\n # separately in other tests \n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step2.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n # This file has had a section of DO data replaced with 0s\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step3.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_3.txt.result.yml',\n count=3\n )\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n 
self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_4.txt.result.yml',\n count=1\n )\n\n # start over now, using step 4\n self.driver.stop_sampling()\n\n # Reset the driver with no memento\n self.memento = None\n self.driver = MflmDOSTADDataSetDriver(\n self._driver_config()['startup_config'],\n self.memento,\n self.data_callback,\n self.state_callback,\n self.event_callback,\n self.exception_callback)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-4.txt.result.yml',\n count=7\n )", "def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()", "def test_bob_read(self):\n messages = list(self.bob_storage.read)\n self.assertEqual(1, len(messages))\n self.assertNotIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_alice_read(self):\n messages = list(self.alice_storage.read)\n self.assertEqual(1, len(messages))\n self.assertNotIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_bob_unread(self):\n messages = 
list(self.bob_storage.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_no_updated_datasets(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, table.data)", "def test_update_device_group_by_id1(self):\n pass", "def test_block_missing_batch(self):\n pass", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def test_pipeline2(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_get_single_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_groups(group_id, topic_id)", "def test_producer_send_messages_keyed(self):\n first_part = 43\n second_part = 56\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part, 102]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"35\"\n key2 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, key=key2, msgs=msgs2)\n # Check the expected request was sent\n msgSet1 = create_message_set(make_send_requests(msgs1, key=key1), producer.codec)\n msgSet2 = create_message_set(make_send_requests(msgs2, key=key2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n producer.stop()", "def test_add_group(self):\n pass", "def test_no_removed_datasets(self):\n removed_dataset_1 = 
factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)", "def _list_groups_callback(self, broker_id, response):\n for consumer_group, group_type in self.kafka_client._list_consumer_groups_process_response(response):\n # consumer groups from Kafka < 0.9 that store their offset in Kafka don't use Kafka for group-coordination\n # so their group_type is empty\n if group_type in ('consumer', ''):\n single_group_offsets_future = self._list_consumer_group_offsets_send_request(\n group_id=consumer_group, group_coordinator_id=broker_id\n )\n single_group_offsets_future.add_callback(self._single_group_offsets_callback, consumer_group)\n self._consumer_futures.append(single_group_offsets_future)", "def test_producer_send_messages_batched_partial_success(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n topic2 = \"tpsmbps_two\"\n client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}\n client.metadata_error_for_topic.return_value = False\n\n init_resp = [\n ProduceResponse(self.topic, 0, 0, 10),\n ProduceResponse(self.topic, 1, 6, 20),\n ProduceResponse(topic2, 5, 0, 30),\n ]\n next_resp = [\n ProduceResponse(self.topic, 2, 0, 10),\n ProduceResponse(self.topic, 1, 0, 20),\n ProduceResponse(topic2, 4, 0, 30),\n ]\n failed_payloads = [\n (ProduceRequest(self.topic, ANY, ANY), NotLeaderForPartitionError()),\n (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),\n ]\n\n client.send_produce_request.side_effect = [\n fail(Failure(FailedPayloadsError(init_resp, failed_payloads))),\n succeed(next_resp),\n ]\n\n msgs = self.msgs(range(10))\n results = []\n\n producer = Producer(client, batch_send=True, batch_every_t=0)\n # Send 5 total requests: 4 here, one after we make sure we didn't\n # send early\n results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))\n results.append(producer.send_messages(topic2, msgs=msgs[3:5]))\n results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))\n results.append(producer.send_messages(topic2, msgs=msgs[8:9]))\n # No call yet, not enough messages\n self.assertFalse(client.send_produce_request.called)\n # Enough messages to start the request\n client.reset_topic_metadata.reset_mock()\n results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))\n # Before the retry, there should be some results\n self.assertEqual(init_resp[0], self.successResultOf(results[0]))\n self.assertEqual(init_resp[2], self.successResultOf(results[3]))\n # And the errors should have forced a metadata reset on one of the topics.\n client.reset_topic_metadata.assert_called_with(self.topic)\n # Advance the clock to trigger retries.\n clock.advance(producer._retry_interval)\n # Check the otehr results came in\n self.assertEqual(next_resp[0], self.successResultOf(results[4]))\n self.assertEqual(next_resp[1], self.successResultOf(results[2]))\n self.assertEqual(next_resp[2], self.successResultOf(results[1]))\n\n producer.stop()", "def test_update_device_group_by_id(self):\n pass", "def test_sql_server_cdc_no_more_data(sdc_builder, sdc_executor, database, no_of_threads):\n pipeline_builder = 
sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(max_pool_size=no_of_threads,\n no_of_threads=no_of_threads)\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n tables = []\n no_of_records = 5\n rows_in_database = setup_sample_data(no_of_threads * no_of_records)\n\n for index in range(0, no_of_threads):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, DEFAULT_SCHEMA_NAME, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{DEFAULT_SCHEMA_NAME}_{table_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, rows_in_database, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)\n\n logger.info('Dropping table %s in %s database...', dest_table, database.type)\n dest_table.drop(database.engine)", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def test_producer_stop_during_request(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f), Deferred()]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n clock.advance(producer._retry_interval)\n\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_one_group(self, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_keys = []\n for i in range(9):\n group_key = Mock()\n group_key.urlsafe.return_value = i + 100\n group_keys.append(group_key)\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups(group_keys, event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)", "def test_dataset_intermediate_group(setup_teardown_file):\n f = setup_teardown_file[3]\n\n # Trying to create intermediate groups that are absolute should fail just\n # like when creating them on groups.\n with pytest.raises(NotImplementedError):\n f.create_dataset(\"/foo/bar/baz\", shape=(10, 10), dtype='<i4')\n\n ds = f.create_dataset(\"foo/bar/baz\", shape=(10, 10), dtype='<i4')\n assert isinstance(ds, Dataset)\n assert \"/foo/bar/baz\" in f", "def test_consumer_cancel_during_shutdown(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n consumer_instance = mock_consumer.return_value\n consumer_start_d = defer.Deferred()\n consumer_instance.start.return_value = consumer_start_d\n consumer_instance._start_d = consumer_start_d\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(consumer_instance.start.called, True)\n\n def stop():\n consumer_start_d.errback(defer.CancelledError())\n\n consumer_instance.stop.side_effect = stop\n group.rejoin_after_error(Failure(RequestTimedOutError()))\n\n self.assertEqual(consumer_instance.stop.called, True)\n self.successResultOf(consumer_start_d)\n self.assertNoResult(start_d)", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def load_consumer_metadata_for_group(self, group):\n group = _coerce_consumer_group(group)\n log.debug(\"%r: load_consumer_metadata_for_group: %r\", self, group)\n\n # If we are already loading the metadata for this group, then\n # just return the outstanding deferred\n if group in self.coordinator_fetches:\n return self.coordinator_fetches[group]\n\n # No outstanding request, create a new one\n requestId = self._next_id()\n request = KafkaCodec.encode_consumermetadata_request(\n self._clientIdBytes, requestId, group)\n\n # Callbacks for the request deferred...\n def _handleConsumerMetadataResponse(response, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n # Decode the response (returns ConsumerMetadataResponse)\n c_m_resp = KafkaCodec.decode_consumermetadata_response(response)\n log.debug(\"%r: c_m_resp: %r\", self, c_m_resp)\n if c_m_resp.error:\n # Raise the appropriate error\n resp_err = kafka_errors.get(\n c_m_resp.error, UnknownError)(c_m_resp)\n raise resp_err\n\n self.consumer_group_to_brokers[group] = \\\n BrokerMetadata(c_m_resp.node_id, c_m_resp.host,\n c_m_resp.port)\n return True\n\n def _handleConsumerMetadataErr(err, group):\n # Clear the outstanding fetch\n self.coordinator_fetches.pop(group, None)\n log.error(\"Failed to retrieve consumer metadata \"\n \"for group: %s Error:%r\", group, err)\n # 
Clear any stored value for the group's coordinator\n self.reset_consumer_group_metadata(group)\n raise ConsumerCoordinatorNotAvailableError(\n \"Coordinator for group: %s not available\" % (group))\n\n # Send the request, add the handlers\n d = self._send_broker_unaware_request(requestId, request)\n # Save the deferred under the fetches for this group\n self.coordinator_fetches[group] = d\n d.addCallback(_handleConsumerMetadataResponse, group)\n d.addErrback(_handleConsumerMetadataErr, group)\n return d", "def group_data_callback(self, action: EventType, group_id: str) -> None:\n self.process_item(group_id, {})", "def test_unsavedgroup(self):\n m = mapper(Order, orders, properties={\n 'description':deferred(orders.c.description, group='primary'),\n 'opened':deferred(orders.c.isopen, group='primary')\n })\n\n sess = create_session()\n o = Order()\n sess.save(o)\n o.order_id = 7\n def go():\n o.description = \"some description\"\n self.assert_sql_count(testing.db, go, 0)", "def test_received_message_deletion(self):\n # Send and receive on the channel first to make the channel key\n self.channel_layer.send(\"test-deletion\", {\"first\": True})\n self.receive([\"test-deletion\"])\n # Get the number of keys in the Redis database before we send\n num_keys = self.channel_layer.connection(0).dbsize()\n # Send and receive\n self.channel_layer.send(\"test-deletion\", {\"big\": False})\n self.receive([\"test-deletion\"])\n # Verify the database did not grow in size\n self.assertEqual(num_keys, self.channel_layer.connection(0).dbsize())", "def test_recv(self):\n # Required to get useful test names\n super(TestCisPandasInput_local, self).test_recv()", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches 
acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)", "def test_sync_group_no_data():\n gid = 123\n azure = create_azure_mock('GROUP1', [])\n data = [create_everbridge_contacts([], True)]\n ever = create_everbridge_mock(data)\n itr_ad = AzureGroupMemberIterator(azure, gid)\n itr_ev = EverbridgeGroupMemberIterator(ever, gid)\n app = Synchronizer(azure, ever)\n # Call sync_group\n rslt = app.sync_group(itr_ad, itr_ev)\n # Tests each method call\n ever.add_group.assert_not_called()\n ever.delete_group.assert_not_called()\n ever.delete_contacts.assert_not_called()\n ever.delete_members_from_group.assert_not_called()\n ever.upsert_contacts.assert_not_called()\n ever.get_contacts_by_external_ids.assert_not_called()\n ever.add_members_to_group.assert_not_called()\n assert rslt == {\n 'azure_group_id': 123, 'everbridge_group_id': 123, 'azure_count': 0, 'everbridge_count': 0,\n 'inserted_contacts': 0, 'updated_contacts': 0, 'removed_members': 0,\n 'deleted_contacts': 0, 'added_members': 0, 'error_contacts': 0\n }", "def test_producer_stop_waiting_to_retry(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f)]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n # Advance the clock, some, but not enough to retry\n clock.advance(producer._retry_interval / 2)\n # Stop the producer before the retry\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_pipeline2(self):\n\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config,\n selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def test_alice_unread(self):\n messages = list(self.alice_storage.unread)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def _mp_consume(client, group, topic, queue, size, events, **consumer_options):\n\n # Initial interval for retries in seconds.\n interval = 1\n while not events.exit.is_set():\n try:\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None,\n **consumer_options)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n events.start.wait()\n\n # If we are asked to quit, do so\n if events.exit.is_set():\n break\n\n # Consume messages and add them to the queue. If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n while True:\n try:\n queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)\n break\n except queue.Full:\n if events.exit.is_set():\n break\n\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. 
Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n events.pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()\n\n except KafkaError as e:\n # Retry with exponential backoff\n log.error(\n \"Problem communicating with Kafka (%s), retrying in %d seconds...\" % (e, interval))\n time.sleep(interval)\n interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS", "def tests_ti_document_get(self):\n super().group_get()", "async def test_no_change(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n mqtt_mock = await mqtt_mock_entry()\n\n calls = []\n\n @callback\n def record_calls(*args):\n \"\"\"Record calls.\"\"\"\n calls.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls}},\n )\n await async_subscribe_topics(hass, sub_state)\n subscribe_call_count = mqtt_mock.async_subscribe.call_count\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 1\n\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls}},\n )\n await async_subscribe_topics(hass, sub_state)\n assert subscribe_call_count == mqtt_mock.async_subscribe.call_count\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 2\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 2", "def test_bob_new(self):\n messages = list(self.bob_storage.new)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_alice_new(self):\n messages = list(self.alice_storage.new)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_list_topic_entries_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_topic_entries_groups(group_id, topic_id)", "def test_update_entry_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass" ]
[ "0.76577485", "0.73002553", "0.6971321", "0.6940536", "0.6914675", "0.6828621", "0.6707299", "0.6647323", "0.6454408", "0.64065754", "0.62465376", "0.6183665", "0.61425513", "0.6138433", "0.6063199", "0.6052937", "0.60468554", "0.5998803", "0.5994782", "0.59467036", "0.59285516", "0.59175766", "0.5853226", "0.58519936", "0.57357377", "0.57285523", "0.57261264", "0.5676079", "0.5667113", "0.5658065", "0.5640796", "0.56396466", "0.561597", "0.5597967", "0.5594684", "0.55807376", "0.55520296", "0.5506626", "0.54989684", "0.5479515", "0.5476803", "0.5461185", "0.54513925", "0.5440892", "0.5414955", "0.54148525", "0.5414676", "0.54027224", "0.5402446", "0.54019034", "0.5391778", "0.539109", "0.5382134", "0.5374993", "0.53691196", "0.5365642", "0.5363463", "0.53604674", "0.5331886", "0.5306074", "0.5289735", "0.52889395", "0.5286698", "0.52813375", "0.527669", "0.527666", "0.5275784", "0.52631694", "0.5262872", "0.52618146", "0.5261525", "0.5260092", "0.52496916", "0.5242148", "0.5241409", "0.52409774", "0.52395535", "0.5238332", "0.5235122", "0.52223665", "0.52137774", "0.51904196", "0.51892537", "0.517648", "0.51710945", "0.5164218", "0.5159941", "0.5157535", "0.5150009", "0.5147085", "0.5146771", "0.5146771", "0.5143375", "0.5142107", "0.51404524", "0.5132674", "0.51319444", "0.5130237", "0.51292413", "0.512527" ]
0.7597071
1
Test the functionality of the KafkaGroupIODataset when the consumer group has yet to catch up and reads only the newly added messages from the new topic (instead of reading from the beginning).
def test_kafka_group_io_dataset_resume_primary_cg_new_topic(): import tensorflow_io.kafka as kafka_io # Write new messages to the topic for i in range(10, 100): message = f"D{i}" kafka_io.write_kafka(message=message, topic="key-test") # Read only the newly sent 90 messages dataset = tfio.experimental.streaming.KafkaGroupIODataset( topics=["key-test"], group_id="cgtestprimary", servers="localhost:9092", configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"], ) assert np.all( sorted(k.numpy() for (k, _) in dataset) == sorted(("D" + str(i)).encode() for i in range(10, 100)) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n 
topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test_mark_topic_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_topic_as_unread_groups(group_id, topic_id)", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_read_group(self):\n pass", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 
'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message 
(error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 
123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def test_create_new_discussion_topic_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_update_group(self):\n pass", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def test_reorder_pinned_topics_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def test_mark_all_entries_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()", "def test_message_group():", "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_get_full_topic_groups(self):\r\n group_id = None # Change 
me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_groups(group_id, topic_id)", "def test_producer_send_messages_batched_partial_success(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n topic2 = \"tpsmbps_two\"\n client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}\n client.metadata_error_for_topic.return_value = False\n\n init_resp = [\n ProduceResponse(self.topic, 0, 0, 10),\n ProduceResponse(self.topic, 1, 6, 20),\n ProduceResponse(topic2, 5, 0, 30),\n ]\n next_resp = [\n ProduceResponse(self.topic, 2, 0, 10),\n ProduceResponse(self.topic, 1, 0, 20),\n ProduceResponse(topic2, 4, 0, 30),\n ]\n failed_payloads = [\n (ProduceRequest(self.topic, ANY, ANY), NotLeaderForPartitionError()),\n (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),\n ]\n\n client.send_produce_request.side_effect = [\n fail(Failure(FailedPayloadsError(init_resp, failed_payloads))),\n succeed(next_resp),\n ]\n\n msgs = self.msgs(range(10))\n results = []\n\n producer = Producer(client, batch_send=True, batch_every_t=0)\n # Send 5 total requests: 4 here, one after we make sure we didn't\n # send early\n results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))\n results.append(producer.send_messages(topic2, msgs=msgs[3:5]))\n results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))\n results.append(producer.send_messages(topic2, msgs=msgs[8:9]))\n # No call yet, not enough messages\n self.assertFalse(client.send_produce_request.called)\n # Enough messages to start the request\n client.reset_topic_metadata.reset_mock()\n results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))\n # Before the retry, there should be some results\n self.assertEqual(init_resp[0], self.successResultOf(results[0]))\n self.assertEqual(init_resp[2], self.successResultOf(results[3]))\n # And the errors should have forced a metadata reset on one of the topics.\n client.reset_topic_metadata.assert_called_with(self.topic)\n # Advance the clock to trigger retries.\n clock.advance(producer._retry_interval)\n # Check the otehr results came in\n self.assertEqual(next_resp[0], self.successResultOf(results[4]))\n self.assertEqual(next_resp[1], self.successResultOf(results[2]))\n self.assertEqual(next_resp[2], self.successResultOf(results[1]))\n\n producer.stop()", "def test_get_single_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_groups(group_id, topic_id)", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_producer_stop_during_request(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f), Deferred()]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = 
Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. Have to retry due to first failure\n self.assertNoResult(d)\n clock.advance(producer._retry_interval)\n\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "async def test_no_change(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n mqtt_mock = await mqtt_mock_entry()\n\n calls = []\n\n @callback\n def record_calls(*args):\n \"\"\"Record calls.\"\"\"\n calls.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls}},\n )\n await async_subscribe_topics(hass, sub_state)\n subscribe_call_count = mqtt_mock.async_subscribe.call_count\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 1\n\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls}},\n )\n await async_subscribe_topics(hass, sub_state)\n assert subscribe_call_count == mqtt_mock.async_subscribe.call_count\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 2\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 2", "async def test_modify_topics(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await mqtt_mock_entry()\n calls1 = []\n\n @callback\n def record_calls1(*args):\n \"\"\"Record calls.\"\"\"\n calls1.append(args)\n\n calls2 = []\n\n @callback\n def record_calls2(*args):\n \"\"\"Record calls.\"\"\"\n calls2.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\n \"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls1},\n \"test_topic2\": {\"topic\": \"test-topic2\", \"msg_callback\": record_calls2},\n },\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 0\n\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1_1\", \"msg_callback\": record_calls1}},\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n\n async_fire_mqtt_message(hass, \"test-topic1_1\", \"test-payload\")\n assert len(calls1) == 2\n assert calls1[1][0].topic == \"test-topic1_1\"\n assert calls1[1][0].payload == \"test-payload\"\n assert len(calls2) == 1\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1_1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n\n assert len(calls1) == 2\n assert len(calls2) == 1", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n 
group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def test_partially_update_device_group_by_id(self):\n pass", "def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def test_producer_send_messages_keyed(self):\n first_part = 43\n second_part = 56\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part, 102]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"35\"\n key2 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, key=key2, msgs=msgs2)\n # Check the expected request was sent\n msgSet1 = create_message_set(make_send_requests(msgs1, key=key1), producer.codec)\n msgSet2 = create_message_set(make_send_requests(msgs2, key=key2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests 
is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n producer.stop()", "def test_update_topic_config(self):\n test_topic_config = {\n 'test.topic': {\n 'schema_name': 'test.schema'\n }\n }\n local_topic_config = eventlogging.topic.get_topic_config()\n local_topic_config.update(test_topic_config)\n\n # append the new test topic config to the global topic config\n eventlogging.topic.update_topic_config(test_topic_config)\n\n # test that the global topic config is what it should be\n self.assertEqual(\n eventlogging.topic.get_topic_config(),\n local_topic_config\n )", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_update_device_group_by_id1(self):\n pass", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 
10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_consumer_cancel_during_shutdown(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n consumer_instance = mock_consumer.return_value\n consumer_start_d = defer.Deferred()\n consumer_instance.start.return_value = consumer_start_d\n consumer_instance._start_d = consumer_start_d\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(consumer_instance.start.called, True)\n\n def stop():\n consumer_start_d.errback(defer.CancelledError())\n\n consumer_instance.stop.side_effect = stop\n group.rejoin_after_error(Failure(RequestTimedOutError()))\n\n self.assertEqual(consumer_instance.stop.called, True)\n self.successResultOf(consumer_start_d)\n self.assertNoResult(start_d)", "def test_alice_new(self):\n messages = list(self.alice_storage.new)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertNotIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def _single_group_offsets_callback(self, consumer_group, response):\n single_group_offsets = self.kafka_client._list_consumer_group_offsets_process_response(response)\n self.log.debug(\"Single group offsets: %s\", single_group_offsets)\n for (topic, partition), (offset, _metadata) in single_group_offsets.items():\n # If the OffsetFetchRequest explicitly specified partitions, the offset could returned as -1, meaning there\n # is no recorded offset for that partition... 
for example, if the partition doesn't exist in the cluster.\n # So ignore it.\n if offset == -1:\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n continue\n key = (consumer_group, topic, partition)\n self._consumer_offsets[key] = offset", "def test_post_process_forwarder(\n dispatch_post_process_group_task, kafka_message_without_transaction_header\n):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n future = forwarder.process_message(kafka_message_without_transaction_header)\n\n forwarder.flush_batch([future])\n\n dispatch_post_process_group_task.assert_called_once_with(\n event_id=\"fe0ee9a2bc3b415497bad68aaf70dc7f\",\n project_id=1,\n group_id=43,\n primary_hash=\"311ee66a5b8e697929804ceb1c456ffe\",\n is_new=False,\n is_regression=None,\n is_new_group_environment=False,\n queue=\"post_process_errors\",\n group_states=[\n {\"id\": 43, \"is_new\": False, \"is_regression\": None, \"is_new_group_environment\": False}\n ],\n )\n\n forwarder.shutdown()", "def test_flush_groups(self):\n channel_layer.send(\"fl_test\", {\"value\": \"blue\"})\n channel_layer.flush()\n channel, message = channel_layer.receive_many([\"fl_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_update_device_group_by_id(self):\n pass", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def test_topic_reduction_edge_cases(base_bertopic):\n\n nr_topics = 5\n base_bertopic.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents, topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents, c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def test_producer_stop_waiting_to_retry(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f)]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n # Advance the clock, some, but not enough to retry\n clock.advance(producer._retry_interval / 2)\n # Stop the producer before the retry\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_topic_tracker_needs_update_cleared(database, user, topic):\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n topicsread = TopicsRead.query.\\\n filter(TopicsRead.user_id == user.id,\n TopicsRead.topic_id == topic.id).first()\n\n with current_app.test_request_context():\n assert topic.tracker_needs_update(forumsread, topicsread)\n\n # Update the tracker\n forumsread = ForumsRead()\n forumsread.user_id = user.id\n forumsread.forum_id = topic.forum_id\n forumsread.last_read = datetime.utcnow()\n forumsread.cleared = datetime.utcnow()\n forumsread.save()\n\n # Now the topic should be read\n assert not topic.tracker_needs_update(forumsread, topicsread)", "def test_includes_two_new_datasets(self):\n new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for new_dataset in new_datasets:\n self.assertIn(new_dataset, table.data)", "def test_mark_all_entries_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_all_entries_as_unread_groups(group_id, topic_id, forced_read_state=None)", "def test_object_names_topic(sdc_builder, sdc_executor, cluster, test_name, topic_name):\n raw_data = {'key': 'value'}\n\n # Build the Kafka destination pipeline.\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=True\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic_name,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(title=f'Kafka Destination Topic Names {test_name}').configure_for_environment(cluster)\n pipeline.configuration['rateLimit'] = 1\n\n sdc_executor.add_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic_name])\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # There should be no messages in Kafka\n msgs_received = [json.loads(msg.value.decode()) for msg in consumer]\n assert 1 == len(msgs_received)\n assert raw_data == msgs_received[0]", "def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n 
group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def test_google_storage_no_more_data(sdc_builder, sdc_executor, gcp):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n bucket_name = get_random_string(ascii_lowercase, 10)\n\n storage_client = gcp.storage_client\n\n google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin')\n\n google_cloud_storage.set_attributes(bucket=bucket_name,\n common_prefix='gcs-test',\n prefix_pattern='**/*.txt',\n data_format='TEXT')\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n pipeline_finisher_executor.set_attributes(preconditions=['${record:eventType() == \\'no-more-data\\'}'])\n\n wiretap = pipeline_builder.add_wiretap()\n events_wiretap = pipeline_builder.add_wiretap()\n\n google_cloud_storage >> wiretap.destination\n google_cloud_storage >= [pipeline_finisher_executor, events_wiretap.destination]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n sdc_executor.add_pipeline(pipeline)\n\n created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name)\n try:\n logger.info('Starting GCS Origin with no data ...')\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert 0 == len(wiretap.output_records)\n event_record = events_wiretap.output_records[0]\n event_type = event_record.header.values['sdc.event.type']\n assert event_type == 'no-more-data', 'Received %s as event type (expected no-more-data)' % event_type\n finally:\n logger.info('Deleting bucket %s ...', created_bucket.name)\n gcp.retry_429(created_bucket.delete)(force=True)", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def test_patch_group(self):\n pass", "def test_patch_group(self):\n pass", "def test_update_entry_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_missing_group_collection(self):\n EXPLAIN_CONNECT = 'sqlite:///:memory:'\n FORSETI_CONNECT = 'sqlite:///{}'.format(\n get_db_file_path('forseti_1_missing_groups.db'))\n\n self.service_config = ServiceConfig(EXPLAIN_CONNECT,\n FORSETI_CONNECT)\n self.source = 'FORSETI'\n self.model_manager = self.service_config.model_manager\n self.model_name = self.model_manager.create(name=self.source)\n\n scoped_session, data_access = self.model_manager.get(self.model_name)\n with scoped_session as session:\n\n importer_cls = importer.by_source(self.source)\n import_runner = importer_cls(\n session,\n self.model_manager.model(self.model_name, expunge=False),\n data_access,\n self.service_config)\n import_runner.run()\n\n model = self.model_manager.model(self.model_name)\n self.assertEqual(model.state, 'BROKEN', 'Model state should be BROKEN')\n\n error_msg = 'Did you enable Forseti group collection?'\n self.assertTrue(error_msg in model.message)", "def test_user_group_controller_update(self):\n pass", "def test_add_group(self):\n pass", "def test_list_topic_entries_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_topic_entries_groups(group_id, topic_id)", "def test_bob_new(self):\n messages = list(self.bob_storage.new)\n self.assertEqual(3, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def test_topic_tracker_needs_update(database, user, topic):\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n topicsread = TopicsRead.query.\\\n filter(TopicsRead.user_id == user.id,\n TopicsRead.topic_id == topic.id).first()\n\n with current_app.test_request_context():\n assert topic.tracker_needs_update(forumsread, topicsread)\n\n # Update the tracker\n topicsread = TopicsRead()\n topicsread.user_id = user.id\n topicsread.topic_id = topic.id\n topicsread.forum_id = topic.forum_id\n topicsread.last_read = datetime.utcnow()\n topicsread.save()\n\n forumsread = ForumsRead()\n forumsread.user_id = user.id\n forumsread.forum_id = topic.forum_id\n forumsread.last_read = datetime.utcnow()\n forumsread.save()\n\n # Now the topic should be read\n assert not topic.tracker_needs_update(forumsread, topicsread)\n\n post = Post(content=\"Test Content\")\n post.save(topic=topic, user=user)\n\n assert topic.tracker_needs_update(forumsread, topicsread)", "def test_one_group(self, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_keys = []\n for i in range(9):\n group_key = Mock()\n group_key.urlsafe.return_value = i + 100\n group_keys.append(group_key)\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups(group_keys, event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)", "def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n 
new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def test_subsequent_new_messages(self):\n # Emulate check\n self.alice_storage.update_last_checked()\n\n messages = []\n for i in range(5):\n new_message = Message.objects.create(level=constants.INFO, text=\"Message {0}\".format(i + 1),\n author=self.bob, user_generated=True)\n new_message.sent_to_users.add(self.alice)\n messages.append(new_message)\n self.assertEqual(i + 1, self.alice_storage.new_count)\n\n self.assertEqual(5, self.alice_storage.new_count)\n\n self.alice_storage.update_last_checked()\n\n self.assertEqual(0, self.alice_storage.new_count)\n self.assertSequenceEqual([], list(self.alice_storage.new))", "def test_by_group_no_messages_for_another_group(self):\n thread = self.create_thread()\n other_group = mommy.make('groups.Group')\n result = Thread.public.by_group(thread.group)\n self.assertNotIn(other_group, result)", "def test_dataset_intermediate_group(setup_teardown_file):\n f = setup_teardown_file[3]\n\n # Trying to create intermediate groups that are absolute should fail just\n # like when creating them on groups.\n with pytest.raises(NotImplementedError):\n f.create_dataset(\"/foo/bar/baz\", shape=(10, 10), dtype='<i4')\n\n ds = f.create_dataset(\"foo/bar/baz\", shape=(10, 10), dtype='<i4')\n assert isinstance(ds, Dataset)\n assert \"/foo/bar/baz\" in f", "def tests_ti_document_update(self, request: FixtureRequest):\n super().group_update(request)", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n assert np.array_equal(x, y)", "def test_producer_send_timer_failed(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n batch_t = 5\n\n # FIXME: Don't use patch to test logging\n with patch.object(aProducer, \"log\") as klog:\n producer = Producer(client, batch_send=True, batch_every_t=batch_t)\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n d = producer.send_messages(self.topic, msgs=msgs)\n # Check no request was yet sent\n self.assertFalse(client.send_produce_request.called)\n # Patch Producer's Deferred to throw an exception\n with patch.object(aProducer, \"Deferred\") as d:\n d.side_effect = ValueError(\"test_producer_send_timer_failed induced failure\")\n # Advance the clock\n clock.advance(batch_t)\n # Check the expected message was logged by the looping call restart\n klog.warning.assert_called_once_with(\n \"Batch timer failed: %s. 
Will restart.\",\n ANY,\n exc_info=ANY,\n )\n # Check that the looping call was restarted\n self.assertTrue(producer._sendLooper.running)\n\n producer.stop()", "def test_add_dc_after_mv_simple_replication(self):\n\n self._add_dc_after_mv_test(1, False)", "def verify_producer():\n\n # Producer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'error_cb': error_cb,\n 'api.version.request': api_version_request,\n 'default.topic.config': {'produce.offset.report': True}}\n\n # Create producer\n p = confluent_kafka.Producer(**conf)\n print('producer at %s' % p)\n\n headers = [('foo1', 'bar'), ('foo1', 'bar2'), ('foo2', b'1')]\n\n # Produce some messages\n p.produce(topic, 'Hello Python!', headers=headers)\n p.produce(topic, key='Just a key and headers', headers=headers)\n p.produce(topic, key='Just a key')\n p.produce(topic, partition=1, value='Strictly for partition 1',\n key='mykey', headers=headers)\n\n # Produce more messages, now with delivery report callbacks in various forms.\n mydr = MyTestDr()\n p.produce(topic, value='This one has a dr callback',\n callback=mydr.delivery)\n p.produce(topic, value='This one has a lambda',\n callback=lambda err, msg: MyTestDr._delivery(err, msg))\n p.produce(topic, value='This one has neither')\n\n # Try producing with a timestamp\n try:\n p.produce(topic, value='with a timestamp', timestamp=123456789000)\n except NotImplementedError:\n if confluent_kafka.libversion()[1] >= 0x00090400:\n raise\n\n # Produce even more messages\n for i in range(0, 10):\n p.produce(topic, value='Message #%d' % i, key=str(i),\n callback=mydr.delivery)\n p.poll(0)\n\n print('Waiting for %d messages to be delivered' % len(p))\n\n # Block until all messages are delivered/failed\n p.flush()\n\n #\n # Additional isolated tests\n #\n test_producer_dr_only_error()", "def test_if_user_can_update_data_added(self):\n drink_data = self.test_data[\"drinks\"][0]\n # save a drink\n drink = Drink(**drink_data)\n drink.save()\n\n record_data = self.test_data[\"data\"][0]\n data = Data(\n favorite_drink=drink,\n consumer_name=record_data[\"consumer_name\"],\n location=record_data[\"location\"],\n collector=self.user,\n location_longitude=record_data[\"location_longitude\"],\n location_latitude=record_data[\"location_latitude\"]\n )\n # save a data record\n data.save()\n\n # retrieve the added data record\n url = \"/data/record/%s/\" % data._id\n get_response = self.client.get(url)\n\n self.assertEqual(get_response.status_code,\n status.HTTP_200_OK)\n recieved_data = get_response.json()\n self.assertEqual(recieved_data[\"consumer_name\"],\n \"dirk nowitzki\")\n\n # update the data record\n update_payload = {\n \"drink_id\": str(drink._id),\n \"consumer_name\": \"erick omondi\",\n \"location\": \"buruburu\",\n \"location_longitude\": \"55.255\",\n \"location_latitude\": \"74.2245\"\n }\n\n put_response = self.client.put(url, update_payload, format=\"json\")\n self.assertEqual(put_response.status_code,\n status.HTTP_200_OK)\n\n # retrieve the updated record\n updated_data = Data.objects.all()[0]\n # assert it has been updated\n self.assertNotEqual(updated_data.consumer_name,\n recieved_data[\"consumer_name\"])\n\n # delete the record\n delete_response = self.client.delete(url)\n # assert the status code is 204 no content\n self.assertEqual(delete_response.status_code,\n status.HTTP_204_NO_CONTENT)\n # assert the record was actually deleted from the database\n data_count = Data.objects.count()\n self.assertEqual(data_count, 0)", "def test_bob_sent(self):\n messages = 
list(self.bob_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)", "def test_creator_in_group_can_update(self):\n\n self.client.login(username='notlogged', password='notlogged')\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_can_access(self, self.url,\n post_redirect_url=expected_url,\n data=self.data)\n\n updated_group = Group.objects.get(pk=self.group.pk)\n self.assertEqual(updated_group.name, self.data['name'])\n self.assertEqual(updated_group.description, self.data['description'])\n self.assertIsNotNone(updated_group.last_edit_date)", "def test_topic_update_read(database, user, topic):\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n with current_app.test_request_context():\n # Test with logged in user\n login_user(user)\n assert current_user.is_authenticated\n\n # Update the tracker\n assert topic.update_read(current_user, topic.forum, forumsread)\n # Because the tracker is already up-to-date, it shouldn't update it\n # again.\n assert not topic.update_read(current_user, topic.forum, forumsread)\n\n # Adding a new post - now the tracker shouldn't be up-to-date anymore.\n post = Post(content=\"Test Content\")\n post.save(topic=topic, user=user)\n\n forumsread = ForumsRead.query.\\\n filter(ForumsRead.user_id == user.id,\n ForumsRead.forum_id == topic.forum_id).first()\n\n # Test tracker length\n flaskbb_config[\"TRACKER_LENGTH\"] = 0\n assert not topic.update_read(current_user, topic.forum, forumsread)\n flaskbb_config[\"TRACKER_LENGTH\"] = 1\n assert topic.update_read(current_user, topic.forum, forumsread)\n\n # Test with logged out user\n logout_user()\n assert not current_user.is_authenticated\n assert not topic.update_read(current_user, topic.forum, forumsread)", "def test_received_message_deletion(self):\n # Send and receive on the channel first to make the channel key\n self.channel_layer.send(\"test-deletion\", {\"first\": True})\n self.receive([\"test-deletion\"])\n # Get the number of keys in the Redis database before we send\n num_keys = self.channel_layer.connection(0).dbsize()\n # Send and receive\n self.channel_layer.send(\"test-deletion\", {\"big\": False})\n self.receive([\"test-deletion\"])\n # Verify the database did not grow in size\n self.assertEqual(num_keys, self.channel_layer.connection(0).dbsize())", "def test_no_updated_datasets(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, table.data)" ]
[ "0.74266845", "0.72838503", "0.69508654", "0.69229776", "0.6898682", "0.6788215", "0.66945463", "0.65659136", "0.64467585", "0.6332732", "0.62817466", "0.62599236", "0.6245761", "0.61413795", "0.6088383", "0.6065624", "0.6036422", "0.60189974", "0.6011775", "0.598527", "0.5944673", "0.5918849", "0.58561224", "0.5823046", "0.5820549", "0.58114964", "0.5783288", "0.5772221", "0.5762194", "0.56553304", "0.5651534", "0.56514615", "0.5650181", "0.5605704", "0.557555", "0.5558478", "0.55338925", "0.5530918", "0.5529134", "0.55267394", "0.55168265", "0.5515871", "0.5513059", "0.5511029", "0.5491331", "0.54809135", "0.5478876", "0.5466525", "0.5464255", "0.5452791", "0.5447202", "0.5424799", "0.54117215", "0.5408659", "0.5399072", "0.539902", "0.5386369", "0.5378977", "0.53726375", "0.53674895", "0.5357923", "0.53570235", "0.53465486", "0.5341067", "0.5340104", "0.53284377", "0.5324584", "0.5319175", "0.5314283", "0.5306536", "0.5302331", "0.52794117", "0.5277775", "0.5269781", "0.52684575", "0.52684575", "0.52604103", "0.52550614", "0.5246314", "0.52444327", "0.522834", "0.5228164", "0.52255785", "0.5219327", "0.52112806", "0.5206251", "0.52052456", "0.5202908", "0.51940936", "0.51784736", "0.5172681", "0.51699543", "0.51632947", "0.51624507", "0.5159997", "0.515446", "0.5154043", "0.5152037", "0.5149402", "0.51457906" ]
0.7933696
0
Test the functionality of the KafkaGroupIODataset when a secondary consumer group is created and has yet to catch up on all the messages from the beginning.
def test_kafka_group_io_dataset_secondary_cg():
    dataset = tfio.experimental.streaming.KafkaGroupIODataset(
        topics=["key-partition-test"],
        group_id="cgtestsecondary",
        servers="localhost:9092",
        configuration=[
            "session.timeout.ms=7000",
            "max.poll.interval.ms=8000",
            "auto.offset.reset=earliest",
        ],
    )
    assert np.all(
        sorted(k.numpy() for (k, _) in dataset)
        == sorted(("D" + str(i)).encode() for i in range(100))
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... 
checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n 
\"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def 
test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_read_group(self):\n pass", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def test_consumer_cancel_during_shutdown(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n consumer_instance = mock_consumer.return_value\n consumer_start_d = defer.Deferred()\n consumer_instance.start.return_value = consumer_start_d\n consumer_instance._start_d = consumer_start_d\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(consumer_instance.start.called, True)\n\n def stop():\n consumer_start_d.errback(defer.CancelledError())\n\n consumer_instance.stop.side_effect = stop\n group.rejoin_after_error(Failure(RequestTimedOutError()))\n\n self.assertEqual(consumer_instance.stop.called, True)\n self.successResultOf(consumer_start_d)\n self.assertNoResult(start_d)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, 
wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 
123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def test_dataset_intermediate_group(setup_teardown_file):\n f = setup_teardown_file[3]\n\n # Trying to create intermediate groups that are absolute should fail just\n # like when creating them on groups.\n with pytest.raises(NotImplementedError):\n f.create_dataset(\"/foo/bar/baz\", shape=(10, 10), dtype='<i4')\n\n ds = f.create_dataset(\"foo/bar/baz\", shape=(10, 10), dtype='<i4')\n assert isinstance(ds, Dataset)\n assert \"/foo/bar/baz\" in f", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def test_by_group_no_messages_for_another_group(self):\n thread = self.create_thread()\n other_group = mommy.make('groups.Group')\n result = Thread.public.by_group(thread.group)\n self.assertNotIn(other_group, result)", "def test_create_device_group(self):\n pass", "def test_create_consumer(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n except Exception as e:\n self.fail(f\"test_create_consumer() failed with exception: {e}\")\n\n try:\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer.start() in test_create_consumer() failed with exception: {e}\")\n\n # Sleep for a couple seconds to allow the thread to come up.\n time.sleep(2)\n self.assertEqual(3, threading.active_count()) # Main thread, consumer thread, consumer-group hear-beat daemon.\n\n test_consumer.stop()\n test_consumer.join()\n self.assertEqual(2, threading.active_count())", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n 
self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried 
offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def test_create_device_group1(self):\n pass", "def test_create_new_discussion_topic_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_message_group():", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def test_rejoin_after_error(self):\n client = self.mock_client([])\n coord = self.make_coordinator(client)\n coord.on_group_leave = Mock()\n\n def check(rejoin_needed, exc):\n coord._rejoin_needed = False\n coord._rejoin_wait_dc = None\n for call in client.reactor.getDelayedCalls():\n call.cancel()\n client.reset_consumer_group_metadata.reset_mock()\n coord.on_group_leave.reset_mock()\n\n coord.rejoin_after_error(Failure(exc))\n if rejoin_needed:\n self.assertEqual(coord._rejoin_needed, True)\n assert_delayed_calls(1, client)\n else:\n self.assertEqual(coord._rejoin_needed, False)\n assert_delayed_calls(0, client)\n self.assertEqual(coord._rejoin_wait_dc, None)\n\n check(True, RebalanceInProgress())\n check(True, CoordinatorNotAvailable())\n client.reset_consumer_group_metadata.assert_any_call(coord.group_id)\n check(True, IllegalGeneration())\n coord.on_group_leave.assert_any_call()\n check(True, InvalidGroupId())\n coord.on_group_leave.assert_any_call()\n check(True, InconsistentGroupProtocol())\n check(True, RequestTimedOutError())\n coord.on_group_leave.assert_any_call()\n check(True, UnknownError())\n\n coord._stopping = True\n check(False, defer.CancelledError())\n coord._stopping = False\n\n start_d = coord.start()\n start_d.addErrback(lambda f: None)\n check(False, ValueError())\n coord.on_group_leave.assert_any_call()\n self.successResultOf(start_d)", "def test_exc(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n with pytest.raises(TypeError):\n grp.create_dataset('foo', (10,), dtype=\"float32\", fillvalue={\"a\": 2})", "def test_missing_group_collection(self):\n EXPLAIN_CONNECT = 'sqlite:///:memory:'\n FORSETI_CONNECT = 'sqlite:///{}'.format(\n get_db_file_path('forseti_1_missing_groups.db'))\n\n self.service_config = ServiceConfig(EXPLAIN_CONNECT,\n FORSETI_CONNECT)\n self.source = 'FORSETI'\n self.model_manager = self.service_config.model_manager\n self.model_name = self.model_manager.create(name=self.source)\n\n scoped_session, data_access = self.model_manager.get(self.model_name)\n with scoped_session as session:\n\n importer_cls = 
importer.by_source(self.source)\n import_runner = importer_cls(\n session,\n self.model_manager.model(self.model_name, expunge=False),\n data_access,\n self.service_config)\n import_runner.run()\n\n model = self.model_manager.model(self.model_name)\n self.assertEqual(model.state, 'BROKEN', 'Model state should be BROKEN')\n\n error_msg = 'Did you enable Forseti group collection?'\n self.assertTrue(error_msg in model.message)", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_mark_topic_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_topic_as_unread_groups(group_id, topic_id)", "def test_sql_server_cdc_no_more_data(sdc_builder, sdc_executor, database, no_of_threads):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(max_pool_size=no_of_threads,\n no_of_threads=no_of_threads)\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n tables = []\n no_of_records = 5\n rows_in_database = setup_sample_data(no_of_threads * no_of_records)\n\n for index in range(0, no_of_threads):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, DEFAULT_SCHEMA_NAME, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{DEFAULT_SCHEMA_NAME}_{table_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, rows_in_database, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)\n\n logger.info('Dropping table %s in %s database...', dest_table, database.type)\n dest_table.drop(database.engine)", "def test_clean_session(sdc_builder, sdc_executor, mqtt_broker, clean_session):\n data_topic = get_random_string(string.ascii_letters, 10)\n mqtt_broker.initialize(initial_topics=[data_topic])\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n mqtt_source = pipeline_builder.add_stage('MQTT Subscriber').set_attributes(data_format='TEXT',\n topic_filter=[data_topic],\n clean_session=clean_session,\n quality_of_service='AT_LEAST_ONCE')\n\n wiretap = pipeline_builder.add_wiretap()\n\n mqtt_source >> wiretap.destination\n\n pipeline = pipeline_builder.build().configure_for_environment(mqtt_broker)\n sdc_executor.add_pipeline(pipeline)\n try:\n sdc_executor.start_pipeline(pipeline)\n\n # can't figure out a cleaner way to do this; it takes a bit of time for the pipeline\n # to ACTUALLY start listening on the MQTT port, so if we don't sleep here, the\n # messages won't be delivered (without setting persist)\n time.sleep(1)\n expected_messages = []\n for i in range(10, 20):\n expected_message = f'Message {i}'\n mqtt_broker.publish_message(topic=data_topic, payload=expected_message)\n expected_messages.append(expected_message)\n\n 
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 10)\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(wiretap.output_records) == len(expected_messages)\n messages = [record.field['text'] for record in wiretap.output_records]\n assert sorted(messages) == sorted(expected_messages)\n\n wiretap.reset()\n\n expected_messages_2 = []\n for i in range(20, 30):\n expected_message = f'Message {i}'\n mqtt_broker.publish_message(topic=data_topic, payload=expected_message)\n expected_messages_2.append(expected_message)\n\n sdc_executor.start_pipeline(pipeline)\n time.sleep(1)\n\n expected_messages_3 = []\n for i in range(30, 40):\n expected_message = f'Message {i}'\n mqtt_broker.publish_message(topic=data_topic, payload=expected_message)\n expected_messages_3.append(expected_message)\n\n if clean_session:\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 10)\n final_expected_messages = expected_messages_3\n else:\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 20)\n final_expected_messages = expected_messages_2 + expected_messages_3\n\n sdc_executor.stop_pipeline(pipeline)\n assert len(wiretap.output_records) == len(final_expected_messages)\n messages = [record.field['text'] for record in wiretap.output_records]\n assert sorted(messages) == sorted(final_expected_messages)\n\n finally:\n if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(pipeline)\n mqtt_broker.destroy()", "def test_000_add_group(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_get_full_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_groups(group_id, topic_id)", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n assert np.array_equal(x, y)", "def test_back_fill(self):\n self.driver.start_sampling()\n\n # step 2 contains 2 blocks, start with this and get both since we used them\n # separately in other tests \n self.clear_async_data()\n self.create_sample_data_set_dir(\n \"node59p1_step2.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n # This file has had a section of DO data replaced with 0s\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step3.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_3.txt.result.yml',\n count=3\n )\n\n # Now fill in the zeroed section from step3, this should just return the new\n # data\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_4.txt.result.yml',\n count=1\n )\n\n # start over now, using step 4\n self.driver.stop_sampling()\n\n # Reset the driver with no memento\n self.memento = None\n self.driver = MflmDOSTADDataSetDriver(\n self._driver_config()['startup_config'],\n self.memento,\n self.data_callback,\n self.state_callback,\n self.event_callback,\n self.exception_callback)\n self.driver.start_sampling()\n\n self.clear_async_data()\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-4.txt.result.yml',\n count=7\n )", "def test_can_download_report_no_group(self):\r\n self.assertFalse(_can_download_report(self.user))", "def test_get_single_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_groups(group_id, topic_id)", "def test_google_storage_no_more_data(sdc_builder, sdc_executor, gcp):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n bucket_name = get_random_string(ascii_lowercase, 10)\n\n storage_client = gcp.storage_client\n\n google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin')\n\n google_cloud_storage.set_attributes(bucket=bucket_name,\n common_prefix='gcs-test',\n prefix_pattern='**/*.txt',\n data_format='TEXT')\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n pipeline_finisher_executor.set_attributes(preconditions=['${record:eventType() == \\'no-more-data\\'}'])\n\n wiretap = pipeline_builder.add_wiretap()\n events_wiretap = pipeline_builder.add_wiretap()\n\n google_cloud_storage >> wiretap.destination\n google_cloud_storage >= [pipeline_finisher_executor, events_wiretap.destination]\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n sdc_executor.add_pipeline(pipeline)\n\n created_bucket 
= gcp.retry_429(storage_client.create_bucket)(bucket_name)\n try:\n logger.info('Starting GCS Origin with no data ...')\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert 0 == len(wiretap.output_records)\n event_record = events_wiretap.output_records[0]\n event_type = event_record.header.values['sdc.event.type']\n assert event_type == 'no-more-data', 'Received %s as event type (expected no-more-data)' % event_type\n finally:\n logger.info('Deleting bucket %s ...', created_bucket.name)\n gcp.retry_429(created_bucket.delete)(force=True)", "def test_shape_conflict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_dataset('foo', (10, 3), 'f')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 4), 'f')", "def pytest_started_handling_group(session, worker):", "def test_persistent_group_already_running(dev):\n form(dev[0], dev[1])\n peer = dev[1].get_peer(dev[0].p2p_dev_addr())\n listen_freq = peer['listen_freq']\n dev[0].dump_monitor()\n dev[1].dump_monitor()\n networks = dev[0].list_networks(p2p=True)\n if len(networks) != 1:\n raise Exception(\"Unexpected number of networks\")\n if \"[P2P-PERSISTENT]\" not in networks[0]['flags']:\n raise Exception(\"Not the persistent group data\")\n if \"OK\" not in dev[0].global_request(\"P2P_GROUP_ADD persistent=\" + networks[0]['id'] + \" freq=\" + listen_freq):\n raise Exception(\"Could not state GO\")\n invite_from_cli(dev[0], dev[1])", "def try_creating_kafka_consumer(broker, broker_port, topic, consumer_group):\n retries = 8\n for i in range(retries):\n try:\n return KafkaConsumer(topic, group_id=consumer_group, bootstrap_servers=[f'{broker}:{broker_port}'])\n except errors.NoBrokersAvailable:\n logging.error(\"attempt number: \" + str(i + 1) + \" broker: \" + broker + \":\" + str(broker_port))\n sleep(10)\n raise errors.NoBrokersAvailable", "def test_producer_stop_waiting_to_retry(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f)]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n # Advance the clock, some, but not enough to retry\n clock.advance(producer._retry_interval / 2)\n # Stop the producer before the retry\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def test_producer_stop_during_request(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f), Deferred()]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. Have to retry due to first failure\n self.assertNoResult(d)\n clock.advance(producer._retry_interval)\n\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_persistent_group_per_sta_psk(dev):\n addr0 = dev[0].p2p_dev_addr()\n addr1 = dev[1].p2p_dev_addr()\n addr2 = dev[2].p2p_dev_addr()\n dev[0].global_request(\"P2P_SET 
per_sta_psk 1\")\n logger.info(\"Form a persistent group\")\n [i_res, r_res] = go_neg_pin_authorized_persistent(i_dev=dev[0], i_intent=15,\n r_dev=dev[1], r_intent=0)\n if not i_res['persistent'] or not r_res['persistent']:\n raise Exception(\"Formed group was not persistent\")\n\n logger.info(\"Join another client to the group\")\n pin = dev[2].wps_read_pin()\n dev[0].p2p_go_authorize_client(pin)\n social = int(i_res['freq']) in [ 2412, 2437, 2462 ]\n c_res = dev[2].p2p_connect_group(addr0, pin, timeout=60, social=social,\n freq=i_res['freq'])\n if not c_res['persistent']:\n raise Exception(\"Joining client did not recognize persistent group\")\n if r_res['psk'] == c_res['psk']:\n raise Exception(\"Same PSK assigned for both clients\")\n hwsim_utils.test_connectivity_p2p(dev[1], dev[2])\n\n logger.info(\"Remove persistent group and re-start it manually\")\n dev[0].remove_group()\n dev[1].wait_go_ending_session()\n dev[2].wait_go_ending_session()\n dev[0].dump_monitor()\n dev[1].dump_monitor()\n dev[2].dump_monitor()\n\n for i in range(0, 3):\n networks = dev[i].list_networks(p2p=True)\n if len(networks) != 1:\n raise Exception(\"Unexpected number of networks\")\n if \"[P2P-PERSISTENT]\" not in networks[0]['flags']:\n raise Exception(\"Not the persistent group data\")\n if i > 0:\n # speed up testing by avoiding use of the old BSS entry since the\n # GO may have changed channels\n dev[i].request(\"BSS_FLUSH 0\")\n dev[i].scan(freq=\"2412\", only_new=True)\n if \"OK\" not in dev[i].global_request(\"P2P_GROUP_ADD persistent=\" + networks[0]['id'] + \" freq=2412\"):\n raise Exception(\"Could not re-start persistent group\")\n ev = dev[i].wait_global_event([\"P2P-GROUP-STARTED\"], timeout=30)\n if ev is None:\n raise Exception(\"Timeout on group restart\")\n dev[i].group_form_result(ev)\n\n logger.info(\"Leave persistent group and rejoin it\")\n dev[2].remove_group()\n ev = dev[2].wait_global_event([\"P2P-GROUP-REMOVED\"], timeout=3)\n if ev is None:\n raise Exception(\"Group removal event timed out\")\n if not dev[2].discover_peer(addr0, social=True):\n raise Exception(\"Peer \" + peer + \" not found\")\n dev[2].dump_monitor()\n peer = dev[2].get_peer(addr0)\n dev[2].global_request(\"P2P_GROUP_ADD persistent=\" + peer['persistent'] + \" freq=2412\")\n ev = dev[2].wait_global_event([\"P2P-GROUP-STARTED\"], timeout=30)\n if ev is None:\n raise Exception(\"Timeout on group restart (on client)\")\n cli_res = dev[2].group_form_result(ev)\n if not cli_res['persistent']:\n raise Exception(\"Persistent group not restarted as persistent (cli)\")\n hwsim_utils.test_connectivity_p2p(dev[1], dev[2])\n\n logger.info(\"Remove one of the clients from the group without removing persistent group information for the client\")\n dev[0].global_request(\"P2P_REMOVE_CLIENT iface=\" + dev[2].p2p_interface_addr())\n dev[2].wait_go_ending_session()\n\n logger.info(\"Try to reconnect after having been removed from group (but persistent group info still present)\")\n if not dev[2].discover_peer(addr0, social=True):\n raise Exception(\"Peer \" + peer + \" not found\")\n dev[2].dump_monitor()\n peer = dev[2].get_peer(addr0)\n dev[2].global_request(\"P2P_GROUP_ADD persistent=\" + peer['persistent'] + \" freq=2412\")\n ev = dev[2].wait_global_event([\"P2P-GROUP-STARTED\",\"WPA: 4-Way Handshake failed\"], timeout=30)\n if ev is None:\n raise Exception(\"Timeout on group restart (on client)\")\n if \"P2P-GROUP-STARTED\" not in ev:\n raise Exception(\"Connection failed\")\n\n logger.info(\"Remove one of the clients from the 
group\")\n dev[0].global_request(\"P2P_REMOVE_CLIENT \" + addr2)\n dev[2].wait_go_ending_session()\n\n logger.info(\"Try to reconnect after having been removed from group\")\n if not dev[2].discover_peer(addr0, social=True):\n raise Exception(\"Peer \" + peer + \" not found\")\n dev[2].dump_monitor()\n peer = dev[2].get_peer(addr0)\n dev[2].global_request(\"P2P_GROUP_ADD persistent=\" + peer['persistent'] + \" freq=2412\")\n ev = dev[2].wait_global_event([\"P2P-GROUP-STARTED\",\"WPA: 4-Way Handshake failed\"], timeout=30)\n if ev is None:\n raise Exception(\"Timeout on group restart (on client)\")\n if \"P2P-GROUP-STARTED\" in ev:\n raise Exception(\"Client managed to connect after being removed\")\n\n logger.info(\"Remove the remaining client from the group\")\n dev[0].global_request(\"P2P_REMOVE_CLIENT \" + addr1)\n dev[1].wait_go_ending_session()\n\n logger.info(\"Terminate persistent group\")\n dev[0].remove_group()\n dev[0].dump_monitor()\n\n logger.info(\"Try to re-invoke persistent group from client\")\n dev[0].global_request(\"SET persistent_reconnect 1\")\n dev[0].p2p_listen()\n if not dev[1].discover_peer(addr0, social=True):\n raise Exception(\"Peer \" + peer + \" not found\")\n dev[1].dump_monitor()\n peer = dev[1].get_peer(addr0)\n dev[1].global_request(\"P2P_INVITE persistent=\" + peer['persistent'] + \" peer=\" + addr0)\n ev = dev[0].wait_global_event([\"P2P-GROUP-STARTED\"], timeout=30)\n dev[0].group_form_result(ev)\n ev = dev[1].wait_global_event([\"P2P-GROUP-STARTED\",\"WPA: 4-Way Handshake failed\"], timeout=30)\n if ev is None:\n raise Exception(\"Timeout on group restart (on client)\")\n if \"P2P-GROUP-STARTED\" in ev:\n raise Exception(\"Client managed to re-invoke after being removed\")\n dev[0].dump_monitor()\n\n logger.info(\"Terminate persistent group\")\n dev[0].remove_group()\n dev[0].dump_monitor()", "def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)", "def test_sync_group_no_data():\n gid = 123\n azure = create_azure_mock('GROUP1', [])\n data = [create_everbridge_contacts([], True)]\n ever = create_everbridge_mock(data)\n itr_ad = AzureGroupMemberIterator(azure, gid)\n itr_ev = EverbridgeGroupMemberIterator(ever, gid)\n app = Synchronizer(azure, ever)\n # Call sync_group\n rslt = app.sync_group(itr_ad, itr_ev)\n # Tests each method call\n ever.add_group.assert_not_called()\n ever.delete_group.assert_not_called()\n ever.delete_contacts.assert_not_called()\n ever.delete_members_from_group.assert_not_called()\n ever.upsert_contacts.assert_not_called()\n ever.get_contacts_by_external_ids.assert_not_called()\n ever.add_members_to_group.assert_not_called()\n assert rslt == {\n 'azure_group_id': 123, 'everbridge_group_id': 123, 'azure_count': 0, 'everbridge_count': 0,\n 'inserted_contacts': 0, 'updated_contacts': 0, 'removed_members': 0,\n 'deleted_contacts': 0, 'added_members': 0, 'error_contacts': 0\n }", "def test_type_confict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_group('foo')\n with 
pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'f')", "def test_NonrealtimeProvider_add_bus_group_error(session):\n provider = Provider.from_context(session)\n with pytest.raises(ValueError):\n provider.add_bus_group()\n with provider.at(0):\n with pytest.raises(ValueError):\n provider.add_bus_group(channel_count=0)\n with pytest.raises(ValueError):\n provider.add_bus_group(calculation_rate=\"scalar\")", "def test_trivial(self):\n group = Group()", "def test_nogroup(self):\n pkg = self.mk_pkg('nogroup', 65533)\n check = self.mk_check((pkg,))\n self.assertNoReport(check, pkg)", "def test_get_coordinator_fatal(self):\n client = self.mock_client([])\n client._get_coordinator_for_group.return_value = defer.fail(AttributeError())\n coord = self.make_coordinator(client)\n\n self.failureResultOf(coord.get_coordinator_broker()).check(AttributeError)\n\n # No heartbeat scheduled.\n self.assertEqual([], client.reactor.getDelayedCalls())", "def test_mark_all_entries_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "async def test_nogroup_device_id(hass: HomeAssistant, monkeypatch) -> None:\n config = {\n \"rflink\": {\"port\": \"/dev/ttyABC0\"},\n DOMAIN: {\n \"platform\": \"rflink\",\n \"devices\": {\"test_nogroup_0_0\": {\"name\": \"test\", \"group\": False}},\n },\n }\n\n # setup mocking rflink module\n event_callback, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)\n\n assert hass.states.get(f\"{DOMAIN}.test\").state == STATE_CLOSED\n\n # test sending group command to nogroup\n event_callback({\"id\": \"test_nogroup_0_0\", \"command\": \"allon\"})\n await hass.async_block_till_done()\n # should not affect state\n assert hass.states.get(f\"{DOMAIN}.test\").state == STATE_CLOSED\n\n # test sending group command to nogroup\n event_callback({\"id\": \"test_nogroup_0_0\", \"command\": \"up\"})\n await hass.async_block_till_done()\n # should affect state\n assert hass.states.get(f\"{DOMAIN}.test\").state == STATE_OPEN", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def test_producer_send_messages_batched_partial_success(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n topic2 = \"tpsmbps_two\"\n client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}\n client.metadata_error_for_topic.return_value = False\n\n init_resp = [\n ProduceResponse(self.topic, 0, 0, 10),\n ProduceResponse(self.topic, 1, 6, 20),\n ProduceResponse(topic2, 5, 0, 30),\n ]\n 
next_resp = [\n ProduceResponse(self.topic, 2, 0, 10),\n ProduceResponse(self.topic, 1, 0, 20),\n ProduceResponse(topic2, 4, 0, 30),\n ]\n failed_payloads = [\n (ProduceRequest(self.topic, ANY, ANY), NotLeaderForPartitionError()),\n (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),\n ]\n\n client.send_produce_request.side_effect = [\n fail(Failure(FailedPayloadsError(init_resp, failed_payloads))),\n succeed(next_resp),\n ]\n\n msgs = self.msgs(range(10))\n results = []\n\n producer = Producer(client, batch_send=True, batch_every_t=0)\n # Send 5 total requests: 4 here, one after we make sure we didn't\n # send early\n results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))\n results.append(producer.send_messages(topic2, msgs=msgs[3:5]))\n results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))\n results.append(producer.send_messages(topic2, msgs=msgs[8:9]))\n # No call yet, not enough messages\n self.assertFalse(client.send_produce_request.called)\n # Enough messages to start the request\n client.reset_topic_metadata.reset_mock()\n results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))\n # Before the retry, there should be some results\n self.assertEqual(init_resp[0], self.successResultOf(results[0]))\n self.assertEqual(init_resp[2], self.successResultOf(results[3]))\n # And the errors should have forced a metadata reset on one of the topics.\n client.reset_topic_metadata.assert_called_with(self.topic)\n # Advance the clock to trigger retries.\n clock.advance(producer._retry_interval)\n # Check the otehr results came in\n self.assertEqual(next_resp[0], self.successResultOf(results[4]))\n self.assertEqual(next_resp[1], self.successResultOf(results[2]))\n self.assertEqual(next_resp[2], self.successResultOf(results[1]))\n\n producer.stop()", "def test_update_group(self):\n pass", "def test_daogroup_two(self):\n with pytest.warns(AstropyDeprecationWarning):\n first_group = Table([np.zeros(5), np.linspace(0, 1, 5),\n np.arange(5) + 1, np.ones(5, dtype=int)],\n names=('x_0', 'y_0', 'id', 'group_id'))\n second_group = Table([np.zeros(5), np.linspace(2, 3, 5),\n 6 + np.arange(5), 2 * np.ones(5, dtype=int)],\n names=('x_0', 'y_0', 'id', 'group_id'))\n starlist = vstack([first_group, second_group])\n daogroup = DAOGroup(crit_separation=0.3)\n test_starlist = daogroup(starlist['x_0', 'y_0', 'id'])\n assert_table_almost_equal(starlist, test_starlist)", "def test_add_group(self):\n pass", "def test_digi_scenarios(self):\n # -- data group has no digitizer devices ----\n _map = self.map\n self.assertBasics(_map)\n self.assertEqual(_map, {})\n\n # -- data group has all mappable devices ----\n self.f.add_module(\"SIS 3301\", {})\n self.f.add_module(\"SIS crate\", {})\n _map = self.map\n self.assertBasics(_map)\n\n # check all controls were mapped\n self.assertEqual(len(_map), 2)\n self.assertIn(\"SIS 3301\", _map)\n self.assertIn(\"SIS crate\", _map)\n\n # the data group has mappable and unknown digitizers ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS 3301\", {})\n self.f[\"Raw data + config\"].create_group(\"Not known\")\n _map = self.map\n self.assertBasics(_map)\n\n # check correct diagnostics were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS 3301\", _map)\n self.assertNotIn(\"Not known\", _map)\n\n # delete unknown group\n del self.f[\"Raw data + config/Not known\"]\n\n # the data group has a dataset ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS crate\", {})\n data = np.empty((2, 100), dtype=np.float32)\n 
self.f[\"Raw data + config\"].create_dataset(\"A dataset\", data=data)\n _map = self.map\n self.assertBasics(_map)\n\n # check correct diagnostics were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS crate\", _map)\n self.assertNotIn(\"A dataset\", _map)\n\n # delete dataset\n del self.f[\"Raw data + config/A dataset\"]\n\n # the data group has a mappable digitizer but ----\n # mapping fails ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS 3301\", {})\n self.f.add_module(\"SIS crate\", {})\n\n # remove a dataset from 'SIS 3301'\n # - this will cause mapping of 'Waveform' to fail\n #\n sis_group = self.f[\"Raw data + config/SIS 3301\"]\n for name in sis_group:\n if isinstance(sis_group[name], h5py.Dataset):\n del sis_group[name]\n\n # check map\n _map = self.map\n self.assertBasics(_map)\n\n # check correct controls were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS crate\", _map)\n self.assertNotIn(\"SIS 3301\", _map)", "def test_00_mail_group_access_rights(self):\n cr, uid, user_bert_id, user_raoul_id = self.cr, self.uid, self.user_bert_id, self.user_raoul_id\n\n # Do: Bert reads Jobs -> ok, public\n self.mail_group.read(cr, user_bert_id, [self.group_jobs_id])\n # Do: Bert read Pigs -> ko, restricted to employees\n self.assertRaises(except_orm, self.mail_group.read,\n cr, user_bert_id, [self.group_pigs_id])\n # Do: Raoul read Pigs -> ok, belong to employees\n self.mail_group.read(cr, user_raoul_id, [self.group_pigs_id])\n\n # Do: Bert creates a group -> ko, no access rights\n self.assertRaises(except_orm, self.mail_group.create,\n cr, user_bert_id, {'name': 'Test'})\n # Do: Raoul creates a restricted group -> ok\n new_group_id = self.mail_group.create(cr, user_raoul_id, {'name': 'Test'})\n # Do: Bert added in followers, read -> ok, in followers\n self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_bert_id])\n self.mail_group.read(cr, user_bert_id, [new_group_id])\n\n # Do: Raoul reads Priv -> ko, private\n self.assertRaises(except_orm, self.mail_group.read,\n cr, user_raoul_id, [self.group_priv_id])\n # Do: Raoul added in follower, read -> ok, in followers\n self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_raoul_id])\n self.mail_group.read(cr, user_raoul_id, [self.group_priv_id])\n\n # Do: Raoul write on Jobs -> ok\n self.mail_group.write(cr, user_raoul_id, [self.group_priv_id], {'name': 'modified'})\n # Do: Bert cannot write on Private -> ko (read but no write)\n self.assertRaises(except_orm, self.mail_group.write,\n cr, user_bert_id, [self.group_priv_id], {'name': 're-modified'})\n # Test: Bert cannot unlink the group\n self.assertRaises(except_orm,\n self.mail_group.unlink,\n cr, user_bert_id, [self.group_priv_id])\n # Do: Raoul unlinks the group, there are no followers and messages left\n self.mail_group.unlink(cr, user_raoul_id, [self.group_priv_id])\n fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])\n self.assertFalse(fol_ids, 'unlinked document should not have any followers left')\n msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])\n self.assertFalse(msg_ids, 'unlinked document should not have any followers left')", "def test_filter_device_group(self):\n pass", "def test_reorder_pinned_topics_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_pipeline2(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_stop_resume(self):\n \n self.create_sample_data_set_dir(\n \"node59p1_step1.dat\",\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n \n # create the recovered file\n self.create_sample_data_set_dir(\n \"DOS15908_1st7_step1.DAT\",\n RECOV_DIR,\n \"DOS15908.DAT\",\n copy_metadata=False\n )\n \n # create some data to parse\n self.clear_async_data()\n \n self.driver.start_sampling()\n\n # verify data is produced\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_1_ss1.txt.result.yml',\n count=1,\n timeout=10\n )\n self.assert_data(\n DostadParserRecoveredDataParticle,\n 'test_data_1r_ss1.txt.result.yml',\n count=1,\n timeout=10\n )\n\n self.driver.stop_sampling()\n\n self.driver.start_sampling()\n \n self.assert_data(\n DostadParserTelemeteredMetadataDataParticle,\n 'test_data_1_ss2.txt.result.yml',\n count=1,\n timeout=10\n )\n self.assert_data(\n DostadParserRecoveredMetadataDataParticle,\n 'test_data_1r_ss2.txt.result.yml',\n count=1,\n timeout=10\n )", "def test_post_process_forwarder(\n dispatch_post_process_group_task, kafka_message_without_transaction_header\n):\n forwarder = PostProcessForwarderWorker(concurrency=1)\n future = forwarder.process_message(kafka_message_without_transaction_header)\n\n forwarder.flush_batch([future])\n\n dispatch_post_process_group_task.assert_called_once_with(\n event_id=\"fe0ee9a2bc3b415497bad68aaf70dc7f\",\n project_id=1,\n group_id=43,\n primary_hash=\"311ee66a5b8e697929804ceb1c456ffe\",\n is_new=False,\n is_regression=None,\n is_new_group_environment=False,\n queue=\"post_process_errors\",\n group_states=[\n {\"id\": 43, \"is_new\": False, \"is_regression\": None, \"is_new_group_environment\": False}\n ],\n )\n\n forwarder.shutdown()", "def test_group_is_private_user_is_not_member(self):\n thread = self.create_thread()\n thread.group.private = True\n thread.save()\n message = thread.first_message\n user = self.create_user()\n self.assertFalse(message.visible_to_user(user))", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def test_get_device_groups1(self):\n pass", "def test_persistent_group_peer_dropped2(dev):\n form(dev[0], dev[1])\n invite_from_go(dev[0], dev[1])\n\n logger.info(\"Remove group on the client and try to invite from the GO\")\n dev[1].global_request(\"REMOVE_NETWORK all\")\n invite(dev[0], dev[1])\n ev = dev[0].wait_global_event([\"P2P-INVITATION-RESULT\"], timeout=10)\n if ev is None:\n raise Exception(\"No invitation result seen\")\n if \"status=8\" not in ev:\n raise Exception(\"Unexpected invitation result: \" + ev)\n networks = dev[1].list_networks(p2p=True)\n if len(networks) > 0:\n raise Exception(\"Unexpected network block on client\")\n\n logger.info(\"Verify that a new group can be formed\")\n form(dev[0], dev[1])", "def test_flush_groups(self):\n 
channel_layer.send(\"fl_test\", {\"value\": \"blue\"})\n channel_layer.flush()\n channel, message = channel_layer.receive_many([\"fl_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()", "def test_empty_messages(self):\n self.failureResultOf(self.producer.send_messages(\"topic\"), ValueError)\n self.failureResultOf(self.producer.send_messages(\"topic\", msgs=[]), ValueError)", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as 
mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)", "def test_create_resource_group(self):\n pass", "def test_create_fillval(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (10,), fillvalue=4.0)\n assert dset[0] == 4.0\n assert dset[7] == 4.0" ]
[ "0.73009527", "0.72444355", "0.7244203", "0.70529", "0.6862954", "0.6756388", "0.66355544", "0.6621006", "0.64989007", "0.64381534", "0.63905436", "0.6337798", "0.62951696", "0.62927544", "0.6165134", "0.6079073", "0.6057113", "0.6047525", "0.5999353", "0.59861374", "0.5933441", "0.5927073", "0.59061676", "0.5898181", "0.58546966", "0.58476615", "0.5822458", "0.5822438", "0.5816431", "0.5801566", "0.5767115", "0.57383895", "0.57090455", "0.56760174", "0.5665889", "0.5646613", "0.56451404", "0.56336683", "0.56302845", "0.558628", "0.558628", "0.5574609", "0.55366874", "0.55315197", "0.55275404", "0.5522464", "0.550791", "0.54828703", "0.54576826", "0.5443054", "0.5416531", "0.54109067", "0.54086226", "0.5377668", "0.5377569", "0.5373127", "0.53609544", "0.53606594", "0.53534967", "0.5346748", "0.53463674", "0.53458583", "0.53421557", "0.5336908", "0.5329717", "0.5317723", "0.5315581", "0.5312058", "0.53107816", "0.5308437", "0.52969843", "0.5289764", "0.52893543", "0.52871394", "0.5268273", "0.5261851", "0.52611583", "0.5257442", "0.5257246", "0.525503", "0.525181", "0.5246966", "0.5246779", "0.5244213", "0.5243371", "0.5234018", "0.5233008", "0.5232992", "0.5230097", "0.52199924", "0.5212761", "0.52090114", "0.51987195", "0.51930714", "0.51918924", "0.51898074", "0.5184075", "0.5179351", "0.51788086", "0.5173151" ]
0.7149818
3
Test the functionality of the KafkaGroupIODataset when a new consumer group reads data from multiple topics from the beginning.
def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():
    dataset = tfio.experimental.streaming.KafkaGroupIODataset(
        topics=["key-partition-test", "key-test"],
        group_id="cgtesttertiary",
        servers="localhost:9092",
        configuration=[
            "session.timeout.ms=7000",
            "max.poll.interval.ms=8000",
            "auto.offset.reset=earliest",
        ],
    )
    assert np.all(
        sorted(k.numpy() for (k, _) in dataset)
        == sorted([("D" + str(i)).encode() for i in range(100)] * 2)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n 
servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... 
checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_read_group(self):\n pass", "def test_iter(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f').reshape((10, 3))\n dset = grp.create_dataset('foo', data=data)\n for x, y in zip(dset, data):\n assert len(x) == 3\n assert np.array_equal(x, y)", "def test_includes_two_new_datasets(self):\n new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for new_dataset in new_datasets:\n self.assertIn(new_dataset, table.data)", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def test_get_single_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_groups(group_id, topic_id)", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test_update_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def test_get_full_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_groups(group_id, topic_id)", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_dataset_intermediate_group(setup_teardown_file):\n f = setup_teardown_file[3]\n\n # Trying to create intermediate groups that are absolute should fail just\n # like when creating them on groups.\n with pytest.raises(NotImplementedError):\n f.create_dataset(\"/foo/bar/baz\", shape=(10, 10), dtype='<i4')\n\n ds = f.create_dataset(\"foo/bar/baz\", shape=(10, 10), dtype='<i4')\n assert isinstance(ds, Dataset)\n assert \"/foo/bar/baz\" in f", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': 
api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n 
ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def test_list_topic_entries_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_topic_entries_groups(group_id, topic_id)", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def test_mark_all_entries_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def test_merge_datasets(self):\n disk.merge_datasets(self.input_datasets[0:2], self.output_dataset)\n self.assertEqual(4, len(self.output_dataset.metadata()))", "def test_set_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n testdata = np.ones((10, 2))\n grp['testdata'] = testdata\n outdata = grp['testdata'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype\n\n grp['testdata'] = testdata", "def test_create_new_discussion_topic_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_get_device_groups1(self):\n pass", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def test_pipeline2(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_groups(self):\n # Make a group and send to it\n channel_layer.group_add(\"tgroup\", \"tg_test\")\n channel_layer.group_add(\"tgroup\", \"tg_test2\")\n channel_layer.group_add(\"tgroup\", \"tg_test3\")\n channel_layer.group_discard(\"tgroup\", \"tg_test3\")\n channel_layer.send_group(\"tgroup\", {\"value\": \"orange\"})\n # Receive from the two channels in the group and ensure messages\n channel, message = channel_layer.receive_many([\"tg_test\"])\n self.assertEqual(channel, \"tg_test\")\n self.assertEqual(message, {\"value\": \"orange\"})\n channel, message = channel_layer.receive_many([\"tg_test2\"])\n self.assertEqual(channel, \"tg_test2\")\n self.assertEqual(message, {\"value\": \"orange\"})\n # Make sure another channel does not get a message\n channel, message = channel_layer.receive_many([\"tg_test3\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_multi_sub(self):\n # Create bus\n bus = pubsub.PubSubBus()\n\n topic_data_dict = {\n 'foo': 'data',\n 'bar': 589,\n 'baz': {'a': 1, 'b': 2}}\n\n # Create multi-subscriber and publish to all topics before subscribing.\n multi_sub = MultiSubscriber(self, bus, topic_data_dict)\n\n for (topic, data) in topic_data_dict.iteritems():\n bus.publish(topic, data)\n\n self.assertEquals(multi_sub.num_cb, 0)\n\n # Subscribe to all topics\n multi_sub.subscribe_topics()\n\n # Publish several times in various combos of topics and then verify\n # counts\n\n for (topic, data) in topic_data_dict.iteritems():\n bus.publish(topic, data)\n\n self.assertEquals(multi_sub.num_cb, len(topic_data_dict))\n\n for (topic, data) in topic_data_dict.iteritems():\n bus.publish(topic, data)\n bus.publish(topic, data)\n\n self.assertEquals(multi_sub.num_cb, 3 * len(topic_data_dict))\n\n bus.publish('foo', topic_data_dict['foo'])\n self.assertEquals(multi_sub.num_cb, 3 * len(topic_data_dict) + 1)", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def test_multiple_groups(self, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_keys = []\n for i in xrange(35):\n group_key = Mock()\n group_key.urlsafe.return_value = i + 100\n group_keys.append(group_key)\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups(group_keys, event_key, message_key, '')\n\n self.assertEqual(4, insert_tasks_mock.call_count)\n\n call_history = insert_tasks_mock.call_args_list\n\n for i in range(3):\n self.assertEqual(11, len(call_history[i][0][0]))\n\n self.assertEqual(2, len(call_history[3][0][0]))", "def test_describe_consumer_group_does_not_exist(kafka_admin_client):\n with pytest.raises(GroupCoordinatorNotAvailableError):\n group_description = kafka_admin_client.describe_consumer_groups(['test'])", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def test_reorder_pinned_topics_groups(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "async def test_modify_topics(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await mqtt_mock_entry()\n calls1 = []\n\n @callback\n def record_calls1(*args):\n \"\"\"Record calls.\"\"\"\n calls1.append(args)\n\n calls2 = []\n\n @callback\n def record_calls2(*args):\n \"\"\"Record calls.\"\"\"\n calls2.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\n \"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls1},\n \"test_topic2\": {\"topic\": \"test-topic2\", \"msg_callback\": record_calls2},\n },\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 0\n\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1_1\", \"msg_callback\": record_calls1}},\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n\n async_fire_mqtt_message(hass, \"test-topic1_1\", \"test-payload\")\n assert len(calls1) == 2\n assert calls1[1][0].topic == \"test-topic1_1\"\n assert calls1[1][0].payload == \"test-payload\"\n assert len(calls2) == 1\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1_1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n\n assert len(calls1) == 2\n assert len(calls2) == 1", "def test_pipeline2(self):\n\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config,\n selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_mark_topic_as_unread_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.mark_topic_as_unread_groups(group_id, topic_id)", "def test_scp_callback_return_dataset_multi(self):\n self.scp = DummyVerificationSCP()\n self.scp.status = Dataset()\n self.scp.status.Status = 0x0001\n self.scp.status.ErrorComment = 'Test'\n self.scp.start()\n\n ae = AE()\n ae.add_requested_context(VerificationSOPClass)\n assoc = ae.associate('localhost', 11112)\n assert assoc.is_established\n rsp = assoc.send_c_echo()\n assert rsp.Status == 0x0001\n assert rsp.ErrorComment == 'Test'\n assoc.release()\n self.scp.stop()", "def test_get_device_group_by_id1(self):\n pass", "def test_get_groupings_between_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=False), [])", "def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = 
grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)", "def test_producer_send_messages_keyed(self):\n first_part = 43\n second_part = 56\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part, 102]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"35\"\n key2 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, key=key2, msgs=msgs2)\n # Check the expected request was sent\n msgSet1 = create_message_set(make_send_requests(msgs1, key=key1), producer.codec)\n msgSet2 = create_message_set(make_send_requests(msgs2, key=key2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n producer.stop()", "def test_update_device_group_by_id1(self):\n pass", "def test_list_entries_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.list_entries_groups(group_id, topic_id, ids=None)", "def test_reshape(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.arange(30, dtype='f')\n dset = grp.create_dataset('foo', shape=(10, 3), data=data)\n assert dset.shape == (10, 3)\n assert np.array_equal(dset.data, data.reshape((10, 3)))", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def test_sql_server_cdc_no_more_data(sdc_builder, sdc_executor, database, no_of_threads):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(max_pool_size=no_of_threads,\n no_of_threads=no_of_threads)\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n tables = []\n no_of_records = 5\n rows_in_database = 
setup_sample_data(no_of_threads * no_of_records)\n\n for index in range(0, no_of_threads):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, DEFAULT_SCHEMA_NAME, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{DEFAULT_SCHEMA_NAME}_{table_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, rows_in_database, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)\n\n logger.info('Dropping table %s in %s database...', dest_table, database.type)\n dest_table.drop(database.engine)", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_acquire_dataset(self):\n\n # make sure the data does not yet exist\n with self.subTest(name='no data yet'):\n response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], -2)\n\n # acquire sample data into local database\n # mock out network calls to external hosts\n with self.subTest(name='first acquisition'), \\\n patch.object(Network, 'fetch_metadata', return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', side_effect=[self.test_utils.load_sample_dataset(\"dataset0.csv\"), # dataset for 3/13\n self.test_utils.load_sample_dataset(\"dataset0.csv\"), # first dataset for 3/15\n self.test_utils.load_sample_dataset()] # second dataset for 3/15\n ) as mock_fetch:\n acquired = Update.run()\n self.assertTrue(acquired)\n self.assertEqual(mock_fetch_meta.call_count, 1)\n\n # make sure the data now exists\n with self.subTest(name='initial data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)\n row = response['epidata'][0]\n self.assertEqual(row['state'], 'WY')\n self.assertEqual(row['date'], 20201209)\n self.assertEqual(row['issue'], 20210315)\n self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)\n actual = row['inpatient_bed_covid_utilization']\n expected = 0.11729857819905214\n self.assertAlmostEqual(actual, expected)\n self.assertIsNone(row['critical_staffing_shortage_today_no'])\n\n # expect 61 fields per row (63 database columns, except `id` and `record_type`)\n self.assertEqual(len(row), 61)\n\n with self.subTest(name='all date batches acquired'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), issues=20210313)\n self.assertEqual(response['result'], 1)\n\n # re-acquisition of the same dataset should be a no-op\n with self.subTest(name='second acquisition'), \\\n patch.object(Network, 'fetch_metadata', 
return_value=self.test_utils.load_sample_metadata()) as mock_fetch_meta, \\\n patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:\n acquired = Update.run()\n self.assertFalse(acquired)\n\n # make sure the data still exists\n with self.subTest(name='final data checks'):\n response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))\n self.assertEqual(response['result'], 1)\n self.assertEqual(len(response['epidata']), 1)", "def test_taskgroup_set(self, test_taskgroup_dag):\n # Unpack the fixture\n dag, group, (op1, op2, op3, op4) = test_taskgroup_dag\n # Arrange them with a Label in the middle\n op1.set_downstream(group, Label(\"Group label\"))\n group.set_downstream(op4)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op1.task_id, op3.task_id) == {\"label\": \"Group label\"}\n assert dag.get_edge_info(op3.task_id, op4.task_id) == {}", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received 
at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_partially_update_device_group_by_id(self):\n pass", "def test_get_groupings_within_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=True), [])", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def _single_group_offsets_callback(self, consumer_group, response):\n single_group_offsets = self.kafka_client._list_consumer_group_offsets_process_response(response)\n self.log.debug(\"Single group offsets: %s\", single_group_offsets)\n for (topic, partition), (offset, _metadata) in single_group_offsets.items():\n # If the OffsetFetchRequest explicitly specified partitions, the offset could returned as -1, meaning there\n # is no recorded offset for that partition... 
for example, if the partition doesn't exist in the cluster.\n # So ignore it.\n if offset == -1:\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()\n continue\n key = (consumer_group, topic, partition)\n self._consumer_offsets[key] = offset", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]", "def test_pipeline3(self, batch_size):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy1, config=config)\n dummy2 = DummyPackProcessor()\n nlp.add(component=dummy2)\n dummy3 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 2 * batch_size}}\n nlp.add(component=dummy3, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH][PACK][BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def data_group():\n ...", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def test_one_group(self, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_keys = []\n for i in range(9):\n group_key = Mock()\n group_key.urlsafe.return_value = i + 100\n group_keys.append(group_key)\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups(group_keys, event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)", "def test_dataset():\n # File paths\n intervals_file = \"example_files/intervals.bed\"\n target_file = \"example_files/targets.tsv\"\n gtf_file = \"example_files/gencode.v24.annotation_chr22.gtf\"\n fasta_file = \"example_files/hg38_chr22.fa\"\n ds = SeqDistDataset(intervals_file, fasta_file, gtf_file, target_file)\n\n ds[0]\n ds[10]\n it = ds.batch_iter(32)\n next(it)", "def test_very_many_partitions_and_instances_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(99):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i for i in range(999)}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])", "def _list_groups_callback(self, broker_id, response):\n for consumer_group, group_type in self.kafka_client._list_consumer_groups_process_response(response):\n # consumer groups from Kafka < 0.9 that store their offset in Kafka don't use Kafka for group-coordination\n # so their group_type is empty\n if group_type in ('consumer', 
''):\n single_group_offsets_future = self._list_consumer_group_offsets_send_request(\n group_id=consumer_group, group_coordinator_id=broker_id\n )\n single_group_offsets_future.add_callback(self._single_group_offsets_callback, consumer_group)\n self._consumer_futures.append(single_group_offsets_future)", "def test_get_device_groups(self):\n pass", "def test_pipeline4(self, batch_size):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummyPackProcessor()\n nlp.add(component=dummy1)\n\n dummy2 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy2, config=config)\n\n dummy3 = DummyPackProcessor()\n nlp.add(component=dummy3)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK][BATCH][PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_pipeline3(self, batch_size):\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy1, config=config,\n selector=FirstPackSelector())\n dummy2 = DummyPackProcessor()\n nlp.add(component=dummy2, selector=FirstPackSelector())\n dummy3 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 2 * batch_size}}\n nlp.add(component=dummy3, config=config,\n selector=FirstPackSelector())\n 
nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH][PACK][BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_pipeline4(self, batch_size):\n\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummyPackProcessor()\n nlp.add(component=dummy1, selector=FirstPackSelector())\n\n dummy2 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy2, config=config,\n selector=FirstPackSelector())\n\n dummy3 = DummyPackProcessor()\n nlp.add(component=dummy3,\n selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK][BATCH][PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def describe_group(args, topic):\n global bootstrap\n out = ()\n\n consumer = KafkaConsumer(\n bootstrap_servers=bootstrap,\n group_id=\"backbeat-replication-group-{0}\".format(args.destination),\n enable_auto_commit=False,\n )\n topics = consumer.topics()\n if not topic in topics:\n return False\n\n for part in consumer.partitions_for_topic(topic):\n tp = TopicPartition(topic, part)\n consumer.assign([tp])\n committed = consumer.committed(tp)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n try:\n out += (\n {\n \"topic\": topic,\n \"partition\": part,\n \"committed\": committed,\n \"last_offset\": last_offset,\n \"lag\": (last_offset - committed),\n },\n )\n except TypeError:\n sys.stderr.write(\"bad/missing info on consumer group (doesn't exist?)\\n\")\n sys.exit(1)\n\n consumer.close(autocommit=False)\n return out", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def test_very_many_partitions_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(10000):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = 
json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])", "def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def test_message_group():", "def test_producer_send_messages_batched_partial_success(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n topic2 = \"tpsmbps_two\"\n client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}\n client.metadata_error_for_topic.return_value = False\n\n init_resp = [\n ProduceResponse(self.topic, 0, 0, 10),\n ProduceResponse(self.topic, 1, 6, 20),\n ProduceResponse(topic2, 5, 0, 30),\n ]\n next_resp = [\n ProduceResponse(self.topic, 2, 0, 10),\n ProduceResponse(self.topic, 1, 0, 20),\n ProduceResponse(topic2, 4, 0, 30),\n ]\n failed_payloads = [\n (ProduceRequest(self.topic, ANY, ANY), NotLeaderForPartitionError()),\n (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),\n ]\n\n client.send_produce_request.side_effect = [\n fail(Failure(FailedPayloadsError(init_resp, failed_payloads))),\n succeed(next_resp),\n ]\n\n msgs = self.msgs(range(10))\n results = []\n\n producer = Producer(client, batch_send=True, batch_every_t=0)\n # Send 5 total requests: 4 here, one after we make sure we didn't\n # send early\n results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))\n results.append(producer.send_messages(topic2, msgs=msgs[3:5]))\n results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))\n results.append(producer.send_messages(topic2, msgs=msgs[8:9]))\n # No call yet, not enough messages\n self.assertFalse(client.send_produce_request.called)\n # Enough messages to start the request\n client.reset_topic_metadata.reset_mock()\n results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))\n # Before the retry, there should be some results\n self.assertEqual(init_resp[0], self.successResultOf(results[0]))\n self.assertEqual(init_resp[2], self.successResultOf(results[3]))\n # And the errors should have forced a metadata reset on one of the topics.\n client.reset_topic_metadata.assert_called_with(self.topic)\n # Advance the clock to trigger retries.\n clock.advance(producer._retry_interval)\n # Check the otehr results came in\n self.assertEqual(next_resp[0], self.successResultOf(results[4]))\n self.assertEqual(next_resp[1], self.successResultOf(results[2]))\n self.assertEqual(next_resp[2], self.successResultOf(results[1]))\n\n producer.stop()", "def _setup_consumer(self):\n # <WTF> https://github.com/dpkp/kafka-python/issues/601\n 
self.available_topics = self.client.topics()\n # </WTF>\n\n # might as well use it\n assert self.topic in self.available_topics\n\n if (self.start_params is None) != (self.end_params is None):\n raise ValueError(\"Both start and end params must be set or both must be None\")\n\n if self.start_params is None:\n # setup partitions to read through\n # TODO not checked with multiple partitions since inheriting from foxglove\n # An offset is assigned to make repeatability (via a locking file) possible later on.\n # and it's easier to terminate the fetch loop this way.\n p_id = self.client.partitions_for_topic(self.topic)\n topic_partitions = [TopicPartition(topic=self.topic, partition=p) for p in list(p_id)]\n starts = self.client.beginning_offsets(topic_partitions)\n ends = self.client.end_offsets(topic_partitions)\n\n self.start_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset, timestamp=None)\n for tp, offset in starts.items()\n }\n self.end_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset - 1, timestamp=None)\n for tp, offset in ends.items()\n }\n\n else:\n # TODO - this code was inherited from Foxglove and hasn't be checked through\n # setup start and end partitions and offsets\n # self.client.seek_to_beginning()\n # datetime is only start/end implemented\n assert isinstance(self.start_params, datetime) and isinstance(self.end_params, datetime)\n start = int(self.start_params.timestamp() * 1000)\n end = int(self.end_params.timestamp() * 1000)\n\n partitions = self.client.partitions_for_topic(self.topic)\n tx = {TopicPartition(topic=self.topic, partition=p): start for p in list(partitions)}\n self.start_p_offsets = self.client.offsets_for_times(tx)\n\n # if you give a timestamp after the last record it returns None\n for tp, offset_details in self.start_p_offsets.items():\n if offset_details is None:\n raise ValueError(\"Start date outside of available messages\")\n\n tx = {TopicPartition(topic=self.topic, partition=p): end for p in list(partitions)}\n self.end_p_offsets = self.client.offsets_for_times(tx)\n\n # as above - out of range, for end offset give something useful\n for tp, offset_details in self.end_p_offsets.items():\n if offset_details is None:\n # go to last message. 
I'm not 100% sure this is correct\n end_offsets = self.client.end_offsets([tp])\n offset = end_offsets[tp] - 1\n self.end_p_offsets[tp] = OffsetAndTimestamp(offset=offset, timestamp=None)", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def test_create_device_group1(self):\n pass", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def test_pipeline1(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummyPackProcessor()\n nlp.add(dummy)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_batches_are_accessible(\n monkeypatch,\n multibatch_generic_csv_generator,\n multibatch_generic_csv_generator_context,\n):\n\n context: DataContext = multibatch_generic_csv_generator_context\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n datasource_name = 
\"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n\n datasource = context.datasources[datasource_name]\n\n data_connector = datasource.data_connectors[data_connector_name]\n\n total_batches: int = 20\n file_list = multibatch_generic_csv_generator(\n data_path=data_path, num_event_batches=total_batches\n )\n\n assert (\n data_connector._get_data_reference_list_from_cache_by_data_asset_name(\n data_asset_name=asset_name\n )\n == file_list\n )\n\n batch_request_1 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -1,\n },\n )\n # Should give most recent batch\n validator_1 = context.get_validator(\n batch_request=batch_request_1,\n create_expectation_suite_with_name=\"my_expectation_suite_name_1\",\n )\n metric_max = validator_1.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches\n metric_value_set = validator_1.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n batch_request_2 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -2,\n },\n )\n validator_2 = context.get_validator(\n batch_request=batch_request_2,\n create_expectation_suite_with_name=\"my_expectation_suite_name_2\",\n )\n metric_max = validator_2.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches - 1\n metric_value_set = validator_2.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n for batch_num in range(1, total_batches + 1):\n batch_request = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -batch_num,\n },\n )\n validator = context.get_validator(\n batch_request=batch_request,\n create_expectation_suite_with_name=f\"my_expectation_suite_name__{batch_num}\",\n )\n metric_max = validator.get_metric(\n MetricConfiguration(\n \"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"}\n )\n )\n assert metric_max == (total_batches + 1) - batch_num\n metric_value_set = validator.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}", "def test_object_names_topic(sdc_builder, sdc_executor, cluster, test_name, topic_name):\n raw_data = {'key': 'value'}\n\n # Build the Kafka destination pipeline.\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=True\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n 
topic=topic_name,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(title=f'Kafka Destination Topic Names {test_name}').configure_for_environment(cluster)\n pipeline.configuration['rateLimit'] = 1\n\n sdc_executor.add_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic_name])\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # There should be no messages in Kafka\n msgs_received = [json.loads(msg.value.decode()) for msg in consumer]\n assert 1 == len(msgs_received)\n assert raw_data == msgs_received[0]" ]
[ "0.7916993", "0.7726499", "0.75612617", "0.73106486", "0.7162436", "0.7129199", "0.71261126", "0.6688718", "0.6652531", "0.63588357", "0.629463", "0.62792355", "0.61795956", "0.61230063", "0.61221826", "0.6078071", "0.6073669", "0.6049846", "0.5994584", "0.59919417", "0.5970307", "0.58744025", "0.58737206", "0.58544785", "0.584843", "0.5842035", "0.57678336", "0.57650626", "0.57622993", "0.5761189", "0.5734449", "0.571912", "0.5698072", "0.5675166", "0.5669938", "0.5662632", "0.5658702", "0.56381893", "0.5636177", "0.5627865", "0.56137395", "0.5608314", "0.55965847", "0.5583923", "0.558225", "0.55792624", "0.5573106", "0.5567498", "0.5547996", "0.55367243", "0.5518876", "0.5515745", "0.5507359", "0.54991585", "0.5488089", "0.5474219", "0.5465505", "0.5455232", "0.5449558", "0.5444931", "0.5443517", "0.5443517", "0.5443276", "0.54282475", "0.54265964", "0.54252803", "0.5420304", "0.5419433", "0.5415195", "0.5412119", "0.54047453", "0.53895265", "0.53895265", "0.5389322", "0.537653", "0.5369931", "0.535117", "0.5350102", "0.53411555", "0.53386635", "0.5334703", "0.53340226", "0.5332108", "0.5328118", "0.5322257", "0.52956975", "0.52873206", "0.5284914", "0.5281006", "0.52785414", "0.5261908", "0.52544683", "0.5251872", "0.52514225", "0.52344036", "0.5228257", "0.52249277", "0.5221798", "0.5213657", "0.52100784" ]
0.7474016
3
Test the functionality of the `auto.offset.reset` configuration at global and topic level
def test_kafka_group_io_dataset_auto_offset_reset(): dataset = tfio.experimental.streaming.KafkaGroupIODataset( topics=["key-partition-test"], group_id="cgglobaloffsetearliest", servers="localhost:9092", configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "auto.offset.reset=earliest", ], ) assert np.all( sorted(k.numpy() for (k, _) in dataset) == sorted(("D" + str(i)).encode() for i in range(100)) ) dataset = tfio.experimental.streaming.KafkaGroupIODataset( topics=["key-partition-test"], group_id="cgglobaloffsetlatest", servers="localhost:9092", configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "auto.offset.reset=latest", ], ) assert np.all(sorted(k.numpy() for (k, _) in dataset) == []) dataset = tfio.experimental.streaming.KafkaGroupIODataset( topics=["key-partition-test"], group_id="cgtopicoffsetearliest", servers="localhost:9092", configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "conf.topic.auto.offset.reset=earliest", ], ) assert np.all( sorted(k.numpy() for (k, _) in dataset) == sorted(("D" + str(i)).encode() for i in range(100)) ) dataset = tfio.experimental.streaming.KafkaGroupIODataset( topics=["key-partition-test"], group_id="cgtopicoffsetlatest", servers="localhost:9092", configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "conf.topic.auto.offset.reset=latest", ], ) assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_reset_time(self):\n pass", "def test_reset():\n dev = _aws_device(wires=2)\n dev._circuit = CIRCUIT\n dev._task = TASK\n\n dev.reset()\n assert dev.circuit is None\n assert dev.task is None", "def reset_topic(bot, trigger):\n global report\n\n if get_state():\n if is_reported():\n for channel in bot.channels:\n bot.msg(channel, \"Clearing my changes from topic\")\n print(\"TOPIC RESET\")\n parse_topic(bot, trigger, 0, 1)\n\n else:\n bot.msg(CHANNEL, \"No changes made.\")", "def test_reset_default_calls(self):\n check_attr(self.o, 'reset')\n self.assertIsNone(self.o.reset(), \"no output expected\")\n self.subtest_someAgents(self.o, 2, 10)\n for _ in range(2):\n self.o.getFirme(_).reset = addCpt(self.o.getFirme(_).reset)\n for _ in range(10):\n self.o.getConsommateur(_).reset = addCpt(self.o.getConsommateur(_).reset)\n \n for x in range(2):\n self.assertEqual(self.o.getFirme(x).reset.cpt, 0,\n \"exactly one reset for firms\") \n for x in range(10):\n self.assertEqual(self.o.getConsommateur(x).reset.cpt, 0,\n \"exactly one reset for consumers\")\n self.o.reset()\n for x in range(2):\n self.assertEqual(self.o.getFirme(x).reset.cpt, 1,\n \"exactly one reset for firms\") \n for x in range(10):\n self.assertEqual(self.o.getConsommateur(x).reset.cpt, 1,\n \"exactly one reset for consumers\")", "def exec_reset():\n # {{{1\n def python_callback(event):\n print('__RESET__')\n update(task='test:main:reset', timeframe='5m', axis_reset=True, active=False)\n return python_callback", "def test_01_factory_reset(self):\n time.sleep(_LOG_CATCH_UP_DELAY)\n start_time = datetime.datetime.now()\n\n self.device.factory_reset()\n self.assertTrue(\n self.device.connected,\n f\"{self.device.name} is offline after factory_reset() execution \"\n \"finished. factory_reset should block until the device comes back \"\n \"online and becomes responsive.\")\n self._verify_no_unexpected_reboots(start_time)", "def test_reset_default(self):\n check_attr(self.o, 'reset')\n self.subtest_noAgent(self.o)\n self.assertIsNone(self.o.reset(), \"no output expected\")\n self.subtest_someAgents(self.o, 2, 10)", "def reset_config():\n return _set_config(_gen_config())", "def testGetConfigOffset(self):\n self.ports.getconfig_offset(file_name = 'get_offset.xml', port_ids = portsDict['port_ids'], offsets = portsDict['offset'])", "def svc_reset_zone_config(self) -> None:\n self._call_client_api(self._device.reset_config)", "def reset_config():\r\n # TODO implement configuration reset\r\n pass", "def tearDown(self):\n updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()\n updateConfigurationCmd.name = \"use.external.dns\"\n updateConfigurationCmd.value = \"false\"\n updateConfigurationCmd.scopename = \"zone\"\n updateConfigurationCmd.scopeid = 1\n self.apiClient.updateConfiguration(updateConfigurationCmd)", "def teardown_function(function):\n print(\"Resetting PVs\", flush=True)\n prod = ProducerWrapper(\"localhost:9092\", CONFIG_TOPIC, \"\")\n prod.stop_all_pvs()\n\n defaults = {\n PVDOUBLE: 0.0,\n # We have to use this as the second parameter for caput gets parsed as empty so does not change the value of\n # the PV\n PVSTR: \"\",\n PVLONG: 0,\n PVENUM: np.array([\"INIT\"]).astype(np.string_),\n PVDOUBLE_WITH_ALARM_THRESHOLDS: 0.0,\n }\n\n for key, value in defaults.items():\n change_pv_value(key, value)\n change_pv_value(PVFLOATARRAY, INITIAL_FLOATARRAY_VALUE)\n sleep(3)", "def teardown_function(function):\n print(\"Resetting PVs\", flush=True)\n prod = ProducerWrapper(\"localhost:9092\", CONFIG_TOPIC, 
\"\")\n prod.stop_all_pvs()\n\n defaults = {\n PVDOUBLE: 0.0,\n # We have to use this as the second parameter for caput gets parsed as empty so does not change the value of\n # the PV\n PVSTR: '\"\"',\n PVLONG: 0,\n PVENUM: \"INIT\",\n }\n\n for key, value in defaults.items():\n change_pv_value(key, value)\n change_array_pv_value(PVFLOATARRAY, \"3 1.1 2.2 3.3\")\n sleep(3)", "def test_update_topic_config(self):\n test_topic_config = {\n 'test.topic': {\n 'schema_name': 'test.schema'\n }\n }\n local_topic_config = eventlogging.topic.get_topic_config()\n local_topic_config.update(test_topic_config)\n\n # append the new test topic config to the global topic config\n eventlogging.topic.update_topic_config(test_topic_config)\n\n # test that the global topic config is what it should be\n self.assertEqual(\n eventlogging.topic.get_topic_config(),\n local_topic_config\n )", "def __init__(self, CC: object, auto_offset_reset: str=\"latest\"):\n self.config = CC.config\n if self.config[\"messaging_service\"]!=\"none\" and \"kafka\" in self.config and self.config['messaging_service']==\"kafka\":\n self.hostIP = self.config['kafka']['host']\n self.hostPort = self.config['kafka']['port']\n self.auto_offset_reset= auto_offset_reset\n self.producer = KafkaProducer(bootstrap_servers=str(self.hostIP)+\":\"+str(self.hostPort), api_version=(0,10),\n value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n compression_type='gzip')\n\n self.consumer = KafkaConsumer(bootstrap_servers=str(self.hostIP)+\":\"+str(self.hostPort), api_version=(0,10),\n auto_offset_reset=self.auto_offset_reset)", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "async def admin_reset(self, ctx: commands.Context):\n await self.config.clear_all()\n await self.initialize_internals()\n await ctx.send('Global team management factory reset complete.')", "def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)", "async def test_hb_off(self):\n await self.async_setup()\n off = TopicItem(\n self._off_topic,\n {\n \"cmd1\": 0x13,\n \"cmd2\": 0x00,\n \"target\": Address(\"000004\"),\n \"user_data\": None,\n \"hops_left\": 3,\n },\n 0.05,\n )\n send_topics([off])\n await asyncio.sleep(0.1)\n assert not self._heartbeat\n assert not self._heartbeat_off\n assert self._heartbeat_on is None", "def test_snat_with_kube_manager_restart(self):\n self.addCleanup(self.invalidate_kube_manager_inspect)\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_kube_manager()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, 
client2,\n client3, client4)", "async def test_persistent_state_after_reconfig(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await mqtt_mock_entry()\n discovery_data = '{ \"name\": \"Milk\", \"state_topic\": \"test-topic\", \"command_topic\": \"test-topic\", \"options\": [\"milk\", \"beer\"]}'\n await help_test_discovery_setup(hass, SELECT_DOMAIN, discovery_data, \"milk\")\n\n # assign an initial state\n async_fire_mqtt_message(hass, \"test-topic\", \"beer\")\n state = hass.states.get(\"select.milk\")\n assert state.state == \"beer\"\n assert state.attributes[\"options\"] == [\"milk\", \"beer\"]\n\n # remove \"milk\" option\n discovery_data = '{ \"name\": \"Milk\", \"state_topic\": \"test-topic\", \"command_topic\": \"test-topic\", \"options\": [\"beer\"]}'\n await help_test_discovery_setup(hass, SELECT_DOMAIN, discovery_data, \"milk\")\n\n # assert the state persistent\n state = hass.states.get(\"select.milk\")\n assert state.state == \"beer\"\n assert state.attributes[\"options\"] == [\"beer\"]", "def test_reset(u_boot_console):\n\n u_boot_console.run_command('reset', wait_for_prompt=False)\n assert(u_boot_console.validate_exited())", "def setUp(self):\n self.setup_start_servers = False\n super(ZeroConfigTest, self).setUp()", "def test_reset(self):\n # Set tracked variables to some non-zero values\n base_gcmc_sampler.n_accepted = 1\n base_gcmc_sampler.n_moves = 1\n base_gcmc_sampler.Ns = [1]\n\n # Reset base_gcmc_sampler\n base_gcmc_sampler.reset()\n\n # Check that the values have been reset\n assert base_gcmc_sampler.n_accepted == 0\n assert base_gcmc_sampler.n_moves == 0\n assert len(base_gcmc_sampler.Ns) == 0\n\n return None", "def factory_reset():\n command_file = os.path.join(\n config.updater.cache_partition, 'ubuntu_command')\n with atomic(command_file) as fp:\n print('format data', file=fp)\n log.info('Performing a factory reset')\n config.hooks.apply().apply()", "def test_reset_reset(self):\n check_attr(self.o, 'reset')\n self.o.reset()\n self.subtest_someAgents(self.o, 2, 10)\n _0 = self.patch_agent_reset(self.o)\n # Démarrage des patches et stockage des mocks\n _1 = [_.start() for _ in _0 ]\n self.assertEqual(sum([_.call_count for _ in _1]), 0)\n self.o.reset()\n self.assertEqual(sum([_.call_count for _ in _1]), len(_0), \"individual calls expected\")\n # Fin du patching\n # for _ in _0 : _.stop()\n hum.patch.stopall()", "def test_reconfigure_then_listen(sysmon_tester_agent):\n new_config = _test_config.copy()\n new_config['base_topic'] = 'test2/sysmon'\n sysmon_tester_agent.vip.rpc.call('platform.sysmon', 'reconfigure',\n **new_config)\n listen(sysmon_tester_agent, new_config)", "def test_reset_resetAgents(self):\n check_attr(self.o, 'reset')\n check_attr(self.o, 'resetAgents')\n self.o.reset()\n self.subtest_someAgents(self.o, 2, 10)\n _0 = self.patch_agent_reset(self.o)\n # Démarrage des patches et stockage des mocks\n _1 = [_.start() for _ in _0 ]\n self.assertEqual(sum([_.call_count for _ in _1]), 0)\n self.o.resetAgents() # every agent\n self.assertEqual(sum([_.call_count for _ in _1]), len(_0))\n self.o.resetAgents(False) # consumer only\n self.assertEqual(sum([_.call_count for _ in _1]), len(_0)+10)\n self.o.resetAgents(True, False) # firm only\n self.assertEqual(sum([_.call_count for _ in _1]), 2*len(_0))\n self.o.resetAgents(False, False) # none\n self.assertEqual(sum([_.call_count for _ in _1]), 2*len(_0))\n # Fin du patching\n # for _ in _0 : _.stop()\n hum.patch.stopall()", "def antenny_config_reset(self):\n return 
self.antenny_config.reset_default_config()", "def setUp(self):\n super().setUp()\n gcm.reset()", "def test_shutdown_restart_recovered(self):\n log.info(\"CONFIG: %s\", self._agent_config())\n self.create_sample_data_set_dir(\n 'DOS15908_1st7_step1.DAT',\n RECOV_DIR,\n \"DOS15908.DAT\",\n copy_metadata=False\n )\n\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n\n # Slow down processing to 1 per second to give us time to stop\n self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})\n self.assert_start_sampling()\n\n # Verify we get one sample\n try:\n # Read the first file and verify the data\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1)\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED, 1)\n result.extend(result1)\n log.debug(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1r.txt.result.yml')\n self.assert_sample_queue_size(DataParticleType.METADATA_RECOVERED, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE_RECOVERED, 0)\n\n self.create_sample_data_set_dir(\n 'DOS15908_1st7_step4.DAT',\n RECOV_DIR,\n \"DOS15909.DAT\",\n copy_metadata=False\n )\n # Now read the first records of the second file then stop\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED, 2)\n log.debug(\"RESULT 1: %s\", result1)\n\n # stop and re-start the agent\n self.stop_dataset_agent_client()\n self.init_dataset_agent_client()\n # re-initialize\n self.assert_initialize()\n\n result2 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED, 2)\n log.debug(\"RESULT 2: %s\", result2)\n result = result1\n result.extend(result2)\n log.debug(\"RESULT: %s\", result)\n self.assert_sample_queue_size(DataParticleType.METADATA_RECOVERED, 1)\n self.assert_data_values(result, 'test_data_4r.txt.result.yml')\n except SampleTimeout as e:\n log.error(\"Exception trapped: %s\", e, exc_info=True)\n self.fail(\"Sample timeout.\")", "def test_reset(self, scml_system):\n scml_system._t = 12\n scml_system._k = 33\n state_space = scml_system.state_space\n state_positions = scml_system.state_positions\n initial_state = scml_system.reset()\n target = np.array([0, 0, 0, 0, 0, 0, 560]) / scml_system.limits\n assert np.all(initial_state == target), 'Initial states of the system are incorrect'\n assert scml_system._t == 0, 'Time of the system was not set to zero after reset'\n assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'\n assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \\\n == scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\\\n 'The reset was not passed to all components of the SCMLSystem'\n assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'\n assert all(scml_system._ode_solver.y == np.zeros_like(\n scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float\n )), ' The ode solver was not reset correctly'", "def test_reset_fixtures_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_reset_fixtures()\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n assert caplog.messages == [\n 'The `reset_fixture` endpoint is not enabled. 
The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/reset-fixtures/',\n ]", "def reset(self):\n raise AssertionError(\"Reset function not implemented\")", "def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def reset_config():\n\n Config.config().update({\"coerce\": True, \"debug\": True, \"active\": True})", "def test_configure_kafka_publisher_abort_run_true(tmp_path, RE):\n # write a temporary file for this test\n test_config_file_path = tmp_path / \"bluesky_kafka_config.yml\"\n with open(test_config_file_path, \"wt\") as f:\n f.write(test_bluesky_kafka_config_true)\n\n bluesky_kafka_configuration, publisher_details = configure_kafka_publisher(\n RE,\n \"abc\",\n override_config_path=test_config_file_path\n )\n\n assert publisher_details.__class__.__name__ == \"SubscribeKafkaPublisherDetails\"\n assert publisher_details.beamline_topic == \"abc.bluesky.runengine.documents\"\n assert publisher_details.bootstrap_servers == \"kafka1:9092,kafka2:9092,kafka3:9092\"\n assert publisher_details.re_subscribe_token == 0", "def test_configure_kafka_publisher_abort_run_false(tmp_path, RE):\n # write a temporary file for this test\n test_config_file_path = tmp_path / \"bluesky_kafka_config.yml\"\n with open(test_config_file_path, \"wt\") as f:\n f.write(test_bluesky_kafka_config_false)\n\n bluesky_kafka_configuration, publisher_details = configure_kafka_publisher(\n RE,\n \"abc\",\n override_config_path=test_config_file_path\n )\n\n assert publisher_details.__class__.__name__ == \"SubscribeKafkaQueueThreadPublisherDetails\"\n assert publisher_details.beamline_topic == \"abc.bluesky.runengine.documents\"\n assert publisher_details.bootstrap_servers == \"kafka1:9092,kafka2:9092,kafka3:9092\"\n assert publisher_details.re_subscribe_token is None", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def reset(self):\n self._topics.clear()", "def on_reset(self):\n pass", "def svc_reset_zone_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if 
group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def reset(self):\n\n return bool(APIConsumer.post(\"/reset\"))", "def reset(self, config, **kwargs):\n pass", "def testClusterfuzzAnalysisReset(self):\n analysis = ClusterfuzzAnalysis()\n analysis.crashed_type = 'check'\n analysis.crash_address = '0x0000'\n analysis.sanitizer = 'ASAN'\n analysis.job_type = 'android_asan_win'\n analysis.Reset()\n self.assertIsNone(analysis.crashed_type)\n self.assertIsNone(analysis.crashed_address)\n self.assertIsNone(analysis.sanitizer)\n self.assertIsNone(analysis.job_type)", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "async def test_no_change(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n mqtt_mock = await mqtt_mock_entry()\n\n calls = []\n\n @callback\n def record_calls(*args):\n \"\"\"Record calls.\"\"\"\n calls.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls}},\n )\n await async_subscribe_topics(hass, sub_state)\n subscribe_call_count = mqtt_mock.async_subscribe.call_count\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 1\n\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls}},\n )\n await async_subscribe_topics(hass, sub_state)\n assert subscribe_call_count == mqtt_mock.async_subscribe.call_count\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 2\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls) == 2", "def reset() -> None:\n ...", "def test_shutdown_restart(self):\n log.info(\"CONFIG: %s\", self._agent_config())\n self.create_sample_data_set_dir('node59p1_step2.dat', TELEM_DIR, \"node59p1.dat\")\n\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n\n # Slow down processing to 1 per second to give us time to stop\n self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})\n self.assert_start_sampling()\n\n # Verify we get one sample\n try:\n # Read the first file and verify the data\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1)\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\n result.extend(result1)\n log.debug(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1-2.txt.result.yml')\n 
self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n self.create_sample_data_set_dir('node59p1_step4.dat', TELEM_DIR, \"node59p1.dat\")\n # Now read the first record of the second file then stop\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\n log.debug(\"RESULT 1: %s\", result)\n self.assert_stop_sampling()\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n # stop and re-start the agent\n self.stop_dataset_agent_client()\n self.init_dataset_agent_client()\n # re-initialize\n self.assert_initialize()\n\n result2 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 3)\n log.debug(\"RESULT 2: %s\", result2)\n result.extend(result2)\n log.debug(\"RESULT: %s\", result)\n self.assert_data_values(result, 'test_data_3-4.txt.result.yml')\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n except SampleTimeout as e:\n log.error(\"Exception trapped: %s\", e, exc_info=True)\n self.fail(\"Sample timeout.\")", "def test_states_reset(self):\n\n rule = ('alert(name:\"test1\"; side:client; match:\"AB\"; state:set,foo; match:\"AB\";)\\n'\n 'block(name:\"test2\"; state:is,foo;)\\n'\n 'alert(name:\"test3\"; side:server; match:\"AB\";)\\n')\n\n tests = {\n (\"ABCD\", \"ABCD\"): [\"proxying connection from\", \"INFO : filter matched: 'test3'\"],\n (\"ABAB\", \"\"): [\"proxying connection from\", \"INFO : blocking connection: filter matched 'test2'\", \"INFO : closed connection\"],\n }\n\n self.run_rules(rule, tests, echo=True)", "def test_resetAgents_default(self):\n check_attr(self.o, 'resetAgents')\n self.subtest_noAgent(self.o)\n self.assertIsNone(self.o.resetAgents(), \"no output expected\")\n self.subtest_noAgent(self.o)", "def soft_reset() -> None:\n ...", "def tearDown(self):\n reset()", "def test_reset_settings(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(4)\n self.feature_test.reset_settings()\n\n generated = Feature(\"testing\")\n self.assertEqual(generated.percentage, 0)\n self.assertFalse(3 in generated.whitelist)\n self.assertFalse(4 in generated.blacklist)", "def test_shutdown_restart(self):\n log.info(\"START QUAL TEST SHUTDOWN RESTART\")\n self.create_sample_data('all_A0000003.DEC', \"A0000003.DEC\")\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n self.dataset_agent_client.set_resource(\n {DriverParameter.RECORDS_PER_SECOND: 1})\n self.assert_start_sampling()\n\n try:\n # Read the first file (3 velocity records) and verify the data.\n result = self.get_samples(DataParticleType.VELOCITY_PARTICLE, 3)\n time_result = self.get_samples(DataParticleType.TIME_PARTICLE, 1)\n result.extend(time_result)\n\n # Verify values\n self.assert_data_values(result, 'all_A0000003.yml')\n self.verify_queue_empty()\n\n # Read the first 2 velocity records of the second file then stop.\n self.create_sample_data('valid_A0000004.DEC', \"A0000004.DEC\")\n result = self.get_samples(DataParticleType.VELOCITY_PARTICLE, 2)\n self.assert_stop_sampling()\n self.verify_queue_empty()\n\n # Stop the agent\n self.stop_dataset_agent_client()\n # Re-start the agent\n self.init_dataset_agent_client()\n # Re-initialize\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n\n # Restart sampling and get the last 2 records of the file\n # and combine with the previous ones we read.\n 
self.assert_start_sampling()\n result2 = self.get_samples(DataParticleType.VELOCITY_PARTICLE, 2)\n result.extend(result2)\n\n # Get the time record and combine with previous records.\n time_result = self.data_subscribers.get_samples(\n DataParticleType.TIME_PARTICLE, 1)\n result.extend(time_result)\n self.assert_data_values(result, 'valid_A0000004.yml')\n\n self.verify_queue_empty()\n\n except SampleTimeout as e:\n log.error(\"Exception trapped: %s\", e, exc_info=True)\n self.fail(\"Sample timeout.\")\n\n log.info(\"END QUAL TEST SHUTDOWN RESTART\")", "def testReset(self):\n \n clk = Signal(0)\n rst = Signal(1)\n clock_gen = ClkDriver(clk, period=4)\n \n out = Signal(intbv(0)[4:])\n counter = Counter(out, clk, rst)\n \n def test():\n for i in range(200):\n # count up to 9 then reset\n if int(out) == 9:\n rst.next = 0\n yield delay(1)\n self.assertEqual(int(out), 0)\n # turn off reset next time\n else:\n rst.next = 1\n yield delay(1)\n \n check = test()\n sim = Simulation(counter, clock_gen, check)\n sim.run(400, quiet=1)", "def setUp(self):\n recorder = opentracing.tracer.recorder\n recorder.clear_spans()", "def devices_reset(module):\n # If any configlet changed updated 'changed' flag\n changed = False\n # Compare configlets against cvp_facts-configlets\n reset_device = [] # devices to factory reset\n reset = []\n newTasks = [] # Task Ids that have been identified during device actions\n taskList = [] # Tasks that have a pending status after function runs\n\n for cvp_device in module.params['cvp_facts']['devices']:\n # Include only devices that match filter elements, \"all\" will\n # include all devices.\n if match_filter(device_filter=module.params['device_filter'], device_name=cvp_device['hostname']):\n try:\n device_action = module.client.api.reset_device(\"Ansible\", cvp_device)\n except Exception as error:\n errorMessage = str(error)\n message = \"Device %s cannot be reset - %s\" % (cvp_device['hostname'], errorMessage)\n reset.append({cvp_device['hostname']: message})\n else:\n if \"errorMessage\" in str(device_action):\n message = \"Device %s cannot be Reset - %s\" % (cvp_device['hostname'], device_action['errorMessage'])\n reset.append({cvp_device['hostname']: message})\n else:\n changed = True\n if 'taskIds' in str(device_action):\n for taskId in device_action['data']['taskIds']:\n newTasks.append(taskId)\n reset.append({cvp_device['hostname']: 'Reset-%s' % taskId})\n else:\n reset.append({cvp_device['hostname']: 'Reset-No_Tasks'})\n taskList = get_tasks(taskid_list=newTasks, module=module)\n\n data = {'reset': reset, 'tasks': taskList}\n return data", "def soft_reset():", "def test_config_changed_non_leader(\n self,\n ) -> NoReturn:\n self.harness.set_leader(is_leader=False)\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)", "def test_resume_offset(sdc_builder, sdc_executor, azure):\n container_name = get_random_string(string.ascii_lowercase, 10)\n event_hub_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n\n azure_iot_event_hub_consumer = builder.add_stage(name=AZURE_IOT_EVENT_HUB_STAGE_NAME).set_attributes(\n container_name=container_name,\n data_format='JSON',\n event_hub_name=event_hub_name)\n\n wiretap = builder.add_wiretap()\n\n azure_iot_event_hub_consumer >> wiretap.destination\n\n consumer_origin_pipeline = builder.build().configure_for_environment(azure)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n 
create_blob_container(azure, container_name)\n\n try:\n eh_service_bus = azure.event_hubs.service_bus\n\n logger.info('Creating event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n assert eh_service_bus.create_event_hub(event_hub_name)\n\n send_records = [{'Body': f'Event {msg}'} for msg in range(10)]\n eh_service_bus.send_event(event_hub_name, json.dumps(send_records))\n\n sdc_executor.start_pipeline(consumer_origin_pipeline)\n sdc_executor.wait_for_pipeline_metric(consumer_origin_pipeline, 'input_record_count', 1, timeout_sec=120)\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n first_iteration_records = wiretap.output_records[0].field\n\n assert len(first_iteration_records) != 0\n results = [{key: value for key, value in record.items()} for record in first_iteration_records]\n assert results == send_records\n\n wiretap.reset()\n\n # Try adding more data and resuming from the offset\n send_records2 = [{'Body': f'Event {msg}'} for msg in range(10, 20)]\n eh_service_bus.send_event(event_hub_name, json.dumps(send_records2))\n\n sdc_executor.start_pipeline(consumer_origin_pipeline)\n sdc_executor.wait_for_pipeline_metric(consumer_origin_pipeline, 'input_record_count', 1, timeout_sec=120)\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n second_iteration_records = wiretap.output_records[0].field\n assert len(second_iteration_records) != 0\n results = [{key: value for key, value in record.items()} for record in second_iteration_records]\n assert results == send_records2\n\n assert len(second_iteration_records) + len(first_iteration_records) == len(send_records) + len(send_records2)\n\n finally:\n try:\n if sdc_executor.get_pipeline_status(consumer_origin_pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n except Exception as err:\n logger.error('Could not stop pipeline. Reason found: %s', err)\n\n try:\n logger.info('Deleting event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n event_hub_exists = True\n while event_hub_exists:\n eh_service_bus.delete_event_hub(event_hub_name)\n try:\n eh_service_bus.get_event_hub(event_hub_name)\n except Exception:\n event_hub_exists = False\n except Exception as err:\n logger.error('Failure deleting event hub %s. Reason found: %s', event_hub_name, err)\n\n try:\n logger.info('Deleting container %s on storage account %s', container_name, azure.storage.account_name)\n azure.storage.delete_blob_container(container_name)\n except Exception as err:\n logger.error('Failure deleting container %s. 
Reason found: %s', container_name, err)", "def test_reset_unmask(self):\r\n module = CapaFactory.create(xml=self.common_shuffle_xml)\r\n get_request_dict = {CapaFactory.input_key(): 'mask_0'}\r\n module.check_problem(get_request_dict)\r\n # On reset, 'old_state' should use unmasked names\r\n with patch.object(module.runtime, 'track_function') as mock_track_function:\r\n module.reset_problem(None)\r\n mock_call = mock_track_function.mock_calls[0]\r\n event_info = mock_call[1][1]\r\n self.assertEquals(mock_call[1][0], 'reset_problem')\r\n self.assertEquals(event_info['old_state']['student_answers'][CapaFactory.answer_key()], 'choice_2')\r\n self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])", "def tearDown(self):\n # set the config module level variables back to None\n config.config._conf_parser = None\n config.config._user_config_file = None", "def test_reset(sim):\n repeats = 3\n dt = 1\n sim.setup(timestep=dt, min_delay=dt)\n p = sim.Population(1, sim.IF_curr_exp(i_offset=0.1))\n p.record('v')\n\n for i in range(repeats):\n sim.run(10.0)\n sim.reset()\n data = p.get_data(clear=False)\n sim.end()\n\n assert len(data.segments) == repeats\n for segment in data.segments[1:]:\n assert_array_almost_equal(segment.analogsignals[0],\n data.segments[0].analogsignals[0], 10)", "def test_heartbeat(self):\n pass", "def reset(self):\n error_estop = \"\"\"\\\nE-Stop is ASSERTED. Disengage E-Stop and then reset the robot.\n\"\"\"\n error_nonfatal = \"\"\"Non-fatal Robot Error on reset.\nRobot reset cleared stopped state and robot can be enabled, but a non-fatal\nerror persists. Check diagnostics or rethink.log for more info.\n\"\"\"\n error_env = \"\"\"Failed to reset robot.\nPlease verify that the ROS_IP or ROS_HOSTNAME environment variables are set\nand resolvable. For more information please visit:\nhttp://sdk.rethinkrobotics.com/wiki/RSDK_Shell#Initialize\n\"\"\"\n is_reset = lambda: (self._state.enabled == False and\n self._state.stopped == False and\n self._state.error == False and\n self._state.estop_button == 0 and\n self._state.estop_source == 0)\n pub = rospy.Publisher('robot/set_super_reset', Empty, queue_size=10)\n\n if (self._state.stopped and\n self._state.estop_button == AssemblyState.ESTOP_BUTTON_PRESSED):\n rospy.logfatal(error_estop)\n raise IOError(errno.EREMOTEIO, \"Failed to Reset: E-Stop Engaged\")\n\n rospy.loginfo(\"Resetting robot...\")\n try:\n baxter_dataflow.wait_for(\n test=is_reset,\n timeout=3.0,\n timeout_msg=error_env,\n body=pub.publish\n )\n except OSError as e:\n if e.errno == errno.ETIMEDOUT:\n if self._state.error == True and self._state.stopped == False:\n rospy.logwarn(error_nonfatal)\n return False\n raise", "def deconfigure(self):\n\n pass", "def reset(self):\n\n ## Turn off controller to bring to a known state\n try:\n self.logger.info(\"Turning off sta3800 controller (sta3800_off).\")\n ccdsetup.sta3800_off()\n except Exception:\n self.logger.exception(\"Unable to turn off controller! 
State may be unknown.\")\n raise\n else:\n self.logger.info(\"Controller turned off successfully.\")\n\n ## Initialize controller\n try:\n self.logger.info(\"Turning on sta3800 controller (sta3800_setup).\")\n ccdsetup.sta3800_setup()\n except Exception:\n self.logger.exception(\"Unable to turn on sta3800 controller!\")\n raise\n else:\n self.logger.info(\"Controller turned on successfully.\")", "def test_order_reset(self):\n test_order = self._create_orders(1)[0]\n resp = self.app.delete('/orders/reset')\n self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(resp.data), 0)\n # make sure they are deleted\n resp = self.app.get('/orders/{}'.format(test_order.id),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not 
making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_reset_resetTerrain(self):\n check_attr(self.o, 'reset')\n check_attr(self.o, 'resetTerrain')\n self.o.reset()\n self.subtest_someAgents(self.o, 2, 10)\n _0 = self.patch_agent_reset(self.o)\n # Démarrage des patches et stockage des mocks\n _1 = [_.start() for _ in _0 ]\n self.assertEqual(sum([_.call_count for _ in _1]), 0)\n self.o.resetTerrain()\n self.assertEqual(sum([_.call_count for _ in _1]), 0, \"no changes expected\")\n # Fin du patching\n # for _ in _0 : _.stop()\n hum.patch.stopall()", "def hard_reset() -> NoReturn:", "def test_set_page_config_reset(self):\n\n fake_enqueue = lambda msg: None\n ctx = ReportContext(\"TestSessionID\", fake_enqueue, \"\", None, MagicMock(), None)\n\n msg = ForwardMsg()\n msg.page_config_changed.title = \"foo\"\n\n ctx.enqueue(msg)\n ctx.reset()\n try:\n ctx.enqueue(msg)\n except StreamlitAPIException:\n self.fail(\"set_page_config should have succeeded after reset!\")", "def test_reset_configuration(self):\r\n # Test resetting the default profile (no profile arguments passed)\r\n profile = self.profile_manager.get('default')\r\n open(profile.get_filepath('freeseer.conf'), 'w+')\r\n open(profile.get_filepath('plugin.conf'), 'w+')\r\n self.assertTrue(os.path.exists(profile.get_filepath('plugin.conf')))\r\n self.assertTrue(os.path.exists(profile.get_filepath('freeseer.conf')))\r\n reset_configuration(self.config_dir)\r\n self.assertFalse(os.path.exists(profile.get_filepath('plugin.conf')))\r\n self.assertFalse(os.path.exists(profile.get_filepath('freeseer.conf')))\r\n\r\n # Test resetting a non-default profile\r\n profile = self.profile_manager.get('not-default')\r\n open(profile.get_filepath('freeseer.conf'), 'w+')\r\n open(profile.get_filepath('plugin.conf'), 'w+')\r\n self.assertTrue(os.path.exists(profile.get_filepath('plugin.conf')))\r\n self.assertTrue(os.path.exists(profile.get_filepath('freeseer.conf')))\r\n reset_configuration(self.config_dir, 'not-default')\r\n self.assertFalse(os.path.exists(profile.get_filepath('plugin.conf')))\r\n self.assertFalse(os.path.exists(profile.get_filepath('freeseer.conf')))", "def pibooth_reset(cfg, hard):", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n 
self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def test_reset(self):\n\t\tfor AI in self.list_of_AIs:\n\t\t\tAI.reset()", "def reset():\n pass", "def reset():\n pass", "def teardown():\n enstools.core.set_behavior(log_level=\"ERROR\")", "def test_load_auto_config(self, m_gke, m_eks, m_kind, m_mini, m_incluster):\n fun = k8s.load_auto_config\n\n m_incluster.return_value = (K8sConfig(), False)\n m_mini.return_value = (K8sConfig(), False)\n m_kind.return_value = (K8sConfig(), False)\n m_eks.return_value = (K8sConfig(), False)\n m_gke.return_value = (K8sConfig(), False)\n\n # Incluster returns a non-zero value.\n kubeconf, context = \"kubeconf\", \"context\"\n assert fun(kubeconf, context) == m_incluster.return_value\n m_incluster.assert_called_once_with()\n\n # Incluster fails but Minikube does not.\n m_incluster.return_value = (K8sConfig(), True)\n assert fun(kubeconf, context) == m_mini.return_value\n m_mini.assert_called_once_with(kubeconf, context)\n\n # Incluster & Minikube fail but KIND succeeds.\n m_mini.return_value = (K8sConfig(), True)\n assert fun(kubeconf, context) == m_kind.return_value\n m_kind.assert_called_once_with(kubeconf, context)\n\n # Incluster & Minikube & KIND fail but EKS succeeds.\n m_kind.return_value = (K8sConfig(), True)\n assert fun(kubeconf, context) == m_eks.return_value\n m_eks.assert_called_once_with(kubeconf, context, False)\n\n # Incluster & Minikube & KIND & EKS fail but GKE succeeds.\n m_eks.return_value = (K8sConfig(), True)\n assert fun(kubeconf, context) == m_gke.return_value\n m_gke.assert_called_once_with(kubeconf, context, False)\n\n # All fail.\n m_gke.return_value = (K8sConfig(), True)\n assert fun(kubeconf, context) == (K8sConfig(), True)", "def tearDown(self):\n test_utils.delete_test_config()", "def reset( self ):\n self.conf = self.defaults", "async def test_set_config(self):\n set_log_levels(logger_topics=True)\n\n disable_auto_linking = random_bool()\n monitor_mode = random_bool()\n auto_led = random_bool()\n deadman = random_bool()\n topic = f\"ack.{SET_IM_CONFIGURATION}\"\n topic_item = TopicItem(\n topic,\n {\n \"disable_auto_linking\": disable_auto_linking,\n \"monitor_mode\": monitor_mode,\n \"auto_led\": auto_led,\n \"deadman\": deadman,\n },\n 0.1,\n )\n\n modem = ModemBase()\n reset_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman)\n\n send_topics([topic_item])\n await modem.async_set_configuration(\n disable_auto_linking, monitor_mode, auto_led, deadman\n )\n await asyncio.sleep(0.1)\n\n assert modem.configuration[DISABLE_AUTO_LINKING].value == disable_auto_linking\n assert modem.configuration[MONITOR_MODE].value == monitor_mode\n assert modem.configuration[AUTO_LED].value == auto_led\n assert modem.configuration[DEADMAN].value == deadman\n\n assert modem.configuration[DISABLE_AUTO_LINKING].new_value is None\n assert modem.configuration[MONITOR_MODE].new_value is None\n assert modem.configuration[AUTO_LED].new_value is None\n assert modem.configuration[DEADMAN].new_value is None", "def test_shutdown_restart(self):\n log.info(\"CONFIG: %s\", self._agent_config())\n self.create_sample_data_set_dir(\n 'node59p1_step2.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n\n # Slow down processing to 1 per second to give us time to stop\n 
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})\n self.assert_start_sampling()\n\n # Verify we get one sample\n try:\n # Read the first file and verify the data\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1)\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED, 2)\n result.extend(result1)\n log.debug(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1-2.txt.result.yml')\n self.assert_sample_queue_size(DataParticleType.METADATA_TELEMETERED, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE_TELEMETERED, 0)\n\n self.create_sample_data_set_dir(\n 'node59p1_step4.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n # Now read the first records of the second file then stop\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED, 2)\n log.debug(\"RESULT 1: %s\", result1)\n self.assert_stop_sampling()\n self.assert_sample_queue_size(DataParticleType.METADATA_TELEMETERED, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE_TELEMETERED, 0)\n\n # stop and re-start the agent\n self.stop_dataset_agent_client()\n self.init_dataset_agent_client()\n # re-initialize\n self.assert_initialize()\n\n # Restart sampling and ensure we get the last 2 records of the file\n result2 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED, 2)\n log.debug(\"RESULT 2: %s\", result2)\n result = result1\n result.extend(result2)\n log.debug(\"RESULT: %s\", result)\n self.assert_sample_queue_size(DataParticleType.METADATA_TELEMETERED, 0)\n self.assert_data_values(result, 'test_data_3-4.txt.result.yml')\n self.assert_sample_queue_size(DataParticleType.METADATA_TELEMETERED, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE_TELEMETERED, 0)\n except SampleTimeout as e:\n log.error(\"Exception trapped: %s\", e, exc_info=True)\n self.fail(\"Sample timeout.\")", "def test_reset(self):\n p1 = self.player()\n p1.reset()\n self.assertEqual(p1.history, [])\n self.assertEqual(p1.genome[0], C)", "def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n 
\"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)" ]
[ "0.58305633", "0.5826309", "0.57031226", "0.56880814", "0.5674384", "0.56572354", "0.55940706", "0.55784523", "0.5553939", "0.5552377", "0.5534695", "0.5404209", "0.54005706", "0.5393411", "0.53927755", "0.53910935", "0.53739655", "0.5346079", "0.5346079", "0.5346079", "0.5346079", "0.53354084", "0.53354084", "0.53354084", "0.53354084", "0.5303943", "0.53038245", "0.52968043", "0.52688617", "0.52330655", "0.5225736", "0.5222155", "0.5212095", "0.5197556", "0.5193413", "0.5193208", "0.5145318", "0.51379365", "0.51352197", "0.5128528", "0.512435", "0.5122541", "0.51090163", "0.510777", "0.5106655", "0.51022494", "0.5100416", "0.5081952", "0.50600266", "0.5057245", "0.5052472", "0.50512636", "0.5044756", "0.50353056", "0.5032121", "0.50310016", "0.5019449", "0.50187296", "0.5018168", "0.50141263", "0.50061387", "0.5001987", "0.50015324", "0.49989244", "0.4995192", "0.49924037", "0.49914038", "0.49890202", "0.4981106", "0.49796757", "0.49796718", "0.49786308", "0.49783024", "0.49756393", "0.49727735", "0.4967171", "0.4955877", "0.49487603", "0.49473178", "0.49444267", "0.49399444", "0.49399343", "0.49397224", "0.49362233", "0.49329394", "0.49326", "0.49326", "0.49326", "0.49326", "0.49291196", "0.49264055", "0.49264055", "0.49257883", "0.49192607", "0.49188474", "0.49114326", "0.49097806", "0.4905972", "0.4903516", "0.4902266" ]
0.6541038
0
Test the functionality of the KafkaGroupIODataset when the consumer is configured to have an invalid stream_timeout value which is less than the message_timeout value.
def test_kafka_group_io_dataset_invalid_stream_timeout():\n    STREAM_TIMEOUT = -20\n    try:\n        tfio.experimental.streaming.KafkaGroupIODataset(\n            topics=["key-partition-test", "key-test"],\n            group_id="cgteststreaminvalid",\n            servers="localhost:9092",\n            stream_timeout=STREAM_TIMEOUT,\n            configuration=["session.timeout.ms=7000", "max.poll.interval.ms=8000"],\n        )\n    except ValueError as e:\n        assert str(\n            e\n        ) == "Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.".format(\n            STREAM_TIMEOUT\n        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_timeout_setting(self):\n self.assertEqual(self.es.sse_kwargs.get('timeout'),\n config.socket_timeout)", "def test_socket_timeout():\n schema = vol.Schema(cv.socket_timeout)\n\n with pytest.raises(vol.Invalid):\n schema(0.0)\n\n with pytest.raises(vol.Invalid):\n schema(-1)\n\n assert schema(None) == _GLOBAL_DEFAULT_TIMEOUT\n\n assert schema(1) == 1.0", "def test_timeout_not_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time()\n assert not connection._timeout_exceeded(start)", "def test_timeout_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time() - 11\n try:\n connection._timeout_exceeded(start)\n except NetmikoTimeoutException as exc:\n assert isinstance(exc, NetmikoTimeoutException)\n return\n\n assert False", "def test_timeout_invalid_start():\n connection = FakeBaseConnection(session_timeout=10)\n assert not connection._timeout_exceeded(start=0)", "def assert_timeout(self) -> None:", "def test_datasource_expires_chunksize_error(datasource_expires_config):\n ds1 = MockDataSource(\"mock\", datasource_expires_config(-1))\n with pytest.raises(ValueError, match=\"incompatible\"):\n _ = ds1.get_dataframe(chunksize=5)\n ds2 = MockDataSource(\"mock\", datasource_expires_config(20000))\n with pytest.raises(ValueError, match=\"incompatible\"):\n _ = ds2.get_dataframe(chunksize=5)\n ds3 = MockDataSource(\"mock\", datasource_expires_config(0))\n _ = ds3.get_dataframe(chunksize=5)\n ds4 = MockDataSource(\"mock\", datasource_expires_config(20000))\n _ = ds4.get_dataframe()", "def check_timeout(self, msg):\n if msg.clock.secs > self.timeout and not self.is_cancelled:\n rospy.loginfo(\"Test timed out, cancelling job\")\n self.utils.set_tag(name=self.test_name + \"_Status\", value=\"Failed\")\n self.utils.set_tag(name=self.test_name + \"_Timed_Out\", value=str(self.timeout))\n self.utils.cancel_job()", "def kafka_consumer_stats_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"kafka_consumer_stats_timeout\")", "def test_set_timeout_value_error(self, timeout):\n self.assertRaises(ValueError, self.root.set_timeout, timeout)", "def test_polling_plugin_timeout(self):\n pass", "def test_timed_context_no_metric_exception(self):\n\n def func(self):\n with self.statsd.timed():\n time.sleep(0.5)\n\n # Ensure the exception was raised.\n with 
pytest.raises(TypeError):\n func(self)\n\n # Ensure the timing was recorded.\n packet = self.statsd.socket.recv()\n self.assertIsNone(packet)", "def test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n np.testing.assert_array_equal(res, self.pandas_frame)", "def test_producer_send_timer_failed(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n batch_t = 5\n\n # FIXME: Don't use patch to test logging\n with patch.object(aProducer, \"log\") as klog:\n producer = Producer(client, batch_send=True, batch_every_t=batch_t)\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n d = producer.send_messages(self.topic, msgs=msgs)\n # Check no request was yet sent\n self.assertFalse(client.send_produce_request.called)\n # Patch Producer's Deferred to throw an exception\n with patch.object(aProducer, \"Deferred\") as d:\n d.side_effect = ValueError(\"test_producer_send_timer_failed induced failure\")\n # Advance the clock\n clock.advance(batch_t)\n # Check the expected message was logged by the looping call restart\n klog.warning.assert_called_once_with(\n \"Batch timer failed: %s. Will restart.\",\n ANY,\n exc_info=ANY,\n )\n # Check that the looping call was restarted\n self.assertTrue(producer._sendLooper.running)\n\n producer.stop()", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_get_timeouts_disabled(self):\n\n self.set_options(timeouts=False, timeout_default=2)\n task = self.create_task(self.context())\n\n self.assertIsNone(task._timeout_for_targets([targetA, targetB]))", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def test_get_timeouts_with_maximum(self):\n\n self.set_options(timeouts=True, timeout_maximum=1)\n task = self.create_task(self.context())\n self.assertEquals(task._timeout_for_targets([targetC]), 1)", "def test_pool_timeout_hw(self):\n self.test_pool_timeout()", "def test__put_afk_timeout_into():\n for input_value, defaults, expected_output in (\n (AFK_TIMEOUT_DEFAULT, False, {'afk_timeout': AFK_TIMEOUT_DEFAULT}),\n (60, False, {'afk_timeout': 60}),\n ):\n data = put_afk_timeout_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def test_bound_size_of_output_queue_size_reader(synthetic_dataset):\n TIME_TO_GET_TO_STATIONARY_STATE = 0.5\n\n with make_reader(synthetic_dataset.url, reader_pool_type='process', workers_count=1) as reader:\n assert 0 == reader.diagnostics['items_produced']\n next(reader)\n # Verify that we did not consume all rowgroups (should be 10) and ventilator throttles number of ventilated\n # items\n sleep(TIME_TO_GET_TO_STATIONARY_STATE)\n assert reader.diagnostics['items_consumed'] < 5\n assert reader.diagnostics['items_inprocess'] < 5", "def test_timeout_retries(self):\n\n batch = Batch(Mock())\n 
self.check_instance(batch=batch)\n\n self.assertEqual(batch.timeout_retries, 0)\n self.check_instance(batch, timeout_retries=0)\n\n batch.timeout_retries = 10\n self.assertEqual(batch.timeout_retries, 10)\n self.check_instance(batch, timeout_retries=10)\n\n batch.timeout_retries = 0\n self.assertEqual(batch.timeout_retries, 0)\n self.check_instance(batch, timeout_retries=0)\n\n batch.timeout_retries = 1\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n\n # exceptions\n ## error messages\n value_error = \"'timeout_retries' must be positive, i.e. greater or equal that zero (>=0).\"\n type_error = f\"'timeout_retries' must be of type {int}.\"\n\n #######################################################################\n # test wrong value\n with self.assertRaises(ValueError) as error:\n batch.timeout_retries = -1\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, value_error)\n\n #######################################################################\n # test wrong type\n with self.assertRaises(TypeError) as error:\n batch.timeout_retries = True\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, type_error)\n\n with self.assertRaises(TypeError) as error:\n batch.timeout_retries = '2'\n self.assertEqual(batch.timeout_retries, 1)\n self.check_instance(batch, timeout_retries=1)\n check_error_message(self, error, type_error)", "def test_recv_nolimit(self):\n self.driver.send_nolimit(self.msg_long)\n msg_flag, msg_recv = self.instance.recv_nolimit(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_long)", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... 
checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_too_many_requests(self):\n try:\n self._mock_time_series(error=fitbit_exceptions.HTTPTooManyRequests,\n error_attrs={'retry_after_secs': 35})\n except fitbit_exceptions.HTTPTooManyRequests:\n self.assertEqual(sys.exc_info()[1].retry_after_secs, 35)\n else:\n assert False, 'Should have thrown exception'", "def test_send_subscriber_timeout(self):\n\n class TimeoutConnection(object):\n reliable_subscriber = False\n\n def send_frame(self, frame):\n raise socket.timeout(\"timed out\")\n\n def reset(self):\n pass\n\n dest = '/topic/dest'\n\n bad_client = TimeoutConnection()\n\n # Subscribe both a good client and a bad client.\n self.tm.subscribe(bad_client, dest)\n self.tm.subscribe(self.conn, dest)\n\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n # Make sure out good client got the message.\n self.assertEqual(len(self.conn.frames), 1)\n subscription = self.conn.frames[0].headers.pop(\"subscription\", None)\n self.assertEqual(subscription, 0)\n self.assertEqual(self.conn.frames[0], f)\n\n # Make sure our bad client got disconnected\n # (This might be a bit too intimate.)\n connections = {s.connection for s in self.tm._subscriptions.subscribers(dest)}\n self.assertNotIn(bad_client, connections)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], 
on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 
123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_bad_interval(self):\n # Intentionally set a small interval (3 min) to fail.\n interval = np.timedelta64(3, 'm')\n self.assertFalse(utils.check_timestamps(self.times, interval))", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_neg_list_size_with_incorrect_policy(self):\n key = ('test', 'demo', 1)\n policy = {\n 'timeout': 0.5\n }\n try:\n self.as_connection.list_size(key, \"contact_no\", {}, policy)\n\n except e.ParamError as exception:\n assert exception.code == -2\n assert exception.msg == \"timeout is invalid\"", "def _check_timeouts(self, chunk_timeout, total_timeout):\n cur_time = time()\n\n if chunk_timeout is 
not None and cur_time > self._chunk_time + chunk_timeout:\n raise ChunkTimeout('Item timeout expired.')\n elif total_timeout is not None and cur_time > self._total_time + total_timeout:\n raise TotalTimeout('Total timeout expired.')", "def test_default_maximum_conflict(self):\n\n self.set_options(timeouts=True, timeout_maximum=1, timeout_default=10)\n task = self.create_task(self.context())\n with self.assertRaises(ErrorWhileTesting):\n task.execute()", "def test_set_blocking_timeout(self, _):\n\n project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key')\n\n # Assert that if invalid blocking_timeout is set, then exception is raised.\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Invalid blocking timeout \"invalid timeout\" provided.',\n ):\n project_config_manager.set_blocking_timeout('invalid timeout')\n\n # Assert that blocking_timeout cannot be set to less than allowed minimum and instead is set to default value.\n project_config_manager.set_blocking_timeout(-4)\n self.assertEqual(\n enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT, project_config_manager.blocking_timeout,\n )\n\n # Assert that blocking_timeout can be set to 0.\n project_config_manager.set_blocking_timeout(0)\n self.assertIs(0, project_config_manager.blocking_timeout)\n\n # Assert that if no blocking_timeout is provided, it is set to default value.\n project_config_manager.set_blocking_timeout(None)\n self.assertEqual(\n enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT, project_config_manager.blocking_timeout,\n )\n\n # Assert that if valid blocking_timeout is provided, it is set to that value.\n project_config_manager.set_blocking_timeout(5)\n self.assertEqual(5, project_config_manager.blocking_timeout)\n\n project_config_manager.stop()", "def test_timeoutRaises(self):\n\n @self.eventloop.wait_for(timeout=0.5)\n def times_out():\n return Deferred().addErrback(lambda f: f.trap(CancelledError))\n\n start = time.time()\n self.assertRaises(TimeoutError, times_out)\n self.assertTrue(abs(time.time() - start - 0.5) < 0.1)", "def test_set_timeout(init_process_group_mock):\n test_timedelta = timedelta(seconds=30)\n strategy = FSDPStrategy(timeout=test_timedelta, parallel_devices=[torch.device(\"cpu\")])\n strategy.cluster_environment = LightningEnvironment()\n strategy.accelerator = Mock()\n strategy.setup_environment()\n process_group_backend = strategy._get_process_group_backend()\n global_rank = strategy.cluster_environment.global_rank()\n world_size = strategy.cluster_environment.world_size()\n init_process_group_mock.assert_called_with(\n process_group_backend, rank=global_rank, world_size=world_size, timeout=test_timedelta\n )", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_server_no_save_timeout(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_config(self._cmd_args,\n imageroller.test.get_config_parser(\n self._server_no_save_timeout))\n self.assertEqual(\n str(cm.exception),\n \"Server Config for %s is missing SaveTimeoutMinutes\" %\n CONFIG_DATA[\"TestServerFQDN\"])", "def test_timeout(self):\n start = time.time()\n dr = 
EventualResult(Deferred(), None)\n self.assertRaises(TimeoutError, dr.wait, timeout=0.03)\n # be a little lenient for slow computers:\n self.assertTrue(abs(time.time() - start) < 0.05)", "def check_timeout(self) -> None:\n\n # find all timed out metric instances\n to_delete = [\n labelstr\n for labelstr, instance in self._data.items()\n if instance.is_timed_out\n ]\n\n # remove the metric instances\n for labelstr in to_delete:\n del self._data[labelstr]", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_wait_for_predicate_timeout(self):\n predicate_mock = mock.MagicMock(side_effect=[True, True, True])\n with self.assertRaises(TimeoutError):\n train_utils.wait_for_predicate(predicate_mock, num_retries=3)", "def assert_timeout(self) -> None:\n if self._cancelled:\n raise asyncio.TimeoutError from None", "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_RPC_TIMEOUT(self):\n self.assertIsInstance(constants.RPC_TIMEOUT, int,\n \"constants.RPC_TIMEOUT must be an integer.\")", "def has_set_timeout(self) -> bool:\n return False", "def test_stream(self):\n with skipping(NotImplementedError):\n self.es = EventStreamsTestClass(streams='recentchange')\n limit = 50\n self.es.set_maximum_items(limit)\n self.assertLength(list(self.es), limit)", "def timeout(self):\n pf.debug(\"TIMEOUT\")\n self.acceptData(TIMEOUT)", "def test_pool_timeout(self):\n namespace = \"runner.timeout\"\n timeouts = {\"after_interrupted\": [], \"process_alive\": [], \"process_died\": []}\n\n self.log.info(\"Before creating pools:\")\n for key in sorted(timeouts):\n timeouts[key].append(get_avocado_config_value(namespace, key))\n self.log.info(\" %s.%s = %s\", namespace, key, timeouts[key][0])\n\n self.add_pool(create=True, connect=True)\n self.add_pool(create=True, connect=False)\n self.add_pool(create=False)\n\n self.log.info(\"After creating pools:\")\n for key in sorted(timeouts):\n timeouts[key].append(get_avocado_config_value(namespace, key))\n self.log.info(\" %s.%s = %s\", namespace, key, timeouts[key][1])\n\n for key in sorted(timeouts):\n self.assertEqual(\n int(timeouts[key][1]) - int(timeouts[key][0]), POOL_TIMEOUT_INCREMENT * 3,\n \"Incorrect {}.{} value detected after adding 3 pools\".format(namespace, key))\n\n self.log.info(\"Test passed\")", "def pytest_timeout_set_timer(item, settings):", "def test_timeout(self):\n s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)\n poller = self.Poller()\n poller.register(s1, zmqpy.POLLIN)\n tic = time.time()\n evt = poller.poll(timeout=.005)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n tic = time.time()\n evt = poller.poll(timeout=5)\n toc = 
time.time()\n self.assertTrue(toc-tic < 0.1)\n self.assertTrue(toc-tic > .001)\n tic = time.time()\n evt = poller.poll(timeout=500)\n toc = time.time()\n self.assertTrue(toc-tic < 1)\n self.assertTrue(toc-tic > 0.1)", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_max_response_time():\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n assert type(cmd.max_response_time) is float\r\n assert cmd.max_response_time == 0.5", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def check_timeout(self, transport, earlier_time, interval, error_msg):\n now = datetime.datetime.now()\n secs = int((now - earlier_time).total_seconds())\n if secs >= interval:\n self.connection_lost(transport, f'{error_msg}: {secs} seconds')", "def test_get_timeouts_with_default(self):\n\n self.set_options(timeouts=True, timeout_default=2)\n task = self.create_task(self.context())\n\n self.assertEquals(task._timeout_for_targets([targetA, targetB]), 3)", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_LOOKUP_TIMEOUT(self):\n self.assertIsInstance(constants.LOOKUP_TIMEOUT, int,\n \"constants.LOOKUP_TIMEOUT must be an integer.\")", "def pytest_timeout_cancel_timer(item):", "def test_len_testset(self):\n self.assertEqual(self.__dataset.get_test_len, 1000)", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 
'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_get_timeouts_no_default(self):\n\n self.set_options(timeouts=True, timeout_default=None)\n task = self.create_task(self.context())\n\n self.assertIsNone(task._timeout_for_targets([targetA, targetB]))", "def test_zmq_execution_timeout(self, pool):\n socket = self._create_zmq_execution_mocks(pool, valid=False,\n response=None)\n\n command = stellr.SelectCommand(TEST_ZMQ)\n command.add_param('fq', 'field:filter')\n try:\n data = command.execute()\n except stellr.StellrError as e:\n self.assertTrue(e.timeout)\n self.assertEqual(e.status, -1)\n self.assertEqual(e.url, '/select?wt=json&fq=field%3Afilter')\n self.assertEqual(e.body, None)\n self.assertEqual(e.response, None)\n return\n\n self.assertFalse(True, 'Error should have been raised')", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def test_too_short_limit(self):\n message = \"few characters\"\n\n with self.assertRaises(AssertionError) as error:\n truncate_message(message, limit=2)\n\n self.assertEqual(str(error.exception), \"Limit too short\")", "def test_send_nolimit(self):\n msg_flag = self.instance.send_nolimit(self.msg_long)\n assert(msg_flag)\n msg_flag, msg_recv = self.driver.recv_nolimit(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_long)", "def test_producer_stop_waiting_to_retry(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f)]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, 
batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. Have to retry due to first failure\n self.assertNoResult(d)\n # Advance the clock, some, but not enough to retry\n clock.advance(producer._retry_interval / 2)\n # Stop the producer before the retry\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def _timeout(self):\n if self._store_timeout > 0 and (not self._messages.empty()):\n \n # Update Timestamp\n timestamp = 0\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # Remove Timeout Messages\n while (not self._messages.empty()):\n msg_time = self._messages.queue[0][0]\n if (timestamp - msg_time >= self._store_timeout) or\\\n (timestamp < msg_time and 4294967295 - \\\n msg_time + timestamp >= self._store_timeout):\n logger.warning(\"%s: message store timeout occurred.\" %\\\n (self.__class__.__name__))\n self._messages.get()\n else:\n break", "def on_timeout(self):\n logging.error(\"Streaming request timed out\")", "def test_timeout(self):\n # Attempt connection with short timeout\n with self.assertRaises(requests.exceptions.ReadTimeout):\n a = api.InvenTreeAPI(SERVER, username=USERNAME, password=PASSWORD, timeout=0.001) # noqa: F841", "def test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n assert(len(res) > 0)\n self.assert_equal_data_dict(res)", "def test_timeout_pending(self):\n deadline = Deadline(MS)\n timeout = deadline.timeout()\n self.assertGreater(timeout, 0)\n self.assertLess(timeout, MS)", "def test_only_one_delay_in_window(self):\n sliding_window = SlidingWindow(SLIDING_WINDOW_SIZE)\n sliding_window.delays = [100]\n self.assertEqual(sliding_window.get_median(), -1)", "def test_get_timeout():\n (read, connect) = AWSClient.get_timeout(None)\n assert read == 60 and connect == 10\n (read, connect) = AWSClient.get_timeout(\"100\")\n assert read == 100 and connect == 10\n (read, connect) = AWSClient.get_timeout(\"200,2\")\n assert read == 200 and connect == 2", "def test_producer_stop_during_request(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f), Deferred()]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n clock.advance(producer._retry_interval)\n\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def TestClosedSocketTimeout(self):\n self.txrx.timeout = 0.1 # Set a really short timeout so we don't hold up testing\n self.s.close()\n byte_array_msg_tx = bytes('\\x0C\\x0D\\x0E\\x0F\\x10\\x11', encoding=DATA_ENCODING)\n\n self.txrx.tx_msg(byte_array_msg_tx)\n with self.assertRaises(PercivalCommsError):\n reply = self.txrx.rx_msg()", "def test_NonrealtimeProvider_add_bus_group_error(session):\n provider = Provider.from_context(session)\n with pytest.raises(ValueError):\n provider.add_bus_group()\n with provider.at(0):\n with pytest.raises(ValueError):\n provider.add_bus_group(channel_count=0)\n with pytest.raises(ValueError):\n provider.add_bus_group(calculation_rate=\"scalar\")", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_timeout_with_crud_failures(self):\n\n # Local methods to validate vb_seqno\n\n def compare_vb_stat(stat_1, stat_2, vb, comparison=\"!=\"):\n keys_to_check = [\"high_seqno\", \"high_completed_seqno\"]\n result = True\n for key in keys_to_check:\n if vb in stat_1.keys():\n if stat_1[vb][\"uuid\"] != stat_2[vb][\"uuid\"]:\n self.log_failure(\"Mismatch 
in vb-%s UUID. %s != %s\"\n % (vb, stat_1[vb][\"uuid\"],\n stat_2[vb][\"uuid\"]))\n if comparison == \"!=\":\n if stat_1[vb][key] != stat_2[vb][key]:\n result = False\n self.log.warning(\n \"Mismatch in vb-%s stat %s. %s != %s\"\n % (vb, key, stat_1[vb][key], stat_2[vb][key]))\n elif stat_1[vb][key] == stat_2[vb][key]:\n result = False\n self.log.warning(\"Stat not updated for vb-%s stat %s. \"\n \"%s == %s\"\n % (vb, key,\n stat_1[vb][key], stat_2[vb][key]))\n return result\n\n def validate_vb_seqno_stats():\n \"\"\"\n :return retry_validation: Boolean denoting to retry validation\n \"\"\"\n retry_validation = False\n vb_info[\"post_timeout\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n for tem_vb_num in range(self.cluster.vbuckets):\n tem_vb_num = str(tem_vb_num)\n if tem_vb_num not in affected_vbs:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log_failure(\"Unaffected vb-%s stat\" % tem_vb_num)\n elif int(tem_vb_num) in target_nodes_vbuckets[\"active\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"active\", tem_vb_num))\n elif int(tem_vb_num) in target_nodes_vbuckets[\"replica\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num, comparison=\"==\") is False:\n retry_validation = True\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"replica\", tem_vb_num))\n return retry_validation\n\n shell_conn = dict()\n cbstat_obj = dict()\n error_sim = dict()\n target_nodes_vbuckets = dict()\n vb_info = dict()\n tasks = dict()\n doc_gen = dict()\n affected_vbs = list()\n\n target_nodes_vbuckets[\"active\"] = []\n target_nodes_vbuckets[\"replica\"] = []\n vb_info[\"init\"] = dict()\n vb_info[\"post_timeout\"] = dict()\n vb_info[\"afterCrud\"] = dict()\n\n # Override crud_batch_size to minimum value for testing\n self.crud_batch_size = 5\n self.key = \"test_collections\"\n self.sdk_timeout = 3\n\n # Select target vbucket type to load_docs\n target_vb_type = \"replica\"\n if self.simulate_error == CouchbaseError.STOP_PERSISTENCE \\\n and self.durability_level \\\n == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:\n target_vb_type = \"active\"\n\n # Create required scope/collection for successful CRUD operation\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n self.collection_name = self.bucket_util.get_random_name()\n self.log.info(\"Creating scope::collection %s::%s\"\n % (self.scope_name, self.collection_name))\n self.create_scope_collection()\n\n # Load docs into created collection\n self.log.info(\"Loading data into created collection\")\n load_gen = doc_generator(self.key, 0, self.num_items)\n task = self.task.async_load_gen_docs(\n self.cluster, self.bucket, load_gen, \"create\", 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=200, process_concurrency=8,\n timeout_secs=60)\n self.task_manager.get_task_result(task)\n if self.subdoc_test:\n load_gen = sub_doc_generator(self.key, 0, self.num_items/2)\n task = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket,\n load_gen, Bucket_Op.SubDocOps.INSERT,\n timeout_secs=self.sdk_timeout,\n compression=self.sdk_compression,\n path_create=True,\n batch_size=100,\n process_concurrency=8,\n 
durability=self.durability_level,\n scope=self.scope_name, collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool)\n self.task_manager.get_task_result(task)\n\n self.bucket.scopes[self.scope_name].collections[\n self.collection_name].num_items = self.num_items\n\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n for node in target_nodes:\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n target_nodes_vbuckets[\"active\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"active\")\n target_nodes_vbuckets[\"replica\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"replica\")\n vb_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])\n\n curr_time = int(time.time())\n expected_timeout = curr_time + self.sdk_timeout\n\n if target_vb_type == \"active\":\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"replica\"])))\n else:\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"active\"])))\n\n # Create required doc_generators\n doc_gen[\"create\"] = doc_generator(self.key, self.num_items,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"delete\"] = doc_generator(self.key, 0,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"read\"] = doc_generator(\n self.key, int(self.num_items/3),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"update\"] = doc_generator(\n self.key, int(self.num_items/2),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Create required subdoc generators\n doc_gen[\"insert\"] = sub_doc_generator(\n self.key, int(self.num_items/2), self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"upsert\"] = sub_doc_generator_for_edit(\n self.key, 0, self.crud_batch_size,\n template_index=1,\n target_vbucket=target_vbs)\n doc_gen[\"remove\"] = sub_doc_generator(\n self.key, 0, self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Perform specified action\n for node in target_nodes:\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n self.sleep(5, \"Wait for error_simulation to take effect\")\n\n ops_to_perform = [Bucket_Op.DocOps.CREATE, Bucket_Op.DocOps.UPDATE,\n Bucket_Op.DocOps.READ, Bucket_Op.DocOps.DELETE]\n if self.subdoc_test:\n ops_to_perform = [Bucket_Op.SubDocOps.INSERT,\n Bucket_Op.SubDocOps.UPSERT,\n Bucket_Op.SubDocOps.REMOVE]\n\n for op_type in ops_to_perform:\n self.log.info(\"Starting doc op %s\" % op_type)\n if op_type in Bucket_Op.DOC_OPS:\n tasks[op_type] = self.task.async_load_gen_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n suppress_error_table=True,\n print_ops_rate=False,\n skip_read_on_error=True)\n else:\n tasks[op_type] = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n path_create=True,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n 
print_ops_rate=False)\n\n self.task.jython_task_manager.get_task_result(tasks[op_type])\n\n # Validate task failures\n if op_type == Bucket_Op.DocOps.READ:\n # Validation for read task\n if len(tasks[op_type].fail.keys()) != 0:\n self.log_failure(\"Read failed for few docs: %s\"\n % tasks[op_type].fail.keys())\n else:\n # Validation of CRUDs - Update / Create / Delete\n for doc_id, crud_result in tasks[op_type].fail.items():\n vb_num = self.bucket_util.get_vbucket_num_for_key(\n doc_id, self.cluster.vbuckets)\n if SDKException.DurabilityAmbiguousException \\\n not in str(crud_result[\"error\"]):\n self.log_failure(\n \"Invalid exception for doc %s, vb %s: %s\"\n % (doc_id, vb_num, crud_result))\n\n # Revert the specified error scenario\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Check whether the timeout triggered properly\n if int(time.time()) < expected_timeout:\n self.log_failure(\"Timed-out before expected time\")\n\n for op_type in ops_to_perform:\n if op_type == Bucket_Op.DocOps.READ:\n continue\n while doc_gen[op_type].has_next():\n doc_id, _ = doc_gen[op_type].next()\n affected_vbs.append(\n str(self.bucket_util.get_vbucket_num_for_key(\n doc_id,\n self.cluster.vbuckets)))\n\n affected_vbs = list(set(affected_vbs))\n # Fetch latest stats and validate the seq_nos are not updated\n for node in target_nodes:\n retry_count = 0\n max_retry = 3\n while retry_count < max_retry:\n self.log.info(\"Trying to validate vbseq_no stats: %d\"\n % (retry_count+1))\n retry_count += 1\n retry_required = validate_vb_seqno_stats()\n if not retry_required:\n break\n self.sleep(5, \"Sleep for vbseq_no stats to update\")\n else:\n # This will be exited only if `break` condition is not met\n self.log_failure(\"validate_vb_seqno_stats verification failed\")\n\n self.validate_test_failure()\n\n # Get SDK Client from client_pool\n sdk_client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n # Doc error validation\n for op_type in ops_to_perform:\n task = tasks[op_type]\n\n if self.nodes_init == 1 \\\n and op_type != Bucket_Op.DocOps.READ \\\n and len(task.fail.keys()) != (doc_gen[op_type].end\n - doc_gen[op_type].start):\n self.log_failure(\"Failed keys %d are less than expected %d\"\n % (len(task.fail.keys()),\n (doc_gen[op_type].end\n - doc_gen[op_type].start)))\n\n # Create table objects for display\n table_view = TableView(self.log.error)\n ambiguous_table_view = TableView(self.log.info)\n table_view.set_headers([\"Key\", \"vBucket\", \"Exception\"])\n ambiguous_table_view.set_headers([\"Key\", \"vBucket\"])\n\n # Iterate failed keys for validation\n for doc_key, doc_info in task.fail.items():\n vb_for_key = self.bucket_util.get_vbucket_num_for_key(doc_key)\n\n if SDKException.DurabilityAmbiguousException \\\n not in str(doc_info[\"error\"]):\n table_view.add_row([doc_key, vb_for_key,\n doc_info[\"error\"]])\n\n ambiguous_table_view.add_row([doc_key, str(vb_for_key)])\n if op_type not in Bucket_Op.SUB_DOC_OPS:\n retry_success = \\\n self.durability_helper.retry_for_ambiguous_exception(\n sdk_client, op_type, doc_key, doc_info)\n if not retry_success:\n self.log_failure(\"%s failed in retry for %s\"\n % (op_type, doc_key))\n\n # Display the tables (if any errors)\n table_view.display(\"Unexpected exception during %s\" % op_type)\n ambiguous_table_view.display(\"D_Ambiguous exception during %s\"\n % op_type)\n\n # Release the acquired client\n 
self.sdk_client_pool.release_client(sdk_client)\n\n # Verify doc count after expected CRUD failure\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest stats and validate the values are updated\n for node in target_nodes:\n vb_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n if vb_info[\"init\"][node.ip] == vb_info[\"afterCrud\"][node.ip]:\n self.log_failure(\"vBucket seq_no stats not updated\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()", "def test_dataset_nonevenly_divisible_batch_size(self):\n with self.assertRaisesRegex(\n ValueError, 'Test data not evenly divisible by batch size: .*'):\n self._dataset = cifar10.CIFAR10Dataset(\n self._batch_size, batch_size_test=101)", "def test_consumer_cancel_during_shutdown(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n consumer_instance = mock_consumer.return_value\n consumer_start_d = defer.Deferred()\n consumer_instance.start.return_value = consumer_start_d\n consumer_instance._start_d = consumer_start_d\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(consumer_instance.start.called, True)\n\n def stop():\n consumer_start_d.errback(defer.CancelledError())\n\n consumer_instance.stop.side_effect = stop\n group.rejoin_after_error(Failure(RequestTimedOutError()))\n\n self.assertEqual(consumer_instance.stop.called, True)\n self.successResultOf(consumer_start_d)\n self.assertNoResult(start_d)", "def test_having_condition_with_preventing_aggregate_metrics_only(self):\n response = self.do_request(\n {\n \"field\": [\"transaction\", \"project\", \"p50(transaction.duration)\"],\n \"query\": \"event.type:transaction p50(transaction.duration):<50\",\n \"dataset\": \"metrics\",\n \"preventMetricAggregates\": \"1\",\n \"per_page\": 50,\n \"project\": self.project.id,\n }\n )\n assert response.status_code == 400, response.content", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = 
bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex", "def wait_for_data(self, duration):\n start_time = rospy.get_rostime()\n while not rospy.is_shutdown() and not (rospy.get_rostime() > (start_time + rospy.Duration(duration))):\n if len(self.graph.nodes) >= len(EXPECTED_NODES) and len(self.graph.topics) >= len(EXPECTED_TOPICS):\n return\n rospy.sleep(1.0)", "def is_timeout(self) -> bool:\n return self.runtime.timeout <= 0.0", "def test_configuration1(self):\n with self.assertRaises(AttributeError):\n AsyncConsumer(TEST_QUEUE, TEST_QUEUE, TEST_QUEUE, 'direct', process_msg)", "def test_pipeline_timeout(mockpipe_timeout, testdir):\n test = testdir.makepyfile(TEST_TIMEOUT)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 0\n assert len(skipped) == 0\n assert len(failed) == 1", "def test_set_timeout_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_timeout(None)", "def test_wait_for_page_in_timeout(self):\n start_time = datetime.now()\n with self.assertRaises(SpdbError):\n csdb = CacheStateDB(self.config_data)\n ch = csdb.create_page_in_channel()\n\n csdb.wait_for_page_in([\"MY_TEST_KEY1\", \"MY_TEST_KEY2\"], ch, 1)\n\n assert (datetime.now() - start_time).seconds < 3", "def get_test_timeout(self):\n return None", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def is_chunk_timeout(self, chunk_timeout): \n return time() - self._chunk_timeout_time > chunk_timeout", "def test__validate_topic__1():\n for input_value in (\n 'a' * (TOPIC_LENGTH_MAX + 1),\n ):\n with vampytest.assert_raises(ValueError):\n validate_topic(input_value)", "def timedout(self):\n\n return self.duration() > self.check.timeout" ]
[ "0.7494565", "0.68445444", "0.61413234", "0.6002265", "0.59668875", "0.5940467", "0.59011936", "0.58735675", "0.58508337", "0.58345395", "0.57948846", "0.57629395", "0.57347316", "0.57000494", "0.5662307", "0.5627969", "0.5604364", "0.5560788", "0.5560788", "0.55548257", "0.5529509", "0.5507486", "0.54953605", "0.5489886", "0.5479143", "0.5475843", "0.5441193", "0.54405874", "0.5413798", "0.5406279", "0.5397496", "0.53855056", "0.5379187", "0.5377147", "0.53675896", "0.53628254", "0.5360027", "0.53542554", "0.5347896", "0.53406644", "0.5323046", "0.5310023", "0.5308835", "0.53004134", "0.52939063", "0.5286121", "0.5279485", "0.5279057", "0.5264627", "0.52345973", "0.52317655", "0.5229731", "0.522636", "0.522636", "0.52237415", "0.5222926", "0.52200943", "0.521684", "0.5208606", "0.5207688", "0.5204385", "0.51796204", "0.5177687", "0.5172619", "0.5164928", "0.5164617", "0.5148407", "0.51458555", "0.5140909", "0.51375556", "0.5137476", "0.512973", "0.51260674", "0.51243", "0.51041114", "0.50999844", "0.50998867", "0.50985867", "0.50898147", "0.5088302", "0.5079382", "0.5077553", "0.50695086", "0.5067824", "0.5064791", "0.5062396", "0.5062334", "0.505734", "0.50517535", "0.50510657", "0.5050647", "0.50504375", "0.50502217", "0.50395846", "0.50363034", "0.5029488", "0.50230235", "0.50202346", "0.5013877", "0.5012142" ]
0.83997965
0
Test the functionality of the KafkaGroupIODataset when the consumer is configured with a valid stream_timeout value and thus waits for new messages from Kafka.
def test_kafka_group_io_dataset_stream_timeout_check():
    import tensorflow_io.kafka as kafka_io

    def write_messages_background():
        # Write new messages to the topic in a background thread
        time.sleep(6)
        for i in range(100, 200):
            message = f"D{i}"
            kafka_io.write_kafka(message=message, topic="key-partition-test")

    dataset = tfio.experimental.streaming.KafkaGroupIODataset(
        topics=["key-partition-test"],
        group_id="cgteststreamvalid",
        servers="localhost:9092",
        stream_timeout=20000,
        configuration=[
            "session.timeout.ms=7000",
            "max.poll.interval.ms=8000",
            "auto.offset.reset=earliest",
        ],
    )

    # start writing the new messages to kafka using the background job.
    # the job sleeps for some time (< stream_timeout) and then writes the
    # messages into the topic.
    thread = threading.Thread(target=write_messages_background, args=())
    thread.daemon = True
    thread.start()

    # At the end, after the timeout has occurred, we must have the old 100 messages
    # along with the new 100 messages
    assert np.all(
        sorted(k.numpy() for (k, _) in dataset)
        == sorted(("D" + str(i)).encode() for i in range(200))
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def test_describe_consumer_group_exists(kafka_admin_client, kafka_consumer_factory, topic):\n consumers = {}\n stop = {}\n threads = {}\n random_group_id = 'test-group-' + random_string(6)\n group_id_list = [random_group_id, random_group_id + '_2']\n generations = {group_id_list[0]: set(), group_id_list[1]: set()}\n def consumer_thread(i, group_id):\n assert i not in consumers\n assert i not in stop\n stop[i] = Event()\n consumers[i] = kafka_consumer_factory(group_id=group_id)\n while not stop[i].is_set():\n consumers[i].poll(20)\n consumers[i].close()\n consumers[i] = None\n stop[i] = None\n\n num_consumers = 3\n for i in range(num_consumers):\n group_id = group_id_list[i % 2]\n t = Thread(target=consumer_thread, args=(i, group_id,))\n t.start()\n threads[i] = t\n\n try:\n timeout = time() + 35\n while True:\n for c in range(num_consumers):\n\n # Verify all consumers have been created\n if c not in consumers:\n break\n\n # Verify all consumers have an assignment\n elif not consumers[c].assignment():\n break\n\n # If all consumers exist and have an assignment\n else:\n\n info('All consumers have assignment... checking for stable group')\n # Verify all consumers are in the same generation\n # then log state and break while loop\n\n for consumer in consumers.values():\n generations[consumer.config['group_id']].add(consumer._coordinator._generation.generation_id)\n\n is_same_generation = any([len(consumer_generation) == 1 for consumer_generation in generations.values()])\n\n # New generation assignment is not complete until\n # coordinator.rejoining = False\n rejoining = any([consumer._coordinator.rejoining\n for consumer in list(consumers.values())])\n\n if not rejoining and is_same_generation:\n break\n else:\n sleep(1)\n assert time() < timeout, \"timeout waiting for assignments\"\n\n info('Group stabilized; verifying assignment')\n output = kafka_admin_client.describe_consumer_groups(group_id_list)\n assert len(output) == 2\n consumer_groups = set()\n for consumer_group in output:\n assert(consumer_group.group in group_id_list)\n if consumer_group.group == group_id_list[0]:\n assert(len(consumer_group.members) == 2)\n else:\n assert(len(consumer_group.members) == 1)\n for member in consumer_group.members:\n assert(member.member_metadata.subscription[0] == topic)\n assert(member.member_assignment.assignment[0][0] == topic)\n consumer_groups.add(consumer_group.group)\n assert(sorted(list(consumer_groups)) == group_id_list)\n finally:\n info('Shutting down %s consumers', num_consumers)\n for c in range(num_consumers):\n info('Stopping consumer %s', c)\n stop[c].set()\n threads[c].join()\n threads[c] = None", "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n 
topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_primary_cg_no_lag():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n 
)\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def wait_for_data(self, duration):\n start_time = rospy.get_rostime()\n while not rospy.is_shutdown() and not (rospy.get_rostime() > (start_time + rospy.Duration(duration))):\n if len(self.graph.nodes) >= len(EXPECTED_NODES) and len(self.graph.topics) >= len(EXPECTED_TOPICS):\n return\n rospy.sleep(1.0)", "def test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n np.testing.assert_array_equal(res, self.pandas_frame)", "def verify_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n def print_wmark(consumer, parts):\n # Verify #294: get_watermark_offsets() should not fail on the first call\n # This is really a librdkafka issue.\n for p in parts:\n wmarks = consumer.get_watermark_offsets(parts[0])\n print('Watermarks for %s: %s' % (p, wmarks))\n\n # Subscribe to a list of topics\n c.subscribe([topic], on_assign=print_wmark)\n\n max_msgcnt = 100\n msgcnt = 0\n\n first_msg = None\n\n while True:\n # Consume until EOF or error\n\n # Consume message (error()==0) or event (error()!=0)\n msg = c.poll()\n if msg is None:\n raise Exception('Got timeout from poll() without a timeout set: %s' % msg)\n\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n print('Reached end of %s [%d] at offset %d' %\n (msg.topic(), msg.partition(), msg.offset()))\n break\n else:\n print('Consumer error: %s: ignoring' % msg.error())\n break\n\n tstype, timestamp = msg.timestamp()\n headers = msg.headers()\n if headers:\n example_header = headers\n\n msg.set_headers([('foo', 'bar')])\n assert msg.headers() == [('foo', 'bar')]\n\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s headers=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp, headers))\n\n if first_msg is None:\n first_msg = msg\n\n if (msgcnt == 11):\n parts = c.assignment()\n print('Pausing partitions briefly')\n c.pause(parts)\n exp_None = c.poll(timeout=2.0)\n assert exp_None is None, \"expected no messages during pause, got %s\" % exp_None\n print('Resuming partitions')\n c.resume(parts)\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n if msgcnt >= max_msgcnt:\n print('max_msgcnt %d reached' % msgcnt)\n break\n\n assert example_header, \"We should have received at least one header\"\n assert example_header == [(u'foo1', 'bar'), (u'foo1', 'bar2'), (u'foo2', '1')]\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = 
c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query offsets for timestamps by setting the topic partition offset to a timestamp. 123456789000 + 1\n topic_partions_to_search = list(map(lambda p: confluent_kafka.TopicPartition(topic, p, 123456789001), range(0, 3)))\n print(\"Searching for offsets with %s\" % topic_partions_to_search)\n\n offsets = c.offsets_for_times(topic_partions_to_search, timeout=1.0)\n print(\"offsets_for_times results: %s\" % offsets)\n\n verify_consumer_seek(c, first_msg)\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_timeout_setting(self):\n self.assertEqual(self.es.sse_kwargs.get('timeout'),\n config.socket_timeout)", "def test_consumer(self):\n try:\n consumer = Consumer()\n consumer.poll()\n except (Exception) as error:\n logging.error(\"\\n\\nConsumer's connection to\"\n \"kafka failed with error: {}\\n\\n\".format(error))\n assert(False)", "def test_kafka_group_io_dataset_primary_cg_new_topic():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = 
tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def wait_for_kafka_connection(delay=5):\n while True:\n try:\n kafka = KafkaProducer(bootstrap_servers=KAFKA_BROKERS)\n LOGGER.info('Connection to kafka cluster established')\n kafka.close()\n break\n except:\n LOGGER.error('Can not connect to kafka cluster')\n time.sleep(delay)", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)", "def test_polling_plugin_timeout(self):\n pass", "def test_set_timeout(init_process_group_mock):\n test_timedelta = timedelta(seconds=30)\n strategy = FSDPStrategy(timeout=test_timedelta, parallel_devices=[torch.device(\"cpu\")])\n strategy.cluster_environment = LightningEnvironment()\n strategy.accelerator = Mock()\n strategy.setup_environment()\n process_group_backend = strategy._get_process_group_backend()\n global_rank = strategy.cluster_environment.global_rank()\n world_size = strategy.cluster_environment.world_size()\n init_process_group_mock.assert_called_with(\n process_group_backend, rank=global_rank, world_size=world_size, timeout=test_timedelta\n )", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_stream(self):\n with skipping(NotImplementedError):\n self.es = EventStreamsTestClass(streams='recentchange')\n limit = 50\n 
self.es.set_maximum_items(limit)\n self.assertLength(list(self.es), limit)", "def test_producer_stop_during_request(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f), Deferred()]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. Have to retry due to first failure\n self.assertNoResult(d)\n clock.advance(producer._retry_interval)\n\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_clean_session(sdc_builder, sdc_executor, mqtt_broker, clean_session):\n data_topic = get_random_string(string.ascii_letters, 10)\n mqtt_broker.initialize(initial_topics=[data_topic])\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n mqtt_source = pipeline_builder.add_stage('MQTT Subscriber').set_attributes(data_format='TEXT',\n topic_filter=[data_topic],\n clean_session=clean_session,\n quality_of_service='AT_LEAST_ONCE')\n\n wiretap = pipeline_builder.add_wiretap()\n\n mqtt_source >> wiretap.destination\n\n pipeline = pipeline_builder.build().configure_for_environment(mqtt_broker)\n sdc_executor.add_pipeline(pipeline)\n try:\n sdc_executor.start_pipeline(pipeline)\n\n # can't figure out a cleaner way to do this; it takes a bit of time for the pipeline\n # to ACTUALLY start listening on the MQTT port, so if we don't sleep here, the\n # messages won't be delivered (without setting persist)\n time.sleep(1)\n expected_messages = []\n for i in range(10, 20):\n expected_message = f'Message {i}'\n mqtt_broker.publish_message(topic=data_topic, payload=expected_message)\n expected_messages.append(expected_message)\n\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 10)\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(wiretap.output_records) == len(expected_messages)\n messages = [record.field['text'] for record in wiretap.output_records]\n assert sorted(messages) == sorted(expected_messages)\n\n wiretap.reset()\n\n expected_messages_2 = []\n for i in range(20, 30):\n expected_message = f'Message {i}'\n mqtt_broker.publish_message(topic=data_topic, payload=expected_message)\n expected_messages_2.append(expected_message)\n\n sdc_executor.start_pipeline(pipeline)\n time.sleep(1)\n\n expected_messages_3 = []\n for i in range(30, 40):\n expected_message = f'Message {i}'\n mqtt_broker.publish_message(topic=data_topic, payload=expected_message)\n expected_messages_3.append(expected_message)\n\n if clean_session:\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 10)\n final_expected_messages = expected_messages_3\n else:\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 20)\n final_expected_messages = expected_messages_2 + expected_messages_3\n\n sdc_executor.stop_pipeline(pipeline)\n assert len(wiretap.output_records) == len(final_expected_messages)\n messages = [record.field['text'] for record in wiretap.output_records]\n assert sorted(messages) == sorted(final_expected_messages)\n\n finally:\n if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(pipeline)\n mqtt_broker.destroy()", "def test_consumer_error(self):\n client = self.mock_client([])\n processor = 
Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n self.assertNoResult(start_d)\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n mock_consumer.return_value.start.return_value = d = defer.Deferred()\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(mock_consumer.return_value.start.called, True)\n d.errback(Failure(AssertionError()))\n self.failureResultOf(start_d, AssertionError)\n d.addErrback(lambda result: None)", "def test_kafka_mini_dataset_size():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(200, 10000):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n BATCH_NUM_MESSAGES = 5000\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgminibatchsize\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n f\"batch.num.messages={BATCH_NUM_MESSAGES}\",\n ],\n )\n for mini_d in dataset:\n count = 0\n for _ in mini_d:\n count += 1\n assert count == BATCH_NUM_MESSAGES\n break", "def test_start_stop(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.stop()\n self.assertEqual(len(group.consumers), 0)", "def wait_for_data_in_ct_table(ct_table_name, no_of_records, database=None, timeout_sec=50):\n logger.info('Waiting for no_data_event to be updated in %s seconds ...', timeout_sec)\n start_waiting_time = time()\n stop_waiting_time = start_waiting_time + timeout_sec\n db_engine = database.engine\n\n while time() < stop_waiting_time:\n event_table = sqlalchemy.Table(ct_table_name, sqlalchemy.MetaData(), autoload=True,\n autoload_with=db_engine, schema='cdc')\n event_result = db_engine.execute(event_table.select())\n event_result_list = event_result.fetchall()\n event_result.close()\n\n if len(event_result_list) >= no_of_records:\n logger.info('%s of data is captured in CT Table %s', no_of_records, ct_table_name)\n return\n sleep(5)\n\n raise Exception('Timed out after %s seconds while waiting for captured data.', timeout_sec)", "def test_producer_stop_waiting_to_retry(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n f = Failure(BrokerNotAvailableError())\n ret = [fail(f)]\n client.send_produce_request.side_effect = ret\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n batch_n = 2\n\n producer = Producer(client, batch_every_n=batch_n, batch_send=True)\n d = producer.send_messages(self.topic, msgs=msgs)\n # At first, there's no result. 
Have to retry due to first failure\n self.assertNoResult(d)\n # Advance the clock, some, but not enough to retry\n clock.advance(producer._retry_interval / 2)\n # Stop the producer before the retry\n producer.stop()\n self.failureResultOf(d, tid_CancelledError)", "def test_datachannel_send_wait(testloop, testchannel):\n\n val = []\n\n @testchannel\n async def one(data):\n \"\"\"one\"\"\"\n if len(val) == 5:\n testchannel.stop()\n return\n val.append(data)\n\n async def run():\n \"\"\"run\"\"\"\n async for i in aiter(range(10)):\n asyncio.ensure_future(testchannel.send(i))\n await asyncio.sleep(0)\n await testchannel.join()\n\n with testchannel.open():\n testchannel.start()\n testloop.run_until_complete(run())\n\n assert val == list(range(5))", "def test_bound_size_of_output_queue_size_reader(synthetic_dataset):\n TIME_TO_GET_TO_STATIONARY_STATE = 0.5\n\n with make_reader(synthetic_dataset.url, reader_pool_type='process', workers_count=1) as reader:\n assert 0 == reader.diagnostics['items_produced']\n next(reader)\n # Verify that we did not consume all rowgroups (should be 10) and ventilator throttles number of ventilated\n # items\n sleep(TIME_TO_GET_TO_STATIONARY_STATE)\n assert reader.diagnostics['items_consumed'] < 5\n assert reader.diagnostics['items_inprocess'] < 5", "def test_create_consumer(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n except Exception as e:\n self.fail(f\"test_create_consumer() failed with exception: {e}\")\n\n try:\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer.start() in test_create_consumer() failed with exception: {e}\")\n\n # Sleep for a couple seconds to allow the thread to come up.\n time.sleep(2)\n self.assertEqual(3, threading.active_count()) # Main thread, consumer thread, consumer-group hear-beat daemon.\n\n test_consumer.stop()\n test_consumer.join()\n self.assertEqual(2, threading.active_count())", "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n assert(len(res) > 0)\n self.assert_equal_data_dict(res)", "def test_stop_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with 
patch(\"afkak._group.Consumer\"):\n group.on_join_complete({\"topic1\": [1]})\n consumer = group.consumers[\"topic1\"][0]\n consumer.stop.side_effect = KeyError()\n group.stop_consumers()", "def test_consumer_cancel_during_shutdown(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n start_d = group.start()\n with patch(\"afkak._group.Consumer\") as mock_consumer:\n consumer_instance = mock_consumer.return_value\n consumer_start_d = defer.Deferred()\n consumer_instance.start.return_value = consumer_start_d\n consumer_instance._start_d = consumer_start_d\n group.on_join_complete({\"topic1\": [1]})\n self.assertEqual(consumer_instance.start.called, True)\n\n def stop():\n consumer_start_d.errback(defer.CancelledError())\n\n consumer_instance.stop.side_effect = stop\n group.rejoin_after_error(Failure(RequestTimedOutError()))\n\n self.assertEqual(consumer_instance.stop.called, True)\n self.successResultOf(consumer_start_d)\n self.assertNoResult(start_d)", "def test_kafka_group_io_dataset_tertiary_cg_multiple_topics():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgtesttertiary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted([(\"D\" + str(i)).encode() for i in range(100)] * 2)\n )", "def test_streamWaitForEvents(self):\n resource = self.eventSourceResource()\n response = self.render(resource)\n\n # Read should block on new events.\n d = response.stream.read()\n self.assertFalse(d.called)\n\n d.addErrback(lambda f: None)\n d.cancel()", "def test_sql_server_cdc_no_more_data(sdc_builder, sdc_executor, database, no_of_threads):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(max_pool_size=no_of_threads,\n no_of_threads=no_of_threads)\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n tables = []\n no_of_records = 5\n rows_in_database = setup_sample_data(no_of_threads * no_of_records)\n\n for index in range(0, no_of_threads):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. 
for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, DEFAULT_SCHEMA_NAME, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{DEFAULT_SCHEMA_NAME}_{table_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, rows_in_database, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)\n\n logger.info('Dropping table %s in %s database...', dest_table, database.type)\n dest_table.drop(database.engine)", "def test_streaming(self, dummy_streamers, dummy_receivers):\n for dummy, device, source_id, subscriptions in dummy_streamers:\n receiver = dummy_receivers[source_id]\n # basic thread behaviour (start on `receiver.start()`)\n for thread in receiver._threads.values():\n assert not thread.is_alive()\n receiver.start()\n for thread in receiver._threads.values():\n assert thread.is_alive()\n\n # TODO: compare data (use pre-defined data)\n\n # NOTE: some threads may take a while to stop,\n # not sure how to assert this properly\n receiver.stop()\n #for thread in receiver._threads.values():\n # assert not thread.is_alive()", "def timeout(self):\n pf.debug(\"TIMEOUT\")\n self.acceptData(TIMEOUT)", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def assert_timeout(self) -> None:", "def test_recv(self):\n self.driver.sched_task(0.01, self.driver.send,\n args=[self.msg_short])\n msg_flag, msg_recv = self.instance.recv(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_short)", "def data_consumption_manager(\n start_time_ms: int,\n stop_time_ms: Optional[int],\n run_id: str,\n topics: List[str],\n kafka_broker: str,\n consumer_type: ConsumerType,\n stream_info: 
Optional[List[StreamInfo]],\n interval_s: float,\n event_buffer_size: int,\n slow_metadata_buffer_size: int,\n fast_metadata_buffer_size: int,\n chopper_buffer_size: int,\n worker_instruction_queue: mp.Queue,\n data_queue: mp.Queue,\n test_message_queue: Optional[mp.Queue],\n):\n buffer = StreamedDataBuffer(\n data_queue,\n event_buffer_size,\n slow_metadata_buffer_size,\n fast_metadata_buffer_size,\n chopper_buffer_size,\n interval_s,\n run_id,\n )\n\n if stream_info is not None:\n buffer.init_metadata_buffers(stream_info)\n\n consumers = create_consumers(\n start_time_ms,\n stop_time_ms,\n set(topics),\n kafka_broker,\n consumer_type,\n buffer.new_data,\n test_message_queue,\n )\n\n start_consumers(consumers)\n buffer.start()\n\n while not all_consumers_stopped(consumers):\n try:\n instruction = worker_instruction_queue.get(timeout=0.5)\n if instruction.type == InstructionType.STOP_NOW:\n stop_consumers(consumers)\n elif instruction.type == InstructionType.UPDATE_STOP_TIME:\n for consumer in consumers:\n consumer.update_stop_time(instruction.stop_time_ms)\n except QueueEmpty:\n pass\n except (ValueError, OSError):\n # Queue has been closed, stop worker\n stop_consumers(consumers)\n\n buffer.stop()", "def test_timeout(self):\n s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)\n poller = self.Poller()\n poller.register(s1, zmqpy.POLLIN)\n tic = time.time()\n evt = poller.poll(timeout=.005)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n tic = time.time()\n evt = poller.poll(timeout=5)\n toc = time.time()\n self.assertTrue(toc-tic < 0.1)\n self.assertTrue(toc-tic > .001)\n tic = time.time()\n evt = poller.poll(timeout=500)\n toc = time.time()\n self.assertTrue(toc-tic < 1)\n self.assertTrue(toc-tic > 0.1)", "def test_datachannel_send_wait_notopened(testloop, testchannel):\n\n val = []\n\n @testchannel\n async def one(data):\n \"\"\"one\"\"\"\n val.append(data)\n\n async def run():\n \"\"\"run\"\"\"\n async for i in aiter(range(10)):\n asyncio.ensure_future(testchannel.send(i))\n await asyncio.sleep(0)\n await testchannel.join()\n\n testloop.run_until_complete(run())\n\n assert not val", "def kafka_consumer_stats_timeout(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"kafka_consumer_stats_timeout\")", "def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n 
log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()", "def test_start_leave(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n group.on_join_prepare()\n group.on_join_complete({\"topic1\": [1, 2, 3]})\n self.assertEqual(len(group.consumers[\"topic1\"]), 3)\n group.on_group_leave()\n self.assertEqual(len(group.consumers), 0)", "def test__put_afk_timeout_into():\n for input_value, defaults, expected_output in (\n (AFK_TIMEOUT_DEFAULT, False, {'afk_timeout': AFK_TIMEOUT_DEFAULT}),\n (60, False, {'afk_timeout': 60}),\n ):\n data = put_afk_timeout_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def test_mark_topic_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n assert(len(res) > 0)\n nt.assert_equal(res, self.ply_dict)", "def _mp_consume(client, group, topic, queue, size, events, **consumer_options):\n\n # Initial interval for retries in seconds.\n interval = 1\n while not events.exit.is_set():\n try:\n # Make the child processes open separate socket connections\n client.reinit()\n\n # We will start consumers without auto-commit. Auto-commit will be\n # done by the master controller process.\n consumer = SimpleConsumer(client, group, topic,\n auto_commit=False,\n auto_commit_every_n=None,\n auto_commit_every_t=None,\n **consumer_options)\n\n # Ensure that the consumer provides the partition information\n consumer.provide_partition_info()\n\n while True:\n # Wait till the controller indicates us to start consumption\n events.start.wait()\n\n # If we are asked to quit, do so\n if events.exit.is_set():\n break\n\n # Consume messages and add them to the queue. If the controller\n # indicates a specific number of messages, follow that advice\n count = 0\n\n message = consumer.get_message()\n if message:\n while True:\n try:\n queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)\n break\n except queue.Full:\n if events.exit.is_set():\n break\n\n count += 1\n\n # We have reached the required size. The controller might have\n # more than what he needs. 
Wait for a while.\n # Without this logic, it is possible that we run into a big\n # loop consuming all available messages before the controller\n # can reset the 'start' event\n if count == size.value:\n events.pause.wait()\n\n else:\n # In case we did not receive any message, give up the CPU for\n # a while before we try again\n time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)\n\n consumer.stop()\n\n except KafkaError as e:\n # Retry with exponential backoff\n log.error(\n \"Problem communicating with Kafka (%s), retrying in %d seconds...\" % (e, interval))\n time.sleep(interval)\n interval = interval * 2 if interval * 2 < MAX_BACKOFF_SECONDS else MAX_BACKOFF_SECONDS", "def test_next_window_time_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n # Value 15 will be filtered as it ranges between lower and upper bound limits\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)\n # Let next window time elapse\n time.sleep(4)\n filtered_value = test_window_scheme.filter(self.more_than_upper_bound)\n # None is expected as filtered value because at least one sample has been already passed and\n # value ranges outside lower and upper bound limits\n self.assertEquals(filtered_value, None)", "def test_pool_timeout_hw(self):\n self.test_pool_timeout()", "def test_group_message_eviction(self):\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.receive([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)", "def test_producer_send_timer_failed(self):\n clock = MemoryReactorClock()\n client = Mock(reactor=clock)\n client.topic_partitions = {self.topic: [0, 1, 2, 3]}\n client.metadata_error_for_topic.return_value = False\n batch_t = 5\n\n # FIXME: Don't use patch to test logging\n with patch.object(aProducer, \"log\") as klog:\n producer = Producer(client, batch_send=True, batch_every_t=batch_t)\n msgs = [self.msg(\"one\"), self.msg(\"two\")]\n d = producer.send_messages(self.topic, msgs=msgs)\n # Check no request was yet sent\n self.assertFalse(client.send_produce_request.called)\n # Patch Producer's Deferred to throw an exception\n with patch.object(aProducer, \"Deferred\") as d:\n d.side_effect = ValueError(\"test_producer_send_timer_failed induced failure\")\n # Advance the clock\n clock.advance(batch_t)\n # Check the expected message was logged by the looping call restart\n klog.warning.assert_called_once_with(\n \"Batch timer failed: %s. 
Will restart.\",\n ANY,\n exc_info=ANY,\n )\n # Check that the looping call was restarted\n self.assertTrue(producer._sendLooper.running)\n\n producer.stop()", "def monitor(\n config: TextIO,\n period: float,\n timeout: float,\n brokers: str,\n security_protocol: str,\n ssl_cafile: Optional[str],\n ssl_certfile: Optional[str],\n ssl_keyfile: Optional[str],\n topic: str,\n):\n logging.basicConfig(level=logging.INFO)\n\n targets = [Target.from_tsv(row) for row in config.readlines()]\n brokers = brokers.split(\",\")\n\n logger.info(\"Connecting to brokers %s\", brokers)\n\n producer = kafka.KafkaProducer(\n bootstrap_servers=brokers,\n value_serializer=value_serializer,\n security_protocol=security_protocol,\n ssl_cafile=ssl_cafile,\n ssl_certfile=ssl_certfile,\n ssl_keyfile=ssl_keyfile,\n )\n\n with contextlib.closing(producer):\n with concurrent.futures.ThreadPoolExecutor() as executor:\n while RUNNING:\n logger.info(\"Starting monitoring round\")\n start = datetime.datetime.utcnow()\n\n run(targets, timeout, topic, executor, producer)\n\n next_start = start + datetime.timedelta(seconds=period)\n sleep = next_start - datetime.datetime.utcnow()\n\n if sleep < datetime.timedelta(seconds=0):\n logger.warning(\"Checks took longer than %s seconds to run\", period)\n else:\n time.sleep(sleep.total_seconds())", "def wait_for_topics(self, timeout):\n rospy.loginfo(\"waiting for simulation topics to be ready\")\n loop_freq = 1 # Hz\n rate = rospy.Rate(loop_freq)\n simulation_ready = False\n for i in xrange(timeout * loop_freq):\n if all(value for value in self.sub_topics_ready.values()):\n simulation_ready = True\n rospy.loginfo(\"simulation topics ready | seconds: {0} of {1}\".\n format(i / loop_freq, timeout))\n break\n\n rate.sleep()\n\n self.assertTrue(simulation_ready, (\n \"failed to hear from all subscribed simulation topics | topic ready flags: {0} | timeout(seconds): {1}\".\n format(self.sub_topics_ready, timeout)))", "def message_listener(self, topic, timeout):\n \"\"\"\n demo_message = [\n {'user_id': 'Lazy Man', 'timestamp': '2019-10-06T22:59:59.989Z', 'risk_level': 3}\n ]\n\n for message in demo_message:\n yield ERROR_CODE_ZERO, \"\", message\n \"\"\"\n\n while True:\n for error_code, error_message, message in self._consumer.subscribe(topic, timeout):\n yield error_code, error_message, message\n if error_code == 1:\n break", "def test_query_consumer_network(sdc_builder, sdc_executor, database):\n number_of_rows = 10_000\n table_name = get_random_string(string.ascii_lowercase, 20)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer')\n jdbc_query_consumer.set_attributes(incremental_mode=False, sql_query=f'SELECT * FROM {table_name}')\n\n delay = pipeline_builder.add_stage('Delay')\n # milliseconds to delay between batches, so as we get time to disconnect network\n delay.set_attributes(delay_between_batches=1000)\n\n trash = pipeline_builder.add_stage('Trash')\n\n finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n jdbc_query_consumer >> delay >> trash\n jdbc_query_consumer >= finisher\n\n pipeline = pipeline_builder.build('JDBC Query Origin').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name, metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String(40)))\n try:\n logger.info('Creating table %s in %s database ...', table_name, 
database.type)\n table.create(database.engine)\n\n logger.info('Adding %s rows into %s database ...', number_of_rows, database.type)\n connection = database.engine.connect()\n connection.execute(table.insert(), [{'id': i, 'name': str(uuid.uuid4())} for i in range(1, number_of_rows+1)])\n\n pipeline_cmd = sdc_executor.start_pipeline(pipeline)\n pipeline_cmd.wait_for_pipeline_output_records_count(int(number_of_rows/3))\n sdc_executor.container.network_disconnect()\n sleep(5) # sleep few seconds to have pipeline go into retry mode\n sdc_executor.container.network_reconnect()\n pipeline_cmd.wait_for_finished()\n\n history = sdc_executor.get_pipeline_history(pipeline)\n # -2 to take out two events generated from record count\n pipeline_record_count = (history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count - 2)\n assert pipeline_record_count == number_of_rows\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n table.drop(database.engine)", "def test_polling_loop(self, cursor):\n cursor._poll_interval = 0\n yield cursor.execute('SELECT COUNT(*) FROM many_rows')\n self.assertEqual((yield cursor.fetchone()), [10000])", "async def test_wait_for_activity_timeout(aiopg_connector):\n pg_app = app.App(connector=aiopg_connector)\n worker = worker_module.Worker(app=pg_app, timeout=2)\n worker.notify_event = asyncio.Event()\n task = asyncio.ensure_future(worker.single_worker(worker_id=0))\n try:\n await asyncio.sleep(0.2) # should be enough so that we're waiting\n\n worker.stop_requested = True\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(task, timeout=0.2)\n finally:\n worker.notify_event.set()", "def test_shutdown_error(self):\n client = self.mock_client([])\n processor = Mock()\n group = ConsumerGroup(client, \"group_id\", \"topic1\", processor)\n group.start()\n with patch(\"afkak._group.Consumer\", side_effect=[Mock(), Mock()]):\n group.on_join_complete({\"topic1\": [1, 2]})\n consumer = group.consumers[\"topic1\"][0]\n consumer._start_d = defer.Deferred()\n consumer.shutdown.side_effect = KeyError()\n consumer.stop.side_effect = KeyError()\n consumer2 = group.consumers[\"topic1\"][1]\n consumer2.shutdown.return_value = defer.Deferred()\n\n de = group.shutdown_consumers()\n self.assertNoResult(de)\n self.assertEqual(len(group.consumers), 0)\n\n consumer2.shutdown.return_value.errback(KeyError())\n consumer2.stop.assert_called_once_with()\n self.successResultOf(de)", "def test_connection_status_messages(docker_compose_no_command):\n data_topic = \"TEST_forwarderData_connection_status\"\n pvs = [PVENUM]\n\n prod = ProducerWrapper(\"localhost:9092\", CONFIG_TOPIC, data_topic)\n prod.add_config(pvs)\n # Wait for config to be pushed\n sleep(5)\n\n cons = create_consumer()\n\n # Update value\n change_pv_value(PVENUM, \"START\")\n # Wait for PV to be updated\n sleep(5)\n cons.subscribe([data_topic])\n\n first_msg = poll_for_connection_status_message(cons)\n check_expected_connection_status_values(first_msg, EventType.CONNECTED)\n\n cons.close()", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)", "def test_get_timeouts_with_maximum(self):\n\n self.set_options(timeouts=True, timeout_maximum=1)\n task = self.create_task(self.context())\n self.assertEquals(task._timeout_for_targets([targetC]), 1)", "def 
test_recv(self):\n Tout = self.instance.start_timeout()\n while ((not Tout.is_out)\n and (os.stat(self.tempfile).st_size == 0)): # pragma: debug\n self.instance.sleep()\n self.instance.stop_timeout()\n msg_flag, res = self.instance.recv(timeout=self.timeout)\n assert(msg_flag)\n assert(len(res) > 0)\n nt.assert_equal(res, self.obj_dict)", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def test_is_streaming(fprime_test_api):\n results = fprime_test_api.assert_telemetry_count(5, timeout=10)\n for result in results:\n msg = \"received channel {} update: {}\".format(result.get_id(), result.get_str())\n print(msg)\n fprime_test_api.assert_telemetry(\n \"sendBuffComp.SendState\", value=\"SEND_IDLE\", timeout=3\n )", "def test_wait_for_dispatched_metrics(self):\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertEqual(self.successResultOf(d), [])\n\n self._add_to_dispatched_metrics(worker_helper.broker, MetricMessage())\n msg = MetricMessage()\n msg.append('fake metric 1')\n msg.append('fake metric 2')\n self._add_to_dispatched_metrics(worker_helper.broker, msg)\n worker_helper.kick_delivery()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertNoResult(d)\n yield worker_helper.broker.wait_delivery()\n self.assertEqual(\n self.successResultOf(d), [[], ['fake metric 1', 'fake metric 2']])", "def publish_and_wait_mqtt(self, topic, data={}):\n result = [None, None]\n finish = Event()\n\n def on_response(payload, data):\n result[0] = payload\n result[1] = data\n\n def do_timeout():\n finish.set()\n\n self.publish_mqtt(topic, data, on_response=on_response)\n timer = Timer(self.timeout / 1000, do_timeout)\n timer.start()\n\n while (not result[0]) and (not finish.is_set()):\n self.wait()\n\n timer.cancel()\n\n if finish.is_set():\n raise TimeoutError('Reached timeout of %sms while waiting for response!' 
% self.timeout)\n\n return result", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def start_exited_consumers(kafka, p):\n for i in TOPICS[\"data\"]:\n kafka.initialize_consumer(topic=i[\"topic\"], config=i[\"config\"], partition=int(i[\"partition\"]))", "def consume_units_with_timeout(self, units, timeout_ms, always_consume):\n pass", "def publish_and_wait(self, node, topic, data={}):\n pass", "def test_listen_for_dweets_from_with_key(self):\n dweets_heard = 0\n for dweet in dweepy.listen_for_dweets_from(self.my_thing_id, timeout=5, key=test_key):\n dweets_heard += 1\n check_valid_dweet_response(self, dweet)\n self.assertGreater(dweets_heard, 0)", "def pytest_timeout_set_timer(item, settings):", "def TODO_testTimeout(self):\n return \"\"\"TODO: Highly dependent on hardcoded downstream timeout val\"\"\"\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n\n self.client_send('get time0\\r\\n', 0)\n self.mock_recv('get time0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # downstream timeout logic should kick in,\n # without our mock server having to send anything.\n\n self.wait(210)\n\n self.client_recv('END\\r\\n', 0)\n\n # TODO: The number of server sessions should be 0,\n # except the close might not have propagated.", "def batch(\n consumer: Consumer,\n batch_count: Optional[int] = None,\n batch_size: int = DEFAULT_BATCH_SIZE,\n batch_timeout: Optional[float] = DEFAULT_BATCH_TIMEOUT,\n poll_timeout: Optional[float] = DEFAULT_POLL_TIMEOUT,\n timeout: Optional[float] = None,\n) -> Iterator[List[Message]]:\n if batch_count is not None and batch_count <= 0:\n raise ValueError(\"batch_count must be a positive integer.\")\n if batch_size <= 0:\n raise ValueError(\"batch_size must be a positive integer.\")\n if batch_timeout is not None and batch_timeout <= 0:\n raise ValueError(\"batch_timeout must be a positive float.\")\n if poll_timeout is not None and poll_timeout <= 0:\n raise ValueError(\"poll_timeout must be a positive float.\")\n if timeout is not None and timeout <= 0:\n raise ValueError(\"timeout must be a positive float.\")\n\n if batch_count is None:\n LOGGER.debug(\"Streaming message batches....\")\n else:\n 
LOGGER.debug(\"Streaming up to %d message batches....\", batch_count)\n\n if timeout is not None:\n timeout_delta = timedelta(milliseconds=int(timeout * MILLIS_IN_SECOND))\n\n batch_duration = timedelta()\n num_batches = 0\n while batch_count is None or num_batches < batch_count:\n if musekafka.shutdown.is_shutting_down():\n break\n\n if timeout is not None:\n if batch_duration >= timeout_delta:\n LOGGER.debug(\"Hit batch timeout (%.3f seconds).\", timeout)\n break\n batch_timeout = min(\n batch_timeout or timeout, (timeout_delta - batch_duration).total_seconds()\n )\n\n batch_start = datetime.utcnow()\n batch = list(\n stream(consumer, count=batch_size, poll_timeout=poll_timeout, timeout=batch_timeout)\n )\n batch_duration += datetime.utcnow() - batch_start\n if not batch:\n # Empty batch does not count towards num_batches,\n # since we require batch_size to be > 0.\n continue\n LOGGER.debug(\"Got batch of %d messages.\", len(batch))\n num_batches += 1\n yield batch\n\n LOGGER.debug(\n \"Completed streaming %d batches, with each batch of size at most %d.\",\n num_batches,\n batch_size,\n )", "def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)", "def wait_for_samples(\n self, pcollection_ids: List[str]) -> beam_fn_api_pb2.SampleDataResponse:\n now = time.time()\n end = now + 30\n\n samples = beam_fn_api_pb2.SampleDataResponse()\n while now < end:\n time.sleep(0.1)\n now = time.time()\n samples.MergeFrom(self.samples(pcollection_ids))\n\n if not samples:\n continue\n\n has_all = all(\n pcoll_id in samples.element_samples for pcoll_id in pcollection_ids)\n if has_all:\n break\n\n return samples", "def test_wait_for_page_in_timeout(self):\n start_time = datetime.now()\n with self.assertRaises(SpdbError):\n csdb = CacheStateDB(self.config_data)\n ch = csdb.create_page_in_channel()\n\n csdb.wait_for_page_in([\"MY_TEST_KEY1\", \"MY_TEST_KEY2\"], ch, 1)\n\n assert (datetime.now() - start_time).seconds < 3", "def waitOverlappedDone(self, count, timeout):\n startTime = time.time()\n while True: #wait until measuring flag goes to 0\n try:\n measured = self.handle.ask(\":DATA:POIN?;\")\n measured = measured.strip() #remove CR \n measured = int(measured) #convert to number\n if measured == count: #final number of samples achieved\n break;\n except Exception:\n print('Dvm34411.waitOverlappedDone() polling failed !')\n raise\n \n if time.time() - startTime > timeout:\n print('Dvm34411.waitOverlappedDone() timeout !')\n return False\n \n samples = [] \n for i in range(0, count):\n try:\n reading = self.handle.ask('R? 
1;') #definite-Length block format\n except Exception: \n print('Dvm34411.Reading results failed !')\n raise;\n #DLB: '#' followed by number od decimal digits to follow\n #the decimal number is length of data in bytes\n if reading[0] != '#':\n print('Dvm34411.DLB format error - # expected !')\n return False\n digits = int(reading[1])\n reading = reading[2 + digits:]\n samples.append(reading.strip())\n \n return samples", "def assertIsPublishing(self, topic, topic_type, timeout=3.0):\n is_publishing = False\n err = None\n try:\n msg = rospy.wait_for_message(topic, topic_type, timeout=timeout)\n self.messages.append(msg)\n except Exception as err:\n pass\n else:\n is_publishing = True\n self.assertTrue(is_publishing,\n topic + \" is not publishing : \" + str(err))\n return is_publishing", "def _block_until_dataset_ready(self, dataset_id, maxwait=12000, interval=30, raise_on_timeout=True):\n assert maxwait > 0\n assert maxwait > interval\n assert interval > 0\n\n for time_left in xrange(maxwait, 0, -interval):\n if self._is_dataset_complete(dataset_id):\n return\n log.warn( \"Waiting for dataset %s to complete. Will wait another %is\" % (dataset_id, time_left))\n time.sleep(interval)\n if raise_on_timeout:\n #noinspection PyUnboundLocalVariable\n raise DatasetTimeoutException(\"Waited too long for dataset to complete: %s\" % dataset_id)", "def consume(self, timeout=None):\n pass", "def test_recv_nolimit(self):\n self.driver.send_nolimit(self.msg_long)\n msg_flag, msg_recv = self.instance.recv_nolimit(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_long)", "def test_len_testset(self):\n self.assertEqual(self.__dataset.get_test_len, 1000)", "def test_disconnect_from_client_side(\n start_result_q_publisher, start_result_q_subscriber\n):\n\n result_pub = start_result_q_publisher()\n f = result_pub.publish(b\"Hello test_disconnect_from_client_side 1\")\n assert f.result(timeout=1) is None\n\n result_pub.stop()\n\n try_assert(lambda: not result_pub.is_alive(), \"Verify test setup\")\n with pytest.raises(ValueError):\n result_pub.publish(b\"Hello test_disconnect_from_client_side 2\")", "def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3", "def test_multithreading(sdc_builder, sdc_executor, azure, max_threads):\n container_name = get_random_string(string.ascii_lowercase, 10)\n event_hub_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n\n azure_iot_event_hub_consumer = builder.add_stage(name=AZURE_IOT_EVENT_HUB_STAGE_NAME).set_attributes(\n container_name=container_name,\n data_format='JSON',\n event_hub_name=event_hub_name,\n max_threads=max_threads)\n\n wiretap = builder.add_wiretap()\n\n azure_iot_event_hub_consumer >> wiretap.destination\n\n consumer_origin_pipeline = builder.build().configure_for_environment(azure)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n create_blob_container(azure, container_name)\n\n try:\n eh_service_bus = azure.event_hubs.service_bus\n\n logger.info('Creating event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n assert eh_service_bus.create_event_hub(event_hub_name)\n\n send_records = [{'Body': f'Event {msg}'} for msg in range(10)]\n 
eh_service_bus.send_event(event_hub_name, json.dumps(send_records))\n\n sdc_executor.start_pipeline(consumer_origin_pipeline)\n sdc_executor.wait_for_pipeline_metric(consumer_origin_pipeline, 'input_record_count', 1, timeout_sec=120)\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n results = [{key: value for key, value in record.items()} for record in wiretap.output_records[0].field]\n assert len(results) == 10\n assert results == send_records\n finally:\n try:\n if sdc_executor.get_pipeline_status(consumer_origin_pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n except Exception as err:\n logger.error('Could not stop pipeline. Reason found: %s', err)\n\n try:\n logger.info('Deleting event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n event_hub_exists = True\n while event_hub_exists:\n eh_service_bus.delete_event_hub(event_hub_name)\n try:\n eh_service_bus.get_event_hub(event_hub_name)\n except Exception:\n event_hub_exists = False\n except Exception as err:\n logger.error('Failure deleting event hub %s. Reason found: %s', event_hub_name, err)\n\n try:\n logger.info('Deleting container %s on storage account %s', container_name, azure.storage.account_name)\n azure.storage.delete_blob_container(container_name)\n except Exception as err:\n logger.error('Failure deleting container %s. Reason found: %s', container_name, err)", "def test_forwarder_updates_multiple_pvs(docker_compose_no_command):\n data_topic = \"TEST_forwarderData_multiple\"\n\n pvs = [PVSTR, PVLONG]\n prod = ProducerWrapper(\"localhost:9092\", CONFIG_TOPIC, data_topic)\n prod.add_config(pvs)\n\n sleep(2)\n cons = create_consumer()\n sleep(2)\n cons.subscribe([data_topic])\n sleep(4)\n\n expected_values = {PVSTR: (Value.String, b\"\"), PVLONG: (Value.Int, 0)}\n\n first_msg, _ = poll_for_valid_message(cons)\n second_msg, _ = poll_for_valid_message(cons)\n messages = [first_msg, second_msg]\n\n check_multiple_expected_values(messages, expected_values)\n cons.close()", "def test_kinesis_consumer(sdc_builder, sdc_executor, aws):\n # build consumer pipeline\n application_name = get_random_string(string.ascii_letters, 10)\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n kinesis_consumer = builder.add_stage('Kinesis Consumer')\n kinesis_consumer.set_attributes(application_name=application_name, data_format='TEXT',\n initial_position='TRIM_HORIZON',\n stream_name=stream_name)\n\n trash = builder.add_stage('Trash')\n\n kinesis_consumer >> trash\n\n consumer_origin_pipeline = builder.build(title='Kinesis Consumer pipeline').configure_for_environment(aws)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n # run pipeline and capture snapshot\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n\n expected_messages = set('Message {0}'.format(i) for i in range(10))\n # not using PartitionKey logic and hence assign some temp key\n put_records = [{'Data': exp_msg, 'PartitionKey': '111'} for exp_msg in expected_messages]\n client.put_records(Records=put_records, StreamName=stream_name)\n\n # messages are published, read through the pipeline and assert\n snapshot = 
sdc_executor.capture_snapshot(consumer_origin_pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n output_records = [record.field['text'].value\n for record in snapshot[kinesis_consumer.instance_name].output]\n\n assert set(output_records) == expected_messages\n finally:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name) # Stream operations are done. Delete the stream.\n logger.info('Deleting %s DynamoDB table on AWS ...', application_name)\n aws.dynamodb.delete_table(TableName=application_name)", "def test_socket_timeout():\n schema = vol.Schema(cv.socket_timeout)\n\n with pytest.raises(vol.Invalid):\n schema(0.0)\n\n with pytest.raises(vol.Invalid):\n schema(-1)\n\n assert schema(None) == _GLOBAL_DEFAULT_TIMEOUT\n\n assert schema(1) == 1.0", "def test_get_timeouts_with_default(self):\n\n self.set_options(timeouts=True, timeout_default=2)\n task = self.create_task(self.context())\n\n self.assertEquals(task._timeout_for_targets([targetA, targetB]), 3)", "def test_set_blocking_timeout(self, _):\n\n project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key')\n\n # Assert that if invalid blocking_timeout is set, then exception is raised.\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Invalid blocking timeout \"invalid timeout\" provided.',\n ):\n project_config_manager.set_blocking_timeout('invalid timeout')\n\n # Assert that blocking_timeout cannot be set to less than allowed minimum and instead is set to default value.\n project_config_manager.set_blocking_timeout(-4)\n self.assertEqual(\n enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT, project_config_manager.blocking_timeout,\n )\n\n # Assert that blocking_timeout can be set to 0.\n project_config_manager.set_blocking_timeout(0)\n self.assertIs(0, project_config_manager.blocking_timeout)\n\n # Assert that if no blocking_timeout is provided, it is set to default value.\n project_config_manager.set_blocking_timeout(None)\n self.assertEqual(\n enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT, project_config_manager.blocking_timeout,\n )\n\n # Assert that if valid blocking_timeout is provided, it is set to that value.\n project_config_manager.set_blocking_timeout(5)\n self.assertEqual(5, project_config_manager.blocking_timeout)\n\n project_config_manager.stop()", "def test_meeting_live_stream_update(self):\n pass", "def test_send_subscriber_timeout(self):\n\n class TimeoutConnection(object):\n reliable_subscriber = False\n\n def send_frame(self, frame):\n raise socket.timeout(\"timed out\")\n\n def reset(self):\n pass\n\n dest = '/topic/dest'\n\n bad_client = TimeoutConnection()\n\n # Subscribe both a good client and a bad client.\n self.tm.subscribe(bad_client, dest)\n self.tm.subscribe(self.conn, dest)\n\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n # Make sure out good client got the message.\n self.assertEqual(len(self.conn.frames), 1)\n subscription = self.conn.frames[0].headers.pop(\"subscription\", None)\n self.assertEqual(subscription, 0)\n self.assertEqual(self.conn.frames[0], f)\n\n # Make sure our bad client got disconnected\n # (This might be a bit too intimate.)\n connections = {s.connection for s in self.tm._subscriptions.subscribers(dest)}\n self.assertNotIn(bad_client, connections)", "async def test_waiting_for_client_timeout(\n hass: HomeAssistant,\n) -> None:\n hass.state = CoreState.starting\n await 
hass.async_block_till_done()\n\n entry = MockConfigEntry(\n domain=mqtt.DOMAIN,\n data={\"broker\": \"test-broker\"},\n state=ConfigEntryState.NOT_LOADED,\n )\n entry.add_to_hass(hass)\n\n assert entry.state == ConfigEntryState.NOT_LOADED\n # returns False after timeout\n assert not await mqtt.async_wait_for_mqtt_client(hass)" ]
[ "0.790406", "0.6366534", "0.6356977", "0.61877793", "0.60831314", "0.60248893", "0.5973552", "0.59520674", "0.5936202", "0.58431596", "0.57945126", "0.5725716", "0.5712746", "0.570165", "0.5689419", "0.56843454", "0.5650357", "0.55747294", "0.557346", "0.5550135", "0.5498954", "0.54846984", "0.5454496", "0.54420197", "0.5433035", "0.54264444", "0.5424081", "0.5375692", "0.537094", "0.5366752", "0.5366471", "0.5360436", "0.5357552", "0.53410864", "0.53399104", "0.5338694", "0.53353757", "0.53351176", "0.5327556", "0.5327082", "0.5320775", "0.52949643", "0.52861357", "0.52852404", "0.5282525", "0.527713", "0.5265084", "0.525998", "0.52360475", "0.523419", "0.5218048", "0.52008396", "0.5198529", "0.51971906", "0.5188691", "0.5179516", "0.51613855", "0.5147176", "0.51457083", "0.5127498", "0.5127285", "0.5090874", "0.5085954", "0.50805444", "0.50709504", "0.5058746", "0.5056721", "0.5043262", "0.502438", "0.5021634", "0.5017718", "0.50155705", "0.5013408", "0.50028455", "0.4992593", "0.49842727", "0.4977706", "0.49763322", "0.4974857", "0.49727142", "0.49722642", "0.49625316", "0.4959946", "0.4954951", "0.49432817", "0.4933562", "0.49330905", "0.493212", "0.49311936", "0.49233446", "0.49211916", "0.49185413", "0.49168023", "0.49167606", "0.48995778", "0.48944354", "0.48930687", "0.48923886", "0.48869684", "0.48846653" ]
0.8004889
0
Test the functionality of batch.num.messages property of KafkaBatchIODataset/KafkaGroupIODataset.
def test_kafka_mini_dataset_size():
    import tensorflow_io.kafka as kafka_io

    # Write new messages to the topic
    for i in range(200, 10000):
        message = f"D{i}"
        kafka_io.write_kafka(message=message, topic="key-partition-test")

    BATCH_NUM_MESSAGES = 5000
    dataset = tfio.experimental.streaming.KafkaBatchIODataset(
        topics=["key-partition-test"],
        group_id="cgminibatchsize",
        servers=None,
        stream_timeout=5000,
        configuration=[
            "session.timeout.ms=7000",
            "max.poll.interval.ms=8000",
            "auto.offset.reset=earliest",
            f"batch.num.messages={BATCH_NUM_MESSAGES}",
        ],
    )
    for mini_d in dataset:
        count = 0
        for _ in mini_d:
            count += 1
        assert count == BATCH_NUM_MESSAGES
        break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kafka_batch_io_dataset():\n\n dataset = tfio.experimental.streaming.KafkaBatchIODataset(\n topics=[\"mini-batch-test\"],\n group_id=\"cgminibatchtrain\",\n servers=None,\n stream_timeout=5000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n NUM_COLUMNS = 1\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Input(shape=(NUM_COLUMNS,)),\n tf.keras.layers.Dense(4, activation=\"relu\"),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\"accuracy\"],\n )\n assert issubclass(type(dataset), tf.data.Dataset)\n for mini_d in dataset:\n mini_d = mini_d.map(\n lambda m, k: (\n tf.strings.to_number(m, out_type=tf.float32),\n tf.strings.to_number(k, out_type=tf.float32),\n )\n ).batch(2)\n assert issubclass(type(mini_d), tf.data.Dataset)\n # Fits the model as long as the data keeps on streaming\n model.fit(mini_d, epochs=5)", "def batch_size_test(self, batch: ReferenceBatchRequest, expected_size: int):\n\n # test __len__\n self.assertEqual(len(batch), expected_size)\n\n # test _from_object_class_names\n self.assertEqual(len(batch._from_object_class_names), expected_size)\n\n # test _from_object_ids\n self.assertEqual(len(batch._from_object_ids), expected_size)\n\n # test _from_object_properties\n self.assertEqual(len(batch._from_object_properties), expected_size)\n\n # test _to_object_ids\n self.assertEqual(len(batch._to_object_ids), expected_size)", "def test_test_data_length(self):\n total_count = 0\n for batch in self._dataset.get_test():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_test_len())", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.MessageDataset())) == 138737", "def test_batch_size_pack_size():", "def test_kafka_group_io_dataset_resume_primary_cg():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def test_sent_count(self):\n self.assertEqual(1, self.alice_storage.sent_count)\n self.assertEqual(1, self.bob_storage.sent_count)\n self.assertEqual(2, self.carol_storage.sent_count)\n self.assertEqual(0, self.anonymous_storage.sent_count)", "def test_batch_accepting():\n client = create_client()\n message = types.PubsubMessage(data=b'foo')\n\n # At first, there are no batches, so this should return a new batch\n # which is also saved to the object.\n ante = len(client._batches)\n batch = client.batch('topic_name', message, autocommit=False)\n assert len(client._batches) == ante + 1\n assert batch is client._batches['topic_name']\n\n # A subsequent request should return the same batch.\n batch2 = client.batch('topic_name', message, autocommit=False)\n assert batch is batch2\n assert batch2 is client._batches['topic_name']", "def test_num_training_batches(tmpdir):\n # when we have fewer batches in the dataloader we should use those instead of 
the limit\n model = EvalModelTemplate()\n trainer = Trainer(limit_val_batches=100, limit_train_batches=100, max_epochs=1)\n trainer.fit(model)\n\n assert len(model.train_dataloader()) == 10\n assert len(model.val_dataloader()) == 10\n assert isinstance(trainer.num_val_batches, list)\n assert trainer.num_val_batches[0] == 10\n assert trainer.num_training_batches == 10\n\n # when we have more batches in the dataloader we should limit them\n model = EvalModelTemplate()\n trainer = Trainer(limit_val_batches=7, limit_train_batches=7, max_epochs=1)\n trainer.fit(model)\n\n assert len(model.train_dataloader()) == 10\n assert len(model.val_dataloader()) == 10\n assert isinstance(trainer.num_val_batches, list)\n assert trainer.num_val_batches[0] == 7\n assert trainer.num_training_batches == 7", "def test_train_data_length(self):\n total_count = 0\n for batch in self._dataset.get_train():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_train_len())", "def test_kafka_group_io_dataset_stream_timeout_check():\n import tensorflow_io.kafka as kafka_io\n\n def write_messages_background():\n # Write new messages to the topic in a background thread\n time.sleep(6)\n for i in range(100, 200):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-partition-test\")\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgteststreamvalid\",\n servers=\"localhost:9092\",\n stream_timeout=20000,\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n\n # start writing the new messages to kafka using the background job.\n # the job sleeps for some time (< stream_timeout) and then writes the\n # messages into the topic.\n thread = threading.Thread(target=write_messages_background, args=())\n thread.daemon = True\n thread.start()\n\n # At the end, after the timeout has occurred, we must have the old 100 messages\n # along with the new 100 messages\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(200))\n )", "def test_extract_batch_size():\n\n def _check_warning_not_raised(data, expected):\n with no_warning_call(match=\"Trying to infer the `batch_size`\"):\n assert extract_batch_size(data) == expected\n\n def _check_warning_raised(data, expected):\n with pytest.warns(UserWarning, match=f\"Trying to infer the `batch_size` .* we found is {expected}.\"):\n assert extract_batch_size(batch) == expected\n warning_cache.clear()\n\n def _check_error_raised(data):\n with pytest.raises(MisconfigurationException, match=\"We could not infer the batch_size\"):\n extract_batch_size(batch)\n\n # Warning not raised\n batch = torch.zeros(11, 10, 9, 8)\n _check_warning_not_raised(batch, 11)\n\n batch = {\"test\": torch.zeros(11, 10)}\n _check_warning_not_raised(batch, 11)\n\n batch = [torch.zeros(11, 10)]\n _check_warning_not_raised(batch, 11)\n\n batch = {\"test\": [{\"test\": [torch.zeros(11, 10)]}]}\n _check_warning_not_raised(batch, 11)\n\n # Warning raised\n batch = {\"a\": [torch.tensor(1), torch.tensor(2)], \"b\": torch.tensor([1, 2, 3, 4])}\n _check_warning_raised(batch, 1)\n\n batch = {\"test\": [{\"test\": [torch.zeros(11, 10), torch.zeros(10, 10)]}]}\n _check_warning_raised(batch, 11)\n\n batch = {\"test\": [{\"test\": [torch.zeros(10, 10), torch.zeros(11, 10)]}]}\n _check_warning_raised(batch, 10)\n\n batch = [{\"test\": torch.zeros(10, 10), \"test_1\": torch.zeros(11, 10)}]\n 
_check_warning_raised(batch, 10)\n\n # Error raised\n batch = \"test string\"\n _check_error_raised(batch)\n\n data = {\"test\": [\"some text\"] * 7}\n _check_error_raised(data)\n\n class CustomBatch:\n def __init__(self):\n self.x = torch.randn(7, 2)\n\n data = CustomBatch()\n _check_error_raised(data)", "def batch_size(self) -> int:\n ...", "def test_kafka_group_io_dataset_resume_primary_cg_new_topic():\n import tensorflow_io.kafka as kafka_io\n\n # Write new messages to the topic\n for i in range(10, 100):\n message = f\"D{i}\"\n kafka_io.write_kafka(message=message, topic=\"key-test\")\n # Read only the newly sent 90 messages\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10, 100))\n )", "def verify_batch_consumer():\n\n # Consumer config\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': 'test.py',\n 'session.timeout.ms': 6000,\n 'enable.auto.commit': False,\n 'api.version.request': api_version_request,\n 'on_commit': print_commit_result,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n # Create consumer\n c = confluent_kafka.Consumer(**conf)\n\n # Subscribe to a list of topics\n c.subscribe([topic])\n\n max_msgcnt = 1000\n batch_cnt = 100\n msgcnt = 0\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n # Consume messages (error()==0) or event (error()!=0)\n msglist = c.consume(batch_cnt, 10.0)\n assert len(msglist) == batch_cnt, 'expected %d messages, not %d' % (batch_cnt, len(msglist))\n\n for msg in msglist:\n if msg.error():\n print('Consumer error: %s: ignoring' % msg.error())\n continue\n\n tstype, timestamp = msg.timestamp()\n print('%s[%d]@%d: key=%s, value=%s, tstype=%d, timestamp=%s' %\n (msg.topic(), msg.partition(), msg.offset(),\n msg.key(), msg.value(), tstype, timestamp))\n\n if (msg.offset() % 5) == 0:\n # Async commit\n c.commit(msg, asynchronous=True)\n elif (msg.offset() % 4) == 0:\n offsets = c.commit(msg, asynchronous=False)\n assert len(offsets) == 1, 'expected 1 offset, not %s' % (offsets)\n assert offsets[0].offset == msg.offset()+1, \\\n 'expected offset %d to be committed, not %s' % \\\n (msg.offset(), offsets)\n print('Sync committed offset: %s' % offsets)\n\n msgcnt += 1\n\n print('max_msgcnt %d reached' % msgcnt)\n\n # Get current assignment\n assignment = c.assignment()\n\n # Get cached watermark offsets\n # Since we're not making use of statistics the low offset is not known so ignore it.\n lo, hi = c.get_watermark_offsets(assignment[0], cached=True)\n print('Cached offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Query broker for offsets\n lo, hi = c.get_watermark_offsets(assignment[0], timeout=1.0)\n print('Queried offsets for %s: %d - %d' % (assignment[0], lo, hi))\n\n # Close consumer\n c.close()\n\n # Start a new client and get the committed offsets\n c = confluent_kafka.Consumer(**conf)\n offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3))))\n for tp in offsets:\n print(tp)\n\n c.close()", "def test_all_count(self):\n self.assertEqual(2, self.alice_inbox.all_count)\n self.assertEqual(3, self.bob_inbox.all_count)\n self.assertEqual(0, self.carol_inbox.all_count)", "def test_batch(self):\n pass", "def 
test_max_number_of_records(self):\n self._config['Number of examples'] = '2'\n result = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertLen(result, 2)", "def test_read_count(self):\n self.assertEqual(1, self.alice_inbox.read_count)\n self.assertEqual(1, self.bob_inbox.read_count)\n self.assertEqual(0, self.carol_inbox.read_count)", "def test_unread_count(self):\n self.assertEqual(1, self.alice_inbox.unread_count)\n self.assertEqual(2, self.bob_inbox.unread_count)\n self.assertEqual(0, self.carol_inbox.unread_count)", "def test_len_testset(self):\n self.assertEqual(self.__dataset.get_test_len, 1000)", "def __len__(self):\n return len(self.batches)", "def count_records(batches: List[Batch]) -> int:\n return sum(b.current_size for b in batches)", "def test_reading_counter_without_batching(self):\n self._test_reading_counter_template(copy_options={'MAXBATCHSIZE': '1'})", "def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )", "def message_count(self):\n pass", "def test_updates_count(self):\n user = self.create_user()\n thread = self.create_thread(sender=user)\n\n original_count = thread.message_set.count()\n\n for _ in range(0, 5):\n msg = mommy.make(Message, thread=thread, sender=user)\n\n send_message(msg.pk)\n\n # Because django caches querysets, we need to request the thread again\n refreshed_thread = Thread.objects.get(pk=msg.thread.pk)\n\n self.assertEqual(refreshed_thread.total_messages, original_count + 5)", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def test_message_counts_correct(self):\n\n value_less_than_375 = 100\n value_equal_to_375 = 375\n value_greater_than_375 = 400\n num_models_per_category = 2\n\n model_class = config_models.ConfigPropertySnapshotMetadataModel\n for i in python_utils.RANGE(num_models_per_category):\n model_class(\n id='model_id-%d-%d' % (i, value_less_than_375),\n committer_id='committer_id',\n commit_type='create',\n commit_message='a' * value_less_than_375).put()\n model_class(\n id='model_id-%d-%d' % (i, value_equal_to_375),\n committer_id='committer_id',\n commit_type='create',\n commit_message='a' * value_equal_to_375).put()\n model_class(\n id='model_id-%d-%d' % (i, value_greater_than_375),\n committer_id='committer_id',\n commit_type='create',\n commit_message='a' * value_greater_than_375).put()\n self.maxDiff = None\n one_off_results = self._run_one_off_job()\n expected_results = [\n ['GREATER_THAN_375', [\n 'ConfigPropertySnapshotMetadataModel with id model_id-0-400.' +\n ' Message: %s' % ('a' * value_greater_than_375),\n 'ConfigPropertySnapshotMetadataModel with id model_id-1-400.' 
+\n ' Message: %s' % ('a' * value_greater_than_375),\n ]],\n ['LESS_OR_EQUAL_TO_375', 2 * num_models_per_category + 1]]\n\n # Ensure results have same length.\n self.assertEqual(len(one_off_results), len(expected_results))\n\n # Create results dictionaries.\n one_off_results_dict = dict()\n expected_results_dict = dict()\n for i, _ in enumerate(one_off_results):\n one_off_results_dict[one_off_results[i][0]] = one_off_results[i][1]\n expected_results_dict[\n expected_results[i][0]] = expected_results[i][1]\n\n one_off_results_dict[\n 'GREATER_THAN_375'\n ] = sorted(one_off_results_dict['GREATER_THAN_375'])\n expected_results_dict[\n 'GREATER_THAN_375'\n ] = sorted(expected_results_dict['GREATER_THAN_375'])\n self.assertDictEqual(one_off_results_dict, expected_results_dict)", "def test_unread_count(self):\n self.assertEqual(1, self.alice_storage.unread_count)\n self.assertEqual(2, self.bob_storage.unread_count)\n self.assertEqual(0, self.carol_storage.unread_count)\n self.assertEqual(0, self.anonymous_storage.unread_count)", "def __len__(self):\n return self.limit_batches", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.ConversationDataset())) == 7168", "def get_num_batches(self,batch_size):\r\n \r\n return len(self) // batch_size", "def test_users_fetch(self, users_batch_groups, users_batch, user):\n notification = Notification.objects.create(created_by=user, text='a')\n notification.groups.add(*users_batch_groups)\n users = notification.fetch_target_users()\n\n assert users.count() == len(users_batch)", "async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def verify_batch_consumer_performance():\n\n conf = {'bootstrap.servers': bootstrap_servers,\n 'group.id': uuid.uuid1(),\n 'session.timeout.ms': 6000,\n 'error_cb': error_cb,\n 'default.topic.config': {\n 'auto.offset.reset': 'earliest'\n }}\n\n c = confluent_kafka.Consumer(**conf)\n\n def my_on_assign(consumer, partitions):\n print('on_assign:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.assign(partitions)\n\n def my_on_revoke(consumer, partitions):\n print('on_revoke:', len(partitions), 'partitions:')\n for p in partitions:\n print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))\n consumer.unassign()\n\n c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)\n\n max_msgcnt = 1000000\n bytecnt = 0\n msgcnt = 0\n batch_size = 1000\n\n print('Will now consume %d messages' % max_msgcnt)\n\n if with_progress:\n bar = Bar('Consuming', max=max_msgcnt,\n suffix='%(index)d/%(max)d [%(eta_td)s]')\n else:\n bar = None\n\n while msgcnt < max_msgcnt:\n # Consume until we hit max_msgcnt\n\n msglist = c.consume(num_messages=batch_size, timeout=20.0)\n\n for msg in msglist:\n if msg.error():\n if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:\n # Reached EOF for a partition, ignore.\n continue\n else:\n raise confluent_kafka.KafkaException(msg.error())\n\n bytecnt += len(msg)\n msgcnt += 1\n\n if bar is not None and (msgcnt % 10000) == 0:\n bar.next(n=10000)\n\n if msgcnt == 1:\n t_first_msg = time.time()\n\n if bar is not None:\n bar.finish()\n\n if msgcnt > 0:\n t_spent = time.time() - t_first_msg\n print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %\n (msgcnt, bytecnt / 
(1024*1024), t_spent, msgcnt / t_spent,\n (bytecnt / t_spent) / (1024*1024)))\n\n print('closing consumer')\n c.close()", "def test_batch_idx(self) -> int:\n return self._test_batch_idx", "def test_pipeline3(self, batch_size):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy1, config=config)\n dummy2 = DummyPackProcessor()\n nlp.add(component=dummy2)\n dummy3 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 2 * batch_size}}\n nlp.add(component=dummy3, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH][PACK][BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def get_num_batches(self, instances: Iterable[Instance]) -> int:\n n_docs = len(set([instance[\"metadata\"][\"doc_key\"] for instance in instances]))\n return n_docs", "def getMessageCount(self):\n return 9", "def test_context_data_info_message_for_multiple_result(self):\n factories.SourceDatasetFactory.create_batch(2, i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '2 results found.')", "def test_bob_sent(self):\n messages = list(self.bob_storage.sent)\n self.assertEqual(1, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)", "def _check_batch_size(self, data_list):\n if self.api_info is None:\n self.get_info() # sets the image size and other such info from server.\n MAX_BATCH_SIZE = self.api_info['max_batch_size']\n if len(data_list) > MAX_BATCH_SIZE:\n raise ApiError((\"Number of images provided in bach %d is greater than maximum allowed per \"\n \"request %d\") % (len(data_list), MAX_BATCH_SIZE))", "def test_dataset_nonevenly_divisible_batch_size(self):\n with self.assertRaisesRegex(\n ValueError, 'Test data not evenly divisible by batch size: .*'):\n self._dataset = cifar10.CIFAR10Dataset(\n self._batch_size, batch_size_test=101)", "def test_get_unread_sms_count(self):\n pass", "def test_channel_messages_unlimited_pagination():\n clear()\n userOne = auth_register('[email protected]', '123abc!@#', 'First', 'User') \n randChannel = channels_create(userOne['token'], 'randChannel', True)\n for _ in range(149):\n message_send(userOne['token'], randChannel['channel_id'], 'Hello')\n messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)\n assert(messages['start'] == 0)\n assert(messages['end'] == 50) \n messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)\n assert(messages2['start'] == 50)\n assert(messages2['end'] == 100) \n messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)\n assert(messages3['start'] == 100)\n assert(messages3['end'] == -1) \n assert(len(messages3['messages']) == 49)\n # an error should be raised when start is beyond 149 messages\n with pytest.raises(InputError): \n channel_messages(userOne['token'], randChannel['channel_id'], 150)", "def test_pipeline3(self, batch_size):\n nlp = Pipeline[MultiPack]()\n reader = 
MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy1, config=config,\n selector=FirstPackSelector())\n dummy2 = DummyPackProcessor()\n nlp.add(component=dummy2, selector=FirstPackSelector())\n dummy3 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 2 * batch_size}}\n nlp.add(component=dummy3, config=config,\n selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH][PACK][BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "def test_batch_size(self):\n\n class A(Document):\n s = StringField()\n\n A.drop_collection()\n\n for i in range(100):\n A.objects.create(s=str(i))\n\n # test iterating over the result set\n cnt = 0\n for _ in A.objects.batch_size(10):\n cnt += 1\n assert cnt == 100\n\n # test chaining\n qs = A.objects.all()\n qs = qs.limit(10).batch_size(20).skip(91)\n cnt = 0\n for _ in qs:\n cnt += 1\n assert cnt == 9\n\n # test invalid batch size\n qs = A.objects.batch_size(-1)\n with pytest.raises(ValueError):\n list(qs)", "def test_shape_and_num_data(self):\n\n batch = Batch(Mock())\n\n self.assertEqual(batch.num_objects(), 0)\n self.assertEqual(batch.num_references(), 0)\n self.assertEqual(batch.shape, (0, 0))\n\n #######################################################################\n # add one object\n batch.add_data_object({}, 'Test')\n\n self.assertEqual(batch.num_objects(), 1)\n self.assertEqual(batch.num_references(), 0)\n self.assertEqual(batch.shape, (1, 0))\n\n #######################################################################\n # add one object\n batch.add_data_object({}, 'Test')\n\n self.assertEqual(batch.num_objects(), 2)\n self.assertEqual(batch.num_references(), 0)\n self.assertEqual(batch.shape, (2, 0))\n\n #######################################################################\n # add one reference\n batch.add_reference(\n 'f0153f24-3923-4046-919b-6a3e8fd37394',\n 'Test',\n 'test',\n 'f0153f24-3923-4046-919b-6a3e8fd37395'\n )\n\n self.assertEqual(batch.num_objects(), 2)\n self.assertEqual(batch.num_references(), 1)\n self.assertEqual(batch.shape, (2, 1))\n\n #######################################################################\n # add one reference\n batch.add_reference(\n 'f0153f24-3923-4046-919b-6a3e8fd37396',\n 'Test',\n 'test',\n 'f0153f24-3923-4046-919b-6a3e8fd37397'\n )\n\n self.assertEqual(batch.num_objects(), 2)\n self.assertEqual(batch.num_references(), 2)\n self.assertEqual(batch.shape, (2, 2))", "def batch(\n consumer: Consumer,\n batch_count: Optional[int] = None,\n batch_size: int = DEFAULT_BATCH_SIZE,\n batch_timeout: Optional[float] = DEFAULT_BATCH_TIMEOUT,\n poll_timeout: Optional[float] = DEFAULT_POLL_TIMEOUT,\n timeout: Optional[float] = None,\n) -> 
Iterator[List[Message]]:\n if batch_count is not None and batch_count <= 0:\n raise ValueError(\"batch_count must be a positive integer.\")\n if batch_size <= 0:\n raise ValueError(\"batch_size must be a positive integer.\")\n if batch_timeout is not None and batch_timeout <= 0:\n raise ValueError(\"batch_timeout must be a positive float.\")\n if poll_timeout is not None and poll_timeout <= 0:\n raise ValueError(\"poll_timeout must be a positive float.\")\n if timeout is not None and timeout <= 0:\n raise ValueError(\"timeout must be a positive float.\")\n\n if batch_count is None:\n LOGGER.debug(\"Streaming message batches....\")\n else:\n LOGGER.debug(\"Streaming up to %d message batches....\", batch_count)\n\n if timeout is not None:\n timeout_delta = timedelta(milliseconds=int(timeout * MILLIS_IN_SECOND))\n\n batch_duration = timedelta()\n num_batches = 0\n while batch_count is None or num_batches < batch_count:\n if musekafka.shutdown.is_shutting_down():\n break\n\n if timeout is not None:\n if batch_duration >= timeout_delta:\n LOGGER.debug(\"Hit batch timeout (%.3f seconds).\", timeout)\n break\n batch_timeout = min(\n batch_timeout or timeout, (timeout_delta - batch_duration).total_seconds()\n )\n\n batch_start = datetime.utcnow()\n batch = list(\n stream(consumer, count=batch_size, poll_timeout=poll_timeout, timeout=batch_timeout)\n )\n batch_duration += datetime.utcnow() - batch_start\n if not batch:\n # Empty batch does not count towards num_batches,\n # since we require batch_size to be > 0.\n continue\n LOGGER.debug(\"Got batch of %d messages.\", len(batch))\n num_batches += 1\n yield batch\n\n LOGGER.debug(\n \"Completed streaming %d batches, with each batch of size at most %d.\",\n num_batches,\n batch_size,\n )", "def test_pipeline4(self, batch_size):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummyPackProcessor()\n nlp.add(component=dummy1)\n\n dummy2 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy2, config=config)\n\n dummy3 = DummyPackProcessor()\n nlp.add(component=dummy3)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK][BATCH][PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_new_count(self):\n self.assertEqual(2, self.alice_inbox.new_count)\n self.assertEqual(3, self.bob_inbox.new_count)\n self.assertEqual(0, self.carol_inbox.new_count)", "def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def testQueueMultiMsg(self):\n for i in range(10):\n self.mgr.queueMsg(i)\n\n self.assertEqual( self.mgr.msgQueue.qsize(), 9)", "def test_batches_are_accessible(\n monkeypatch,\n multibatch_generic_csv_generator,\n 
multibatch_generic_csv_generator_context,\n):\n\n context: DataContext = multibatch_generic_csv_generator_context\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n\n datasource = context.datasources[datasource_name]\n\n data_connector = datasource.data_connectors[data_connector_name]\n\n total_batches: int = 20\n file_list = multibatch_generic_csv_generator(\n data_path=data_path, num_event_batches=total_batches\n )\n\n assert (\n data_connector._get_data_reference_list_from_cache_by_data_asset_name(\n data_asset_name=asset_name\n )\n == file_list\n )\n\n batch_request_1 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -1,\n },\n )\n # Should give most recent batch\n validator_1 = context.get_validator(\n batch_request=batch_request_1,\n create_expectation_suite_with_name=\"my_expectation_suite_name_1\",\n )\n metric_max = validator_1.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches\n metric_value_set = validator_1.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n batch_request_2 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -2,\n },\n )\n validator_2 = context.get_validator(\n batch_request=batch_request_2,\n create_expectation_suite_with_name=\"my_expectation_suite_name_2\",\n )\n metric_max = validator_2.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches - 1\n metric_value_set = validator_2.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n for batch_num in range(1, total_batches + 1):\n batch_request = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -batch_num,\n },\n )\n validator = context.get_validator(\n batch_request=batch_request,\n create_expectation_suite_with_name=f\"my_expectation_suite_name__{batch_num}\",\n )\n metric_max = validator.get_metric(\n MetricConfiguration(\n \"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"}\n )\n )\n assert metric_max == (total_batches + 1) - batch_num\n metric_value_set = validator.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}", "def test_pipeline4(self, batch_size):\n\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy1 = DummyPackProcessor()\n nlp.add(component=dummy1, selector=FirstPackSelector())\n\n dummy2 = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": batch_size}}\n nlp.add(component=dummy2, 
config=config,\n selector=FirstPackSelector())\n\n dummy3 = DummyPackProcessor()\n nlp.add(component=dummy3,\n selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK][BATCH][PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def record_batch_size(self):\n return 10000", "def on_message_batch(self, messages):\n assert isinstance(messages, list)\n assert len(messages) > 0\n assert all(isinstance(message, Message.Implementation) for message in messages)\n assert all(message.community == messages[0].community for message in messages)\n assert all(message.meta == messages[0].meta for message in messages)\n\n def _filter_fail(message):\n if isinstance(message, DelayMessage):\n if __debug__:\n dprint(message.delayed.candidate, \" delay \", message.delayed, \" (\", message, \")\")\n \n if message.create_request():\n self._statistics.delay_send += 1\n self._statistics.dict_inc(self._statistics.delay, \"om_message_batch:%s\" % message.delayed)\n self._statistics.delay_count += 1\n return False\n\n elif isinstance(message, DropMessage):\n if __debug__:\n dprint(message.dropped.candidate, \" drop: \", message.dropped.name, \" (\", message, \")\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_message_batch:%s\" % message)\n self._statistics.drop_count += 1\n return False\n\n else:\n return True\n\n meta = messages[0].meta\n\n if __debug__:\n debug_count = len(messages)\n debug_begin = time()\n\n # drop all duplicate or old messages\n assert type(meta.distribution) in self._check_distribution_batch_map\n messages = list(self._check_distribution_batch_map[type(meta.distribution)](messages))\n assert len(messages) > 0 # should return at least one item for each message\n assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)\n\n # handle/remove DropMessage and DelayMessage instances\n messages = [message for message in messages if isinstance(message, Message.Implementation) or _filter_fail(message)]\n if not messages:\n return 0\n\n # check all remaining messages on the community side. may yield Message.Implementation,\n # DropMessage, and DelayMessage instances\n try:\n messages = list(meta.check_callback(messages))\n except:\n dprint(\"exception during check_callback for \", meta.name, exception=True, level=\"error\")\n return 0\n assert len(messages) >= 0 # may return zero messages\n assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)\n\n if __debug__:\n if len(messages) == 0:\n dprint(meta.check_callback, \" yielded zero messages, drop, or delays. This is allowed but likely to be an error.\", level=\"warning\")\n\n # handle/remove DropMessage and DelayMessage instances\n messages = [message for message in messages if _filter_fail(message)]\n if not messages:\n return 0\n\n # store to disk and update locally\n if __debug__:\n dprint(\"in... 
\", len(messages), \" \", meta.name, \" messages from \", \", \".join(str(candidate) for candidate in set(message.candidate for message in messages)))\n \n if self.store_update_forward(messages, True, True, False):\n \n self._statistics.dict_inc(self._statistics.success, meta.name, len(messages))\n self._statistics.success_count += len(messages)\n\n # tell what happened\n if __debug__:\n debug_end = time()\n level = \"warning\" if (debug_end - debug_begin) > 1.0 else \"normal\"\n dprint(\"handled \", len(messages), \"/\", debug_count, \" %.2fs\" % (debug_end - debug_begin), \" \", meta.name, \" messages (with \", meta.batch.max_window, \"s cache window)\", level=level)\n \n # return the number of messages that were correctly handled (non delay, duplictes, etc)\n return len(messages)\n \n return 0", "def test_message_group():", "def message_count(self):\n return len(self.messages)", "def test_get_queue_msg_count1(self):\n self.queue.direct_declare(TEST_QUEUE)\n self.queue.publish(TEST_QUEUE, 'this is a test msg')\n\n msg_count = self.queue.get_queue_msg_count(TEST_QUEUE)\n assert isinstance(msg_count, int)", "def test_bufMsgSize():\n nt.assert_equal(CisInterface.bufMsgSize(), CIS_MSG_BUF)", "def find_batch_size(data):\n if isinstance(data, (tuple, list)):\n return find_batch_size(data[0])\n elif isinstance(data, Mapping):\n for k in data.keys():\n return find_batch_size(data[k])\n elif not isinstance(data, torch.Tensor):\n raise TypeError(f\"Can only find the batch size of tensors but got {type(data)}.\")\n return data.shape[0]", "def on_test_batch_begin(self, batch, logs=None):", "def assert_queue_size(sizes):\n for queue in sizes:\n assert_that(count_messages(queue), is_(sizes[queue]))", "def test_maxMsgSize():\n nt.assert_equal(CisInterface.maxMsgSize(), CIS_MSG_MAX)", "def test_get_messages(self):\n\n messages = sentiment_analysis.get_messages(3)\n\n self.assertEqual(messages[0].message_text, 'This is a russian test message')\n self.assertEqual(messages[0].user_id, 3)\n self.assertEqual(messages[0].original_lang_id, 5)", "def batch_shape(self) -> torch.Size:\n self._check_if_fitted()\n return torch.Size([self.num_mcmc_samples])", "def batch_len(batch):\n flatlist, _ = tree_util.tree_flatten(batch)\n if len(flatlist) < 1:\n return 0\n b = flatlist[0].shape[0]\n assert all(\n arr.shape[0] == b for arr in flatlist if th.is_tensor(arr)\n ), \"Not all arrays have same batchsize!\"\n return b", "def length(self, data: Sequence[Sequence[torch.Tensor]]) -> int:\n return self.n_batch", "def test_kafka_group_io_dataset_secondary_cg():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestsecondary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )", "def test_pipeline2(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n 
self.assertEqual(num_packs, reader.count)", "def test_kafka_group_io_dataset_auto_offset_reset():\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgglobaloffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetearliest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(100))\n )\n\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtopicoffsetlatest\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"conf.topic.auto.offset.reset=latest\",\n ],\n )\n assert np.all(sorted(k.numpy() for (k, _) in dataset) == [])", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.CompoundingConversationDataset())) == 131569", "def test_number_of_records_for_spell(self):\n records = self.admitgen.data.findall('record')\n self.assertEqual(9, len(records),\n 'Incorrect number of records generated')", "def test_overall_report_data():\n assert (len(overall_data['overall_report']['data']) == 8)", "def test_kafka_group_io_dataset_invalid_stream_timeout():\n\n STREAM_TIMEOUT = -20\n try:\n tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\", \"key-test\"],\n group_id=\"cgteststreaminvalid\",\n servers=\"localhost:9092\",\n stream_timeout=STREAM_TIMEOUT,\n configuration=[\"session.timeout.ms=7000\", \"max.poll.interval.ms=8000\"],\n )\n except ValueError as e:\n assert str(\n e\n ) == \"Invalid stream_timeout value: {} ,set it to -1 to block indefinitely.\".format(\n STREAM_TIMEOUT\n )", "def new_messages_number(self, tag):\n return len(self._message_queue.setdefault(tag,[]))", "def __len__(self) -> int:\n batch_sampler = cast(BatchSampler, self.batch_sampler)\n return len(batch_sampler)", "def test_process_messages_rate_limited(caplog, settings) -> None:\n settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0\n rate_limited_payload = deepcopy(distribution_payload)\n rate_limited_payload[\"tags\"][\"custom_tag\"] = \"rate_limited_test\"\n\n rate_limited_payload2 = deepcopy(distribution_payload)\n rate_limited_payload2[\"name\"] = \"rate_limited_test\"\n\n message_batch = [\n Message(\n BrokerValue(\n KafkaPayload(None, json.dumps(counter_payload).encode(\"utf-8\"), []),\n Partition(Topic(\"topic\"), 0),\n 0,\n datetime.now(),\n )\n ),\n Message(\n BrokerValue(\n KafkaPayload(None, json.dumps(rate_limited_payload).encode(\"utf-8\"), []),\n Partition(Topic(\"topic\"), 0),\n 1,\n datetime.now(),\n )\n ),\n Message(\n BrokerValue(\n KafkaPayload(None, 
json.dumps(rate_limited_payload2).encode(\"utf-8\"), []),\n Partition(Topic(\"topic\"), 0),\n 2,\n datetime.now(),\n )\n ),\n ]\n # the outer message uses the last message's partition, offset, and timestamp\n last = message_batch[-1]\n outer_message = Message(Value(message_batch, last.committable))\n\n message_processor = MessageProcessor(\n get_ingest_config(UseCaseKey.RELEASE_HEALTH, IndexerStorage.MOCK)\n )\n # Insert a None-value into the mock-indexer to simulate a rate-limit.\n message_processor._indexer.indexer._strings[1][\"rate_limited_test\"] = None\n\n with caplog.at_level(logging.ERROR):\n new_batch = message_processor.process_messages(outer_message=outer_message)\n\n # we expect just the counter_payload msg to be left, as that one didn't\n # cause/depend on string writes that have been rate limited\n expected_msg = message_batch[0]\n expected_new_batch = [\n Message(\n BrokerValue(\n KafkaPayload(\n None,\n json.dumps(__translated_payload(counter_payload)).encode(\"utf-8\"),\n [(\"metric_type\", \"c\")],\n ),\n expected_msg.value.partition,\n expected_msg.value.offset,\n expected_msg.value.timestamp,\n )\n )\n ]\n compare_message_batches_ignoring_metadata(new_batch, expected_new_batch)\n assert \"dropped_message\" in caplog.text", "def test_kafka_group_io_dataset_primary_cg():\n dataset = tfio.experimental.streaming.KafkaGroupIODataset(\n topics=[\"key-partition-test\"],\n group_id=\"cgtestprimary\",\n servers=\"localhost:9092\",\n configuration=[\n \"session.timeout.ms=7000\",\n \"max.poll.interval.ms=8000\",\n \"auto.offset.reset=earliest\",\n ],\n )\n assert np.all(\n sorted(k.numpy() for (k, _) in dataset)\n == sorted((\"D\" + str(i)).encode() for i in range(10))\n )", "def test_format_of_generated_msgs():\n for msg in it.islice(generate_msgs(), 0, 5):\n message = Message.parse(msg) # checks the json keys have the right names\n assert type(message.timestamp) is datetime\n assert type(message.power) is int", "def batch_size(self):\n return self.size", "def test_variable_length():\n size = 1350\n batch_size = 4\n dataset = datasets.digit(\n split=\"train\", epochs=1, batch_size=batch_size, dataset_dir=DATASET_DIR,\n )\n assert dataset.batches_per_epoch == (size // batch_size + bool(size % batch_size))\n\n x, y = dataset.get_batch()\n assert x.dtype == object\n assert x.shape == (batch_size,)\n for x_i in x:\n assert x_i.ndim == 1\n assert 1148 <= len(x_i) <= 18262\n assert y.shape == (batch_size,)", "def test_batch_size(self, mock_auto_create):\n\n batch = Batch(Mock())\n self.check_instance(batch)\n\n self.assertIsNone(batch.batch_size)\n self.check_instance(batch)\n\n #######################################################################\n # test batch_size: None -> int\n batch.batch_size = 10\n self.assertEqual(batch.batch_size, 10)\n self.check_instance(batch,\n batch_size=10,\n batching_type='fixed',\n recom_num_obj=10,\n recom_num_ref=10\n )\n mock_auto_create.assert_called()\n mock_auto_create.reset_mock()\n\n #######################################################################\n # test batch_size: int -> int and dynamic enabled, one recommended set to None\n ## set some attributes manually (ONLY WHEN TESTING!!!)\n batch._batching_type = 'dynamic'\n batch._recommended_num_objects = None\n\n batch.batch_size = 200\n self.assertEqual(batch.batch_size, 200)\n self.check_instance(batch,\n batch_size=200,\n batching_type='dynamic',\n recom_num_obj=200,\n recom_num_ref=10\n )\n mock_auto_create.assert_called()\n mock_auto_create.reset_mock()\n\n 
#######################################################################\n # test batch_size: int -> None\n batch.batch_size = None\n self.assertIsNone(batch.batch_size)\n self.check_instance(batch,\n batch_size=None,\n batching_type=None,\n recom_num_obj=200,\n recom_num_ref=10\n )\n mock_auto_create.assert_not_called()\n mock_auto_create.reset_mock()\n\n #######################################################################\n # test exceptions\n ## messages\n type_error = f\"'batch_size' must be of type {int}.\"\n value_error = \"'batch_size' must be positive, i.e. greater that zero (>0).\"\n\n with self.assertRaises(TypeError) as error:\n batch.batch_size = False\n check_error_message(self, error, type_error)\n self.check_instance(batch,\n batch_size=None,\n batching_type=None,\n recom_num_obj=200,\n recom_num_ref=10\n )\n mock_auto_create.assert_not_called()\n\n with self.assertRaises(TypeError) as error:\n batch.batch_size = 100.5\n check_error_message(self, error, type_error)\n self.check_instance(batch,\n batch_size=None,\n batching_type=None,\n recom_num_obj=200,\n recom_num_ref=10\n )\n mock_auto_create.assert_not_called()\n\n with self.assertRaises(ValueError) as error:\n batch.batch_size = 0\n check_error_message(self, error, value_error)\n self.check_instance(batch,\n batch_size=None,\n batching_type=None,\n recom_num_obj=200,\n recom_num_ref=10\n )\n mock_auto_create.assert_not_called()\n\n with self.assertRaises(ValueError) as error:\n batch.batch_size = -100\n check_error_message(self, error, value_error)\n self.check_instance(batch,\n batch_size=None,\n batching_type=None,\n recom_num_obj=200,\n recom_num_ref=10\n )\n mock_auto_create.assert_not_called()", "def test_pipeline2(self):\n\n nlp = Pipeline[MultiPack]()\n reader = MultiPackSentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config,\n selector=FirstPackSelector())\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_pack(\"pack\").get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_bob_unread(self):\n messages = list(self.bob_storage.unread)\n self.assertEqual(2, len(messages))\n self.assertIn(self.bob_message_to_group1, messages)\n self.assertNotIn(self.read_message, messages)\n self.assertIn(self.alice_message_to_bob, messages)\n self.assertNotIn(self.archived_message, messages)", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def test_n_splits(self):\n for n_splits, n_jobs in product([1, 6], [None, 2, 8]):\n with self.subTest(input='list', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=n_splits,\n n_jobs=n_jobs), n_splits)\n\n with self.subTest(input='numpy', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None,\n n_splits=n_splits, n_jobs=n_jobs), n_splits)", "def test_get_messages(self, mock_logging):\n self.client.get_messages()\n\n assert_equal(len(self.client.received_messages), 1)\n assert_true(mock_logging.info.called)", "def calculate_batch_number(train_data, val_data, batch_size, nested=False):\n batch_train, batch_val = ([] for 
_ in range(2))\n for key in train_data:\n if nested:\n batch_train.append(int(len(train_data[key][next(iter(train_data[key]))]) / batch_size))\n batch_val.append(math.ceil(len(val_data[key][next(iter(val_data[key]))]) / batch_size))\n else:\n batch_train.append(int(len(train_data[key]) / batch_size))\n batch_val.append(math.ceil(len(val_data[key]) / batch_size))\n\n return batch_train, batch_val", "def test_context_data_info_message_for_multiple_result(self):\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum 2')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '2 results found.')", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def test_table_counts():\n number_of_test_run = 2 # Run the pipeline twice\n for i in range(number_of_test_run):\n dp = DataPipeline()\n dp.run()\n\n dp = DataPipeline()\n assert dp.get_product_count() == (500000,)\n assert dp.get_duplicate_count(from_table=\"products\") == (0,)\n assert dp.get_aggregate_table_result_count() == (222024, )\n 222024\n dp.close()", "def test_n_jobs(self):\n for n_jobs in [1, 6]:\n with self.subTest(input='list', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data)))\n\n with self.subTest(input='numpy', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data_numpy)))", "def test_read_count(self):\n self.assertEqual(1, self.alice_storage.read_count)\n self.assertEqual(1, self.bob_storage.read_count)\n self.assertEqual(0, self.carol_storage.read_count)\n self.assertEqual(0, self.anonymous_storage.read_count)" ]
[ "0.64023566", "0.6360138", "0.63011515", "0.6271337", "0.61075103", "0.6065402", "0.60378826", "0.60176194", "0.59386075", "0.5936413", "0.59298617", "0.59279746", "0.5922301", "0.5909433", "0.5898732", "0.58711314", "0.58583987", "0.58453405", "0.5823402", "0.5822543", "0.5801078", "0.579912", "0.57942057", "0.5789146", "0.57880837", "0.5764335", "0.5763675", "0.5749102", "0.5746237", "0.5725947", "0.5722312", "0.57209504", "0.5717708", "0.57061386", "0.56726027", "0.56414604", "0.5630748", "0.56256074", "0.5612088", "0.5602219", "0.5592045", "0.5583038", "0.55809367", "0.5562033", "0.55598176", "0.5548312", "0.55339146", "0.5524446", "0.5512881", "0.55108935", "0.55066437", "0.5500779", "0.54962516", "0.54891634", "0.54877067", "0.5469624", "0.5460988", "0.54591393", "0.54576063", "0.5456079", "0.54475087", "0.543648", "0.54346645", "0.5422258", "0.5399644", "0.5399113", "0.53805447", "0.5377119", "0.534444", "0.53441393", "0.53389096", "0.5326169", "0.53214574", "0.5311266", "0.53080046", "0.5307213", "0.53052676", "0.5303249", "0.5300147", "0.5294653", "0.52808344", "0.5280498", "0.52758205", "0.5274006", "0.52707654", "0.5267397", "0.52454805", "0.52432203", "0.52361226", "0.5233372", "0.52288836", "0.5227804", "0.5227074", "0.5226693", "0.5226642", "0.52235717", "0.5220425", "0.52131337", "0.5210123", "0.52085537" ]
0.684737
0
Test the functionality of the KafkaBatchIODataset by training a model directly on the incoming kafka message batch (of type tf.data.Dataset), in an online-training fashion.
def test_kafka_batch_io_dataset(): dataset = tfio.experimental.streaming.KafkaBatchIODataset( topics=["mini-batch-test"], group_id="cgminibatchtrain", servers=None, stream_timeout=5000, configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "auto.offset.reset=earliest", ], ) NUM_COLUMNS = 1 model = tf.keras.Sequential( [ tf.keras.layers.Input(shape=(NUM_COLUMNS,)), tf.keras.layers.Dense(4, activation="relu"), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(1, activation="sigmoid"), ] ) model.compile( optimizer="adam", loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=["accuracy"], ) assert issubclass(type(dataset), tf.data.Dataset) for mini_d in dataset: mini_d = mini_d.map( lambda m, k: ( tf.strings.to_number(m, out_type=tf.float32), tf.strings.to_number(k, out_type=tf.float32), ) ).batch(2) assert issubclass(type(mini_d), tf.data.Dataset) # Fits the model as long as the data keeps on streaming model.fit(mini_d, epochs=5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, batch):\n pass", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, num_batches: int):", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n 
evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def test_model(self, batch_size):\n (_, gen_val, gen_test) = self.dataset.data_loaders(\n batch_size=batch_size,\n split=(0.01, 0.01)\n )\n print('Num Test Batches: ', len(gen_test))\n mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)\n print('Test Epoch:')\n print(\n '\\tTest Loss: ', mean_loss_test, '\\n'\n '\\tTest Accuracy: ', mean_accuracy_test * 100\n )", "def train_online(\n self,\n dataset: Union[str, dict, pd.DataFrame],\n training_set_metadata: Union[str, dict] = None,\n data_format: str = \"auto\",\n random_seed: int = default_random_seed,\n ) -> None:\n training_set_metadata = training_set_metadata or self.training_set_metadata\n preprocessing_params = get_preprocessing_params(self.config_obj)\n\n with provision_preprocessing_workers(self.backend):\n # TODO (Connor): Refactor to use self.config_obj\n training_dataset, _, _, training_set_metadata = preprocess_for_training(\n self.config_obj.to_dict(),\n training_set=dataset,\n training_set_metadata=training_set_metadata,\n data_format=data_format,\n skip_save_processed_input=True,\n preprocessing_params=preprocessing_params,\n backend=self.backend,\n random_seed=random_seed,\n callbacks=self.callbacks,\n )\n\n if not self.training_set_metadata:\n self.training_set_metadata = training_set_metadata\n\n if not self.model:\n update_config_with_metadata(self.config_obj, training_set_metadata)\n self.model = LudwigModel.create_model(self.config_obj, random_seed=random_seed)\n # update config with properties determined during model instantiation\n update_config_with_model(self.config_obj, self.model)\n set_saved_weights_in_checkpoint_flag(self.config_obj)\n\n if not self._online_trainer:\n self._online_trainer = self.backend.create_trainer(\n config=self.config_obj.trainer, model=self.model, random_seed=random_seed\n )\n\n self._tune_batch_size(self._online_trainer, dataset, random_seed=random_seed)\n\n self.model = self._online_trainer.train_online(training_dataset)", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def run_epoch(session,\n model,\n dataset,\n is_train=False,\n plot_attention_weights=False):\n assert dataset is not None\n n_words = len([word for sample in dataset for word in sample if word > 0])\n epoch_size = int(math.ceil(len(dataset) / model.batch_size))\n # producer = lm_data_producer(dataset, model.batch_size, model.num_steps)\n\n fetches = {\"step_cost\": model.batch_loss, \"niters\": model.nwords}\n if is_train:\n fetches[\"eval_op\"] = model.train_op\n if plot_attention_weights:\n fetches[\"weights\"] = model.attention_weights\n\n costs = 0.0\n iters = 0\n start_time = time.time()\n # for step, (x, y) in 
enumerate(producer):\n for step in range(epoch_size):\n step_time = time.time()\n vals = session.run(fetches, {})\n step_cost = vals[\"step_cost\"]\n costs += step_cost\n # iters += np.sum(x > 0)\n iters += vals[\"niters\"]\n\n # print information regarding the current training process\n if is_train:\n if step % (epoch_size // 20) == 10:\n print(\"{:.3f} - aprox. loss {:.8f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, costs / (step + 1),\n iters / (time.time() - start_time)))\n # print information regarding the current training process\n else:\n if step % (epoch_size // 10) == 5:\n print(\"{:.3f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, iters / (time.time() - start_time)))\n\n return np.exp(costs / n_words)", "def train(Dataset, model, criterion, epoch, optimizer, writer, device, args):\n\n # Create instances to accumulate losses etc.\n losses = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n # train\n for i, (inp, target) in enumerate(Dataset.train_loader):\n inp = inp.to(device)\n target = target.to(device)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # compute model forward\n output = model(inp)\n\n # calculate loss\n loss = criterion(output, target)\n\n # record precision/accuracy and losses\n prec1 = accuracy(output, target)[0]\n top1.update(prec1.item(), inp.size(0))\n losses.update(loss.item(), inp.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print progress\n if i % args.print_freq == 0:\n print('Training: [{0}][{1}/{2}]\\t' \n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch+1, i, len(Dataset.train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1))\n\n # TensorBoard summary logging\n writer.add_scalar('training/train_precision@1', top1.avg, epoch)\n writer.add_scalar('training/train_class_loss', losses.avg, epoch)\n writer.add_scalar('training/train_average_loss', losses.avg, epoch)\n\n print(' * Train: Loss {loss.avg:.5f} Prec@1 {top1.avg:.3f}'.format(loss=losses, top1=top1))", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of 
num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def train(self, training_data):\n pass", "def train_loop(args, train_dataset, dev_dataset, global_mean=0.0, test_dataset=None):\n mirrored_strategy = tf.distribute.MirroredStrategy()\n with mirrored_strategy.scope():\n # build model\n user_ids = keras.Input(shape=(), dtype=tf.int32, name=\"user_id\")\n movie_ids = keras.Input(shape=(), dtype=tf.int32, name=\"movie_id\")\n item_bin_ids = keras.Input(shape=(), dtype=tf.int32, name=\"item_time_bias\")\n user_time_dev = keras.Input(shape=(), dtype=tf.float32, name=\"user_time_dev\")\n batch_score = MF_Netflix(args.user_count, args.item_count, args.hidden_dim, global_mean)(\\\n [user_ids, movie_ids, item_bin_ids, user_time_dev])\n model = keras.Model(inputs={\"user_id\":user_ids, \"movie_id\":movie_ids, \\\n \"item_time_bias\": item_bin_ids, \"user_time_dev\": user_time_dev}, \\\n outputs=batch_score)\n # build the model train setting\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n args.learning_rate,\n decay_steps=20000,\n decay_rate=0.96,\n staircase=True)\n optimizer = keras.optimizers.Adam(args.learning_rate)\n #optimizer = keras.optimizers.RMSprop(args.learning_rate)\n #optimizer = keras.optimizers.SGD(args.learning_rate)\n loss = keras.losses.MeanSquaredError()\n metrics = [keras.metrics.MeanSquaredError()]\n model.compile(optimizer, loss=loss, 
metrics=metrics)\n # make the training loop and evaluation\n checkpoint_callback = keras.callbacks.ModelCheckpoint(\\\n filepath=args.model_path, save_best_only=True, save_weights_only=True)\n tensorbaord_callback = keras.callbacks.TensorBoard(log_dir=args.summary_dir, \\\n histogram_freq=1)\n steps_per_epoch = args.steps_per_epoch\n model.fit(train_dataset, epochs=args.epochs, \\\n callbacks=[checkpoint_callback, tensorbaord_callback], \\\n validation_data=dev_dataset, steps_per_epoch=steps_per_epoch, \\\n validation_steps=args.val_steps)", "def train():\n\n dataset_train = SequenceDataset(\n subset=\"train\",\n config_dir=FLAGS.config_dir,\n data_dir=FLAGS.data_dir,\n batch_size=FLAGS.batch_size,\n input_size=FLAGS.input_dim,\n output_size=FLAGS.output_dim,\n num_threads=FLAGS.num_threads,\n use_bucket=True,\n infer=False,\n name=\"dataset_train\")()\n\n dataset_valid = SequenceDataset(\n subset=\"valid\",\n config_dir=FLAGS.config_dir,\n data_dir=FLAGS.data_dir,\n batch_size=FLAGS.batch_size,\n input_size=FLAGS.input_dim,\n output_size=FLAGS.output_dim,\n num_threads=FLAGS.num_threads,\n use_bucket=True,\n infer=False,\n name=\"dataset_valid\")()\n\n model = TfModel(\n rnn_cell=FLAGS.rnn_cell,\n dnn_depth=FLAGS.dnn_depth,\n dnn_num_hidden=FLAGS.dnn_num_hidden,\n rnn_depth=FLAGS.rnn_depth,\n rnn_num_hidden=FLAGS.rnn_num_hidden,\n output_size=FLAGS.output_dim,\n bidirectional=FLAGS.bidirectional,\n rnn_output=FLAGS.rnn_output,\n cnn_output=FLAGS.cnn_output,\n look_ahead=FLAGS.look_ahead,\n mdn_output=FLAGS.mdn_output,\n mix_num=FLAGS.mix_num,\n name=\"tf_model\")\n\n # Build a reinitializable iterator for both dataset_train and dataset_valid.\n iterator = tf.data.Iterator.from_structure(\n dataset_train.batched_dataset.output_types,\n dataset_train.batched_dataset.output_shapes)\n (input_sequence, input_sequence_length,\n target_sequence, target_sequence_length) = iterator.get_next()\n\n training_init_op = iterator.make_initializer(dataset_train.batched_dataset)\n validation_init_op = iterator.make_initializer(dataset_valid.batched_dataset)\n\n # Build the model and get the loss.\n output_sequence_logits, train_final_state = model(\n input_sequence, input_sequence_length)\n loss = model.loss(\n output_sequence_logits, target_sequence, target_sequence_length)\n tf.summary.scalar(\"loss\", loss)\n\n learning_rate = tf.get_variable(\n \"learning_rate\",\n shape=[],\n dtype=tf.float32,\n initializer=tf.constant_initializer(FLAGS.learning_rate),\n trainable=False)\n reduce_learning_rate = learning_rate.assign(\n learning_rate * FLAGS.reduce_learning_rate_multiplier)\n\n global_step = tf.get_variable(\n name=\"global_step\",\n shape=[],\n dtype=tf.int64,\n initializer=tf.zeros_initializer(),\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])\n\n # Set up optimizer with global norm clipping.\n trainable_variables = tf.trainable_variables()\n optimizer = tf.train.AdamOptimizer(learning_rate)\n grads, _ = tf.clip_by_global_norm(\n tf.gradients(loss, trainable_variables),\n FLAGS.max_grad_norm)\n\n train_step = optimizer.apply_gradients(\n zip(grads, trainable_variables),\n global_step=global_step)\n\n show_all_variables()\n merged_all = tf.summary.merge_all()\n saver = tf.train.Saver(max_to_keep=FLAGS.max_epochs)\n\n # Train\n config = tf.ConfigProto()\n # Prevent exhausting all the gpu memories\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n # Run init\n sess.run(tf.global_variables_initializer())\n\n summary_writer = 
tf.summary.FileWriter(\n os.path.join(FLAGS.save_dir, \"nnet\"), sess.graph)\n\n if FLAGS.resume_training:\n restore_from_ckpt(sess, saver)\n\n # add a blank line for log readability\n print()\n sys.stdout.flush()\n\n sess.run(validation_init_op)\n loss_prev = eval_one_epoch(sess, loss, dataset_valid.num_batches)\n tf.logging.info(\"CROSSVAL PRERUN AVG.LOSS %.4f\\n\" % loss_prev)\n\n for epoch in range(FLAGS.max_epochs):\n # Train one epoch\n time_start = time.time()\n sess.run(training_init_op)\n tr_loss = train_one_epoch(sess, summary_writer, merged_all, global_step,\n train_step, loss, dataset_train.num_batches)\n time_end = time.time()\n used_time = time_end - time_start\n\n # Validate one epoch\n sess.run(validation_init_op)\n val_loss = eval_one_epoch(sess, loss, dataset_valid.num_batches)\n\n # Determine checkpoint path\n FLAGS.learning_rate = sess.run(learning_rate)\n cptk_name = 'nnet_epoch%d_lrate%g_tr%.4f_cv%.4f' % (\n epoch + 1, FLAGS.learning_rate, tr_loss, val_loss)\n checkpoint_path = os.path.join(FLAGS.save_dir, \"nnet\", cptk_name)\n\n # accept or reject new parameters\n if val_loss < loss_prev:\n saver.save(sess, checkpoint_path)\n # logging training loss along with validation loss\n tf.logging.info(\n \"EPOCH %d: TRAIN AVG.LOSS %.4f, (lrate%g) \"\n \"CROSSVAL AVG.LOSS %.4f, TIME USED %.2f, %s\" % (\n epoch + 1, tr_loss, FLAGS.learning_rate, val_loss,\n used_time, \"nnet accepted\"))\n loss_prev = val_loss\n else:\n tf.logging.info(\n \"EPOCH %d: TRAIN AVG.LOSS %.4f, (lrate%g) \"\n \"CROSSVAL AVG.LOSS %.4f, TIME USED %.2f, %s\" % (\n epoch + 1, tr_loss, FLAGS.learning_rate, val_loss,\n used_time, \"nnet rejected\"))\n restore_from_ckpt(sess, saver)\n # Reducing learning rate.\n sess.run(reduce_learning_rate)\n\n # add a blank line for log readability\n print()\n sys.stdout.flush()", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def train_and_eval(params: flags.FlagValues) -> tf.keras.callbacks.History:\n logging.info('Run training for {} with {}'.format(params.model_name,\n params.dataset_name))\n logging.info('The CLI params are: {}'.format(params.flag_values_dict()))\n d_config = _get_dataset_config().get(params.dataset_name)()\n m_config = _get_model_config().get(params.model_name)()\n\n logging.info('Training dataset configuration:', d_config)\n logging.info('Training model configuration:', m_config)\n\n # override the model params with CLI params\n m_config.num_classes = d_config.num_classes\n m_config.dropout_keep_prob = 1 - params.dropout_rate\n m_config.weight_decay = params.std_weight_decay\n m_config.stddev = params.truncated_normal_stddev\n m_config.batch_norm_decay = params.batch_norm_decay\n\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n # override the dataset params with CLI params\n if params.data_dir:\n d_config.data_dir = params.data_dir\n global_batch_size = params.batch_size * strategy.num_replicas_in_sync\n\n # override the dataset params with CLI params\n # for distributed training, update batch size\n d_config.batch_size = global_batch_size\n # determine whether one_hot is used based on label_smoothing\n d_config.one_hot = params.label_smoothing and params.label_smoothing > 0\n\n # build train dataset\n train_dataset = get_dataset(d_config)\n # build validation dataset\n d_config.split = 'validation'\n eval_dataset = get_dataset(d_config)\n\n # compute number iterations per epoch\n steps_per_epoch = d_config.num_examples // d_config.batch_size\n eval_steps = d_config.num_eval_examples // 
d_config.batch_size\n\n # build the model\n keras_model = build_model(\n model_name=params.model_name,\n dataset_config=d_config,\n model_config=m_config\n )\n\n # build the optimizer\n learning_params = defaults.LR_CONFIG_DEFAULT\n learning_params.update({'initial_lr': params.lr,\n 'decay_epochs': params.lr_decay_epochs,\n 'decay_rate': params.lr_decay_rate})\n optimizer_params = defaults.OP_CONFIG_DEFAULT\n optimizer_params.update({'decay': params.op_decay_rate,\n 'momentum': params.op_momentum})\n optimizer = _get_optimizer(\n batch_size=global_batch_size,\n steps_per_epoch=steps_per_epoch,\n lr_name=params.learning_scheduler_name,\n optimizer_name=params.optimizer_name,\n lr_params=learning_params,\n optimizer_params=optimizer_params\n )\n\n logging.info('Exponential decay rate:{}'.format(params.ma_decay_rate))\n if params.ma_decay_rate:\n optimizer = tfa.optimizers.MovingAverage(\n optimizer=optimizer,\n average_decay=params.ma_decay_rate)\n\n # compile model\n if d_config.one_hot:\n loss_obj = tf.keras.losses.CategoricalCrossentropy(\n label_smoothing=params.label_smoothing)\n else:\n loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()\n\n keras_model.compile(\n optimizer=optimizer,\n loss=loss_obj,\n metrics=[_get_metrics(one_hot=d_config.one_hot)['acc']],\n )\n\n logging.info(keras_model.summary())\n\n initial_epoch = 0\n if params.resume_checkpoint:\n initial_epoch = _resume_from_checkpoint(model=keras_model,\n model_dir=params.model_dir,\n train_steps=steps_per_epoch)\n\n # Callbacks\n callbacks_to_use = _get_callback(model_dir=params.model_dir)\n\n # Train model\n history = keras_model.fit(\n train_dataset,\n steps_per_epoch=steps_per_epoch,\n epochs=params.epochs,\n validation_data=eval_dataset,\n validation_steps=eval_steps,\n initial_epoch=initial_epoch,\n verbose=1,\n callbacks=callbacks_to_use\n )\n\n return history", "def train(self, session, *args, train_data_iterator=None,\n dev_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement train() method\")", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, (time.time() - starttime)))", "def _training_loop(model, datasets, optimizer, loss_function, 
initial_epoch, epochs, callbacks,\n steps_per_epoch, train_on_batch, evaluate_model, metrics=[], weight_decay=0,\n evaluation_freq=1):\n tf.keras.backend.set_learning_phase(1)\n\n train_generators = [_to_infinite_iterator(d[0]) for d in datasets]\n valid_generators = [d[1] for d in datasets]\n\n for c in callbacks:\n c.on_train_begin(model)\n\n cumulative_batch_id = 0\n\n for epoch in range(initial_epoch, epochs):\n logger.info(f\"Start epoch {epoch}\")\n\n epoch_logs = {}\n\n for c in callbacks:\n c.on_epoch_begin(epoch, epoch_logs)\n for batch_id in range(steps_per_epoch):\n cumulative_batch_id += 1\n batch_logs = {}\n\n x_trains, y_trains, y_trains_a, y_trains_b, lams = [], [], [], [], []\n\n for dataset_id in range(len(datasets)):\n x_train, y_train = next(train_generators[dataset_id])\n\n x_trains.append(x_train)\n y_trains.append(y_train)\n\n if isinstance(x_train, dict):\n batch_logs.update({\"size:\" + str(dataset_id): len(list(x_train.values())[0])})\n else:\n batch_logs.update({\"size:\" + str(dataset_id): len(x_train)})\n\n for c in callbacks:\n c.on_batch_begin(batch=batch_id, logs=batch_logs)\n\n batch_logs_step = train_on_batch(model, optimizer, x_trains, y_trains,\n metrics,\n loss_function,\n weight_decay=weight_decay)\n\n batch_logs.update(batch_logs_step)\n for k in batch_logs:\n if hasattr(batch_logs[k], \"numpy\"):\n batch_logs[k] = batch_logs[k].numpy()\n\n # if isinstance(batch_logs[k], tf.Tensor):\n if (hasattr(batch_logs[k], 'ndim') and batch_logs[k].ndim > 0) or isinstance(batch_logs[k], list):\n batch_logs[k] = batch_logs[k] # .numpy()\n if isinstance(batch_logs[k], list):\n batch_logs[k] = np.array(batch_logs[k])\n else:\n batch_logs[k] = float(batch_logs[k])\n\n for c in callbacks:\n c.on_batch_end(batch=batch_id, logs=batch_logs)\n\n if evaluation_freq > 0 and (epoch % evaluation_freq == 0 or epoch == epochs - 1):\n tf.keras.backend.set_learning_phase(0)\n val_results = evaluate_model(model, valid_generators, loss_function, metrics)\n tf.keras.backend.set_learning_phase(1)\n for k, v in val_results.items():\n epoch_logs[f'val_{k}'] = v\n else:\n if evaluation_freq > 0:\n for k in previous_epoch_logs:\n if k not in epoch_logs:\n epoch_logs[k] = np.nan\n\n for c in callbacks:\n c.on_epoch_end(epoch, epoch_logs)\n\n logger.info('End of epoch {}, loss={}'.format(epoch, epoch_logs['loss:0']))\n\n previous_epoch_logs = dict(epoch_logs)\n\n for c in callbacks:\n c.on_train_end(model)", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def train(self, trainData):\n pass", "def train(self, dataset):\n if not self.model:\n self._build()\n\n samples_per_batch = self.dataset.number_of_examples_train() // self.config.batch_size\n\n # Train over multiple epochs\n with tf.Session() as sess:\n best_loss = float('inf')\n best_val_epoch = 0\n sess.run(self.init)\n\n # train until we reach the maximum number of epochs\n for epoch in range(self.config.max_epochs):\n total_training_loss = 0\n num_correct = 0\n prev_prediction = 0\n\n print(\" \")\n print('Epoch {}'.format(epoch))\n# start = time.time()\n\n for i in range(samples_per_batch):\n tr_elems, answers, i_seq_len, q_seq_len, imask = self.dataset.next_batch(self.config.batch_size)\n tr_elems, answers, imask = self.preprocess_batch(tr_elems[0], tr_elems[1], answers, imask)\n ans = np.zeros((self.config.batch_size, 
self.dataset.vocab_size))\n for i in np.arange(self.config.batch_size):\n ans[i][answers[i]] = 1.\n # ans[np.arange(self.config.batch_size), answers] = 1.0\n print(\"ans\", ans)\n print(\"answers\", answers)\n print(\"ans shape\", ans.shape)\n\n # For debugging:\n # Input module: _input_tensor - self.input_only_for_testing\n # Question module: _question_representation - self.question_representation\n # Episode module: _e_i - self.e_i / _e_m_s - self.episodic_memory_state\n loss, _, pred_prob, _projections = sess.run(\n [self.cost, self.optimizer, self.prediction, self.projections],\n feed_dict={self.input_placeholder: tr_elems[0],\n self.input_length_placeholder: i_seq_len,\n self.end_of_sentences_placeholder: imask,\n self.question_placeholder: tr_elems[1],\n self.question_length_placeholder: q_seq_len,\n self.labels_placeholder: ans,\n #self.gate_placeholder: [float(self.train_gate[i])]\n })\n\n total_training_loss += loss\n\n if np.argmax(pred_prob) == np.argmax(ans):\n num_correct += 1\n\n if i % self.config.update_length == 0:\n print \"Current average training loss: {}\".format(total_training_loss / (i + 1))\n print \"Current training accuracy: {}\".format(float(num_correct) / (i + 1))\n print(\"Ans: \" + str(self.dataset.ivocab[np.argmax(ans)]))\n print(\"Pred: \" + str(self.dataset.ivocab[np.argmax(pred_prob)]))", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def train(self, data):\n pass", "def test_training(self):\n\t\tpass", "def train(self, train_fn, dev_fn):\n X_train, Y_train = self.load_dataset(train_fn)\n X_dev, Y_dev = self.load_dataset(dev_fn)\n logging.debug(\"Classes: {}\".format((self.num_of_classes(), self.classes_())))\n # Set model params, called here after labels have been identified in load dataset\n self.model_fn()\n\n # Create a callback to print a sample after each epoch\n logging.debug(\"Training model on {}\".format(train_fn))\n self.model.fit(X_train, Y_train,\n batch_size = self.batch_size,\n epochs = self.epochs,\n validation_data = (X_dev, Y_dev),\n callbacks = self.get_callbacks(X_train))", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train_batch(\n self, batch: TorchData, model: nn.Module, epoch_idx: int, batch_idx: int\n ) -> Union[torch.Tensor, Dict[str, Any]]:\n pass", "def train_batch(model, session_batch):\n if len(session_batch) == 0:\n return\n batch_size = len(session_batch)\n batch = np.zeros((batch_size, INPUT_SIZE))\n for i in range(batch_size):\n batch[i] = session_to_input(session_batch[i])\n pred = predict(model, session_batch)\n model.fit(batch, pred, batch_size=batch_size, verbose=0)", "def build_validation_iterator(dataset_name, batch_size, prepro_fn):\n dataset, dataset_info = tfds.load(\n dataset_name,\n split=tfds.Split.VALIDATION,\n as_supervised=True,\n with_info=True\n )\n n_samples = dataset_info.splits['validation'].num_examples\n steps_per_epoch = int(math.ceil(n_samples / batch_size))\n if prepro_fn is not None:\n dataset = dataset.map(prepro_fn, num_parallel_calls=AUTOTUNE)\n\n # Batch\n batched_dataset = dataset.padded_batch(\n batch_size,\n 
get_output_shapes(dataset),\n padding_values=get_padding_values(get_output_types(dataset)),\n drop_remainder=False\n )\n return batched_dataset, steps_per_epoch", "def _main(\n get_data: callable,\n EPOCHS: int = 10,\n PERIOD: int = 5,\n BATCH_SIZE: int = 256,\n LR: float = 1e-5,\n NEURONS: list = [128, 128],\n forecast: bool = False,\n tuning: bool = True,\n) -> None:\n @tf.function\n def train_step(x, y):\n with tf.GradientTape() as tape:\n pred = model(x)\n loss = loss_object(y, pred)\n grad = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grad, model.trainable_variables))\n train_loss.update_state(loss)\n train_accuracy.update_state(y, pred)\n\n\n @tf.function\n def test_step(x, y):\n # Test and validation step have the same operation.\n pred = model(x)\n loss = loss_object(y, pred)\n dev_loss.update_state(loss)\n dev_accuracy.update_state(y, pred)\n\n print(\"Reading data...\")\n X_train, X_dev, y_train, y_dev, X_test = get_data()\n print(\"X_train@{}, X_dev@{}\".format(X_train.shape, X_dev.shape))\n train_ds = tf.data.Dataset.from_tensor_slices(\n (X_train, y_train)).shuffle(int(1e6)).batch(BATCH_SIZE)\n\n dev_ds = tf.data.Dataset.from_tensor_slices(\n (X_dev, y_dev)).batch(BATCH_SIZE)\n\n num_fea = X_train.shape[1]\n model = NN(num_neurons=NEURONS)\n\n loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n optimizer = tf.keras.optimizers.Adam(learning_rate=LR)\n\n train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n train_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"train_accuracy\")\n\n dev_loss = tf.keras.metrics.Mean(name=\"dev_loss\")\n dev_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"dev_accuracy\")\n\n trace = {\"train\": [], \"val\": []}\n for epoch in range(EPOCHS):\n train_loss.reset_states()\n train_accuracy.reset_states()\n dev_loss.reset_states()\n dev_accuracy.reset_states()\n # Loop over batches.\n for x, y in train_ds:\n # x @ (batch_size, num_features)\n # y @ (batch_size, 1) --> probit\n train_step(x, y)\n\n for t_x, t_y in dev_ds:\n test_step(t_x, t_y)\n\n if (epoch+1) % PERIOD == 0:\n report = \"Epoch {:d}, Loss: {:0.6f}, Accuracy: {:0.6f}, Validation Loss: {:0.6f}, Validation Accuracy: {:0.6f}\"\n print(report.format(\n epoch+1,\n train_loss.result(),\n train_accuracy.result()*100,\n dev_loss.result(),\n dev_accuracy.result()*100))\n\n # Record loss\n trace[\"train\"].append(train_loss.result())\n trace[\"val\"].append(dev_loss.result())\n\n # AUC\n pred_train = model(X_train).numpy()\n pred_dev = model(X_dev).numpy()\n\n auc_train = metrics.roc_auc_score(y_true=y_train, y_score=pred_train)\n auc_dev = metrics.roc_auc_score(y_true=y_dev, y_score=pred_dev)\n\n print(\"AUC on Training Set: {: 0.6f}\".format(auc_train))\n print(\"AUC on Developing Set: {: 0.6f}\".format(auc_dev))\n\n if forecast:\n pred = model(X_test)\n return pred.numpy()\n if tuning:\n return {\n \"EPOCHS\": EPOCHS,\n \"BATCH_SIZE\": BATCH_SIZE,\n \"LR\": LR,\n \"NEURONS\": NEURONS,\n \"AUC_TRAIN\": auc_train,\n \"AUC_DEV\": auc_dev,\n \"LOSS_TRAIN\": train_loss.result().numpy(),\n \"LOSS_DEV\": dev_loss.result().numpy(),\n \"ACCURACY_TRAIN\": train_accuracy.result().numpy(),\n \"ACCURACY_DEV\": dev_accuracy.result().numpy(),\n }\n\n plt.plot(np.log(trace[\"train\"]))\n plt.plot(np.log(trace[\"val\"]))\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Log Cross Entropy Loss\")\n plt.legend([\"Training\", \"Validation\"])\n plt.title(\"LR={}, AUC_train={:0.3f}, AUC_dev={:0.3f}\".format(LR, auc_train, auc_dev))\n plt.show()", "def 
_train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def 
input_fn(is_training, data_dir, reid_data_dir= None,batch_size=32, num_epochs=1):\n dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))\n dataset_seg = dataset.flat_map(tf.data.TFRecordDataset)\n\n # dataset_reid = tf.data.Dataset.from_tensor_slices(get_filenames_reid(is_training, reid_data_dir))\n # dataset_reid = dataset_reid.flat_map(tf.data.TFRecordDataset)\n\n\n if is_training:\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes have better performance.\n # is a relatively small dataset, we choose to shuffle the full epoch.\n dataset_seg = dataset_seg.shuffle(buffer_size=_NUM_IMAGES['train'])\n # dataset_reid = dataset_reid.shuffle(buffer_size=30248)\n\n\n dataset_seg = dataset_seg.map(parse_record)\n dataset_seg = dataset_seg.map(lambda image, label: preprocess_image(image, label, is_training))\n dataset_seg = dataset_seg.prefetch(batch_size)\n dataset_seg = dataset_seg.repeat(num_epochs)\n dataset_seg = dataset_seg.batch(batch_size)\n\n # dataset_reid = dataset_reid.map(parse_record_reid)\n # dataset_reid = dataset_reid.map(lambda image, label: preprocess_image_reid(image, label, is_training))\n # dataset_reid = dataset_reid.prefetch(batch_size)\n # dataset_reid = dataset_reid.repeat(num_epochs)\n # dataset_reid = dataset_reid.batch(batch_size)\n\n # iterator = dataset_reid.make_one_shot_iterator()\n # images_reid, label_reid = iterator.get_next()\n\n train_record_file = os.path.join(reid_data_dir, 'train-512-170.tfrecords')\n val_record_file = os.path.join(reid_data_dir, 'val-512-170.tfrecords')\n\n train_images, train_labels = read_records(train_record_file, _HEIGHT, _WIDTH, type='normalization')\n train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=True)\n print(\"reid2222222\", train_images_batch.shape, train_labels_batch.shape)\n val_images, val_labels = read_records(val_record_file, _HEIGHT, _WIDTH, type='normalization')\n val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=False)\n images_reid = train_images_batch\n label_reid = train_labels_batch\n # if is_training:\n # images_reid = train_images_batch\n # label_reid = train_labels_batch\n # else:\n # images_reid = val_images_batch\n # label_reid = val_labels_batch\n iterator = dataset_seg.make_one_shot_iterator()\n images_seg, label_seg = iterator.get_next()\n\n images = {\"seg\": images_seg, \"reid\": images_reid}\n labels = {\"seg\": label_seg, \"reid\": label_reid}\n\n # labels_seg_reid = tf.zeros(shape=[batch_size, labels_nums], dtype=tf.int32)\n # labels_reid_seg = tf.zeros(shape=[batch_size, 512, 170, 1], dtype=tf.int32)\n\n # images = tf.concat([images_seg, images_reid], 0)\n # labels_seg_all = tf.concat([label_seg, labels_reid_seg], 0)\n # labels_reid_all = tf.concat([labels_seg_reid, label_reid], 0)\n # labels = {\"seg\": labels_seg_all, \"reid\": labels_reid_all}\n # batch_out= 1\n\n return images, labels", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def run_testing_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 0.0\n fetches = [self.loss, self.predictions]\n loss, probabilities = session.run(fetches, feed_dict=feed_dict)\n 
return loss, probabilities", "def train_epoch(model, data_loader, data_iter, optimizer, device,\n epoch_size=None, eval_cluster_error=True, core_reset=False,\n eval_rank=False, mc_mode=False, lip_mode=False):\n data_tic = epoch_tic = time.time()\n data_rtime, reset_rtime = 0.0, 0.0\n metrics = None\n conf_mats = ut.AverageMeter() if eval_cluster_error else None\n resets = [] if core_reset else None\n comp_err = ut.AverageMeter() if mc_mode else None\n itr, epochN = 1, 0\n epoch_stop = False\n if data_iter is None:\n data_iter = iter(data_loader)\n model.epoch_init()\n while not epoch_stop:\n try:\n data_tup = next(data_iter)\n except StopIteration:\n if epoch_size is None or epochN >= epoch_size:\n data_iter, epoch_stop = None, True\n break\n else:\n data_iter = iter(data_loader)\n data_tup = next(data_iter)\n if epochN >= epoch_size:\n epoch_stop = True\n\n if len(data_tup) == 3:\n x, groups, x0 = data_tup\n if x0 is not None:\n x0 = x0.to(device)\n else:\n x, groups = data_tup\n x0 = None\n x = x.to(device)\n batch_size = x.shape[0]\n epochN += batch_size\n data_rtime += time.time() - data_tic\n\n # opt step\n optimizer.zero_grad()\n (batch_obj_mean, batch_obj, batch_loss,\n batch_reg_in, batch_reg_out) = model.objective(x)\n\n if torch.isnan(batch_obj_mean.data):\n raise RuntimeError('Divergence! NaN objective.')\n\n batch_obj_mean.backward()\n optimizer.step()\n\n batch_metrics = [batch_obj, batch_loss, batch_reg_in, batch_reg_out]\n if metrics is None:\n metrics = [ut.AverageMeter() for _ in range(len(batch_metrics))]\n for kk in range(len(batch_metrics)):\n metrics[kk].update(batch_metrics[kk].cpu(), batch_size)\n\n # eval batch cluster confusion\n if eval_cluster_error:\n batch_conf_mats = torch.stack([\n torch.from_numpy(ut.eval_confusion(model.groups[:, ii], groups,\n model.k, true_classes=data_loader.dataset.classes))\n for ii in range(model.replicates)])\n conf_mats.update(batch_conf_mats, 1)\n\n # eval batch completion if in missing data setting\n if mc_mode and x0 is not None:\n batch_comp_err = model.eval_comp_error(x0)\n comp_err.update(batch_comp_err.cpu(), batch_size)\n\n if core_reset:\n reset_tic = time.time()\n batch_resets = model.core_reset()\n if batch_resets.shape[0] > 0:\n rIdx = np.unique(batch_resets[:, 0].astype(np.int64))\n ut.reset_optimizer_state(model, optimizer, rIdx)\n batch_resets = np.insert(batch_resets, 0, itr, axis=1)\n resets.append(batch_resets)\n reset_rtime += time.time() - reset_tic\n\n itr += 1\n data_tic = time.time()\n\n # evaluate summary metrics\n metrics = torch.stack([met.avg for met in metrics])\n conf_mats, errors, error_stats = _cluster_error_summary(eval_cluster_error,\n conf_mats, model)\n resets, reset_count, rep_reset_counts = _resets_summary(core_reset, resets,\n model)\n svs, rank_stats = _rank_summary(eval_rank, model)\n comp_err, comp_err_stats = _comp_err_summary(mc_mode, comp_err, model)\n lip, lip_stats = _lip_summary(lip_mode, model)\n\n rtime = time.time() - epoch_tic\n sampsec = epochN / rtime\n\n metrics, metrics_summary = _all_metrics_summary(metrics, errors, error_stats,\n reset_count, rep_reset_counts, rank_stats, comp_err, comp_err_stats, lip,\n lip_stats, sampsec, rtime, data_rtime, reset_rtime)\n return metrics_summary, metrics, conf_mats, resets, svs, data_iter", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def train_input_per_data(config, params, num_dataset):\n Nb = params.Nb_list[num_dataset]\n mapping = params.training_lids2cids\n if num_dataset == 1 and 
hasattr(params, 'additional_lids2cids'):\n mapping = params.additional_lids2cids\n with tf.device('/cpu:0'):\n dataset = tf.data.TFRecordDataset(params.tfrecords_list[num_dataset])\n # uncomment next line when shuffle_and_repeat becomes available\n # dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(params.Nb * 100))\n dataset = dataset.shuffle(buffer_size=params.Nb * 100)\n dataset = dataset.repeat()\n dataset = dataset.map(parse_func, num_parallel_calls=8)\n dataset = dataset.map(\n lambda image, label, paths: (paths, *prepare_data(image, label, mapping, params)))\n dataset = dataset.map(lambda paths, image, label:\n (paths, image, label, *preprocess_train(image, label, params)), num_parallel_calls=8)\n dataset = dataset.batch(Nb)\n dataset = dataset.prefetch(Nb * 2)\n iterator = dataset.make_one_shot_iterator()\n return iterator.get_next()", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def train(model, device, train_dl, loss_func, opt_func, epoch_idx):\n running_loss = 0.0\n batches_processed = 0\n for batch_idx, (x, y) in enumerate(train_dl, 1):\n x, y = x.to(device), y.to(device) # Push data to GPU\n\n opt_func.zero_grad() # Reset gradients\n # Forward pass\n output = model(x)\n loss = loss_func(output, y)\n # Backward pass\n loss.backward()\n # Optimizer step\n opt_func.step()\n\n # print statistics\n running_loss += loss.item()\n batches_processed += 1\n print(f'Train loss [Epoch {epoch_idx}]: {running_loss/batches_processed : .2f})')", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def get_input_fn_training(Xtrain_ul, Xtrain_l, Xtest, ytrain_ul, ytrain_l, ytest, batch_size, num_labeled):\n dataset = input_data.Data(Xtrain_ul,\n Xtrain_l,\n Xtest,\n ytrain_ul,\n ytrain_l,\n ytest,\n num_labeled, \n batch_size, \n shuffle=True)\n return dataset.next_batch()", "def train(self, session, train_dataset, val_dataset, train_dir):\n\n #self.saver=saver\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n # context_ids, question_ids, answer_spans, ctx_mask ,q_mask, train_context = dataset\n # train_dataset = [context_ids, question_ids, answer_spans, ctx_mask ,q_mask]\n\n # val_context_ids, val_question_ids, val_answer_spans, val_ctx_mask, val_q_mask, val_context = val_dataset\n # val_dataset = [val_context_ids, val_question_ids, val_answer_spans, val_ctx_mask, val_q_mask]\n\n \n num_epochs = self.flags.epochs\n\n # print train_dataset[0].shape,train_dataset[1].shape\n # 
print val_dataset[0].shape,val_dataset[1].shape\n\n #if self.flags.debug:\n # train_dataset = [elem[:self.flags.batch_size*1] for elem in train_dataset]\n # val_dataset = [elem[:self.flags.batch_size*1] for elem in val_dataset]\n # num_epochs = 100\n \n # print train_dataset[0].shape,train_dataset[1].shape\n # print val_dataset[0].shape,val_dataset[1].shape\n # assert False\n\n for epoch in range(num_epochs):\n logging.info(\"Epoch %d out of %d\", epoch + 1, self.flags.epochs)\n self.run_epoch(sess=session,\n train_set=train_dataset, \n val_set=val_dataset)\n logging.info(\"Saving model in %s\", train_dir)\n self.saver.save(session, train_dir+\"/\"+self.flags.run_name+\".ckpt\")", "def _setup_trainer(tmpdir):\n SAMPLE_SIZE = 8\n TRAIN_BATCH_SIZE = 4\n VAL_BATCH_SIZE = 1\n NUM_CLASSES = 3\n SCALE = (8, 16, 32)\n ASPECT_RATIO = (.5, 1., 2.)\n NUM_ANCHORS = 9\n NUM_EPOCHS = 2\n LABEL_TEMPLATE = np.array([0., 0., 0.9, 0.9, 1, 1])\n LEARNING_RATE = .1\n\n model = YOLOV2\n grid_size = (model.GRID_H, model.GRID_W)\n num_anchors = len(SCALE) * len(ASPECT_RATIO)\n eval_epochs = NUM_CLASSES\n\n image_height = model.GRID_H * model.SCALE\n image_width = model.GRID_W * model.SCALE\n\n with tf.device('/cpu:0'):\n anchor_priors = generate_anchor_priors(grid_size, SCALE, ASPECT_RATIO)\n anchor_converter = AnchorConverter(anchor_priors)\n\n train_batch = {\n 'image':\n tf.convert_to_tensor(\n np.ones([SAMPLE_SIZE, image_height, image_width, 3]),\n dtype=tf.float32),\n 'label':\n tf.convert_to_tensor(\n np.tile(\n LABEL_TEMPLATE,\n [SAMPLE_SIZE, model.GRID_H, model.GRID_W, num_anchors, 1]),\n dtype=tf.float32),\n }\n val_batch = {\n 'image':\n tf.convert_to_tensor(\n np.ones([SAMPLE_SIZE, image_height, image_width, 3]),\n dtype=tf.float32),\n 'label':\n tf.convert_to_tensor(\n np.tile(\n LABEL_TEMPLATE,\n [SAMPLE_SIZE, model.GRID_H, model.GRID_W, num_anchors, 1]),\n dtype=tf.float32),\n }\n train_dataset = tf.data.Dataset.from_tensor_slices(train_batch)\n train_dataset = train_dataset.batch(TRAIN_BATCH_SIZE)\n train_iterator = train_dataset.make_initializable_iterator()\n val_dataset = tf.data.Dataset.from_tensor_slices(val_batch)\n val_dataset = val_dataset.batch(VAL_BATCH_SIZE)\n val_iterator = val_dataset.make_initializable_iterator()\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n with tf.device('/gpu:0'):\n model_ins = model(NUM_CLASSES, num_anchors)\n\n optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)\n\n dut = Trainer(\n model_ins,\n NUM_CLASSES,\n TRAIN_BATCH_SIZE,\n VAL_BATCH_SIZE,\n train_iterator,\n val_iterator,\n anchor_converter,\n yolo_detection_loss,\n optimizer,\n global_step,\n str(tmpdir),\n num_epochs=NUM_EPOCHS,\n evaluate_epochs=eval_epochs)\n\n return dut", "def train(self, data):\n \n logger('[.] 
Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])", "def _train(args, pretrain_args):\n start_time = time.time()\n print('Training', ', '.join(args.speakers), '...')\n\n # randomly sample validation set monte_carlo_cv_num times\n for num in range(args.monte_carlo_cv_num):\n # get seed used to sub-sample validation dataset (use 42 for 1st run)\n seed = utils.get_seed(num)\n\n # get train/valid/test data and convert to sequences\n train_data, valid_data, test_data, id_to_word = data_reader.get_data(\n args, seed=seed)\n # set configurations/hyperparameters for model\n config, test_config = utils.set_config(args, id_to_word)\n\n # initialize word embeddings\n init_embed = utils.init_embedding(id_to_word, dim=args.embed_size,\n init_scale=args.init_scale,\n embed_path=args.embed_path)\n\n with tf.Graph().as_default():\n # initializer used to initialize TensorFlow variables\n initializer = tf.random_uniform_initializer(-config['init_scale'],\n config['init_scale'])\n # create Train model\n with tf.name_scope('Train'):\n with tf.variable_scope('Model', reuse=None,\n initializer=initializer):\n m_train = model.Model(args, is_training=True, config=config,\n init_embed=init_embed, name='Train')\n m_train.build_graph()\n\n # create Valid model\n with tf.name_scope('Valid'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_valid = model.Model(args, is_training=False, config=config,\n init_embed=init_embed, name='Valid')\n m_valid.build_graph()\n\n # create Test model\n with tf.name_scope('Test'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_test = model.Model(args, is_training=False, config=test_config,\n init_embed=init_embed, name='Test')\n m_test.build_graph()\n\n # create summaries to be viewed in TensorBoard\n tb_summaries = utils.TensorBoardSummaries()\n tb_summaries.create_ops()\n\n init = tf.global_variables_initializer()\n\n # if pretrained, must create dict to initialize TF Saver\n if bool(pretrain_args):\n # get trainable variables and convert to dict for Saver\n reuse_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n reuse_vars_dict = dict(\n [(var.op.name, var) for var in reuse_vars])\n # create saver for TF session (see function for addl details)\n saver = utils.create_tf_saver(args, pretrain_args,\n reuse_vars_dict)\n else:\n saver = tf.train.Saver()\n\n # ppls dict has perplexities that are stored in results database\n ppls = {}\n ppls, _ = _update_ppls(ppls, initialize=True)\n\n with tf.Session() as sess:\n sess.run(init)\n\n if args.load_path != '':\n print('Restoring model...')\n saver.restore(sess, args.load_path)\n\n for epoch in range(config['max_epoch']):\n print('Epoch: {0} Learning rate: {1:.3f}\\n'.format(\n epoch + 1, sess.run(m_train.lr)))\n for i, speaker in enumerate(args.speakers):\n print('Training {0} ...'.format(speaker))\n\n # run epoch on training data\n train_perplexity = _run_epoch(sess, m_train, args, train_data,\n i, tb_summaries, id_to_word,\n train_op=m_train.train_op,\n verbose=True)\n print('Epoch: {0} Train Perplexity: {1:.3f}'.format(\n epoch + 1, train_perplexity))\n ppls, _ = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=train_perplexity,\n dataset='train')\n\n print('Validating...')\n # run epoch on validation data\n valid_perplexity = _run_epoch(sess, m_valid, args,\n valid_data, i, tb_summaries,\n id_to_word, verbose=True)\n print('Epoch: {0} Valid Perplexity: 
{1:.3f}'.format(\n epoch + 1, valid_perplexity))\n ppls, improved = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=valid_perplexity,\n dataset='valid')\n\n if improved:\n # save model if valid ppl is lower than current\n # best valid ppl\n if args.save_path != '':\n print('Saving model to {0}.'.format(\n args.save_path))\n saver.save(sess, args.save_path)\n\n for i, speaker in enumerate(args.speakers):\n print('Testing {0} ...'.format(speaker))\n print('Restoring best model for testing...')\n saver.restore(sess, args.save_path)\n # run model on test data\n test_perplexity = _run_epoch(sess, m_test, args, test_data, i)\n ppls['test_ppl_' + speaker] = test_perplexity\n print('Test Perplexity: {0:.3f}'.format(test_perplexity))\n\n if args.insert_db == 'True':\n # write params/config/results to sql database\n results_db.insert_results(args, config, start_time, ppls)", "def test_training():\n # delete old database if it exists\n conn = pm.MongoClient(host=testhost,\n port=testport)\n\n # set up the parameters\n params = {}\n params['model_params'] = {'func': model.mnist_tfutils}\n params['save_params'] = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0',\n 'save_valid_freq': 20,\n 'save_filters_freq': 200,\n 'cache_filters_freq': 100,\n }\n params['train_params'] = {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'train',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 500\n }\n params['learning_rate_params'] = {'learning_rate': 0.05,\n 'decay_steps': num_batches_per_epoch,\n 'decay_rate': 0.95,\n 'staircase': True}\n params['validation_params'] = {'valid0': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 10,\n 'agg_func': utils.mean_dict}}\n params['skip_check'] = True\n\n # actually run the training\n base.train_from_params(**params)\n\n DEBUG = OrderedDict()\n\n # test if results are as expected\n\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0'}).count() == 26\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'saved_filters': True}).distinct('step') == [0, 200, 400]\n\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'step': 0})[0]\n asserts_for_record(r, params, train=True)\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'step': 20})[0]\n asserts_for_record(r, params, train=True)\n\n # run another 500 steps of training on the same experiment id.\n params['train_params']['num_steps'] = 1000\n base.train_from_params(**params)\n # test if results are as expected\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0'}).count() == 51\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0',\n 'saved_filters': True}).distinct('step') == [0, 200, 400, 600, 800, 1000]\n assert conn['tfutils-test']['testcol.files'].distinct('exp_id') == ['training0']\n\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'step': 1000})[0]\n\n asserts_for_record(r, params, train=True)\n\n # run 500 more steps but save to a new experiment id.\n params['train_params']['num_steps'] = 1500\n params['load_params'] = {'exp_id': 'training0'}\n params['save_params']['exp_id'] = 'training1'\n\n base.train_from_params(**params)\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training1',\n 
'saved_filters': True}).distinct('step') == [1200, 1400]", "def train(self, train_data, train_labels, batch_size=50, num_epochs=5):\n raise NotImplementedError", "def train():\n with tf.Graph().as_default():\n global_step = tf.contrib.framework.get_or_create_global_step()\n\n # Get images and labels\n # here must also pass do_enqueues op to add to session\n images, labels, rsq, enqueueOP = buildModel.distorted_inputs()\n\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n logits = buildModel.inference(images)\n\n # Calculate loss.\n loss = buildModel.loss(logits, labels)\n\n # Build a Graph that trains the model with one batch of examples and\n # updates the model parameters.\n train_op, qr = buildModel.train(loss, global_step, rsq, enqueueOP)\n\n class _LoggerHook(tf.train.SessionRunHook):\n \"\"\"Logs loss and runtime.\"\"\"\n\n def begin(self):\n self._step = -1\n\n def before_run(self, run_context):\n self._step += 1\n self._start_time = time.time()\n return tf.train.SessionRunArgs(loss) # Asks for loss value.\n\n def after_run(self, run_context, run_values):\n duration = time.time() - self._start_time\n loss_value = run_values.results\n if self._step % 2 == 0:\n num_examples_per_step = FLAGS.batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print (format_str % (datetime.now(), self._step, loss_value,\n examples_per_sec, sec_per_batch))\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=FLAGS.train_dir,\n hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),\n tf.train.NanTensorHook(loss),\n _LoggerHook()],\n config=tf.ConfigProto(\n log_device_placement=FLAGS.log_device_placement)) as mon_sess:\n\n coord = tf.train.Coordinator()\n enqueue_threads = qr.create_threads(mon_sess, coord=coord, start=True)\n\n while not mon_sess.should_stop():\n mon_sess.run(train_op)\n\n coord.request_stop()\n coord.join(enqueue_threads)", "def train_epoch(model, train_dataloader, optimizer, loss_fn):\n model.train()\n total_training_loss = 0\n for batch_index, batch in enumerate(train_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n loss = loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_training_loss += loss", "def batch_fit(self, train_loader: torch.utils.data.DataLoader,\n test_loader: torch.utils.data.DataLoader,\n train_size: int, test_size: int, epochs: int = 1,\n calc_mapk: bool = True):\n\n for epoch in range(epochs):\n stats = {'epoch': epoch+1}\n\n print('Training begins...')\n train_loss = self._training(train_loader, train_size)\n stats['train_loss'] = train_loss\n\n print('Validation begins...')\n if calc_mapk:\n print('validation with mapk')\n val_loss, val_mapk = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_mapk'] = val_mapk\n else:\n print('validation without mapk')\n val_loss = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_loss'] = val_loss\n print(stats)\n\n self.metrics.append(stats)", "def train(model, infer_train, infer_val, load_checkpoint=None):\n\n global checkpoint_name\n print('Initialising {}'.format(cfg['experiment_name']))\n checkpoint_folder = 'checkpoints/{}/'.format(cfg['experiment_name'])\n\n if not os.path.exists(checkpoint_folder):\n os.makedirs(checkpoint_folder)\n\n tb_folder = 
'tb/{}/'.format(cfg['experiment_name'])\n if not os.path.exists(tb_folder):\n os.makedirs(tb_folder)\n\n writer = SummaryWriter(logdir=tb_folder, flush_secs=30)\n optimiser = Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=cfg['weight_decay'])\n\n train_dataset = TweetDataset(dataset_type='train')\n train_loader = DataLoader(train_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=True, pin_memory=True)\n\n val_dataset = TweetDataset(dataset_type='val')\n val_loader = DataLoader(val_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=False, pin_memory=True)\n\n if load_checkpoint:\n checkpoint = torch.load(load_checkpoint)\n assert model.config == checkpoint['net_config'], \\\n \"The provided checkpoint has a different configuration, loading is impossible\"\n start_epoch = checkpoint['epoch'] + 1\n epochs = cfg['epochs'] + start_epoch\n step = checkpoint['step']\n model.load_state_dict(checkpoint['model'])\n optimiser.load_state_dict(checkpoint['optimiser'])\n print(\"Loaded the checkpoint at {}\".format(load_checkpoint))\n else:\n start_epoch, step = 0, 0\n epochs = cfg['epochs']\n\n init_loss = 0.\n avg_loss = AverageMeter()\n best_mae = 1e10\n\n print('Sanity val')\n val(model, val_loader, writer, 0, infer_val)\n model.train()\n\n print('Starting training')\n for epoch in range(start_epoch, epochs):\n loader_length = len(train_loader)\n epoch_start = time.time()\n\n for batch_idx, batch in enumerate(train_loader):\n optimiser.zero_grad()\n\n loss = infer_train(model, batch)\n loss.backward()\n\n if epoch == 0 and batch_idx == 0:\n init_loss = loss\n\n # logging\n elapsed = time.time() - epoch_start\n progress = batch_idx / loader_length\n est = datetime.timedelta(seconds=int(elapsed / progress)) if progress > 0.001 else '-'\n avg_loss.update(loss)\n suffix = '\\tloss {:.4f}/{:.4f}\\tETA [{}/{}]'.format(avg_loss.avg, init_loss,\n datetime.timedelta(seconds=int(elapsed)), est)\n printProgressBar(batch_idx, loader_length, suffix=suffix,\n prefix='Epoch [{}/{}]\\tStep [{}/{}]'.format(epoch, epochs - 1, batch_idx, loader_length))\n\n writer.add_scalar('Steps/train_loss', loss, step)\n\n # saving the model\n if step % cfg['checkpoint_every'] == 0:\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n step += 1\n optimiser.step()\n\n # validating\n if step % cfg['val_every'] == 0:\n mae = val(model, val_loader, writer, step, infer_val)\n if mae < best_mae:\n best_mae = mae\n print('Best model with V{:.2f}'.format(best_mae))\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n '{}/best.pth'.format(checkpoint_folder))\n model.train()\n\n # end of epoch\n print('')\n writer.add_scalar('Epochs/train_loss', avg_loss.avg, epoch)\n avg_loss.reset()\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': loader_length, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n 
checkpoint_name)\n\n # finished training\n writer.close()\n print('Training finished :)')", "def test_num_training_batches(tmpdir):\n # when we have fewer batches in the dataloader we should use those instead of the limit\n model = EvalModelTemplate()\n trainer = Trainer(limit_val_batches=100, limit_train_batches=100, max_epochs=1)\n trainer.fit(model)\n\n assert len(model.train_dataloader()) == 10\n assert len(model.val_dataloader()) == 10\n assert isinstance(trainer.num_val_batches, list)\n assert trainer.num_val_batches[0] == 10\n assert trainer.num_training_batches == 10\n\n # when we have more batches in the dataloader we should limit them\n model = EvalModelTemplate()\n trainer = Trainer(limit_val_batches=7, limit_train_batches=7, max_epochs=1)\n trainer.fit(model)\n\n assert len(model.train_dataloader()) == 10\n assert len(model.val_dataloader()) == 10\n assert isinstance(trainer.num_val_batches, list)\n assert trainer.num_val_batches[0] == 7\n assert trainer.num_training_batches == 7", "def train(config: Config, device: torch.device, resume: Optional[ResumeInfo]) -> None:\n # Load datasets\n print(colored(\"loading training datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory()\n datasets, preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n # Create model runner\n print(colored(\"model:\", attrs=[\"bold\"]))\n runner_factory = RunnerFactory()\n runner = runner_factory.create(config, device, preprocessors, datasets, resume)\n print(f\"{runner.model=}\")\n print(f\"{runner.criterion=}\")\n print(f\"{runner.optimiser=}\")\n\n print(colored(\"training:\", attrs=[\"bold\"]))\n runner.train()", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def train(model, epochs, num_batches=0, batch_size=50, batch_callback=None, use_cuda=False):\n\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False,\n train_batch_size=batch_size)\n\n optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum'])\n\n for epoch in range(1, epochs + 1):\n model.train()\n for batch_idx, (data, target) in enumerate(data_loader.train_loader):\n data, target = data.to(data_loader.device), target.to(data_loader.device)\n optimizer.zero_grad()\n output = model(data)\n loss = functional.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n if num_batches != 0:\n if batch_idx == num_batches:\n return\n\n if batch_callback is not None:\n batch_callback(model, batch_idx)\n\n if batch_idx % args['log_interval'] == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(epoch, batch_idx * len(data),\n len(data_loader.train_loader.dataset),\n 100. 
* batch_idx / len(data_loader.train_loader),\n loss.item()))", "def test_batch(self):\n pass", "def train_and_test(self, data):\n\n np.random.shuffle(data)\n datalist = self.unpack_data(data)\n\n logger.info('[*] 75-25 partition of datasets ...')\n\n markline1 = math.floor(0.75*(len(datalist['features'])))\n markline2 = math.floor(0.75*len(datalist['labels']))\n\n train_features = datalist['features'][:(markline1)]\n test_features = datalist['features'][(markline1):]\n \n train_labels = datalist['labels'][:(markline2)]\n test_labels = datalist['labels'][(markline2):]\n\n logger.info('[*] Training started with 75% Dataset ...')\n\n self.knn_model.fit(train_features, train_labels)\n\n logger.info('[*] Testing started with 25% Dataset ...')\n print('\\n/---------------Accuracy----------------/') \n \n accuracy = self.knn_model.score(train_features, train_labels)\n print('Test set accuracy {:.2f} %'.format(accuracy*100))\n\n if accuracy < 0.40:\n logger.warning('[-.-!] Thanks for tryin\\' but this machine ain\\'t learning.')\n\n return True", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train(self, bytes_gen: Iterator[bytes] = None, **kwargs):\n self._call_client(bytes_gen, mode='train', **kwargs)", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = AverageMeter()\n epoch_time = Timer()\n \n for batch_idx, (input_idxs, target_idxs, input_tokens, target_tokens) in enumerate(data_loader):\n # input_idxs and target_idxs have dim (batch_size x max_len)\n # they are NOT sorted by length\n\n lengths = (input_idxs != 0).long().sum(dim=1)\n sorted_lengths, order = torch.sort(lengths, descending=True)\n\n input_variable = Variable(input_idxs[order, :][:, :max(lengths)])\n target_variable = Variable(target_idxs[order, :])\n \n model.optimizer.zero_grad()\n output_log_probs, output_ses = model(input_variable,\n list(sorted_lengths),\n targets=target_variable)\n \n batch_size = input_variable.shape[0]\n flattened_outputs = output_log_probs.view(batch_size * model.max_length, -1)\n \n batch_loss = model.citerion(flattened_outputs, target_variable.contiguous().view(-1))\n batch_loss.backward()\n model.optimizer.step()\n \n 
model.updates += 1\n \n train_loss.update(batch_loss[0], batch_size)\n \n if batch_idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], batch_idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n \n logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n \n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "def run_training_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 1.0\n fetches = [self.loss, self.train_op]\n\n # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n \n loss, _ = session.run(fetches, feed_dict=feed_dict)\n # loss, _ = session.run(fetches,\n # feed_dict=feed_dict,\n # options=options,\n # run_metadata=run_metadata)\n \n # fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n # chrome_trace = fetched_timeline.generate_chrome_trace_format()\n # with open('timeline.json', 'w') as f:\n # f.write(chrome_trace)\n \n return loss", "def train(self, train_ids_file):\n\t\t# TODO(student): Feel free to remove if you do not use.\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\tfile_pairs = tf.data.Dataset.zip(get_filename_data_readers(train_ids_file, True))\n\t\t#training_dataset = file_pairs.shuffle(buffer_size=2500).map(read_image_pair_with_padding).batch(self.batch_size)\n\t\ttraining_dataset = file_pairs.map(read_image_pair_with_padding).batch(self.batch_size)\n\t\ttraining_dataset_X = training_dataset.map(lambda a, b: a)\n\t\ttraining_dataset_Y = training_dataset.map(lambda a, b: b)\n\n\t\ttraining_iterator_X = training_dataset_X.make_initializable_iterator()\n\t\ttraining_iterator_Y = training_dataset_Y.make_initializable_iterator()\n\n\t\tfor i in range(5):\n\t\t\tself.sess.run(training_iterator_X.initializer)\n\t\t\tself.sess.run(training_iterator_Y.initializer)\n\t\t\ttraining_handle_X = self.sess.run(training_iterator_X.string_handle())\n\t\t\ttraining_handle_Y = self.sess.run(training_iterator_Y.string_handle())\n\t\t\tj = 0\n\t\t\tloss_ary = []\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\t[train_loss, train_step, train_pred] = self.sess.run(\n\t\t\t\t\t\t[self.loss, self.train_op, self.pred],\n\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\tself.is_training: True,\n\t\t\t\t\t\t\t\tself.handle_X: training_handle_X,\n\t\t\t\t\t\t\t\tself.handle_Y: training_handle_Y,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t\t'''\n\t\t\t\t\tif j == 0:\n\t\t\t\t\t\tplt.imshow(train_pred[0])\n\t\t\t\t\t\tplt.colorbar()\n\t\t\t\t\t\tplt.show()\n\t\t\t\t\t\tpdb.set_trace()\n\t\t\t\t\t'''\n\t\t\t\t\tloss_ary.append(train_loss)\n\t\t\t\t\tj += 1\n\t\t\t\t\tprint('Epoch', i, 'Batch', j, train_loss)\n\t\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\t\tbreak\n\t\t\tself.sess.run(self.lr_decay_op)\n\t\t\tprint('Apply lr decay, new lr: %f' % self.sess.run(self.lr))\n\t\t\tprint(f'Epoch: {i}, Avg Loss: {np.mean(loss_ary)}')\n\t\t\tself.save(\"model_file_no_{}.pickle\".format(i))\n\n\n\t\tprint('Done training')", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 
5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def train(self, ckpt=None, verbose=True):\n\t\t\n\t\tsess = self.sess\n\t\tdatasource = self.datasource\n\n\t\tif FLAGS.resume:\n\t\t\tif ckpt is None:\n\t\t\t\tckpt = tf.train.latest_checkpoint(FLAGS.logdir)\n\t\t\tself.saver.restore(sess, ckpt)\n\t\tsess.run(self.init_op)\n\n\t\tt0 = time.time()\n\t\ttrain_dataset = datasource.get_dataset('train')\n\t\ttrain_dataset = train_dataset.batch(FLAGS.batch_size)\n\t\ttrain_dataset = train_dataset.shuffle(buffer_size=10000)\n\t\ttrain_iterator = train_dataset.make_initializable_iterator()\n\t\tnext_train_batch = train_iterator.get_next()\n\n\t\tvalid_dataset = datasource.get_dataset('valid')\n\t\tvalid_dataset = valid_dataset.batch(FLAGS.batch_size)\n\t\tvalid_iterator = valid_dataset.make_initializable_iterator()\n\t\tnext_valid_batch = valid_iterator.get_next()\n\n\t\tself.train_writer = tf.summary.FileWriter(FLAGS.outdir + '/train', graph=tf.get_default_graph())\n\t\tself.valid_writer = tf.summary.FileWriter(FLAGS.outdir + '/valid', graph=tf.get_default_graph())\n\n\t\tepoch_train_losses = []\n\t\tepoch_valid_losses = []\n\t\tepoch_save_paths = []\n\n\t\tfor epoch in range(FLAGS.n_epochs):\n\t\t\tsess.run(train_iterator.initializer)\n\t\t\tsess.run(valid_iterator.initializer)\n\t\t\tepoch_train_loss = 0.\n\t\t\tnum_batches = 0.\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tself.training = True\n\t\t\t\t\tif (not self.is_binary) and (self.datasource.target_dataset != 'celebA'):\n\t\t\t\t\t\tx = sess.run(next_train_batch)[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\t# no labels available for binarized MNIST\n\t\t\t\t\t\tx = sess.run(next_train_batch)\n\t\t\t\t\tif self.noisy_mnist:\n\t\t\t\t\t\t# print('training with noisy MNIST...')\n\t\t\t\t\t\tfeed_dict = {self.x: (x + np.random.normal(0, 0.5, x.shape)), self.true_x: x}\n\t\t\t\t\telse:\n\t\t\t\t\t\tfeed_dict = {self.x: x}\n\n\t\t\t\t\t# REINFORCE-style training with VIMCO or vanilla gradient update\n\t\t\t\t\tif not self.discrete_relax:\n\t\t\t\t\t\tsess.run([self.discrete_train_op1, self.discrete_train_op2], feed_dict)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# this works for both gumbel-softmax\n\t\t\t\t\t\tsess.run(self.train_op, feed_dict)\n\n\t\t\t\t\tbatch_loss, train_summary, gs = sess.run([\n\t\t\t\t\t\tself.reconstr_loss, self.summary_op, self.global_step], feed_dict)\n\t\t\t\t\tepoch_train_loss += batch_loss\n\n\t\t\t\t\t# self.train_writer.add_summary(train_summary, gs)\n\t\t\t\t\tnum_batches += 1\n\n\t\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\t\tbreak\n\t\t\t# end of training epoch; adjust temperature here if using Gumbel-Softmax\n\t\t\t# if self.discrete_relax:\n\t\t\t# \tif (counter % 1000 == 0) and (counter > 0):\n\t\t\t# \t\tself.adj_temp = np.maximum(self.tau * np.exp(-self.anneal_rate * counter), self.min_temp)\n\t\t\t# \t\tprint('adjusted temperature to: 
{}'.format(self.adj_temp))\n\t\t\t# enter validation phase\n\t\t\tif verbose:\n\t\t\t\tepoch_train_loss /= num_batches\n\t\t\t\tself.training = False\n\t\t\t\tif (not self.is_binary) and (self.datasource.target_dataset != 'celebA'):\n\t\t\t\t\tx = sess.run(next_valid_batch)[0]\n\t\t\t\telse:\n\t\t\t\t\t# no labels available for binarized MNIST and celebA\n\t\t\t\t\tx = sess.run(next_valid_batch)\n\t\t\t\tif self.noisy_mnist:\n\t\t\t\t\t# print('training with noisy MNIST...')\n\t\t\t\t\tfeed_dict = {self.x: (x + np.random.normal(0, 0.5, x.shape)), self.true_x: x}\n\t\t\t\telse:\n\t\t\t\t\tfeed_dict = {self.x: x}\n\n\t\t\t\t# save run stats\n\t\t\t\tepoch_valid_loss, valid_summary, gs = sess.run([self.test_loss, self.summary_op, self.global_step], feed_dict=feed_dict)\n\t\t\t\tif epoch_train_loss < 0: # note: this only applies to non-binary data since it's L2 loss\n\t\t\t\t\tprint('Epoch {}, (no sqrt) l2 train loss: {:0.6f}, l2 valid loss: {:0.6f}, time: {}s'. \\\n\t\t\t\tformat(epoch+1, epoch_train_loss, np.sqrt(epoch_valid_loss), int(time.time()-t0)))\n\t\t\t\telse:\n\t\t\t\t\tprint('Epoch {}, l2 train loss: {:0.6f}, l2 valid loss: {:0.6f}, time: {}s'. \\\n\t\t\t\t\t\t\tformat(epoch+1, np.sqrt(epoch_train_loss), np.sqrt(epoch_valid_loss), int(time.time()-t0)))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tsave_path = self.saver.save(sess, os.path.join(FLAGS.logdir, 'model.ckpt'), global_step=gs)\n\t\t\t\tepoch_train_losses.append(epoch_train_loss)\n\t\t\t\tepoch_valid_losses.append(epoch_valid_loss)\n\t\t\t\tepoch_save_paths.append(save_path)\n\t\tbest_ckpt = None\n\t\tif verbose:\n\t\t\tmin_idx = epoch_valid_losses.index(min(epoch_valid_losses))\n\t\t\tprint('Restoring ckpt at epoch', min_idx+1,'with lowest validation error:', epoch_save_paths[min_idx])\n\t\t\tbest_ckpt = epoch_save_paths[min_idx]\n\t\treturn (epoch_train_losses, epoch_valid_losses), best_ckpt", "def _train_input_fn():\n\t\tprint('\\n\\nRunning _train_input_fn\\n\\n')\n\t\tfeatures_placeholder = {\n\t\t\t#'query_unigrams' : tf.placeholder(tf.string, (len(features['query_unigrams']), features['query_unigrams'][0].shape[0])),\n\t\t\t#'doc_unigrams' : tf.placeholder(tf.string, (len(features['doc_unigrams']), len(features['doc_unigrams'][0]), features['doc_unigrams'][0][0].shape[0]))\n\t\t\t#'query_unigrams' : tf.placeholder(tf.string, (len(features['query_unigrams']), len(features['query_unigrams'][0]))),\n\t\t\t#'doc_unigrams' : tf.placeholder(tf.string, (len(features['doc_unigrams']), len(features['doc_unigrams'][0]), len(features['doc_unigrams'][0][0])))\n\t\t\t#k: tf.placeholder(tf.string, get_shape(v)) for k, v in six.iteritems(features)\n\t\t\tk: tf.placeholder(v.dtype, v.shape) for k, v in six.iteritems(features)\n\t\t\t#k: tf.sparse.placeholder(tf.string, [len(v)] + v[0].shape.as_list(), name=k) for k, v in six.iteritems(features)\n\t\t\t#k: tf.placeholder(tf.string, (len(v), len(v[0]), len(v[0][0]))) for k, v in six.iteritems(features)\n\t\t}\n\t\tlabels_placeholder = tf.placeholder(labels.dtype, labels.shape, name='label')\n\t\tdataset = tf.data.Dataset.from_tensor_slices((features_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\tlabels_placeholder))\n\t\t#dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n\t\tdataset = dataset.shuffle(batch_size*10).repeat().batch(batch_size)\n\t\titerator = dataset.make_initializable_iterator()\n\t\tfeed_dict = {labels_placeholder: labels}\n\t\tfeed_dict.update(\n\t\t\t\t{features_placeholder[k]: features[k] for k in features_placeholder})\n\n\t\tprint('feed_dict')\n\t\tfor k,v in 
six.iteritems(feed_dict):\n\t\t\tprint(k.shape)\n\t\t#\tprint(v.shape)\n\t\trun_options = tf.RunOptions(report_tensor_allocations_upon_oom = True)\n\t\titerator_initializer_hook.iterator_initializer_fn = (\n\t\t\t\tlambda sess: sess.run(iterator.initializer, feed_dict=feed_dict, options=run_options))\n\t\treturn iterator.get_next()", "def train_on_device(data_dir, dataset_id, model_path, ckpt_path, weight_updates_path):\r\n\r\n # Store pre-trained model and weights\r\n old_model = load_model(model_path)\r\n old_model.load_weights(ckpt_path)\r\n\r\n # Initialize model and checkpoint, which are obtained from server\r\n device_model = load_model(model_path)\r\n device_model.load_weights(ckpt_path)\r\n\r\n # print(device_model.summary())\r\n\r\n # load the vocabulary\r\n vocab_filename = os.path.join(data_dir, \"vocab.txt\")\r\n vocab = load_doc(vocab_filename)\r\n vocab = vocab.split()\r\n vocab = set(vocab)\r\n\r\n # create the tokenizer\r\n tokenizer = Tokenizer()\r\n\r\n # loading tokenizer from file\r\n tokenizer_filename = os.path.join(data_dir, \"tokenizer.pickle\")\r\n with open(tokenizer_filename, \"rb\") as handle:\r\n tokenizer = pickle.load(handle)\r\n\r\n # Get training data present on device\r\n X_train, y_train = get_data(data_dir, dataset_id, vocab, tokenizer, is_train=True)\r\n\r\n X_test, y_test = get_data(data_dir, dataset_id, vocab, tokenizer, is_train=False)\r\n\r\n scores = device_model.evaluate(X_test, y_test, verbose=0)\r\n print(\"Test Accuracy(before training): %.2f%%\" % (scores[1] * 100))\r\n\r\n # Train model\r\n device_model.fit(\r\n X_train, y_train, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE,\r\n )\r\n\r\n scores = device_model.evaluate(X_test, y_test, verbose=0)\r\n print(\"Test Accuracy(after training): %.2f%%\" % (scores[1] * 100))\r\n\r\n # Load model to store weight updates\r\n weight_updates = load_model(model_path)\r\n\r\n # Number of batches trained on device\r\n num_batches = X_train.shape[0] // BATCH_SIZE\r\n\r\n # Calculate weight updates\r\n for i in range(len(device_model.layers)):\r\n\r\n # Pre-trained weights\r\n old_layer_weights = old_model.layers[i].get_weights()\r\n\r\n # Post-trained weights\r\n new_layer_weights = device_model.layers[i].get_weights()\r\n\r\n # Weight updates calculation\r\n weight_updates.layers[i].set_weights(\r\n num_batches\r\n * (np.asarray(new_layer_weights) - np.asarray(old_layer_weights)),\r\n )\r\n\r\n # print(\"old weights: \", old_layer_weights)\r\n # print(\"new weights: \", new_layer_weights)\r\n # print(\"weight updates: \", weight_updates.layers[i].get_weights())\r\n\r\n # Save weight updates\r\n weight_updates.save_weights(weight_updates_path)\r\n\r\n return (num_batches, weight_updates_path)", "def train(self, training_steps=10):", "def _local_train(self, dataloader_with_memory, num_updates):\n # Local train\n _size = len(dataloader_with_memory)\n self.model = self.model.train()\n for _batch in range(num_updates):\n X, y = dataloader_with_memory.get_samples()\n X, y = X.to(self._device), y.to(self._device)\n if _batch == 0:\n # Initialize the batch-size using the first batch to avoid\n # edge cases with drop_last=False\n _batch_size = X.shape[0]\n _num_batches_per_epoch = (_size // _batch_size) + int(\n (_size % _batch_size) != 0\n )\n # Compute prediction and loss\n _pred = self.model(X)\n _loss = self._loss(_pred, y)\n\n # Backpropagation\n _loss.backward()\n self._optimizer.step()\n self._optimizer.zero_grad()\n self.num_batches_seen += 1\n _loss, _current_epoch = (\n _loss.item(),\n self.num_batches_seen // 
_num_batches_per_epoch,\n )\n\n if self.log:\n if _batch % self.log_period == 0:\n print(\n f\"loss: {_loss:>7f} after {self.num_batches_seen:>5d}\"\n f\" batches of data amounting to {_current_epoch:>5d}\"\n \" epochs.\"\n )\n self.writer.add_scalar(\n f\"client{self.client_id}/train/Loss\",\n _loss,\n self.num_batches_seen,\n )\n\n if _current_epoch > self.current_epoch:\n # At each epoch we look at the histograms of all the\n # network's parameters\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(\n f\"client{self.client_id}/{name}\", p, _current_epoch\n )\n\n self.current_epoch = _current_epoch", "def train(self, dataset):\n train_loss = []\n test_images = np.array([get_image(dataset[i], OUT_SIZE, CHANNELS)\n for i in range(64)]).astype(np.float32)\n\n start = time.time()\n print(\"[+] Starting Training\")\n\n for iters in range(EPOCHS):\n history = self.vae.fit_generator(\n prepare_epoch(dataset), steps_per_epoch=PER_EPOCH, epochs=1)\n\n loss = history.history['loss'][-1]\n train_loss.append(loss)\n print(\"Epoch {} Loss: {}\\n[+] Time since start: {}\".format(\n iters, str(loss), time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start))))\n plotScores(train_loss, [], \"EncoderScores.png\")\n\n # Every X iterations, save & test the model\n if iters % 1 == 0:\n self.vae.save_weights(WEIGHTS)\n\n # Example tests\n faces = self.vae.predict(test_images)\n imsave(faces, [8, 8], \"./tests/test{}.png\".format(iters))\n # make_gif()\n print(\"[+] Saved\")", "def train(model, train_loader, epochs, optimizer, loss_fn, device):\n \n # TODO: Paste the train() method developed in the notebook here.\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n batch_X = batch_X.unsqueeze(-1)\n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n model.zero_grad()\n output=model.forward(batch_X)\n loss=loss_fn(output.squeeze(),batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n if epoch%10 == 0:\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))", "def train_epoch(data_loader, model, optimizer, criterion, device, fold, epoch):\n\tmodel.train()\n\tfor inputs, input_lens, labels in tqdm.tqdm(data_loader, ncols=100, desc=f\"train-- F: {fold} -- E: {epoch}\"):\n\t\tinputs = inputs.to(device)\n\t\tlabels = labels.to(device)\n\t\t#input_lens = input_lens.to(device)\n\n\t\toptimizer.zero_grad()\n\t\tpreds = model(inputs, input_lens)\n\t\t\n\t\tloss = criterion(preds, labels.unsqueeze(1))\n\t\tloss.backward()\n\t\toptimizer.step()", "def train(args, model, train_data_loader, dev_data_loader, device):\n\n\tmodel.train()\n\toptimizer = torch.optim.Adam(model.parameters())\n\tprint_loss_total = 0\n\tepoch_loss_total = 0\n\tstart = time.time()\n\n\t#### modify the following code to complete the training funtion\n\n\tbest_train_acc, best_dev_acc = 0.0, 0.0\n\n\tfor idx, batch in enumerate(train_data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t#### Your code here ----\n\n\t\t# zero out\n\t\toptimizer.zero_grad()\n\n\t\t# get output from model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# use loss_fn defined above to calculate loss\n\t\tloss = loss_fn(logits, labels)\n\n\t\t# use accuracy_fn defined above to calculate 'error' and number of 
examples ('num_examples') used to\n\t\t# calculate accuracy below.\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# backprop\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\t###Your code ends ---\n\t\taccuracy = 1 - error / num_examples\n\t\tclip_grad_norm_(model.parameters(), 5)\n\t\tprint_loss_total += loss.data.numpy()\n\t\tepoch_loss_total += loss.data.numpy()\n\n\t\tif (idx + 1) % args.checkpoint == 0 and idx > 0:\n\t\t\tprint_loss_avg = print_loss_total / args.checkpoint\n\n\t\t\tdev_acc = evaluate(dev_data_loader, model, device)\n\n\t\t\tprint('number of steps: %d, train loss: %.5f, train acc: %.3f, dev acc: %.3f, time: %.5f'\n\t\t\t % (idx + 1, print_loss_avg, accuracy, dev_acc, time.time() - start))\n\t\t\tprint_loss_total = 0\n\t\t\tif accuracy > best_train_acc:\n\t\t\t\tbest_train_acc = accuracy\n\t\t\tif dev_acc > best_dev_acc:\n\t\t\t\tbest_dev_acc = dev_acc\n\n\treturn best_train_acc, best_dev_acc", "def train(model, train_dataloader, val_dataloader=None, epochs=4, evaluation=False):\n # Start training loop\n print(\"Start training...\\n\")\n for epoch_i in range(epochs):\n # =======================================\n # Training\n # =======================================\n # Print the header of the result table\n print(f\"{'Epoch':^7} | {'Batch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Elapsed':^9}\")\n print(\"-\"*70)\n\n # Measure the elapsed time of each epoch\n t0_epoch, t0_batch = time.time(), time.time()\n\n # Reset tracking variables at the beginning of each epoch\n total_loss, batch_loss, batch_counts = 0, 0, 0\n\n # Put the model into the training mode\n model.train()\n\n # For each batch of training data...\n for step, batch in enumerate(train_dataloader):\n batch_counts +=1\n # Load batch to GPU\n b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)\n \n # Always clear any previously calculated gradients before performing a\n # backward pass. PyTorch doesn't do this automatically because \n # accumulating the gradients is \"convenient while training RN\n # Zero out any previously calculated gradients\n model.zero_grad()\n\n # Perform a forward pass. This will return logits.\n logits = model(b_input_ids, b_attn_mask)\n \n # Accumulate the training loss over all of the batches so that we can\n # calculate the average loss at the end. 
`loss` is a Tensor containing a\n # single value; the `.item()` function just returns the Python value \n # from the tensor.\n\n # Compute loss and accumulate the loss values\n loss = loss_fn(logits, b_labels.long())\n batch_loss += loss.item()\n total_loss += loss.item()\n\n # Perform a backward pass to calculate gradients\n loss.backward()\n\n # Clip the norm of the gradients to 1.0 to prevent \"exploding gradients\"\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # Update parameters and the learning rate\n optimizer.step()\n scheduler.step()\n\n # Print the loss values and time elapsed for every 20 batches\n if (step % 20 == 0 and step != 0) or (step == len(train_dataloader) - 1):\n # Calculate time elapsed for 20 batches\n time_elapsed = time.time() - t0_batch\n\n # Print training results\n print(f\"{epoch_i + 1:^7} | {step:^7} | {batch_loss / batch_counts:^12.6f} | {'-':^10} | {'-':^9} | {time_elapsed:^9.2f}\")\n\n # Reset batch tracking variables\n batch_loss, batch_counts = 0, 0\n t0_batch = time.time()\n\n # Calculate the average loss over the entire training data\n avg_train_loss = total_loss / len(train_dataloader)\n \n\n print(\"-\"*70)\n # =======================================\n # Evaluation\n # =======================================\n if evaluation == True:\n # After the completion of each training epoch, measure the model's performance\n # on our validation set.\n val_loss, val_accuracy = evaluate(model, val_dataloader)\n\n # Print performance over the entire training data\n time_elapsed = time.time() - t0_epoch\n \n print(f\"{epoch_i + 1:^7} | {'-':^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {time_elapsed:^9.2f}\")\n print(\"-\"*70)\n print(\"\\n\")\n \n print(\"Training complete!\")", "def train_model(model, ds_train, ds_valid, epochs):\n print(\"-- Train Model:\")\n for epoch in tf.range(1, epochs + 1):\n\n for features, labels in ds_train:\n train_step(model, features, labels)\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n\n logs = \"Epoch={}, Loss: {}, Accuracy: {},\\nValid Loss: {}, Valid Accuracy: {}\"\n if epoch % 1 == 0:\n printbar()\n tf.print(\n tf.strings.format(\n logs,\n (\n epoch,\n train_loss.result(),\n train_metric.result(),\n valid_loss.result(),\n valid_metric.result(),\n ),\n )\n )\n\n train_loss.reset_states()\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)", "def test_batch(batch_size):\n X, Y = generatedata(isTrain=False, batch_size=batch_size)\n feed_dict = {enc_inp[t]: X[t] for t in range(len(enc_inp))}\n feed_dict.update({expected_sparse_output[t]: Y[t] for t in range(len(expected_sparse_output))})\n loss_t = sess.run([loss], feed_dict)\n return 
loss_t[0]", "def test(self):\n batch = get_test_batch(c.BATCH_SIZE, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def train(model: ContinualModel, dataset: ContinualDataset,\n args: Namespace) -> None:\n model.net.to(model.device)\n results, results_mask_classes = [], []\n \n model_stash = create_stash(model, args, dataset)\n \n if args.csv_log:\n csv_logger = CsvLogger(dataset.SETTING, dataset.NAME, model.NAME)\n if args.tensorboard:\n tb_logger = TensorboardLogger(args, dataset.SETTING, model_stash)\n model_stash['tensorboard_name'] = tb_logger.get_name()\n \n dataset_copy = get_dataset(args)\n for t in range(dataset.N_TASKS):\n model.net.train()\n _, _ = dataset_copy.get_data_loaders()\n if model.NAME != 'icarl' and model.NAME != 'pnn':\n random_results_class, random_results_task = evaluate(model, dataset_copy)\n \n print(file=sys.stderr)\n for t in range(dataset.N_TASKS):\n model.net.train()\n train_loader, test_loader = dataset.get_data_loaders()\n if hasattr(model, 'begin_task'):\n model.begin_task(dataset)\n if t:\n accs = evaluate(model, dataset, last=True)\n results[t - 1] = results[t - 1] + accs[0]\n if dataset.SETTING == 'class-il':\n results_mask_classes[t - 1] = results_mask_classes[t - 1] + accs[1]\n for epoch in range(args.n_epochs):\n for i, data in enumerate(train_loader):\n if hasattr(dataset.train_loader.dataset, 'logits'):\n inputs, labels, not_aug_inputs, logits = data\n inputs = inputs.to(model.device)\n labels = labels.to(model.device)\n not_aug_inputs = not_aug_inputs.to(model.device)\n logits = logits.to(model.device)\n loss = model.observe(inputs, labels, not_aug_inputs, logits)\n else:\n inputs, labels, not_aug_inputs = data\n inputs, labels = inputs.to(model.device), labels.to(\n model.device)\n not_aug_inputs = not_aug_inputs.to(model.device)\n loss = model.observe(inputs, labels, not_aug_inputs)\n \n progress_bar(i, len(train_loader), epoch, t, loss)\n \n if args.tensorboard:\n tb_logger.log_loss(loss, args, epoch, t, i)\n \n model_stash['batch_idx'] = i + 1\n model_stash['epoch_idx'] = epoch + 1\n model_stash['batch_idx'] = 0\n model_stash['task_idx'] = t + 1\n model_stash['epoch_idx'] = 0\n \n if hasattr(model, 'end_task'):\n model.end_task(dataset)\n \n accs = evaluate(model, dataset)\n results.append(accs[0])\n results_mask_classes.append(accs[1])\n \n mean_acc = np.mean(accs, axis=1)\n print_mean_accuracy(mean_acc, t + 1, dataset.SETTING)\n \n model_stash['mean_accs'].append(mean_acc)\n if args.csv_log:\n csv_logger.log(mean_acc)\n if args.tensorboard:\n tb_logger.log_accuracy(np.array(accs), mean_acc, args, t)\n \n if args.csv_log:\n csv_logger.add_bwt(results, results_mask_classes)\n csv_logger.add_forgetting(results, results_mask_classes)\n if model.NAME != 'icarl' and model.NAME != 'pnn':\n csv_logger.add_fwt(results, random_results_class,\n results_mask_classes, random_results_task)\n \n if args.tensorboard:\n tb_logger.close()\n if args.csv_log:\n csv_logger.write(vars(args))", "def _run_epoch(sess, model, args, data, index=0, tb_summaries=None,\n id_to_word=None, train_op=None, verbose=False):\n epoch_start_time = time.time()\n # total cost and number of words evaluated in this epoch\n costs, total_words = 0.0, 0.0\n # epoch size is number of batches in each epoch\n epoch_size = (len(data[index]) - 1) // model.config['batch_size']\n state = sess.run(model.initial_state)\n\n # iterate through batches\n for step, (x, y) in enumerate(data_reader.batch_iterator(\n data[index], 
model.config['batch_size'])):\n # return these parameters after running TF session\n fetches = {\n 'cost': model.cost[index],\n 'final_state': model.final_state,\n 'seq_len': model.seq_len\n }\n # only train model has optimizer operation\n if train_op is not None:\n fetches['train_op'] = train_op[index]\n\n # create dict to feed input, targets, and rnn into TF session\n feed_dict = utils.create_feed_dict(model, args, x, y, state)\n # run all parameters in fetches dict\n vals = sess.run(fetches, feed_dict)\n\n costs += vals['cost']\n # number of words evaluated\n total_words += np.sum(vals['seq_len'])\n # use perplexity to evaluate language models\n perplexity = np.exp(costs / total_words)\n\n if verbose and step % (epoch_size // 2) == 1:\n # display perplexity and top word predictions for sequence\n _display_epoch_metrics(step, epoch_size, perplexity, total_words,\n epoch_start_time, args, model, sess,\n index, feed_dict, vals, id_to_word, y)\n\n # generate sample text while training to monitor progress\n if args.display_text == 'True' and model.name == 'Train':\n generate.generate_text(sess, model, id_to_word, train_ind=index)\n\n # write TensorBoard summaries for Train/Valid\n if args.save_path != '' and model.name != 'Test':\n summary = sess.run(tb_summaries.summary_op,\n {tb_summaries.ppl_summary: perplexity})\n model.file_writer.add_summary(summary, get_or_create_global_step().eval())\n\n return perplexity", "def train(self, training_data, cfg, **kwargs):\n pass", "def training(model, train_loader, optimizer, device, writer, epoch, iterator):\r\n model.train()\r\n running_loss = 0.0\r\n numBatches = len(train_loader)\r\n final = False\r\n start = True\r\n # define start time\r\n start_time = time.time()\r\n \r\n # For each batch\r\n for i, data in enumerate(train_loader, 0):\r\n \r\n data = data.to(device)\r\n \r\n # Set the gradients to zero\r\n optimizer.zero_grad() \r\n \r\n # If first or last batch \r\n if i == 1:\r\n start = False\r\n \r\n if i+1 == numBatches:\r\n final = True\r\n \r\n # Forward pass, backward pass, optimize\r\n prediction = model(data, final, start)\r\n \r\n # Calculate loss of the batch\r\n loss = torch.nn.functional.mse_loss(prediction, data.y)\r\n \r\n loss.backward()\r\n optimizer.step()\r\n \r\n # Progress bar and dump to tensorboard file\r\n running_loss += loss.item()\r\n if (i+1) % 10 == 0:\r\n print(\"Epoch {}, {:d}% \\t train_loss: {:.2f} took: {:.2f}s\".format(\r\n epoch+1, int(100 * (i+1) / numBatches), running_loss / 10, time.time() - start_time))\r\n\r\n # write the train loss to tensorboard\r\n running_loss_norm = running_loss / 10\r\n writer.write_loss_train(running_loss_norm, iterator)\r\n\r\n # Reset running loss and time\r\n running_loss = 0.0\r\n start_time = time.time()\r\n \r\n # set iterator to -1 if training failed\r\n if torch.isnan(loss):\r\n iterator = -1\r\n valLoss = 'Converged to NaN'\r\n pickle.dump(valLoss, open(os.path.join(log_path,'valLoss.data'), 'wb'))\r\n break\r\n \r\n iterator += 1\r\n \r\n return iterator", "def train_test_model_stream():\n train=learning.Train_kmer_clf()\n train.run()\n #with open(os.path.join(cfg.pathtoxp, cfg.xp_name, cfg.id, f'{cfg.model}_CVresults.pkl'), 'rb') as f:\n # dic=pickle.load(f)\n #test=learning.Test_streaming(batchsize=1, kmer_to_index=dic['features'], clf=dic['classifier'])\n test=learning.Test_streaming(batchsize=1, kmer_to_index=train.kmer_to_index, clf=train.cv_clf)\n test.run()", "def train_fru(model, epochs=EPOCHS):\n train(model, epochs=epochs, dataset=FRUDataset)", "def 
train(self, data, epochs, batch_size, save_dir, save_prefix,\n dropout_keep_prob=1.0, evaluate=True):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n for epoch in range(1, epochs + 1):\n self.logger.info('Training the model for epoch {}'.format(epoch))\n train_batches = data.gen_mini_batches('train', batch_size, pad_id, shuffle=True)\n train_loss = self._train_epoch(train_batches, dropout_keep_prob,\n data, batch_size, save_dir, save_prefix)\n self.logger.info('Average train loss for epoch {} is {}'.format(epoch, train_loss))\n\n if evaluate:\n self.logger.info('Evaluating the model after epoch {}'.format(epoch))\n if data.dev_set is not None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n else:\n self.save(save_dir, save_prefix + '_' + str(epoch))", "def train_model(model, epochs, optimizer, loss_function, train_iterator, valid_iterator):\n for epoch in range(epochs):\n model.train()\n train_loss = 0.0\n train_acc = 0.0\n for i, batch in enumerate(train_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n optimizer.zero_grad()\n\n output = model(feature, batch_length)\n\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n train_acc += acc.item()\n print(\n f\"Train:: Epoch: {epoch}, Loss: {train_loss / len(train_iterator)}, Accuracy: {train_acc / len(train_iterator)}\")\n\n model.eval()\n val_loss = 0.0\n val_acc = 0.0\n for i, batch in enumerate(valid_iterator):\n (feature, batch_length), label = batch.overview, batch.genre\n batch_length = batch_length.to('cpu')\n label = label.float()\n\n output = model(feature, batch_length)\n loss = loss_function(output, label)\n acc = model_accuracy(output, label)\n\n val_loss += loss.item()\n val_acc += acc.item()\n\n print(\n f\"Validation:: Epoch: {epoch}, Loss: {val_loss / len(valid_iterator)}, Accuracy: {val_acc / len(valid_iterator)}\")\n print(\"\")", "def test_custom_training():\n testcol = testcol_cust\n conn = pm.MongoClient(host=testhost,\n port=testport)\n\n # set up the parameters\n params = {}\n # everything is list except 'train_params'\n params['model_params'] = {'func': model.mnist_tfutils}\n params['save_params'] = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0',\n 'save_valid_freq': 20,\n 'save_filters_freq': 200,\n 'cache_filters_freq': 100,\n }\n params['train_params'] = {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'train',\n 'n_threads': 4},\n 'train_loop': {'func': custom_train_loop},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 500\n }\n params['learning_rate_params'] = {'learning_rate': 0.05,\n 'decay_steps': num_batches_per_epoch,\n 'decay_rate': 0.95,\n 'staircase': True}\n params['validation_params'] = {'valid0': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 10,\n 'agg_func': 
utils.mean_dict}}\n params['skip_check'] = True\n\n # actually run the training\n base.train_from_params(**params)\n # test if results are as expected\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0'}).count() == 26\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'saved_filters': True}).distinct('step') == [0, 200, 400]\n\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'step': 0})[0]\n asserts_for_record(r, params, train=True)\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'step': 20})[0]\n asserts_for_record(r, params, train=True)\n\n # run another 500 steps of training on the same experiment id.\n params['train_params']['num_steps'] = 1000\n base.train_from_params(**params)\n\n # test if results are as expected\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0'}).count() == 51\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training0',\n 'saved_filters': True}).distinct('step') == [0, 200, 400, 600, 800, 1000]\n assert conn[testdbname][testcol + '.files'].distinct('exp_id') == ['training0']\n r = conn[testdbname][testcol + '.files'].find({'exp_id': 'training0', 'step': 1000})[0]\n asserts_for_record(r, params, train=True)\n\n # run 500 more steps but save to a new experiment id.\n params['train_params']['num_steps'] = 1500\n params['load_params'] = {'exp_id': 'training0'}\n params['save_params']['exp_id'] = 'training1'\n\n base.train_from_params(**params)\n assert conn[testdbname][testcol + '.files'].find({'exp_id': 'training1',\n 'saved_filters': True}).distinct('step') == [1200, 1400]", "def train(model, train_loader, val_loader, epochs, optimizer, loss_fn, device):\n \n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n optimizer.zero_grad()\n # Forward\n output = model(batch_X)\n loss = loss_fn(output, batch_y)\n # Backward\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n log_eval_metrics(model, val_loader, device, epoch)", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def train(self, dataset=None, epochs=2, verbose=1, workers=1):\n dataset = utils.prepare_dataset(dataset,\n self.config.batch_size,\n self.config.inputs,\n self.dtype,\n self.config.batch_decay)\n callbacks = [ModelCheckpoint(os.path.join(self.config.model_folder,\n '{epoch:03d}.hdf5'),\n monitor='val_loss',\n verbose=1,\n save_best_only=False,\n save_weights_only=False,\n mode='auto'),\n GeneratorCallback(self.config.test_string,\n self.config.inputs,\n self.config.generated_characters,\n self.dtype)\n ]\n for i in range(epochs):\n self.model.fit(dataset,\n initial_epoch=i,\n epochs=i + 1,\n verbose=verbose,\n use_multiprocessing=True,\n workers=workers,\n callbacks=callbacks)" ]
[ "0.725532", "0.704524", "0.6841591", "0.6800355", "0.67653406", "0.67564285", "0.66826314", "0.6670544", "0.6661017", "0.6658394", "0.65870076", "0.6586631", "0.65774953", "0.65341204", "0.6532416", "0.6496773", "0.6493735", "0.64827245", "0.6464911", "0.64211214", "0.64112926", "0.64072704", "0.6394867", "0.6388188", "0.6367342", "0.63665897", "0.636315", "0.63601536", "0.63552636", "0.6353317", "0.63510466", "0.6350223", "0.6345173", "0.63214666", "0.6312164", "0.63001204", "0.62983066", "0.6297224", "0.6291201", "0.62890154", "0.62700194", "0.6268946", "0.6260809", "0.6259575", "0.6259575", "0.6259575", "0.6259575", "0.6254122", "0.6251192", "0.6244679", "0.6243698", "0.6241429", "0.6224756", "0.6218478", "0.6215859", "0.6202448", "0.62022185", "0.6190727", "0.61886585", "0.6187943", "0.61872864", "0.61824954", "0.6181487", "0.61707276", "0.61699283", "0.6161154", "0.615788", "0.6157683", "0.6154649", "0.6149763", "0.61483943", "0.6146658", "0.6145606", "0.61443055", "0.6135323", "0.6126624", "0.6120323", "0.6116762", "0.61044455", "0.6103971", "0.6103336", "0.6103275", "0.60982054", "0.6089471", "0.60886145", "0.6087098", "0.6081443", "0.60776484", "0.6077536", "0.6067704", "0.6064057", "0.6063395", "0.6063366", "0.60628444", "0.60497665", "0.6048002", "0.60421", "0.60420257", "0.60362965", "0.60361075" ]
0.7809584
0
Hook to be used by subclasses to define default ACLs in context.
def __base_acl__(self) -> list:
    _acls = [
        (Allow, 'g:briefy_qa', ['add', 'delete', 'edit', 'list', 'view'])
    ]
    return _acls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __acl__():", "def get_acl(registry=None):", "def load_acl(self):\n macl_class = self.app.config.get('MACL_CLASS', None)\n macl_dict = self.app.config.get('MACL_DEFINITION', None)\n default_roles = self.app.config.get('MACL_DEFAULT_ROLES', None)\n\n if default_roles is not None:\n self._current_roles = default_roles\n\n if macl_class is not None:\n self.load_from_class(macl_class)\n\n if macl_dict is not None:\n self.load_from_dict(macl_dict)", "def __base_acl__(self) -> list:\n _acls = [\n (Allow, 'g:professionals', ['list', 'view', 'edit']),\n ]\n return _acls", "def _reload_acls(self):\n\t\tself.acls = ACLs()", "def create_acl(self, context, sg):\n self.security_group_driver.create_acl(context, sg)", "def __acl__(self):\n yield 'Allow', 'system.Everyone', 'none'\n yield security.DENY_ALL", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def pre_access_control_list_create(self, resource_dict):\n pass", "def __acl__(self):\n # type: () -> AccessControlListType\n user = self.request.user\n # allow if role MAGPIE_ADMIN_PERMISSION is somehow directly set instead of inferred via members of admin-group\n acl = [(Allow, get_constant(\"MAGPIE_ADMIN_PERMISSION\", self.request), ALL_PERMISSIONS)]\n admin_group_name = get_constant(\"MAGPIE_ADMIN_GROUP\", self.request)\n admins = GroupService.by_group_name(admin_group_name, db_session=self.request.db)\n if admins:\n # need to add explicit admin-group ALL_PERMISSIONS otherwise views with other permissions than the\n # default MAGPIE_ADMIN_PERMISSION will be refused access (e.g.: views with MAGPIE_LOGGED_PERMISSION)\n acl += [(Allow, \"group:{}\".format(admins.id), ALL_PERMISSIONS)]\n if user:\n # user-specific permissions (including group memberships)\n permissions = UserService.permissions(user, self.request.db)\n user_acl = permission_to_pyramid_acls(permissions)\n # allow views that require minimally to be logged in (regardless of who is the user)\n auth_acl = [(Allow, user.id, Authenticated)]\n acl += user_acl + auth_acl\n return acl", "def ApplyAclChanges(self, uri):\n try:\n current_acl = uri.get_def_acl()\n except GSResponseError as e:\n if (e.code == 'AccessDenied' and e.reason == 'Forbidden'\n and e.status == 403):\n self._WarnServiceAccounts()\n self.logger.warning('Failed to set default acl for {0}: {1}'\n .format(uri, e.reason))\n return\n\n modification_count = 0\n for change in self.changes:\n modification_count += change.Execute(uri, current_acl)\n if modification_count == 0:\n self.logger.info('No changes to {0}'.format(uri))\n return\n\n # TODO: Add if-metageneration-match when boto provides access to bucket\n # metageneration.\n\n # If this fails because of a precondition, it will raise a\n # GSResponseError for @Retry to handle.\n try:\n uri.set_def_acl(current_acl, validate=False)\n except GSResponseError as e:\n # Don't retry on bad requests, e.g. 
invalid email address.\n if getattr(e, 'status', None) == 400:\n raise CommandException('Received bad request from server: %s' % str(e))\n raise\n self.logger.info('Updated default ACL on {0}'.format(uri))", "def config_mgmt_acl(zdcli, **kwargs):\n option = {}\n if kwargs: option.update(kwargs)\n \n logging.info(\"Create acl %s\" % option)\n cmd_block = _define_mgmt_ip_acl_cmd_block(option)\n zdcli.do_cfg(cmd_block)", "def __acl__(self):\n # type: () -> AccessControlListType\n acl = []\n if self.owner_user_id:\n acl.append((Allow, self.owner_user_id, ALL_PERMISSIONS))\n if self.owner_group_id:\n acl.append((Allow, \"group:%s\" % self.owner_group_id, ALL_PERMISSIONS))\n return acl", "def get_acl(self):\n\n return 'private'", "def __init__(self):\n\n self._authorize()", "def default_acl(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"default_acl\")", "def init_app(self):\n self.app.config.setdefault('MACL_DEFINITION', None)\n self.app.config.setdefault('MACL_CLASS', None)\n self.app.config.setdefault('MACL_ERROR_MESSAGE',\n 'You do not have access to this resource')\n\n self.app.miracle_acl_manager = self\n\n self.load_acl()", "def configure_extra(context):\n portal_url = getToolByName(context, 'portal_url')\n pm = getToolByName(context, 'portal_membership')\n portal = portal_url.getPortalObject()\n security = ISecuritySchema(portal)\n\n if not security.enable_self_reg:\n security.enable_self_reg = True\n\n if not security.enable_user_pwd_choice:\n security.enable_user_pwd_choice = True\n\n if not security.enable_user_folders:\n security.enable_user_folders = True\n pm.memberarea_type = 'MemberFolder'", "def default_acl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_acl\")", "def default_acl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_acl\")", "def create_custom_permissions(self) -> None:\n self.add_permission_view_menu(\"all_datasource_access\", \"all_datasource_access\")\n self.add_permission_view_menu(\"all_database_access\", \"all_database_access\")\n self.add_permission_view_menu(\"all_query_access\", \"all_query_access\")\n self.add_permission_view_menu(\"can_share_dashboard\", \"Superset\")\n self.add_permission_view_menu(\"can_share_chart\", \"Superset\")", "def __init__(self, app=None, acl=None):\n self.app = app\n self.acl = acl or MiracleAcl()\n\n if app is not None:\n self.init_app()", "def SetAclCommand(self, args, unused_sub_opts=None, headers=None, debug=0):\n acl_arg = args[0]\n uri_args = args[1:]\n provider = None\n first_uri = None\n # Do a first pass over all matched objects to disallow multi-provider\n # setacl requests, because there are differences in the ACL models.\n for uri_str in uri_args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n if not provider:\n provider = uri.scheme\n elif uri.scheme != provider:\n raise CommandException('\"setacl\" command spanning providers not '\n 'allowed.')\n if not first_uri:\n first_uri = uri\n\n # Get ACL object from connection for the first URI, for interpreting the\n # ACL. This won't fail because the main startup code insists on 1 arg\n # for this command.\n storage_uri = first_uri\n acl_class = storage_uri.acl_class()\n canned_acls = storage_uri.canned_acls()\n\n # Determine whether acl_arg names a file containing XML ACL text vs. 
the\n # string name of a canned ACL.\n if os.path.isfile(acl_arg):\n acl_file = open(acl_arg, 'r')\n acl_txt = acl_file.read()\n acl_file.close()\n acl_obj = acl_class()\n h = handler.XmlHandler(acl_obj, storage_uri.get_bucket())\n try:\n xml.sax.parseString(acl_txt, h)\n except xml.sax._exceptions.SAXParseException, e:\n raise CommandException('Requested ACL is invalid: %s at line %s, '\n 'column %s' % (e.getMessage(), e.getLineNumber(),\n e.getColumnNumber()))\n acl_arg = acl_obj\n else:\n # No file exists, so expect a canned ACL string.\n if acl_arg not in canned_acls:\n raise CommandException('Invalid canned ACL \"%s\".' % acl_arg)\n\n # Now iterate over URIs and set the ACL on each.\n for uri_str in uri_args:\n for uri in self.CmdWildcardIterator(uri_str, headers=headers,\n debug=debug):\n print 'Setting ACL on %s...' % uri\n uri.set_acl(acl_arg, uri.object_name, False, headers)", "def authentication_hook(self):\n pass", "def init() -> None:\n appbuilder.add_permissions(update_perms=True)\n security_manager.sync_role_definitions()", "def __init__(__self__, *,\n bucket: pulumi.Input[str],\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"bucket\", bucket)\n if default_acl is not None:\n pulumi.set(__self__, \"default_acl\", default_acl)\n if predefined_acl is not None:\n pulumi.set(__self__, \"predefined_acl\", predefined_acl)\n if role_entities is not None:\n pulumi.set(__self__, \"role_entities\", role_entities)", "def __init__(__self__,\n resource_name: str,\n args: BucketACLArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _set_override_role_called(self):\n self.__override_role_called = True", "def __init__(self, first_name, last_name, location, job_title):\n \"\"\"Then initialize attributes of the child class.\"\"\"\n\n super().__init__(first_name, last_name, location, job_title)\n self.permissions = 'root'", "def test_acl_configuration(self, env):\n # Create ACL Expression\n self.suite_logger.debug(\"Create and Verify ACL Expression\")\n expressions = [(1, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01'), ]\n env.switch[1].ui.create_acl(expressions=expressions)\n # Verify ACL Expression\n expression = env.switch[1].ui.get_table_acl(\"ACLExpressions\")[0]\n assert expression['data'] == '00:00:00:01:01:01'\n assert expression['mask'] == 'FF:FF:FF:FF:FF:FF'\n assert expression['expressionId'] == 1\n assert expression['field'] == 'DstMac'\n\n # Create ACL Actions\n self.suite_logger.debug(\"Create and Verify ACL Action\")\n actions = [(1, 'Drop', ''), ]\n env.switch[1].ui.create_acl(actions=actions)\n # Verify ACL Action\n action = env.switch[1].ui.get_table_acl(\"ACLActions\")[0]\n assert action['action'] == 'Drop'\n assert action['param'] == ''\n assert action['actionId'] == 1\n\n # Create ACL Rule\n self.suite_logger.debug(\"Create and Verify ACL Rule\")\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), ]\n # Note: ACL Rule should be assigned to ports\n env.switch[1].ui.create_acl(ports=[1, ], rules=rules)\n # Verify ACL Rule\n rule = env.switch[1].ui.get_table_acl(\"ACLRules\")[0]\n assert rule['ruleId'] == 1\n assert rule['expressionId'] == 1\n assert rule['actionId'] == 1\n assert rule['stage'] == 'Ingress'\n assert rule['enabled'] == 'Enabled'\n assert rule['priority'] == 0", "def create_acl_rule(self, context, sgr):\n self.security_group_driver.create_acl_rule(context, sgr)", "def 
init_with_context(self, context):\n return super(Menu, self).init_with_context(context)", "def __init__(__self__, *,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if bucket is not None:\n pulumi.set(__self__, \"bucket\", bucket)\n if default_acl is not None:\n pulumi.set(__self__, \"default_acl\", default_acl)\n if predefined_acl is not None:\n pulumi.set(__self__, \"predefined_acl\", predefined_acl)\n if role_entities is not None:\n pulumi.set(__self__, \"role_entities\", role_entities)", "def pre_access_control_list_read(self, resource_id):\n pass", "def default_access_control(self):\n return self._default_access_control", "def get_processed_acl(self):\n return self.get_storage().default_acl", "def __init__(self):\n self.privileges=[\"can add post\", \"can delete post\", \"can ban user\"]", "def accessControlList(self):\n return allACL", "def set_permissions(self, object, replace=False):\r\n if isinstance(self.config.origin, S3Origin):\r\n if self.config.origin.origin_access_identity:\r\n id = self.config.origin.origin_access_identity.split('/')[-1]\r\n oai = self.connection.get_origin_access_identity_info(id)\r\n policy = object.get_acl()\r\n if replace:\r\n policy.acl = ACL()\r\n policy.acl.add_user_grant('READ', oai.s3_user_id)\r\n object.set_acl(policy)\r\n else:\r\n object.set_canned_acl('public-read')", "def __init__(self):\n\t\tself.privileges = [\"can add post\", \"can delete post\", \"can ban user\"]", "def __init__(__self__,\n resource_name: str,\n args: EndpointAclPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def AddAclSample():\n client = CreateClient()\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc)\n acl_entry = gdata.docs.data.AclEntry(\n scope=gdata.acl.data.AclScope(value='[email protected]', type='user'),\n role=gdata.acl.data.AclRole(value='reader'),\n )\n client.AddAclEntry(doc, acl_entry, send_notifications=False)", "def test_make_user_acl(self, make_acl_mock):\n zk = zksasl.SASLZkClient()\n zk.make_user_acl('foo', 'rw')\n\n make_acl_mock.assert_called_once_with(\n scheme='sasl', credential='foo', read=True,\n write=True, create=False, delete=False, admin=False\n )", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def authn_and_authz():\n authentication()\n authorization()", "def _fetch_appropriate_acl(self, ctx):\n\n bimodal_checker = ctx.req.environ[utils.ENV_BIMODAL_CHECKER]\n\n if ctx.req.method in ('GET', 'HEAD') and ctx.container_name:\n container_info = get_container_info(\n ctx.req.environ, bimodal_checker,\n swift_source=\"PFS\")\n return container_info['read_acl']\n elif ctx.object_name and ctx.req.method in (\n 'PUT', 'POST', 'DELETE', 'COALESCE'):\n container_info = get_container_info(\n ctx.req.environ, bimodal_checker,\n swift_source=\"PFS\")\n return container_info['write_acl']\n else:\n return None", "def manage_afterAdd(self, item, container) :\n item.manage_permission(Permissions.AddPortalContent,\n 
['Manager'])\n item.manage_permission(Permissions.AccessContentsInformation,\n ['Member', 'Manager'])\n item.manage_permission(Permissions.View,\n ['Manager',])\n BaseTool.inheritedAttribute('manage_afterAdd')(self, item, container)", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(codename=explore_keyword_rights.explore_keyword_access)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception, e:\n print('ERROR : Impossible to init the permissions for core_explore_keyword_app : ' + e.message)", "def test_make_role_acl(self, make_acl_mock):\n zk = zksasl.SASLZkClient()\n zk.make_role_acl('servers', 'ra')\n\n make_acl_mock.assert_called_once_with(\n scheme='sasl', credential='file:///treadmill/roles/servers',\n read=True, write=False, delete=False, create=False, admin=True\n )", "def initial(self, request, *args, **kwargs):\n super(OdooApi, self).initial(request, *args, **kwargs)\n self.check_service_permission(request, kwargs.get('service_path'))", "def post_access_control_list_create(self, resource_dict):\n pass", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(\n name=main_rights.DEFAULT_GROUP\n )\n\n # Get explore keyword permissions\n explore_access_perm = permission.objects.get(\n codename=explore_keyword_rights.EXPLORE_KEYWORD_ACCESS\n )\n\n # Add permissions to default group\n default_group.permissions.add(explore_access_perm)\n except Exception as exception:\n logger.error(\n \"Impossible to init explore_keyword permissions: %s\"\n % str(exception)\n )", "def init():\n click.secho(\"[+] Initialize permissions\", fg=\"cyan\")\n init_permissions()\n click.secho(\"[+] Initialize permissions successfully\", fg=\"green\")", "def __attrs_post_init__(self):\n super().__attrs_post_init__()\n if self.config.get(\"open_mode\", False) is False:\n # If the master is not configured to be in open mode, register an auth event callback\n # If we were passed an auth event callback, it needs to get this master as the first\n # argument\n if self.on_auth_event_callback:\n auth_event_callback = partial(self.on_auth_event_callback, self)\n else:\n auth_event_callback = self._on_auth_event\n self.before_start(\n self.event_listener.register_auth_event_handler, self.id, auth_event_callback\n )\n self.after_terminate(self.event_listener.unregister_auth_event_handler, self.id)", "def init_with_context(self, context):\n return super(MainMenu, self).init_with_context(context)", "def allow_access(self, base, share, access):\n def cbk(ddict, edir, host):\n if edir not in ddict:\n ddict[edir] = []\n if host in ddict[edir]:\n return True\n ddict[edir].append(host)\n self._manage_access(share['name'], access['access_type'],\n access['access_to'], cbk)", "def initialize(context):\n ##code-section custom-init-top #fill in your manual code here\n ##/code-section custom-init-top\n\n # imports packages and types for registration\n import content\n\n\n # Initialize portal content\n all_content_types, all_constructors, all_ftis = process_types(\n listTypes(PROJECTNAME),\n PROJECTNAME)\n\n 
cmfutils.ContentInit(\n PROJECTNAME + ' Content',\n content_types = all_content_types,\n permission = DEFAULT_ADD_CONTENT_PERMISSION,\n extra_constructors = all_constructors,\n fti = all_ftis,\n ).initialize(context)\n\n # Give it some extra permissions to control them on a per class limit\n for i in range(0,len(all_content_types)):\n klassname=all_content_types[i].__name__\n if not klassname in ADD_CONTENT_PERMISSIONS:\n continue\n\n context.registerClass(meta_type = all_ftis[i]['meta_type'],\n constructors= (all_constructors[i],),\n permission = ADD_CONTENT_PERMISSIONS[klassname])\n\n ##code-section custom-init-bottom #fill in your manual code here\n ##/code-section custom-init-bottom", "def pre_access_control_list_update(self, resource_id, resource_dict):\n pass", "def initial(self, request, *args, **kwargs):\n\t\t# Ensure that the incoming request is permitted\n\t\t# self.perform_authentication(request)\n\t\t# self.check_permissions(request)\n\t\t# self.check_throttles(request)\n\t\tpass", "def _add_acl_sequence_numbers(self):\n\n ipv4_acl_sw = 'ip access-list'\n # ipv6_acl_sw = ('ipv6 access-list')\n if self.host.os in ['ios']:\n acl_line_sw = ('permit', 'deny')\n else:\n acl_line_sw = ('permit', 'deny', 'remark')\n for child in self.children:\n if child.text.startswith(ipv4_acl_sw):\n sn = 10\n for sub_child in child.children:\n if sub_child.text.startswith(acl_line_sw):\n sub_child.text = \"{} {}\".format(sn, sub_child.text)\n sn += 10\n\n return self", "def __init__(__self__,\n resource_name: str,\n args: AclRuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def permissions():\n pass", "def test_set_get_acls(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"set_acls %s/one 'world:anyone:r digest:%s:cdrwa'\" % (\n self.tests_path, self.auth_digest))\n self.shell.onecmd(\"get_acls %s/one\" % (self.tests_path))\n\n if PYTHON3:\n user_id = \"Id(scheme='digest', id='%s')\" % (self.auth_digest)\n else:\n user_id = \"Id(scheme=u'digest', id=u'%s')\" % (self.auth_digest)\n\n user_acl = \"ACL(perms=31, acl_list=['ALL'], id=%s)\" % (user_id)\n expected_output = \"/tests/one: ['WORLD_READ', %s]\\n\" % (user_acl)\n self.assertEqual(expected_output, self.output.getvalue())", "def test_set_get_bad_acl(self):\n path_one = \"%s/one\" % (self.tests_path)\n auth_id = \"username_password:user:user\"\n self.shell.onecmd(\"create %s 'hello'\" % (path_one))\n self.shell.onecmd(\"set_acls %s 'world:anyone:r %s'\" % (\n path_one, auth_id))\n expected_output = \"Failed to set ACLs: \"\n expected_output += \"Bad ACL: username_password:user:user. 
\"\n expected_output += \"Format is scheme:id:perms.\\n\"\n self.assertEqual(expected_output, self.output.getvalue())", "def __init__(__self__,\n resource_name: str,\n args: AccessConfigurationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")", "def configure_aaa_default_group_methods(device,server_grp,server_grp_name):\n logger.info(f\"Configuring aaa default group methods\")\n\n configs=[\n\t f\"aaa authentication dot1x default {server_grp} {server_grp_name}\",\n\t\tf\"aaa authorization network default {server_grp} {server_grp_name}\",\n\t\tf\"aaa authorization network MLIST {server_grp} {server_grp_name}\",\n\t\tf\"aaa authorization auth-proxy default {server_grp} {server_grp_name}\",\n\t\tf\"aaa accounting network default start-stop {server_grp} {server_grp_name}\"\n\t]\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(f\"Could not configure aaa default group methods. Error:\\n{e}\")", "def configure_aaa_authorization_exec_default(device,auth_type,group_name=''):\n logger.info(f\"Configuring aaa authorization exec default\")\n\n configs=f\"aaa authorization exec default {auth_type}\"\n if group_name:\n configs+=f' group {group_name}'\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(f\"Could not configure aaa authorization exec default. Error:\\n{e}\")", "def __init__(self, first_name, last_name):\r\n\r\n super().__init__(first_name, last_name)\r\n \"\"\"Initialize the son class attributes\"\"\"\r\n\r\n self.privileges = ['add post', 'delete post']", "def __init__(self, params=None):\n\n rights = access.Checker(params)\n rights['create'] = ['checkIsDeveloper']\n rights['delete'] = [('checkCanEditGroupApp',\n [org_app_logic.logic]),\n ('checkIsActivePeriod', ['org_signup', 'scope_path'])]\n rights['edit'] = [('checkCanEditGroupApp',\n [org_app_logic.logic]),\n ('checkIsActivePeriod', ['org_signup', 'scope_path'])]\n rights['list'] = ['checkIsDeveloper']\n rights['list_self'] = ['checkIsUser']\n rights['show'] = ['allow']\n rights['review'] = ['checkIsHostForProgramInScope',\n ('checkCanReviewGroupApp', [org_app_logic.logic])]\n rights['review_overview'] = ['checkIsHostForProgramInScope']\n rights['bulk_accept'] = ['checkIsHostForProgramInScope']\n rights['bulk_reject'] = ['checkIsHostForProgramInScope']\n rights['apply'] = ['checkIsUser',\n ('checkCanCreateOrgApp', ['org_signup']),\n 'checkIsNotStudentForProgramInScope']\n\n new_params = {}\n\n new_params['rights'] = rights\n new_params['logic'] = org_app_logic.logic\n\n new_params['scope_view'] = program_view\n new_params['scope_redirect'] = redirects.getCreateRedirect\n\n new_params['sidebar_grouping'] = 'Organizations'\n\n new_params['list_key_order'] = [\n 'link_id', 'scope_path', 'name', 'home_page', 'email',\n 'description', 'why_applying','pub_mailing_list','irc_channel',\n 'member_criteria', 'prior_participation', 'prior_application',\n 'license_name', 'ideas', 'dev_mailing_list', 'contrib_template',\n 'contrib_disappears', 'member_disappears', 'encourage_contribs',\n 'continued_contribs']\n\n patterns = [(r'^%(url_name)s/(?P<access_type>apply)/%(scope)s$',\n 'soc.views.models.%(module_name)s.create',\n 'Create an %(name_plural)s'),\n (r'^%(url_name)s/(?P<access_type>bulk_accept)/%(scope)s$',\n 'soc.views.models.%(module_name)s.bulk_accept',\n 'Bulk Acceptation of %(name_plural)s'),\n (r'^%(url_name)s/(?P<access_type>bulk_reject)/%(scope)s$',\n 
'soc.views.models.%(module_name)s.bulk_reject',\n 'Bulk Rejection of %(name_plural)s'),]\n\n new_params['extra_django_patterns'] = patterns\n new_params['extra_key_order'] = ['admin_agreement',\n 'agreed_to_admin_agreement']\n\n new_params['extra_dynaexclude'] = ['applicant', 'backup_admin', 'status',\n 'created_on', 'last_modified_on']\n\n new_params['create_dynafields'] = [\n {'name': 'link_id',\n 'base': forms.fields.CharField,\n 'label': 'Organization Link ID',\n },\n ]\n\n new_params['create_extra_dynaproperties'] = {\n 'scope_path': forms.fields.CharField(widget=forms.HiddenInput,\n required=True),\n 'contrib_template': forms.fields.CharField(\n widget=helper.widgets.FullTinyMCE(\n attrs={'rows': 25, 'cols': 100})),\n 'description': forms.fields.CharField(\n widget=helper.widgets.FullTinyMCE(\n attrs={'rows': 25, 'cols': 100})),\n 'admin_agreement': forms.fields.Field(required=False,\n widget=widgets.AgreementField),\n 'agreed_to_admin_agreement': forms.fields.BooleanField(\n initial=False, required=True),\n\n 'clean_description': cleaning.clean_html_content('description'),\n 'clean_contrib_template': cleaning.clean_html_content(\n 'contrib_template'),\n 'clean_ideas': cleaning.clean_url('ideas'),\n 'clean': cleaning.validate_new_group('link_id', 'scope_path',\n model_logic.organization, org_app_logic)}\n\n # get rid of the clean method\n new_params['edit_extra_dynaproperties'] = {\n 'clean': (lambda x: x.cleaned_data)}\n\n new_params['name'] = \"Organization Application\"\n new_params['name_plural'] = \"Organization Applications\"\n new_params['name_short'] = \"Org App\"\n new_params['url_name'] = \"org_app\"\n new_params['group_name'] = \"Organization\"\n new_params['group_url_name'] = 'org'\n\n new_params['review_template'] = 'soc/org_app/review.html'\n # TODO use a proper template that works for each program\n new_params['accepted_mail_template'] = \\\n 'soc/org_app/mail/accepted_gsoc2009.html'\n new_params['rejected_mail_template'] = 'soc/org_app/mail/rejected.html'\n\n params = dicts.merge(params, new_params)\n\n super(View, self).__init__(params=params)", "def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)", "def init_with_context(self, context):\n return super(CustomAppIndexDashboard, self).init_with_context(context)", "def test_init_defaults(self):\n self._set_args(log_path=None,\n state='present',\n username='myBindAcct',\n password='myBindPass',\n server='ldap://example.com:384',\n search_base='OU=Users,DC=example,DC=com',\n role_mappings={'.*': ['storage.monitor']},\n )\n\n ldap = Ldap()", "def change_acl(self, acl):\n try:\n bucket_name = app.config['S3_BUCKET_NAME']\n s3_client = app.config['S3']\n\n keys = []\n list_objects = s3_client.list_objects(Bucket=bucket_name,\n Prefix=self.build_s3_base_prefix())\n if list_objects is not None and 'Contents' in list_objects:\n for ob in s3_client \\\n .list_objects(Bucket=bucket_name,\n Prefix=self.build_s3_base_prefix())['Contents']:\n keys.append(ob['Key'])\n\n for key in keys:\n s3_client.put_object_acl(Bucket=bucket_name, Key=key,\n ACL=acl)\n except Exception as e:\n app.logger.error(e)\n return False\n return True", "def test_set_get_acls_recursive(self):\n path_one = \"%s/one\" % (self.tests_path)\n path_two = \"%s/one/two\" % (self.tests_path)\n self.shell.onecmd(\"create %s 'hello'\" % (path_one))\n self.shell.onecmd(\"create %s 'goodbye'\" % (path_two))\n self.shell.onecmd(\"set_acls %s 'world:anyone:r digest:%s:cdrwa' true\" % (\n 
path_one, self.auth_digest))\n self.shell.onecmd(\"get_acls %s 0\" % (path_one))\n\n if PYTHON3:\n user_id = \"Id(scheme='digest', id='%s')\" % (self.auth_digest)\n else:\n user_id = \"Id(scheme=u'digest', id=u'%s')\" % (self.auth_digest)\n\n user_acl = \"ACL(perms=31, acl_list=['ALL'], id=%s)\" % (user_id)\n expected_output = \"\"\"/tests/one: ['WORLD_READ', %s]\n/tests/one/two: ['WORLD_READ', %s]\n\"\"\" % (user_acl, user_acl)\n\n self.assertEqual(expected_output, self.output.getvalue())", "def _change_access(course, user, level, action):\r\n\r\n try:\r\n role = ROLES[level](course.id)\r\n except KeyError:\r\n raise ValueError(\"unrecognized level '{}'\".format(level))\r\n\r\n if action == 'allow':\r\n role.add_users(user)\r\n elif action == 'revoke':\r\n role.remove_users(user)\r\n else:\r\n raise ValueError(\"unrecognized action '{}'\".format(action))", "def replace_sample_acls(self, ctx, params):\n # ctx is the context object\n #BEGIN replace_sample_acls\n id_ = _get_id_from_object(params, 'id', required=True)\n acls = _acls_from_dict(params)\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.FULL,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'replace_sample_acls', ctx.log_info, skip_check=not params.get('as_admin'))\n self._samples.replace_sample_acls(id_, _UserID(ctx[_CTX_USER]), acls, as_admin=admin)\n #END replace_sample_acls", "def customise_auth_user_resource(r, tablename):\n\n auth = current.auth\n\n def approve_user(r, **args):\n\n from gluon import redirect\n\n db = current.db\n user = db(db.auth_user.id == r.id).select(limitby = (0, 1)\n ).first()\n org_group_id = user.org_group_id\n if org_group_id:\n # Check if this is a COVID-19 Test Station\n ogtable = current.s3db.org_group\n org_group = db(ogtable.id == org_group_id).select(ogtable.name,\n limitby = (0, 1)\n ).first()\n if org_group and org_group.name == TESTSTATIONS:\n # Custom Approval process\n redirect(URL(c= \"default\", f=\"index\", args=[\"approve\", r.id]))\n\n # Default Approval\n auth.s3_approve_user(user)\n current.session.confirmation = T(\"User Account has been Approved\")\n redirect(URL(args=[r.id, \"roles\"]))\n\n current.s3db.configure(\"auth_user\",\n approve_user = approve_user,\n )", "def customise_auth_user_resource(r, tablename):\n\n auth = current.auth\n\n def approve_user(r, **args):\n\n from gluon import redirect\n\n db = current.db\n user = db(db.auth_user.id == r.id).select(limitby = (0, 1)\n ).first()\n org_group_id = user.org_group_id\n if org_group_id:\n # Check if this is a COVID-19 Test Station\n ogtable = current.s3db.org_group\n org_group = db(ogtable.id == org_group_id).select(ogtable.name,\n limitby = (0, 1)\n ).first()\n if org_group and org_group.name == TESTSTATIONS:\n # Custom Approval process\n redirect(URL(c= \"default\", f=\"index\", args=[\"approve\", r.id]))\n\n # Default Approval\n auth.s3_approve_user(user)\n current.session.confirmation = T(\"User Account has been Approved\")\n redirect(URL(args=[r.id, \"roles\"]))\n\n current.s3db.configure(\"auth_user\",\n approve_user = approve_user,\n )", "def _set_authenticator(self):\n pass", "def extended_acl(self) -> bool:\n return pulumi.get(self, \"extended_acl\")", "def __init__(self, name, email, interest, credential, reference):\n super().__init__(name, email, interest, credential, reference)\n self.UserLevel = 'SuperUser'", "def _ensure_initial_admin(config):\n if get_api_version() > 2:\n manager = get_manager()\n default_domain_id = create_or_show_domain(DEFAULT_DOMAIN)\n 
leader_set({'default_domain_id': default_domain_id})\n admin_domain_id = create_or_show_domain(ADMIN_DOMAIN)\n leader_set({'admin_domain_id': admin_domain_id})\n create_or_show_domain(SERVICE_DOMAIN)\n create_tenant(\"admin\", ADMIN_DOMAIN)\n create_tenant(config(\"service-tenant\"), SERVICE_DOMAIN)\n leader_set({'service_tenant_id': manager.resolve_tenant_id(\n config(\"service-tenant\"),\n domain=SERVICE_DOMAIN)})\n create_role('service')\n create_tenant(\"admin\", DEFAULT_DOMAIN)\n create_tenant(config(\"service-tenant\"), DEFAULT_DOMAIN)\n # User is managed by ldap backend when using ldap identity\n if not (config('identity-backend') ==\n 'ldap' and config('ldap-readonly')):\n\n admin_username = config('admin-user')\n if get_api_version() > 2:\n passwd = create_user_credentials(admin_username,\n get_admin_passwd,\n set_admin_passwd,\n domain=ADMIN_DOMAIN)\n if passwd:\n create_role('Member')\n # Grant 'Member' role to user ADMIN_DOMAIN/admin-user in\n # project ADMIN_DOMAIN/'admin'\n # ADMIN_DOMAIN\n grant_role(admin_username, 'Member', tenant='admin',\n user_domain=ADMIN_DOMAIN,\n project_domain=ADMIN_DOMAIN)\n create_role(config('admin-role'))\n # Grant admin-role to user ADMIN_DOMAIN/admin-user in\n # project ADMIN_DOMAIN/admin\n grant_role(admin_username, config('admin-role'),\n tenant='admin', user_domain=ADMIN_DOMAIN,\n project_domain=ADMIN_DOMAIN)\n # Grant domain level admin-role to ADMIN_DOMAIN/admin-user\n grant_role(admin_username, config('admin-role'),\n domain=ADMIN_DOMAIN, user_domain=ADMIN_DOMAIN)\n else:\n create_user_credentials(admin_username, get_admin_passwd,\n set_admin_passwd, tenant='admin',\n new_roles=[config('admin-role')])\n\n create_service_entry(\"keystone\", \"identity\",\n \"Keystone Identity Service\")\n\n for region in config('region').split():\n create_keystone_endpoint(public_ip=resolve_address(PUBLIC),\n service_port=config(\"service-port\"),\n internal_ip=resolve_address(INTERNAL),\n admin_ip=resolve_address(ADMIN),\n auth_port=config(\"admin-port\"),\n region=region)", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any commands you add below will overload the default ones.\n #", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any commands you add below will overload the default ones.\n #", "def determine_perms(self, request, *args, **kwargs):\n if hasattr(request, \"user\") and request.user in Collection.objects.get(id=kwargs['pk']).curators.all():\n # The user is a curator, so they can view and edit\n return {\"can_edit\": True, \"can_view\": True}\n else:\n # The default inherited permission system\n return super().determine_perms(request, *args, **kwargs)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def init_permissions(apps):\n try:\n group = apps.get_model(\"auth\", \"Group\")\n permission = apps.get_model(\"auth\", \"Permission\")\n\n # Get or Create the default group\n default_group, created = group.objects.get_or_create(name=main_rights.default_group)\n\n # Get explore example permissions\n explore_access_perm = permission.objects.get(codename=explore_example_rights.explore_example_access)\n explore_save_query_perm = 
permission.objects.get(codename=explore_example_rights.explore_example_save_query)\n explore_delete_query_perm = permission.objects.get(codename=explore_example_rights.explore_example_delete_query)\n\n # add permissions to default group\n default_group.permissions.add(explore_access_perm,\n explore_save_query_perm,\n explore_delete_query_perm)\n except Exception, e:\n print('ERROR : Impossible to init the permissions : ' + e.message)", "def get_acls():\n return config.get_cfg_storage(ID_ACL)", "def __init__(self):\n self.admin = Admin()\n self.current_user = self.admin.username\n Controller.generate_random_roles()\n self.admin_cli_handler = AdminCLIHandler(self)\n self.user_cli_handler = UserCLIHandler(self)\n self.user_to_action_mapping = {\n 'admin': {\n 0: self.admin_cli_handler.help,\n 1: self.admin_cli_handler.relogin,\n 2: self.admin_cli_handler.view_all_roles,\n 3: self.admin_cli_handler.new_user,\n 4: self.admin_cli_handler.new_role,\n 5: self.admin_cli_handler.edit_role,\n 6: self.admin_cli_handler.delete_role,\n 7: self.admin_cli_handler.view_request,\n 8: self.admin_cli_handler.delete_user,\n 9: self.admin_cli_handler.list_users,\n 10: self.admin_cli_handler.assign_role_to_user\n },\n 'user': {\n 0: self.user_cli_handler.help,\n 1: self.user_cli_handler.relogin,\n 2: self.user_cli_handler.view_all_roles,\n 3: self.user_cli_handler.request_new_role,\n 4: self.user_cli_handler.view_my_roles\n }\n }\n\n print(f\"\"\"\\n\\n\n Hi! You are logged in as {self.admin.username}\n Enter 0(numerical zero) for help\n Enter Ctrl+C to exit!\n \"\"\")", "def user_init(self):\n pass", "def acl_in_dp():\n return {}", "def authorization():\n pass", "def post_setup(cls):\n super().post_setup()\n cls.REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"] = (\n \"magnify.apps.core.authentication.DelegatedJWTAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n )", "def post_setup(self, context):\n pass", "def initialize(self, context):\r\n pass", "def initial(self, request, *args, **kwargs):\n\n # It's checks the permissions for the third party endpoint or not. It give access if key present.\n bool_value, message = self.check_api_keys(request)\n if bool_value:\n super(ProjectRestrictedGenericViewSet, self).initial(request, *args, **kwargs)\n # Check action permissions\n self.check_action_permissions(request)\n else:\n self.app_permission_denied(request, message)", "def _access_control(self, instance, host, mask=32, port=None,\n protocol='tcp', access_type='allow'):\n\n if access_type == 'allow':\n access_type = 'ACCEPT'\n elif access_type == 'deny':\n access_type = 'REJECT'\n else:\n LOG.error('Invalid access_type: %s' % access_type)\n raise exception.Error('Invalid access_type: %s' % access_type)\n\n if port == None:\n port = ''\n else:\n port = '--dport %s' % (port,)\n\n # Create our table instance\n tables = [\n linux_net.iptables_manager.ipv4['filter'],\n linux_net.iptables_manager.ipv6['filter']\n ]\n\n rule = '-s %s/%s -p %s %s -j %s' % \\\n (host, mask, protocol, port, access_type)\n\n for table in tables:\n table.add_rule(instance['name'], rule)\n\n # Apply the rules\n linux_net.iptables_manager.apply()", "def _idempotent_acl_check(self):\n\n if self.host.os in {'iosxr'}:\n if self.parent is not self.root:\n acl = ('ipv4 access-list ', 'ipv6 access-list ')\n if self.parent.text.startswith(acl):\n return True\n return False", "def _before_request():\n\n g.user = current_user" ]
[ "0.6822682", "0.6064362", "0.6048131", "0.6034133", "0.58422464", "0.5725353", "0.57216316", "0.5718573", "0.5646452", "0.55869544", "0.5586848", "0.55686396", "0.55665946", "0.545657", "0.5445769", "0.5405563", "0.5397825", "0.53963953", "0.5391042", "0.5391042", "0.5389581", "0.53651476", "0.5345793", "0.533071", "0.52745396", "0.5259861", "0.52582747", "0.52539986", "0.5239515", "0.5238178", "0.5215225", "0.5190424", "0.5189352", "0.5173402", "0.517222", "0.5140738", "0.51146597", "0.51129454", "0.5106282", "0.50825036", "0.5080566", "0.50732046", "0.50511336", "0.5041243", "0.5005703", "0.49906135", "0.49776617", "0.4972781", "0.49592665", "0.4948427", "0.49385023", "0.49376133", "0.49337667", "0.49333566", "0.49239272", "0.49161384", "0.49143448", "0.49073288", "0.49073225", "0.4906601", "0.49001044", "0.48923287", "0.48783654", "0.48780057", "0.48716706", "0.48694614", "0.48675597", "0.48662788", "0.48633328", "0.48620203", "0.4858854", "0.4853366", "0.48400018", "0.48396283", "0.48355588", "0.48354962", "0.4832738", "0.4830698", "0.4830698", "0.4825513", "0.48247677", "0.4818233", "0.48166287", "0.48124957", "0.48124957", "0.4810886", "0.48095247", "0.48053178", "0.48050186", "0.47997925", "0.47987375", "0.4793373", "0.47821626", "0.4781574", "0.47744906", "0.47736678", "0.47701332", "0.47692135", "0.47654304", "0.47539973" ]
0.62626284
1
List of fields allowed in filtering and sorting.
def filter_allowed_fields(self):
    allowed_fields = super().filter_allowed_fields
    # Remove assignment_id
    allowed_fields.remove('assignment_id')
    return allowed_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def _field_names(self):\n return [self._sanitize_field_name(field_name)\n for field_name in self._all_fields]", "def get_readonly_fields(self, request, obj=None):\n return [field.name for field in self.model._meta.fields]", "def get_allowed_fields(request, model):\r\n opts = model._meta\r\n fields = opts.fields + opts.many_to_many\n permissions = getattr(model, 'FieldPermissions', None)\r\n if not fields:\r\n return [f.name for f in fields]\r\n if not request.person:\r\n return []\r\n return [f.name \r\n for f in fields \r\n if request.person.has_roles(\r\n getattr(permissions, f.name, '')\r\n )]", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def listFields(self):\n return self.get_json('/field')", "def fields(self):\r\n return self._by_name.iteritems()", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def get_readonly_fields(self, request, obj=None):\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]", "def get_readonly_fields(self, request, obj=None):\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]", "def _select_fields(self):\r\n return []", "def get_field_names() -> Sequence[str]:\n raise NotImplementedError", "def fields(self):\r\n pass", "def get_sorted_fields(cls):\n return sorted(\n cls.get_fields(), key=lambda x: (x._primary and 1 or 2, x._order))", "def fieldNames(self):\n return self.__fieldNames", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def _list_fields(self):\n return list(self._state.keys())", "def get_fields(self):\n \n fields = []\n for order in self.order_lst:\n fields += order.get_fields()\n \n fields = list(set(fields))\n \n out_fields = self.eod.sort_fields(fields)\n \n return out_fields", "def get_fields(self):\n return list(self.metadata.keys())", "def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list", "def all_fields(cls):\n return cls.__by_name.values()", "def only(self, *fields):\n from jetengine.fields.base_field import BaseField\n\n only_fields = {}\n for field_name in fields:\n if isinstance(field_name, (BaseField,)):\n field_name = field_name.name\n\n only_fields[field_name] = QueryFieldList.ONLY\n\n # self.only_fields = fields.keys()\n return self.fields(True, **only_fields)", "def get_fields(self):\r\n return self.fields", "def field_names(self):\n return self.base_field_names() + list(self.data.keys())", "def get_filter_parameters(self):\n if not self.should_filter():\n return []\n\n fields = []\n for filter_backend in 
self.view.filter_backends:\n fields += self.get_filter_backend_parameters(filter_backend())\n\n return fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n\n\t\treturn self.__fields", "def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def list_fields(self):\n return list(self._state.keys())", "def get_boolean_fields():\n exclude = ['is_montessori', 'is_special_ed']\n\n fields = []\n for field in Location._meta.fields:\n if field.get_internal_type() == 'NullBooleanField' and \\\n not field.get_attname() in exclude:\n fields.append((field.get_attname(),field.verbose_name,))\n\n return fields", "def field_names(self):\n if not self._field_names:\n self._field_names.update(self.properties.keys())\n\n self._field_names = [attr for attr in self._field_names if not attr.startswith(\"_\")]\n\n return self._field_names", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def get_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ('nwr_rev', 'description', 'works')", "def sortedFields(cls):\n return [\n i[0] for i in sorted(cls._nameToValue.items(), key=lambda item: item[1])\n ]", "def _fields_names(cls) -> List:\n return list(field.name for field in dataclasses.fields(cls))", "def field_names(self):\n ...", "def fields(cls):\n return cls._nameToValue", "def get_fields(self):\n fields = []\n for items in self.order_items:\n fields += items.get_fields()\n \n fields = list(set(fields))\n \n field_order = ['recordId', 'orderId', 'itemId', 'collectionId']\n \n out_fields = field_order\n \n for f in fields:\n if f not in field_order:\n out_fields.append(f)\n \n return out_fields", "def get_field_names(cls):\n return cls._meta.get_field_names()", "def _get_fields(self):\n fields = self.table[0]\n fields = filter(None.__ne__, fields)\n return list(map(str.lower, fields))", "def get_fields(self):\n \n return self.metadata.keys()", "def get_fields(self, exclude=('id',)):\n fields = {}\n for field in self._meta.fields:\n if not field.name in exclude and getattr(self, field.name):\n fields[field.name] = getattr(self, field.name)\n return fields", "def filter_serializer_fields_by_opt():\n def _filter_serializer_fields_by_opt(serializer, attr, value):\n fields = []\n for fname, fopts in serializer.fields.items():\n rendered_fname = serializer.render_field_name(fname)\n if fopts.get(attr, None) == value:\n fields.append(rendered_fname)\n return fields\n return _filter_serializer_fields_by_opt", "def _get_fields(self):\n return self._fields", "def fields(self) -> Mapping[str, str]:\n return pulumi.get(self, \"fields\")", "def get_field_names(self):\n return self._keys", "def field_names(self):\r\n return self._names", "def raw_fields(self):\n pass", "def get_readonly_fields(self, request, obj=None):\n if not self.all_fields_readonly or (request.user.is_superuser and self.superuser_skips_all_readonly):\n return self.readonly_fields\n print self.fieldsets\n print list(set(\n [field.name for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))\n if self.fieldsets:\n return flatten_fieldsets(self.fieldsets)\n \n else:\n return list(set(\n [field.name for field in 
self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many]\n ))", "def get_fieldnames(self):\n fieldnames = self._fields.keys()\n fieldnames.remove('time')\n fieldnames.remove('lon')\n fieldnames.remove('lat')\n return fieldnames", "def fields(self) -> List[SingleField]:\n return self._fields", "def input_fields(self):\r\n return self.input.fields", "def non_state_fields(self):\n field_names = set()\n for field in self._meta.fields:\n if not field.primary_key and field.name not in self.state_fields:\n field_names.add(field.name)\n\n if field.name != field.attname:\n field_names.add(field.attname)\n return field_names", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields", "def get_filter_fields(result, verbose=False):\n result_info = get_result(result)\n filter_fields = result_info[\"filter_fields\"]\n if verbose:\n pprint(filter_fields)\n return filter_fields", "def filter_excluded_fields(fields, Meta, exclude_dump_only):\n exclude = getattr(Meta, \"exclude\", [])\n if exclude_dump_only:\n exclude += getattr(Meta, \"dump_only\", [])\n\n filtered_fields = OrderedDict(\n (key, value) for key, value in fields.items() if key not in exclude\n )\n\n return filtered_fields", "def get_field_names(self, declared_fields, info):\n return self._requested_fields", "def fields(cls):\n if not hasattr(cls, '_fields'):\n cls.parse_attributes()\n return cls._fields", "def fields(self):", "def required_fields():\n return tuple(MIMARKS._fields.keys())", "def model_fields(cls):\n members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a)))\n return [m for m in members if issubclass(m[1].__class__, fields.Field)]", "def get_field_names(self):\n return {rv[0] for rv in self.iter_fields()}", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def get_fields(cls):\n return cls.fields.values()", "def _member_field_names_for_protect(self, protect):\n result = []\n fields = self.MEMBER_DEFAULT_FIELDS.copy()\n fields.update((k,v) for k,v in self.SUPPLEMENTARY_FIELDS.iteritems() if (v['OBJECT'] == 'MEMBER'))\n for (name, spec) in fields.iteritems():\n if spec['PROTECT'] == protect:\n result.append(name)\n return result", "def fields(self):\n ...", "def get_readonly_fields(self, request, obj=None):\n if self.readonly_model:\n return fields_for_model(model=self.model)\n if obj is None:\n return list(self.add_readonly_fields) + list(self.readonly_fields)\n else:\n return list(self.change_readonly_fields) + list(self.readonly_fields)", "def get_readonly_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ()", "def get_readonly_fields(self, request, obj=None):\n readonly_fields = super(BaseAdmin, self).get_readonly_fields(request,\n obj=obj)\n if obj:\n readonly_fields = list(readonly_fields)\n fieldnames_for_object = map(lambda f: f.name, obj._meta.fields)\n for fieldname in self._READONLY_FIELDS_AFTER_CREATION:\n if fieldname in fieldnames_for_object:\n readonly_fields.append(fieldname)\n return readonly_fields", "def test_get_field_names():\n assert set(get_field_names(AuditModel)) == {\n \"data_before\",\n \"data_after\",\n \"acting_user\",\n \"created_on\",\n \"updated_on\",\n }", "def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n 
allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}", "def get_readonly_fields(self, request, obj=None):\n fields = set(super(RegistrationPeriodAdmin, self).get_readonly_fields(request, obj))\n if obj:\n right_now = now()\n if obj.start_time <= right_now:\n fields.add('start_time')\n if obj.end_time <= right_now:\n fields.add('end_time')\n return list(fields)", "def _fields_as_string(self):\n return ', '.join([\n field.name for field in self.fields if field.required\n ] + [\n '[%s]' % field.name for field in self.fields if not field.required\n ])", "def get_fieldlist(cls):\n return cls.fieldlist", "def Fields(self):\n return self._fields", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def _build_field_request(self):\n include = []\n for field in self.fields:\n if self.fields[field] is True:\n include.append(field)\n include = '&printFields=' + ','.join(include)\n return include", "def get_show_columns(self, model):\n return [\n getattr(field, 'di_display_name', False) or field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def get_default_field_names(self, declared_fields, model_info):\n return (\n list(declared_fields.keys()) +\n list(model_info.fields.keys())\n )", "def field_selectors(self, fields):\r\n\r\n selectors = []\r\n\r\n for field in fields:\r\n flag = (self.drop and field.name not in self.drop) \\\r\n or (self.keep and field.name in self.keep) \\\r\n or not (self.keep or self.drop)\r\n selectors.append(flag)\r\n\r\n return selectors", "def get_readonly_fields(self, request, obj=None):\n if obj:\n return self.readonly_fields\n return ()", "def base_field_names(self):\n return self._base_field_names()", "def get_fields(self):\n fields = {}\n allowed_types = (\n SerializerMethodField,\n Field,\n Serializer,\n )\n for attr in dir(self):\n if attr == 'data':\n continue\n\n if isinstance(getattr(self, attr), allowed_types):\n fields[attr] = getattr(self, attr)\n\n return fields", "def get_field_names_for_model(self, model):\n return [field.name for field in model._meta.fields if field.name != \"id\" and not\n (field.get_internal_type() == \"DateTimeField\" and\n (field.auto_now is True or field.auto_now_add is True))]", "def required_fields():\n module_logger.debug(\"In required_fields.\")\n return (\"comment\", \"lib_layout\", \"lib_selection\",\n \"ncbi_taxon_id\", \"prep_id\", \"sequencing_center\",\n \"sequencing_contact\", \"storage_duration\", \"tags\")", "def select_fields_as_sql(self):\n fields = '*'\n if self._fields:\n fields = comma_join('`%s`' % field for field in self._fields)\n return fields", "def fields(self, _only_called=False, **kwargs):\n\n # Check for an operator and transform to mongo-style if there is one\n operators = [\"slice\"]\n cleaned_fields = []\n for key, value in kwargs.items():\n parts = key.split(\"__\")\n if parts[0] in operators:\n op = parts.pop(0)\n value = {\"$\" + op: value}\n\n key = \".\".join(parts)\n try:\n field_name, value = self._check_valid_field_name_to_project(key, value)\n except ValueError as e:\n raise e\n\n cleaned_fields.append((field_name, value))\n\n # divide fields on groups by their values\n # (ONLY group, EXCLUDE group etc.) 
and add them to _loaded_fields\n # as an appropriate QueryFieldList\n fields = sorted(cleaned_fields, key=operator.itemgetter(1))\n for value, group in itertools.groupby(fields, lambda x: x[1]):\n fields = [field for field, value in group]\n self._loaded_fields += QueryFieldList(fields, value=value, _only_called=_only_called)\n\n return self", "def fields(self, names = None):\r\n\r\n if not names:\r\n return self._fields\r\n\r\n fields = [self._field_dict[name] for name in names]\r\n\r\n return fields", "def registrantFieldNames(self):\n fields = self.context.fgFields(self.request)\n field_names = []\n for field in fields:\n field_names.append(field.getName())\n return field_names", "def get_model_fields(self):\n fields = []\n\n for field in self.model._meta.get_fields():\n fields.append(field.name)\n\n return fields", "def get_readonly_fields(self, request, obj=None):\n if obj and obj.source == DigitizedWork.HATHI:\n return self.hathi_readonly_fields + self.readonly_fields\n return self.readonly_fields", "def get_query_fields(cls):\n ...", "def get_empty_required_fields(self):\n empty_fields = self.get_empty_fields()\n return [f for f in empty_fields if f in self.REQUIRED_FIELDS]", "def get_all_fields(self, filtered=False):\n\n return QueryBuilder.columns_to_dict(self, self.columns, filtered=filtered)", "def fields(proto):\n return [x[0].name for x in proto.ListFields()]" ]
[ "0.7203694", "0.70437205", "0.69849616", "0.69056183", "0.6868574", "0.6849462", "0.67701536", "0.67247665", "0.67190754", "0.6691722", "0.668793", "0.668793", "0.66687506", "0.66492057", "0.6648726", "0.66373897", "0.66218805", "0.6619963", "0.6591684", "0.65700096", "0.656766", "0.65337723", "0.6494321", "0.64858246", "0.64798325", "0.6474777", "0.64714354", "0.6444965", "0.6444965", "0.643374", "0.6431077", "0.6423467", "0.64143294", "0.6396924", "0.63758814", "0.63697964", "0.6368252", "0.63681287", "0.6347916", "0.6347094", "0.634319", "0.63280964", "0.63270354", "0.6322225", "0.63205045", "0.631832", "0.630977", "0.6308423", "0.6304844", "0.6289346", "0.62862384", "0.6278321", "0.6272661", "0.6262903", "0.62534213", "0.6241686", "0.623783", "0.62252516", "0.6224312", "0.62206036", "0.6215656", "0.6210845", "0.6209896", "0.6207285", "0.61931694", "0.61885405", "0.61795646", "0.6154143", "0.6153223", "0.6131768", "0.6122987", "0.6121775", "0.61138177", "0.61076874", "0.61060977", "0.60989165", "0.60983765", "0.60921", "0.60875857", "0.60847956", "0.60563344", "0.6052331", "0.60423756", "0.60249513", "0.6022719", "0.60198635", "0.60164154", "0.60134906", "0.60126644", "0.6008842", "0.60024464", "0.59998894", "0.5999651", "0.5998481", "0.5998011", "0.59881073", "0.5985507", "0.5982039", "0.59687334", "0.59686047" ]
0.64064956
33
Default filters for this Service.
def default_filters(self, query) -> object:
    assignment_id = self.request.matchdict.get('assignment_id')
    if assignment_id:
        query.filter(self.model.assignment_id == assignment_id)
    return query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def std_filters():\n kwargs = {\n \"sentence_filters\":[punctuation_filter],\n \"word_filters\":[small_word_filter, stopword_filter, stemming_filter]\n }\n return kwargs", "def get_filters(self):", "def default_search_filters(cls):\n q = QueryDict(mutable=True)\n q.setlist('status', cls.DEFAULT_SEARCH)\n return q.urlencode()", "def _defaultFilter(self, *args, **kwargs):\n\n return True", "def filters(self, **kwargs):\n return config.filters(self._host, self._session, **kwargs)", "def set_default_filters(fprime_test_api):\n set_event_filter(fprime_test_api, \"COMMAND\", True)\n set_event_filter(fprime_test_api, \"ACTIVITY_LO\", True)\n set_event_filter(fprime_test_api, \"ACTIVITY_HI\", True)\n set_event_filter(fprime_test_api, \"WARNING_LO\", True)\n set_event_filter(fprime_test_api, \"WARNING_HI\", True)\n set_event_filter(fprime_test_api, \"DIAGNOSTIC\", False)", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def filter(self, filters):", "def filters(self):\n\t\treturn self.local_filter", "def filters(self):\n return self.__filters", "def setDefaultFilter(self):\n self.logsItem.setDefaultFilter()", "def filters(self, filters):\n\n self._filters = filters", "def get_filters(self) -> dict:\n return self._filters", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def filters(self):\n return self._filters", "def condition_filters(self):\r\n return filters.Filters(self)", "def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }", "def get_filter_args(self):\n return []", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def test_default_filter(self):\n request = RequestFactory().get('/?foo=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.data.getlist('status'), ['active', 'paused'])\n self.assertEquals(filter.data.getlist('tags'), ['foo'])\n self.assertEquals(filter.data.getlist('foo'), ['bar'])", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def filter_default(self, req, qs):\n return qs", "def get_default_filters(self, **resources):\r\n return dict((k, (v, False)) for k, v in resources.items()\r\n if k in self._meta.fields)", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def setFilters(self, filters):\n self.__filters = filters", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self):\n return self.England_filter", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def RegisterCommonFilters(filtermap):\n\n # General casing for output naming\n filtermap['camelcase'] = stringcase.camelcase\n filtermap['capitalcase'] = stringcase.capitalcase\n filtermap['constcase'] = stringcase.constcase\n filtermap['pascalcase'] = stringcase.pascalcase\n filtermap['snakecase'] = stringcase.snakecase\n filtermap['spinalcase'] = stringcase.spinalcase", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def process_filters(self, filters, queryset, view):\n return filters", "def default_filter(fName, fObj):\n return True", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def get_filters() -> dict:\n if environment is None or not hasattr(environment, 'loader'):\n return {}\n return environment.filters", "def default_filters(self, query) -> object:\n user = self.request.user\n model = self.model\n custom_filter = self.request.params.get('_custom_filter')\n if 'g:professionals' in user.groups and custom_filter == 'pool':\n # disable security for this custom filter\n self.enable_security = False\n professional = Professional.get(user.id)\n pool_ids = [item.id for item in professional.pools]\n query = query.filter(\n model.pool_id.in_(pool_ids),\n model.state == 'published'\n )\n elif custom_filter == 'late_first_submission':\n config_delta = timedelta(seconds=int(LATE_SUBMISSION_SECONDS))\n date_limit = datetime_utcnow() - config_delta\n query = query.filter(\n model.scheduled_datetime <= date_limit,\n model.state == 'awaiting_assets',\n model.last_approval_date.is_(None)\n )\n elif custom_filter == 'late_re_submission':\n config_delta = timedelta(seconds=int(LATE_SUBMISSION_SECONDS))\n date_limit = datetime_utcnow() - config_delta\n query = query.filter(\n model.last_approval_date <= date_limit,\n model.state == 'awaiting_assets',\n model.submission_path.isnot(None),\n )\n return query", "def filters(self):\n return {\n 'port_channels': port_channels\n }", "def build_filters(self, filters=None):\n 
filters.pop('username')\n return super(UserResource, self).build_filters(filters)", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def event_filters(self) -> pulumi.Input[Sequence[pulumi.Input['EventFilterArgs']]]:\n return pulumi.get(self, \"event_filters\")", "def standard_filters():\n classes = []\n filters_dir = __path__[0]\n for dirpath, dirnames, filenames in os.walk(filters_dir):\n relpath = os.path.relpath(dirpath, filters_dir)\n if relpath == '.':\n relpkg = ''\n else:\n relpkg = '.%s' % '.'.join(relpath.split(os.sep))\n for fname in filenames:\n root, ext = os.path.splitext(fname)\n if ext != '.py' or root == '__init__':\n continue\n module_name = \"%s%s.%s\" % (__package__, relpkg, root)\n mod_classes = _get_filter_classes_from_module(module_name)\n classes.extend(mod_classes)\n return classes", "def filter(self, filter_dict):\n pass", "def GetFilters(self, filt_defs):\n # The artifact isn't actually used for anything, it's just required to\n # initialize handlers.\n probe = rdfvalue.Probe(artifact=\"Data\", filters=filt_defs)\n return probe.filters", "def load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def get_request_filters(self):\n # build the compiled set of all filters\n requested_filters = OrderedDict()\n for filter_name, f in self.filters.items():\n requested_filters[filter_name] = f\n\n # exclusion params\n exclude_name = '%s!' 
% filter_name\n if related(self, exclude_name) in self.data:\n # deepcopy the *base* filter to prevent copying of model & parent\n f_copy = copy.deepcopy(self.base_filters[filter_name])\n f_copy.parent = f.parent\n f_copy.model = f.model\n f_copy.exclude = not f.exclude\n\n requested_filters[exclude_name] = f_copy\n\n return requested_filters", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def before_any(self) -> None:\r\n for a_filter in self.filters:\r\n a_filter.before_any()", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def filters(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"input must be a dictionary\")\n\n self._filters = value", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def set_scanning_filter(self, **kwargs):\n for k, v in kwargs.get(\"filters\", {}).items():\n if k == \"UUIDs\":\n self._filters[k] = Variant(\"as\", v)\n elif k == \"RSSI\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Pathloss\":\n self._filters[k] = Variant(\"n\", v)\n elif k == \"Transport\":\n self._filters[k] = Variant(\"s\", v)\n elif k == \"DuplicateData\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Discoverable\":\n self._filters[k] = Variant(\"b\", v)\n elif k == \"Pattern\":\n self._filters[k] = Variant(\"s\", v)\n else:\n logger.warning(\"Filter '%s' is not currently supported.\" % k)\n\n if \"Transport\" not in self._filters:\n self._filters[\"Transport\"] = Variant(\"s\", \"le\")", "def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]", "def reference_filters(self, version, options):\n pass", "def test_filter_settings(self):\n self.es.register_filter(foo='bar')\n self.assertTrue(callable(self.es.filter['all'][0]))\n self.es.register_filter(bar='baz')\n self.assertLength(self.es.filter['all'], 2)", "def 
get_filters(self):\n if self.filters is not None:\n return self.filters\n elif self.parent is not None:\n return self.parent.get_filters()\n else:\n return None", "def setFilters(self, regex=None):\n if regex is not None:\n try:\n self.__regex = re.compile(regex)\n except Exception as e:\n return\n\n self.__all_filters = (self.__regex,)\n\n self.__customFilterEnabled = any(self.__all_filters)\n self.invalidateFilter()", "def filters(self) -> typing.Optional[typing.List[aws_cdk.aws_s3.NotificationKeyFilter]]:\n return self._values.get('filters')", "def testUsingFilterTool(self):\n pass", "def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def set_filters(self, can_filters=None):\n self.sw_filters = can_filters or []\n\n if not len(self.sw_filters):\n logger.info(\"Filtering has been disabled\")\n else:\n for can_filter in can_filters:\n can_id = can_filter[\"can_id\"]\n can_mask = can_filter[\"can_mask\"]\n logger.info(\n \"Filtering on ID 0x%X, mask 0x%X\", can_id, can_mask)", "def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()", "def filter(self, *args, **kwargs):\n clone = self._clone()\n for f in args:\n clone.filter_obj.add_filter(f)\n for key, value in kwargs.items():\n clone.filter_obj.add_filter_param(key, value)\n return clone", "def getFilter(self):\n\n return self.filter", "def event_filters(self) -> pulumi.Output[Sequence['outputs.EventFilterResponse']]:\n return pulumi.get(self, \"event_filters\")", "def summary_parameters(self):\n return {'filters': ', '.join(self.getOption('filters'))}", "def _get_filter(self, args):\n\n # Create the filters list\n filter_list = []\n \n # If we want to record all requests, add the file logger filter\n if args.record:\n filter_list.append(filters.StoreLoggerFilter(args.url))\n\n # Add the whitelist filter\n wl_filter = filters.WhitelistedSiteFilter(args.url)\n filter_list.append(wl_filter)\n\n # Create the ACL filter that filters all requests from devices\n acl_filter = filters.DeviceACLFilter(filter_list, args.url)\n\n return acl_filter", "def _base_proxies_filter(self, category: str, filters: list) -> list:\n\n data_filtered = []\n \n if category == 'country':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=0, filters=filters)\n )\n \n elif category == 'anonymity':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=1, filters=filters)\n )\n\n elif category == 'protocol':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=2, filters=filters)\n )\n \n elif category == 'google_passed':\n 
data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=3, filters=filters)\n )\n\n return data_filtered", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter", "def test_filter_function_settings(self):\n def foo():\n \"\"\"Dummy function.\"\"\"\n return True\n\n self.es.register_filter(foo)\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'], [])\n\n self.es.register_filter(foo, ftype='none')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'][0], foo)\n\n self.es.register_filter(foo, ftype='any')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'][0], foo)\n self.assertEqual(self.es.filter['none'][0], foo)", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": \"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def get_default_filters_dict(class_of_filters,measure,**filters):\n\tif \"datadrop__in\" in filters:\n\t\tfilters.pop(\"datadrop__in\")\n\tif class_of_filters==\"short_student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'All Lower':{'upn__wide_banding':\"L\"},\n\t\t\t'All Middle':{'upn__wide_banding':\"M\"},\n\t\t\t'All Higher':{'upn__wide_banding':\"H\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"}\n\t\t\t}\n\telif class_of_filters==\"student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':\"Lx\"},\n\t\t\t'Lower':{'upn__narrow_banding':\"L\"},\n\t\t\t'Middle':{'upn__narrow_banding':\"M\"},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':\"Ml\"},\n\t\t\t'Middle 
(Higher)':{'upn__narrow_banding':\"Mh\"},\n\t\t\t'Higher':{'upn__narrow_banding':\"H\"},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':\"Hx\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"},\n\t\t\t'Low Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\"},\n\t\t\t'Middle Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\"},\n\t\t\t'High Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\"},\n\t\t\t'Low Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\"},\n\t\t\t'Middle Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'Low PP Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Middle PP Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'High PP Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Low PP Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'Middle PP Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'High PP Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t}\n\telif class_of_filters==\"att8bucket\":\n\t\treturnDict= {'All':{},\n\t\t\t'Maths':{'subject__attainment8bucket':'ma'},\n\t\t\t'English':{'subject__attainment8bucket':'en'},\n\t\t\t'EBacc':{'subject__attainment8bucket':'eb'},\n\t\t\t'Open':{'subject__attainment8bucket':'op'},\n\t\t\t}\n\telif class_of_filters==\"banding\":\n\t\treturnDict= {'All':{},\n\t\t\t'All Lower':{'upn__wide_banding':'L'},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':'Lx'},\n\t\t\t'Lower':{'upn__narrow_banding':'L'},\n\t\t\t'All Middle':{'upn__wide_banding':'M'},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':'Ml'},\n\t\t\t'Middle (Higher)':{'upn__narrow_banding':'Mh'},\n\t\t\t'All Higher':{'upn__wide_banding':'H'},\n\t\t\t'Higher':{'upn__narrow_banding':'H'},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':'Hx'},\n\t\t\t'No Banding':{'upn__wide_banding':'N'},\n\t\t\t}\n\telif class_of_filters==\"subject_blocks\":\n\t\treturnDict= {'All':{},\n\t\t\t'Core':{'subject__option_subject':False},\n\t\t\t'Option':{'subject__option_subject':True},\n\t\t\t'EBacc':{'subject__ebacc_subject':True},\n\t\t\t'Non-EBacc':{'subject__ebacc_subject':False},\n\t\t\t}\n\telif \"staff\" in class_of_filters:\n\t\tfilters.pop('datadrop',None)\n\t\tfilters.pop('datadrop__name',None)\n\t\tif \"classgroup\" in filters:\n\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\tfilters.pop('classgroup',None)\n\t\treturnDict={'All':{}}\n\t\tstaff_set=set(classgroup.objects.filter(**filters).exclude(staff=\"---\")\n\t\t\t.values_list('staff').distinct())\n\t\tstaff_list=[]\n\t\tfor st in staff_set:\n\t\t\tfor s in st:\n\t\t\t\tstaff_list.append(s)\n\t\tstaff_list.sort()\n\t\tfor code in staff_list:\n\t\t\tclasses=classgroup.objects.filter(staff=code,**filters).distinct()\n\t\t\tif \"short\" not in class_of_filters:\n\t\t\t\tfor cl in classes:\n\t\t\t\t\treturnDict[code+\" \"+cl.class_code]={\"classgroup\":cl}\n\t\t\treturnDict['All ' +code]={\"classgroup__in\":classes}\n\telse:\n\t\t\"\"\"if not a fixed set of filters, populate from objects in db based on\n\t\tclass, code specific to each class removes invalid filters and replaces\n\t\tthem with valid ones where possible\"\"\"\n\t\tif class_of_filters==\"classgroup\" :\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"classgroup\" in 
filters:\n\t\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"subject\" or class_of_filters==\"faculty\":\n\t\t\tif \"subject\" in filters:\n\t\t\t\tfilters['name']=filters['subject'].name\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['name']=filters['subject__name']\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tif \t\"datadrop__name\" in filters:\n\t\t\t\tfilters['name']=filters['datadrop__name']\n\t\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"datadrop\" in filters:\n\t\t\t\tfilters['id']=filters['datadrop'].id\n\t\t\t\tfilters.pop('datadrop',None)\n\t\t\tif \"subject\" in filters or \"faculty\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject=filters['subject'])\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject__name__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['cohort']=filters['classgroup'].cohort\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"yeargroup\" :\n\t\t\tif \"subject__name\" in filters and measure==\"progress\":\n\t\t\t\tfilters['subject__in']=subject.objects.filter(\n\t\t\t\t\tname__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"cohort\" in filters and measure==\"progress\":\n\t\t\t\tfilters['cohort']=filters['cohort'].cohort\n\t\t\tfilters.pop('subject',None)\n\n\t\t#get queryset or set of objects from db based on filters\n\t\tif class_of_filters in ['yeargroup','datadrop','subject',\n\t\t'classgroup']:\n\t\t\tqset=apps.get_model('analysis',class_of_filters).\\\n\t\t\t\tobjects.filter(**filters)\n\t\telif class_of_filters==\"faculty\":\n\t\t\tqset=['Maths','English','Science','Humanities','MFL',\n\t\t\t\t'Arts','Technology','IT',None]\n\t\t\tfor sub in subject.objects.filter(**filters):\n\t\t\t\tif sub.faculty not in qset:\n\t\t\t\t\tqset.add(sub.faculty)\n\n\t\t#sorting set for each class\n\t\tif class_of_filters==\"yeargroup\":\n\t\t\tclass_of_filters=\"subject__cohort\"\n\t\t\tqset=qset.order_by('cohort')\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tqset=qset.order_by('cohort','-date')\n\t\telif class_of_filters==\"subject\":\n\t\t\tqset=qset.order_by('name','faculty')\n\t\telif class_of_filters==\"classgroup\":\n\t\t\tqset=qset.order_by('class_code')\n\t\telif class_of_filters==\"faculty\":\n\t\t\tclass_of_filters=\"subject__faculty\"\n\t\t#populate returning dictionary with set/queryset\n\t\treturnDict={}\n\t\treturnDict['All']={}\n\t\tif class_of_filters==\"subject\":\n\t\t\tfor q in qset:\n\t\t\t\treturnDict[q.name]={'subject__name':q.name}\n\t\telse:\n\t\t\tfor q in qset:\n\t\t\t\tif q is None and \"faculty\" in class_of_filters:\n\t\t\t\t\treturnDict[\"Other\"]={class_of_filters:q}\n\t\t\t\telse:\n\t\t\t\t\treturnDict[q.__str__()]={class_of_filters:q}\n\tif measure in avg_headline_measures or measure in pct_headline_measures:\n\t\tfor outerkey,dict in returnDict.items():\n\t\t\tdict=clean_filters(dict)\n\treturn returnDict", "def apply_filters(filters, items):\n return scom.apply_filters(filters, items)", "def build_ip_filters(self):\n if is_list_empty(self.data['ipfilter']['myfilter']) is False:\n for 
item in self.data['ipfilter']['myfilter']:\n self.cidr_filter_list.append(item)\n else:\n logger.warning(\n \"my filter field is empty in the given input file , rules for the same will not be created in \"\n \"Nginx configuration\")", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def get_v1_filters(args: Dict[str, Any]) -> List[str]:\n filters = []\n args_name_to_filter_name = {\n 'alert-status': 'alert.status',\n 'policy-name': 'policy.name',\n 'policy-label': 'policy.label',\n 'policy-compliance-standard': 'policy.complianceStandard',\n 'cloud-account': 'cloud.account',\n 'cloud-account-id': 'cloud.accountId',\n 'cloud-region': 'cloud.region',\n 'alert-rule-name': 'alertRule.name',\n 'resource-id': 'resource.id',\n 'resource-name': 'resource.name',\n 'resource-type': 'resource.type',\n 'alert-id': 'alert.id',\n 'cloud-type': 'cloud.type',\n 'policy-type': 'policy.type',\n 'policy-severity': 'policy.severity',\n }\n for arg_name, filter_name in args_name_to_filter_name.items():\n if arg_value := args.get(arg_name):\n filters.append(f'{filter_name}={arg_value}')\n\n return filters", "def get_default_gramplets(self):\n return ((\"Person Filter\",),\n ())", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def filter(self, name=None):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n if _name in self._filters:\n raise Error(\"Filter already defined: {0}\".format(_name))\n\n self._filters[_name] = fn\n return fn\n return wrapper", "def set_filters(self, filters: List[DataGridFilter]):\n self.filters = filters", "def filter(self, *args, **kwargs):", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = 
fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def __init__(self, filter_methods: ConfigNodePropertyArray=None, filter_enable_safe_user_agents: ConfigNodePropertyBoolean=None, filter_safe_user_agents: ConfigNodePropertyArray=None, filter_excluded_paths: ConfigNodePropertyArray=None): # noqa: E501\n self.openapi_types = {\n 'filter_methods': ConfigNodePropertyArray,\n 'filter_enable_safe_user_agents': ConfigNodePropertyBoolean,\n 'filter_safe_user_agents': ConfigNodePropertyArray,\n 'filter_excluded_paths': ConfigNodePropertyArray\n }\n\n self.attribute_map = {\n 'filter_methods': 'filter.methods',\n 'filter_enable_safe_user_agents': 'filter.enable.safe.user.agents',\n 'filter_safe_user_agents': 'filter.safe.user.agents',\n 'filter_excluded_paths': 'filter.excluded.paths'\n }\n\n self._filter_methods = filter_methods\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents\n self._filter_safe_user_agents = filter_safe_user_agents\n self._filter_excluded_paths = filter_excluded_paths", "def filter_queryset(self,queryset):\n filters = {}\n for backend in list(self.filter_backends):\n backendobj = backend()\n queryset = 
backendobj.filter_queryset(self.request, queryset, self)\n if hasattr(backendobj,'get_applied_filters'):\n filters.update(backendobj.get_applied_filters())\n self. applied_filters = OrderedDict()\n for key,value in filters.items():\n if isinstance(value,datetime.datetime):\n self.applied_filters[key]=value\n del filters[key]\n self.applied_filters.update(sorted(filters.items(),key=itemgetter(1),reverse=True))\n return queryset", "def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))", "def group_filters(self, per_page=None, page=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'filters')\r\n return http.Request('GET', url, params), parsers.parse_json", "def __init__(self, config, *parse_list):\n super(ParsingFilter, self).__init__()\n self.config = config\n try:\n if (\n self.config[\"filter\"][\"whitelist\"]\n and self.config[\"filter\"][\"blacklist\"]\n ):\n _LOGGER.warning(\n _(\n \"Both whitelist and blacklist filters found in configuration. \"\n \"Only one can be used at a time - only the whitelist filter will be used.\"\n )\n )\n self.parse_list = [\n logging.Filter(name) for name in parse_list[0][\"whitelist\"]\n ]\n except KeyError:\n self.parse_list = parse_list[0].get(\"whitelist\") or parse_list[0].get(\n \"blacklist\"\n )\n\n self.parse_list = [logging.Filter(name) for name in self.parse_list]", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def filter(self):\n return self._filter" ]
[ "0.725708", "0.7212429", "0.71636355", "0.6952968", "0.69228226", "0.6897654", "0.6820288", "0.6785757", "0.6766489", "0.6697421", "0.6685094", "0.6655373", "0.66130894", "0.6598326", "0.6537085", "0.6485721", "0.6480506", "0.6385164", "0.6371988", "0.63620067", "0.6340479", "0.6302228", "0.62903553", "0.62714326", "0.6223476", "0.62069535", "0.6190333", "0.6190333", "0.6173894", "0.61693084", "0.6161277", "0.6149671", "0.6149054", "0.6142109", "0.61198664", "0.61046875", "0.6091077", "0.60852426", "0.6064798", "0.6034536", "0.6019485", "0.6008492", "0.59872806", "0.5978196", "0.5910978", "0.59063506", "0.590376", "0.5872586", "0.5854972", "0.58371323", "0.58309025", "0.5807994", "0.5787381", "0.57673717", "0.575881", "0.575274", "0.57324356", "0.5731061", "0.57231784", "0.57212967", "0.57087976", "0.57009614", "0.56809455", "0.5662523", "0.5648926", "0.564705", "0.56437874", "0.5628167", "0.5624637", "0.5621887", "0.5617396", "0.5607092", "0.5604287", "0.5603227", "0.5601399", "0.5593829", "0.55912846", "0.5590739", "0.5590172", "0.55869025", "0.55865544", "0.5557327", "0.55554396", "0.5529343", "0.5520462", "0.5516033", "0.5511326", "0.5510067", "0.5493857", "0.5492954", "0.5489638", "0.5476201", "0.54761904", "0.54636645", "0.5455411", "0.5452105", "0.5450159", "0.54490906", "0.54481643", "0.54468817", "0.54455054" ]
0.0
-1
Creates a new pathfinding service
def __init__( self, web3: Web3, contract_manager: ContractManager, registry_address: Address, sync_start_block: int = 0, required_confirmations: int = 8, poll_interval: int = 10, ): super().__init__() self.web3 = web3 self.contract_manager = contract_manager self.registry_address = registry_address self.sync_start_block = sync_start_block self.required_confirmations = required_confirmations self.poll_interval = poll_interval self.chain_id = int(web3.net.version) self.is_running = gevent.event.Event() self.token_networks: Dict[Address, TokenNetwork] = {} self.token_network_listeners: List[BlockchainListener] = [] self.is_running = gevent.event.Event() log.info('Starting TokenNetworkRegistry Listener (required confirmations: {})...'.format( self.required_confirmations, )) self.token_network_registry_listener = BlockchainListener( web3=web3, contract_manager=self.contract_manager, contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY, contract_address=self.registry_address, required_confirmations=self.required_confirmations, poll_interval=self.poll_interval, sync_start_block=self.sync_start_block, ) log.info( f'Listening to token network registry @ {registry_address} ' f'from block {sync_start_block}', ) self._setup_token_networks()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createService(data):\n return Service(data).create()", "def new(\n cls,\n name: str,\n description: str,\n registration_schema: JSON,\n result_schema: JSON,\n database_session: Session) -> 'Service':\n raise NotImplementedError()", "def CreateFinding(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_service(self, service_id, service_ref):\n raise exception.NotImplemented() # pragma: no cover", "def service_create(path, service_name, definition):\n compose_result, loaded_definition, err = __load_compose_definitions(\n path, definition\n )\n if err:\n return err\n services = compose_result[\"compose_content\"][\"services\"]\n if service_name in services:\n msg = \"Service {} already exists\".format(service_name)\n return __standardize_result(False, msg, None, None)\n services[service_name] = loaded_definition\n return __dump_compose_file(\n path,\n compose_result,\n \"Service {} created\".format(service_name),\n already_existed=True,\n )", "def create_service(self, service_name, *args, **kwargs):\n\n creator = self._service_creators.get(service_name, None)\n\n if creator is None:\n return None\n\n return creator(*args, **kwargs)", "def make_service(self, endpoint_type, service_name, **client_kwargs):\n binding = self._make_binding(endpoint_type, service_name)\n service_cache_key = (binding, str(client_kwargs))\n\n if service_cache_key in self._service_cache:\n srvc = self._service_cache[service_cache_key]\n else:\n client = self._make_client(\n endpoint_type,\n service_name,\n **client_kwargs\n )\n srvc = client.create_service(binding, client.wsdl.location)\n self._service_cache[service_cache_key] = srvc\n return srvc", "def create_nat(self, **attrs):\n return self._create(_gw.Service, tenant_id=self.get_project_id(), **attrs)", "def service_create(service, service_type, api, endpoint):\n db = model.Session()\n _assert_absent(db, model.Service, service)\n api = _must_find(db, model.API, api)\n service = model.Service(service, service_type, api, endpoint)\n db.add(service)\n db.commit()", "def create_service():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=9797)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def create_service(cls, proto_py_module, service_name):\n\n return cls.create_services(proto_py_module, service_name)", "def create_test_service(context, **kw):\n service = get_test_service(context, **kw)\n service.create()\n return service", "def newSDDCService(**kwargs):\n # Test for interactive flag - if False, check to ensure additional arguments were give for service entry\n if kwargs['interactive'] is False and (kwargs['l4_protocol'] is None or kwargs['dest_ports'] is None):\n print(\"Error - if not using interactive mode, at least protocol and 
destination port(s) must be configured. Source port(s) optional, based on your application.\")\n sys.exit(1)\n elif kwargs['interactive'] is True and (kwargs['l4_protocol'] is not None or kwargs['dest_ports'] is not None or kwargs['source_ports'] is not None):\n print(\"Error - if using interactive mode, please only specify the name of the desired service. All other parameters will be obtained interactively.\")\n sys.exit(1)\n else:\n pass\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n service_id = kwargs['objectname']\n interactive = kwargs['interactive']\n\n if interactive == True:\n service_entry_list = []\n # Start a loop that will run until the user enters 'quit'.\n # Ask the user for a name.\n destination_port = \"\"\n while destination_port != 'done':\n destination_port_list = []\n source_port_list = []\n service_entry_id = input(\"Please enter the Service Entry ID:\")\n l4_protocol = input(\"Please enter the L4 Protocol:\")\n source_port = \"\"\n destination_port = \"\"\n while source_port != 'done':\n source_port = input(\"Plese enter the Source Ports or type 'done' when your list is finished:\")\n if source_port != \"done\":\n source_port_list.append(source_port)\n while (destination_port != 'next') and (destination_port != \"done\"):\n source_port = \"\"\n destination_port = input(\"Plese enter the Destination Ports, type 'next' when you want to define another service entry or 'done' if you have finished:\")\n if (destination_port != 'next') and (destination_port != \"done\"):\n destination_port_list.append(destination_port)\n service_entry = {\n \"l4_protocol\": l4_protocol,\n \"source_ports\": source_port_list,\n \"destination_ports\" : destination_port_list,\n \"resource_type\" : \"L4PortSetServiceEntry\",\n \"id\" : service_entry_id,\n \"display_name\" : service_entry_id }\n service_entry_list.append(service_entry)\n else:\n source_port_list = kwargs['source_ports']\n destination_port_list = kwargs['dest_ports']\n l4_protocol = kwargs['l4_protocol']\n service_entry_list = [\n {\n \"l4_protocol\": l4_protocol,\n \"source_ports\": source_port_list,\n \"destination_ports\": destination_port_list,\n \"resource_type\": \"L4PortSetServiceEntry\",\n \"display_name\": f'{service_id}_svc_entry'\n }\n ]\n json_data = {\n \"service_entries\":service_entry_list,\n \"id\" : service_id,\n \"display_name\" : service_id,\n }\n response = new_sddc_service_json(proxy,sessiontoken,service_id,json_data)\n if response == 200:\n print(f'Service {service_id} successfully updated.')\n params = {'proxy':proxy, 'sessiontoken':sessiontoken, 'objectname':service_id}\n getSDDCService(**params)\n else:\n print(\"Issues creating the service - please check your syntax and try again.\")\n sys.exit(1)", "def CreateService(self, name=\"default_model_container\", type=None, arguments=None):\n\n service = self._service_registry.Create(name, type, arguments)\n\n self._loaded_services.append(service)\n\n return service", "def makeService(self, options):\n s = MultiService()\n\n irp = internet.TCPServer(int(options[\"port\"]), IRPServerFactory())\n irp.setServiceParent(s)\n\n manholeFactory = ShellFactory()\n manholeFactory.username = \"admin\"\n manholeFactory.password = \"admin\"\n manholeFactory.namespace[\"foo\"] = 12\n manholeService = internet.TCPServer(8000, manholeFactory)\n manholeService.setServiceParent(s)\n\n return s", "def _create_service_client(self, srv_name):\n if self._srv:\n self._srv.close()\n\n if srv_name in rosservice.get_service_list():\n rospy.loginfo(\"Creating proxy 
for service '%s'\" % srv_name)\n self._srv = rospy.ServiceProxy(srv_name, rosservice.get_service_class_by_name(srv_name))", "def test_makeService(self):\n maker = serve.ServiceMaker()\n\n endpoint = object()\n maker._serverFromString = lambda reactor, spec: endpoint\n site = object()\n maker._buildSite = lambda: site\n\n svc = maker.makeService({\"endpoint\": \"something\"})\n self.assertTrue(isinstance(svc, internet.StreamServerEndpointService))\n self.assertIdentical(svc.endpoint, endpoint)\n self.assertIdentical(svc.factory, site)", "def _create_soap_object(self, name):\n return self.client.factory.create(name)", "def create_service(self, url_data):\n data = {key: value[0] for key, value in url_data}\n\n publish_key = uuid.uuid4().hex\n service_id = uuid.uuid4().hex\n service_name = data['name']\n\n self.fastly_cache[service_name] = {\n 'service_details': {\n u'comment': '',\n u'locked': False,\n u'updated_at': u'2014-11-13T14:29:10+00:00',\n u'created_at': u'2014-11-13T14:29:10+00:00',\n u'testing': None,\n u'number': 1,\n u'staging': None,\n u'active': None,\n u'service_id': service_id,\n u'deleted_at': None,\n u'inherit_service_id': None,\n u'deployed': None},\n 'service_name': service_name\n }\n self.fastly_cache[service_id] = self.fastly_cache[service_name]\n\n create_service = {\n u'comment': '',\n u'publish_key': publish_key,\n u'name': service_name,\n u'versions': [{u'comment': '', u'locked': u'0',\n u'service': service_id,\n u'updated_at': u'2014-11-12T18:43:21',\n u'created_at': u'2014-11-12T18:43:21',\n u'testing': None, u'number': u'1',\n u'staging': None,\n u'active': None,\n u'service_id': service_id,\n u'deleted_at': None,\n u'inherit_service_id': None,\n u'deployed': None,\n u'backend': 0}],\n u'created_at': u'2014-11-12T18:43:21+00:00',\n u'updated_at': u'2014-11-12T18:43:21+00:00',\n u'customer_id': data['customer_id'],\n u'id': service_id}\n return create_service", "def create(cls, original_args, process_args, base_url, host_url, services):\n return cls(original_args, process_args, base_url, host_url, services)", "def create_endpoint(path, workspace):\n client = Client()\n\n client.create_endpoint(path, workspace=workspace)", "def create_service(flags, client_id, client_secret):\n flow = OAuth2WebServerFlow(\n client_id=client_id,\n client_secret=client_secret,\n scope='https://www.googleapis.com/auth/drive.readonly',\n redirect_uri='http://localhost')\n storage = Storage('oauth_storage')\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(httplib2.Http())\n return build('drive', 'v2', http=http)", "def create_servicech(self, conf, params):\n\t\tpass", "def new_instance(cls,\n version: date,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'DirectLinkApisV1':\n if version is None:\n raise ValueError('version must be provided')\n\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n version,\n authenticator\n )\n service.configure_service(service_name)\n return service", "def create_service(service, version, creds=None):\n # Instantiate an Http instance\n http = httplib2.Http()\n\n if creds:\n # Authorize the Http instance with the passed credentials\n creds.authorize(http)\n\n return build(service, version, http=http)", "def create_vpnservice(self, body=None):\r\n return self.post(self.vpnservices_path, body=body)", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_Create'))", "def paths_allow_service(ctx, network, destination, source, port):\n source_service = 
get_service_for_cli(ctx, network, source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.add(source_service, destination_service, port)\n click.echo('Added path from %s to %s in network %s for port %s' % (source, destination,\n network, port))", "def new_instance(cls,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'GlobalLoadBalancersV1':\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n authenticator\n )\n service.configure_service(service_name)\n return service", "def __create_handler():\n if not ServiceHandler.instance:\n ServiceHandler.instance = ServiceHandler()\n return ServiceHandler.instance", "def test_ipam_services_create(self):\n pass", "def create():", "def create():", "def create_service_for_me(self, treatment_id, family, subfamily, physician_id):\n\t\tprint()\n\t\tprint('Create Service Generic - ', subfamily)\n\t\t# init\n\t\tmodel_dic = {\n\t\t\t\t\t\t'all': \t\t\t_model_service,\n\t\t\t\t\t\t#'co2': \t\t_model_ser_co2,\n\t\t\t\t\t\t#'excilite': \t'openhealth.service_excilite',\n\t\t\t\t\t\t#'ipl': \t\t'openhealth.service_ipl',\n\t\t\t\t\t\t#'ndyag': \t\t'openhealth.service_ndyag',\n\t\t\t\t\t\t#'quick': \t\t'openhealth.service_quick',\n\t\t\t\t\t\t#'cosmetology': 'openhealth.service_cosmetology',\n\t\t\t\t\t\t#'medical': \t'openhealth.service_medical',\n\t\t\t\t\t\t#'gynecology': \t'openhealth.service_gynecology',\n\t\t\t\t\t\t#'echography': \t'openhealth.service_echography',\n\t\t\t\t\t\t#'promotion': \t'openhealth.service_promotion',\n\t\t\t\t\t\t#'product': \t'openhealth.service_product',\n\t\t\t}\n\t\tmodel = model_dic[subfamily]\n\n\t\t# open\n\t\treturn {\n\t\t\t\t'type': _model_action,\n\t\t\t\t'name': ' New Service Current',\n\t\t\t\t'res_model': \tmodel,\n\t\t\t\t#'res_id': consultation_id,\n\t\t\t\t\"views\": [[False, \"form\"]],\n\t\t\t\t#'view_type': 'form',\n\t\t\t\t'view_mode': 'form',\n\t\t\t\t'target': 'current',\n\t\t\t\t'flags': \t{\n\t\t\t\t\t\t\t\t'form': {'action_buttons': True, 'options': {'mode': 'edit'}}\n\t\t\t\t\t\t\t\t#'form': {'action_buttons': False, }\n\t\t\t\t\t\t\t},\n\t\t\t\t'context': {\n\t\t\t\t\t\t\t\t'default_family': family,\n\t\t\t\t\t\t\t\t'default_physician': physician_id,\n\t\t\t\t\t\t\t\t#'default_pl_subfamily': subfamily,\n\t\t\t\t\t\t\t\t'default_treatment': treatment_id,\n\t\t\t\t\t\t\t}\n\t\t\t\t}", "def service(self, service_name):\n return Service('/'.join((self._url, self._services_path, service_name)))", "def testServiceFactory(self):\n handler_factory = service_handlers.ServiceHandlerFactory(Service)\n self.assertEquals(Service, handler_factory.service_factory)", "def service(self) -> BaseService:", "def create_services(cls, proto_py_module, *service_names):\n\n return cls(proto_py_module, *service_names).services", "def create_service_object(credentials):\n http_auth = httplib2.Http()\n http_auth = credentials.authorize(http_auth)\n service = discovery.build('analytics', 'v3', http=http_auth)\n return service", "def create_services(\n self,\n body, # type: \"models.MicrosoftGraphPrintService\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphPrintService\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrintService\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_services.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrintService')\n body_content_kwargs['content'] = body_content\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrintService', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def initService(self):", "def makeService(self, options):\n endpoint = self._serverFromString(reactor, options[\"endpoint\"])\n factory = self._buildSite()\n return internet.StreamServerEndpointService(endpoint, factory)", "def test_add_virtual_service(self):\n pass", "def _fakeService(self, description, factory):\n self._service = _FakeService(description, factory)\n return self._service", "def __init__(__self__, *,\n application_name: pulumi.Input[str],\n cluster_name: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n service_kind: pulumi.Input[Union[str, 'ServiceKind']],\n correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]] = None,\n default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n partition_description: Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]] = None,\n placement_constraints: Optional[pulumi.Input[str]] = None,\n service_dns_name: Optional[pulumi.Input[str]] = None,\n service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,\n service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]] = None,\n service_type_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"application_name\", application_name)\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"service_kind\", 
service_kind)\n if correlation_scheme is not None:\n pulumi.set(__self__, \"correlation_scheme\", correlation_scheme)\n if default_move_cost is not None:\n pulumi.set(__self__, \"default_move_cost\", default_move_cost)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if partition_description is not None:\n pulumi.set(__self__, \"partition_description\", partition_description)\n if placement_constraints is not None:\n pulumi.set(__self__, \"placement_constraints\", placement_constraints)\n if service_dns_name is not None:\n pulumi.set(__self__, \"service_dns_name\", service_dns_name)\n if service_load_metrics is not None:\n pulumi.set(__self__, \"service_load_metrics\", service_load_metrics)\n if service_name is not None:\n pulumi.set(__self__, \"service_name\", service_name)\n if service_package_activation_mode is not None:\n pulumi.set(__self__, \"service_package_activation_mode\", service_package_activation_mode)\n if service_placement_policies is not None:\n pulumi.set(__self__, \"service_placement_policies\", service_placement_policies)\n if service_type_name is not None:\n pulumi.set(__self__, \"service_type_name\", service_type_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def testFactoryMethod(self):\n factory = service_handlers.ServiceHandlerFactory(Service)\n handler = factory()\n\n self.assertTrue(isinstance(handler, service_handlers.ServiceHandler))\n self.assertTrue(isinstance(handler.service, Service))", "def set_path_service(self, new_path):\n self.__repo.set_path_repo(new_path)", "def start_service(self):\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Starting up service\")\n\n self.start_swarm()\n\n container_spec = docker.types.ContainerSpec(\n image=self.dkr_image,\n command=self.dkr_command,\n env=self.dkr_env\n )\n task_tmpl = docker.types.TaskTemplate(container_spec)\n svc = self.api_client().create_service(\n name=self.dkr_name,\n task_template=task_tmpl)\n\n self.dkr_service = svc", "def __init__(self):\n self.ctrl = src.robot_controller.RobotController()\n self.recorder = robot_recorder.RobotRecorder(save_dir=\"/home/guser/sawyer_data/test_recording\", start_loop=False)\n\n # drive to neutral position:\n self.ctrl.set_neutral()\n # import pdb; pdb.set_trace()\n\n self.num_traj = 10\n\n\n limb = 'right'\n self.name_of_service = \"ExternalTools/\" + limb + \"/PositionKinematicsNode/FKService\"\n self.fksvc = rospy.ServiceProxy(self.name_of_service, SolvePositionFK)\n\n self.run_data_collection()", "def get_service(self):", "def create_controller_query_service():\r\n return ControllerQueryService(settings.OPEN_ENDED_GRADING_INTERFACE, SYSTEM)", "def sample_services(user, **params):\n defaults = {\n 'title' : 'Sample services',\n 'price' : 5.00\n\n }\n defaults.update(params)\n\n return Service.objects.create(user=user, **defaults)", "def __init__(self, service_name):\n self.service_name = service_name", "async def api_create_service(\n data: CreateService, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n try:\n service = await create_service(data=data)\n except Exception as e:\n raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(e))\n\n return service.dict()", "def makeService_Single(self, options):\n def slaveSvcCreator(pool, store, logObserver, storageService):\n\n if store is None:\n raise StoreNotAvailable()\n\n result = self.requestProcessingService(options, store, logObserver)\n\n # Optionally set up push notifications\n pushDistributor = None\n if 
config.Notifications.Enabled:\n observers = []\n if config.Notifications.Services.APNS.Enabled:\n pushSubService = ApplePushNotifierService.makeService(\n config.Notifications.Services.APNS, store\n )\n observers.append(pushSubService)\n pushSubService.setName(\"APNS\")\n pushSubService.setServiceParent(result)\n if config.Notifications.Services.AMP.Enabled:\n pushSubService = AMPPushMaster(\n None, result,\n config.Notifications.Services.AMP.Port,\n config.Notifications.Services.AMP.EnableStaggering,\n config.Notifications.Services.AMP.StaggerSeconds,\n )\n observers.append(pushSubService)\n if observers:\n pushDistributor = PushDistributor(observers)\n\n directory = store.directoryService()\n\n # Job queues always required\n from twisted.internet import reactor\n\n pool = ControllerQueue(\n reactor, store.newTransaction,\n useWorkerPool=False,\n disableWorkProcessing=config.MigrationOnly,\n )\n store.queuer = store.pool = pool\n pool.setServiceParent(result)\n\n # Optionally set up mail retrieval\n if config.Scheduling.iMIP.Enabled:\n mailRetriever = MailRetriever(\n store, directory, config.Scheduling.iMIP.Receiving\n )\n mailRetriever.setName(\"mailRetriever\")\n mailRetriever.setServiceParent(result)\n else:\n mailRetriever = None\n\n # Start listening on the stats socket, for administrators to inspect\n # the current stats on the server.\n stats = None\n if config.Stats.EnableUnixStatsSocket:\n stats = DashboardServer(logObserver, None)\n stats.store = store\n statsService = GroupOwnedUNIXServer(\n gid, config.Stats.UnixStatsSocket, stats, mode=0660\n )\n statsService.setName(\"unix-stats\")\n statsService.setServiceParent(result)\n if config.Stats.EnableTCPStatsSocket:\n stats = DashboardServer(logObserver, None)\n stats.store = store\n statsService = TCPServer(\n config.Stats.TCPStatsPort, stats, interface=\"\"\n )\n statsService.setName(\"tcp-stats\")\n statsService.setServiceParent(result)\n\n # Optionally set up group cacher\n if config.GroupCaching.Enabled:\n cacheNotifier = MemcacheURLPatternChangeNotifier(\"/principals/__uids__/{token}/\", cacheHandle=\"PrincipalToken\") if config.EnableResponseCache else None\n groupCacher = GroupCacher(\n directory,\n updateSeconds=config.GroupCaching.UpdateSeconds,\n initialSchedulingDelaySeconds=config.GroupCaching.InitialSchedulingDelaySeconds,\n batchSize=config.GroupCaching.BatchSize,\n batchSchedulingIntervalSeconds=config.GroupCaching.BatchSchedulingIntervalSeconds,\n useDirectoryBasedDelegates=config.GroupCaching.UseDirectoryBasedDelegates,\n cacheNotifier=cacheNotifier,\n )\n else:\n groupCacher = None\n\n def decorateTransaction(txn):\n txn._pushDistributor = pushDistributor\n txn._rootResource = result.rootResource\n txn._mailRetriever = mailRetriever\n txn._groupCacher = groupCacher\n\n store.callWithNewTransactions(decorateTransaction)\n\n return result\n\n uid, gid = getSystemIDs(config.UserName, config.GroupName)\n\n # Make sure no old socket files are lying around.\n self.deleteStaleSocketFiles()\n logObserver = RotatingFileAccessLoggingObserver(\n config.AccessLogFile,\n )\n\n # Maybe spawn memcached. 
Note, this is not going through a\n # ProcessMonitor because there is code elsewhere that needs to\n # access memcached before startService() gets called\n self._spawnMemcached(monitor=None)\n\n return self.storageService(\n slaveSvcCreator, logObserver, uid=uid, gid=gid\n )", "def _hs_service(self, service_name, address, port, properties):\n identifier = service_name.split(\".\")[0]\n name = properties.get(\"Name\")\n hsgid = properties.get(\"hG\")\n service = conf.DmapService(identifier, hsgid, port=port, properties=properties)\n self._handle_service(address, name, service)", "def createUnoService( cClass ):\n oServiceManager = getServiceManager()\n oObj = oServiceManager.createInstance( cClass )\n return oObj", "def createUnoService( cClass ):\n oServiceManager = getServiceManager()\n oObj = oServiceManager.createInstance( cClass )\n return oObj", "def create_service(server: Arma3Server):\n file_name = get_service_file_name(server.id)\n user = Settings.local_steam_user\n\n content = \"[Unit]\\nDescription=Arma 3 Server\\n\\n[Service]\\nUser=\"\n content += user\n content += \"\\nGroup=\" + user\n content += \"\\nWorkingDirectory=/home/\" + user\n content += \"\\nExecStart=/bin/bash \" + get_startup_script_file_name(server.id)\n content += \"\\nRestart=always\\n\\n[Install]\\nWantedBy=multi-user.target\\n\"\n\n with open(file_name, 'w') as f:\n f.write(content)\n\n if Settings.debug_windows:\n logger.info(\"windows create service dummy\")\n return\n\n subprocess.check_call([\"sudo\", \"systemctl\", \"daemon-reload\"])", "def route_creation():\r\n city_ids = json.loads(open(\"cities.json\").read())\r\n cities = []\r\n for id in city_ids:\r\n cities.append(fetch_weather(id))\r\n return Route(cities)", "def main():\n rospy.init_node('dibujo_server', anonymous=True)\n\n ruta = Ruta()\n s = rospy.Service('dibujo', Dibujo, ruta.dibujo)\n print('========= Waiting for service ========')\n rospy.spin()", "def find_service(iface, context, name):", "def main(url, usr, pw, folder_name, service_name):\n # connect to ArcGIS Server instance\n arcserver = admin.ArcServerAdmin(url, usr, pw)\n\n #-----------------------------------------------------------------------------------------------#\n # list services and configured state in a single folder\n folder = arcserver.folder(folder_name)\n for service in folder.iter_services():\n print service.serviceName, service.configuredState\n\n # can stop a service like this\n # service.stop()\n\n # or start like this\n # service.start()\n\n print '\\n' * 3\n\n # show all services and configured state (use iter_services to return restapi.admin.Service() object!)\n for service in arcserver.iter_services():\n print service.serviceName, service.configuredState\n \n print '\\n' * 3\n\n #-----------------------------------------------------------------------------------------------#\n # setting security on a folder\n # make a folder publically available (i.e. 
unsecure it)\n arcserver.addPermission(folder_name, private=False) # can also do this from a Folder object\n\n # this is now unsecured, let's secure it again\n arcserver.addPermission(folder_name) # by default it will make private True (sets security)\n\n #-----------------------------------------------------------------------------------------------#\n # stop all services in a folder\n arcserver.stopServices(folderName=folder_name) # this can take a few minutes\n\n # look thru the folder to check the configured states, should be stopped\n for service in folder.iter_services():\n print service.serviceName, service.configuredState\n\n # now restart\n arcserver.startServices(folderName=folder_name) # this can take a few minutes\n\n # look thru folder, services should be started\n for service in folder.iter_services():\n print service.serviceName, service.configuredState\n\n #-----------------------------------------------------------------------------------------------#\n # query log files (within last 3 days), need to convert to milliseconds\n threeDaysAgo = restapi.date_to_mil(datetime.datetime.now() - relativedelta(days=3))\n for log in arcserver.queryLogs(startTime=threeDaysAgo, pageSize=25):\n print log.time\n for message in log:\n print message\n print '\\n'\n\n #-----------------------------------------------------------------------------------------------#\n # connect to an individual service (by wildcard) - do not need to include full name, just\n # enough of the name to make it a unique name query\n service = arcserver.service(service_name)\n\n # get original service description\n description = service.description\n\n # now edit the description\n service.edit(description='This is an updated service description')\n\n # edit description again to set it back to the original description\n service.edit(description=description)\n\n #-----------------------------------------------------------------------------------------------#\n # connect to the server's data store\n ds = arcserver.dataStore\n\n # iterate through all items of data store\n for item in ds:\n print item.type, item.path\n # if it is an enterprise database connection, you can get the connection string like this\n if item.type == 'egdb':\n print item.info.connectionString\n # else if a folder, print server path\n elif item.type == 'folder':\n print item.info.path\n print '\\n'", "def CreateScriptService(service, *args, **kwargs):\n # Init at each CreateScriptService() invocation\n # CreateScriptService is usually the first statement in user scripts requesting ScriptForge services\n # ScriptForge() is optional in user scripts when Python process inside LibreOffice process\n if ScriptForge.SCRIPTFORGEINITDONE is False:\n ScriptForge()\n\n def ResolveSynonyms(servicename):\n \"\"\"\n Synonyms within service names implemented in Python or predefined are resolved here\n :param servicename: The short name of the service\n :return: The official service name if found, the argument otherwise\n \"\"\"\n for cls in SFServices.__subclasses__():\n if servicename.lower() in cls.servicesynonyms:\n return cls.servicename\n return servicename\n\n #\n # Check the list of available services\n scriptservice = ResolveSynonyms(service)\n if scriptservice in ScriptForge.serviceslist:\n serv = ScriptForge.serviceslist[scriptservice]\n # Check if the requested service is within the Python world\n if serv.serviceimplementation == 'python':\n return serv(*args)\n # Check if the service is a predefined standard Basic service\n elif scriptservice in 
ScriptForge.servicesmodules:\n return serv(ScriptForge.servicesmodules[scriptservice], classmodule = SFServices.moduleStandard)\n else:\n serv = None\n # The requested service is to be found in the Basic world\n # Check if the service must review the arguments\n if serv is not None:\n if hasattr(serv, 'ReviewServiceArgs'):\n # ReviewServiceArgs() must be a class method\n args = serv.ReviewServiceArgs(*args, **kwargs)\n # Get the service object back from Basic\n if len(args) == 0:\n serv = ScriptForge.InvokeBasicService('SF_Services', SFServices.vbMethod, 'CreateScriptService', service)\n else:\n serv = ScriptForge.InvokeBasicService('SF_Services', SFServices.vbMethod, 'CreateScriptService',\n service, *args)\n return serv", "def create_service():\n creds = None\n # The file token_sheet.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token_sheet.pickle'):\n with open('token_sheet.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials_sheets.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token_sheet.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n return service", "def __init__(self, url='', credentials=None,\n get_credentials=True, http=None, model=None,\n log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n url = url or self.BASE_URL\n super(BaremetalsolutionV2, self).__init__(\n url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args,\n default_global_params=default_global_params,\n additional_http_headers=additional_http_headers,\n response_encoding=response_encoding)\n self.projects_locations_instanceProvisioningSettings = self.ProjectsLocationsInstanceProvisioningSettingsService(self)\n self.projects_locations_instanceQuotas = self.ProjectsLocationsInstanceQuotasService(self)\n self.projects_locations_instances = self.ProjectsLocationsInstancesService(self)\n self.projects_locations_networkQuotas = self.ProjectsLocationsNetworkQuotasService(self)\n self.projects_locations_networks = self.ProjectsLocationsNetworksService(self)\n self.projects_locations_nfsShares = self.ProjectsLocationsNfsSharesService(self)\n self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)\n self.projects_locations_provisioningConfigs = self.ProjectsLocationsProvisioningConfigsService(self)\n self.projects_locations_provisioningQuotas = self.ProjectsLocationsProvisioningQuotasService(self)\n self.projects_locations_snapshotSchedulePolicies = self.ProjectsLocationsSnapshotSchedulePoliciesService(self)\n self.projects_locations_sshKeys = self.ProjectsLocationsSshKeysService(self)\n self.projects_locations_storageQuotas = self.ProjectsLocationsStorageQuotasService(self)\n self.projects_locations_volumes_luns = self.ProjectsLocationsVolumesLunsService(self)\n self.projects_locations_volumes_snapshots = self.ProjectsLocationsVolumesSnapshotsService(self)\n 
self.projects_locations_volumes = self.ProjectsLocationsVolumesService(self)\n self.projects_locations = self.ProjectsLocationsService(self)\n self.projects = self.ProjectsService(self)", "def factory(container, name, factory):", "def createService(self, json,uid):\n \n # TODO:SHOULD TAKE PARAMETERS DINAMICALLY CHECKING FOR KEYS\n \n \n # for key in CREATESERVICEKEYS:\n # if key not in json:\n # return jsonify(Error=\"Error in credentials from submission: \"+ str(key)), 400\n \n \n \n \n try:\n websites = WebsiteHandler().unpackWebsites(json=json['websites'])\n if len(websites) > 10:\n return jsonify(Error=\"Improper number of websites provided: \" + str(len(websites))), 400\n except TypeError:\n return jsonify(Error=\"Error in input Parameters (websites)\" ), 400\n except KeyError as e:\n return jsonify(Error=str(e) ), 400\n try:\n phones = PhoneHandler().unpackPhones(json=json['numbers'])\n if len(websites) > 10:\n return jsonify(Error=\"Improper number of websites provided: \" + str(len(websites))), 400\n except TypeError:\n return jsonify(Error=\"Error in input Parameters (numbers)\" ), 400\n except KeyError as e:\n return jsonify(Error=str(e) ), 400\n \n # MAKE DICTIONARY TO CRREATE THESE\n user = uid\n roomID = json['rid']\n name = json['sname']\n description = json['sdescription']\n schedule = json['sschedule']\n dao = ServiceDAO()\n sid = dao.createService(uid=user,\n rid=roomID,\n sname=name,\n sdescription=description,\n sschedule=schedule,\n websites=websites,\n numbers=phones\n )\n \n try:\n \n if isinstance(sid[0],int):\n return (self.getServiceByID(sid[0])),201\n else:\n return jsonify(Error=sid)\n except:\n return jsonify(Error= \"Unique service violation \"+str(sid))", "def create_node(self, address, service):\n node = create_node(address, service)\n node.id = address + \"_\" + service\n return node", "def makeService_Agent(self, options):\n\n # Don't use memcached initially -- calendar server might take it away\n # at any moment. 
However, when we run a command through the gateway,\n # it will conditionally set ClientEnabled at that time.\n def agentPostUpdateHook(configDict, reloading=False):\n configDict.Memcached.Pools.Default.ClientEnabled = False\n\n config.addPostUpdateHooks((agentPostUpdateHook,))\n config.reload()\n\n # Verify that server root actually exists and is not phantom\n checkDirectory(\n config.ServerRoot,\n \"Server root\",\n access=W_OK,\n wait=True # Wait in a loop until ServerRoot exists and is not phantom\n )\n\n # These we need to set in order to open the store\n config.EnableCalDAV = config.EnableCardDAV = True\n\n def agentServiceCreator(pool, store, ignored, storageService):\n from calendarserver.tools.agent import makeAgentService\n if storageService is not None:\n # Shut down if DataRoot becomes unavailable\n from twisted.internet import reactor\n dataStoreWatcher = DirectoryChangeListener(\n reactor,\n config.DataRoot,\n DataStoreMonitor(reactor, storageService)\n )\n dataStoreWatcher.startListening()\n if store is not None:\n store.queuer = NonPerformingQueuer()\n return makeAgentService(store)\n\n uid, gid = getSystemIDs(config.UserName, config.GroupName)\n svc = self.storageService(\n agentServiceCreator, None, uid=uid, gid=gid\n )\n agentLoggingService = ErrorLoggingMultiService(\n config.ErrorLogEnabled,\n config.AgentLogFile,\n config.ErrorLogRotateMB * 1024 * 1024,\n config.ErrorLogMaxRotatedFiles,\n config.ErrorLogRotateOnStart,\n )\n svc.setName(\"agent\")\n svc.setServiceParent(agentLoggingService)\n return agentLoggingService", "def create_app(service: Service):\n app = FastAPI()\n\n @app.post(\"/query\")\n def query(params: Params):\n \"\"\"The main query endpoint.\"\"\"\n return service.query(**params.query, n_neighbors=params.n_neighbors)\n\n return app", "def create_services(config: Config) -> Dict:\n\n api_config = config.api_config\n db_config = config.db_config\n image_config = config.image_config\n logger_config = config.logger_config\n\n setup_global_logger(file_path=logger_config.file_path)\n\n services = {\n \"gui_api\": GuiApi(\n version=api_config.version,\n host=api_config.host,\n port=api_config.port,\n secret=api_config.secret,\n gui_build_path=api_config.gui_build_path,\n ),\n \"user_controller\": UserController(),\n \"project_controller\": ProjectController(),\n \"issue_controller\": IssueController(),\n # ----Factory Methods ----#\n \"db_accessor\": create_db_accessor(db_config),\n \"image_accessor\": create_image_accessor(image_config),\n }\n\n for _, service in services.items():\n service.inject(**services)\n\n return services", "def foghord_service():\n # create a resource to serve static files\n foghorn = Main()\n\n factory = FoghornDNSServerFactory(\n clients=[foghorn.foghorn, client.Resolver(resolv='/etc/resolv.conf')]\n )\n\n udp_protocol = dns.DNSDatagramProtocol(controller=factory)\n udp_server = internet.UDPServer(foghorn.settings.dns_port, udp_protocol)\n\n tcp_server = internet.TCPServer(foghorn.settings.dns_port, factory)\n\n return [udp_server, tcp_server]", "def create(self, name, service_name, service_plan_name,\n tags=None, parameters=None):\n self._assert_space()\n\n service_instance = self._get_service_instance(name)\n if service_instance:\n return service_instance\n\n service_plan = self._get_service_plan(service_name, service_plan_name)\n\n if not service_plan:\n raise exc.NotFoundException('Service plan not found', 404)\n\n body = dict(\n name=name,\n service_plan_guid=service_plan.guid,\n space_guid=self._space.guid\n )\n if tags is 
not None:\n body['tags'] = tags\n if parameters is not None:\n body['parameters'] = parameters\n\n res = self._cc.service_instances() \\\n .set_query(accepts_incomplete='true') \\\n .set_params(**body).post()\n return res.resource", "def test_create_routing_slip(session, staff_user_mock):\n routing_slip_payload: Dict[str, any] = {\n 'number': '206380834',\n 'routingSlipDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'paymentAccount': {\n 'accountName': 'TEST'\n },\n 'payments': [{\n 'paymentMethod': PaymentMethod.CHEQUE.value,\n 'paymentDate': datetime.now().strftime(DT_SHORT_FORMAT),\n 'chequeReceiptNumber': '123',\n 'paidAmount': 100\n }]\n }\n\n rs = RoutingSlip_service.create(routing_slip_payload)\n assert rs\n cfs_account_model: CfsAccountModel = CfsAccountModel.find_effective_by_account_id(\n rs.get('payment_account').get('id'))\n assert cfs_account_model.status == CfsAccountStatus.PENDING.value", "def create_tap_service(attrs=None):\n attrs = attrs or {}\n tap_service_attrs = {\n 'id': uuidutils.generate_uuid(),\n 'tenant_id': uuidutils.generate_uuid(),\n 'name': 'test_tap_service' + uuidutils.generate_uuid(),\n 'status': 'ACTIVE',\n }\n tap_service_attrs.update(attrs)\n return copy.deepcopy(tap_service_attrs)", "def makeService(self, options):\n # Change from \"\" non used to something a bit more standard: None\n for k in [\"script\", \"pushover\"]:\n if options[k] == \"\":\n options[k] = None\n\n pushover = None\n if options[\"pushover\"] is not None:\n try:\n with open(options[\"pushover\"], \"r\") as p:\n pushover = simplejson.loads(p.read())\n except IOError:\n sys.stderr.write(\"Could not open: %s\\n\" % options[\"pushover\"])\n except simplejson.JSONDecodeError:\n sys.stderr.write(\"Could not parse JSON: %s\\n\"\n \"\" % options[\"pushover\"])\n # Simple validation\n for p in pushover:\n for k in [\"token\", \"user\"]:\n if k not in p:\n sys.stderr.write(\"Missing: %s from pushover\\n\" % k)\n if not isinstance(p[k], str):\n sys.stderr.write(\"%s is not a string in %s\\n\"\n \"\" % (p[k], k))\n # Check that we're doing something\n if options[\"script\"] is None and (pushover is None or\n len(pushover) == 0):\n sys.stderr.write(\"WARNING: script and pushover are both \"\n \"empty. 
This will act as only a logger\\n\")\n gitlab = GitLab(options[\"script\"], pushover)\n return internet.TCPServer(int(options[\"port\"]),\n server.Site(gitlab),\n interface=options[\"addr\"])", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlPortFwd_Create'))", "def create(cls, **kwargs):\n all_boxes = []\n lat = kwargs.pop(\"lat\")\n lon = kwargs.pop(\"lon\")\n for (resolution, slice, use_set) in GEOBOX_CONFIGS:\n if use_set:\n all_boxes.extend(geobox.compute_set(lat, lon, resolution, slice))\n else:\n all_boxes.append(geobox.compute(lat, lon, resolution, slice))\n kwargs[\"stop_id\"] = str(kwargs[\"stop_id\"])\n kwargs[\"location\"] = db.GeoPt(lat, lon)\n kwargs[\"key_name\"] = \"stop:%s:%s\" % (kwargs[\"system\"], kwargs[\"stop_id\"])\n kwargs[\"geoboxes\"] = all_boxes\n kwargs[\"has_inbound_routes\"] = bool(len(kwargs[\"in_routes\"]) > 0)\n kwargs[\"has_outbound_routes\"] = bool(len(kwargs[\"out_routes\"]) > 0)\n return cls(**kwargs)", "def Swahili_Speech_Recognition_Service():\n\n # ensure an instance is created only the first time the factory function is called\n if _Swahili_Speech_Recognition_Service._instance is None:\n\n _Swahili_Speech_Recognition_Service._instance = _Swahili_Speech_Recognition_Service()\n\n # Initialize the tokenizer\n _Swahili_Speech_Recognition_Service._processor = Wav2Vec2Processor.from_pretrained(\"alokmatta/wav2vec2-large-xlsr-53-sw\")\n\n # Initialize the model\n _Swahili_Speech_Recognition_Service._model = Wav2Vec2ForCTC.from_pretrained(\"alokmatta/wav2vec2-large-xlsr-53-sw\")\n\n return _Swahili_Speech_Recognition_Service._instance", "def service(service, private_base_url):\n routing_policy_op = {\"operations\": [\n {\"op\": \"==\", \"value\": \"/anything/anything\", \"match\": \"path\"}]}\n proxy = service.proxy.list()\n proxy.policies.insert(0, rawobj.PolicyConfig(\"routing\", {\n \"rules\": [\n {\"url\": private_base_url(\"primary\"),\n \"condition\": routing_policy_op,\n \"replace_path\": \"{{ original_request.path | remove_first: '/anything' }}\"}]}))\n\n return service", "def create_solver(solver_type, paths, nodes, lines):\n if solver_type == NON_LINEAR_SOLVER:\n return NonLinearSolver(paths=paths, nodes=nodes, lines=lines)\n if solver_type == LINEAR_SOLVER:\n return LinearSolver(paths=paths, nodes=nodes, lines=lines)", "def makeService_Utility(self, options):\n\n def toolServiceCreator(pool, store, ignored, storageService):\n return config.UtilityServiceClass(store)\n\n uid, gid = getSystemIDs(config.UserName, config.GroupName)\n return self.storageService(\n toolServiceCreator, None, uid=uid, gid=gid\n )", "def _create_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def _service_object(\n self,\n ports: List[ServicePort],\n service_name: str = None,\n service_type: ServiceType = \"ClusterIP\",\n additional_labels: dict = None,\n additional_selectors: dict = None,\n additional_annotations: dict = None,\n ) -> Service:\n if not service_name:\n service_name = self._app\n labels = {\"app.kubernetes.io/name\": self._app}\n if additional_labels:\n labels.update(additional_labels)\n selector = {\"app.kubernetes.io/name\": self._app}\n if additional_selectors:\n selector.update(additional_selectors)\n return Service(\n apiVersion=\"v1\",\n kind=\"Service\",\n metadata=ObjectMeta(\n namespace=self._namespace,\n name=service_name,\n labels=labels,\n annotations=additional_annotations, # type: ignore[arg-type]\n ),\n spec=ServiceSpec(\n selector=selector,\n ports=ports,\n type=service_type,\n 
),\n )", "def testServiceMapping_ByFactory(self):\n self.DoMappingTest({'/my-service': MyService.new_factory('new-value')})", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def makeService(config):\n from twisted.internet import reactor\n\n # We need a HTTP connection pool for rproxy.\n pool = HTTPConnectionPool(reactor)\n\n proxyResource = RProxyResource(\n hosts=hosts,\n pool=pool,\n customHeaders=customHeaders,\n reactor=reactor\n )\n redirectResource = RedirectResource()\n\n secureSite = Site(proxyResource)\n insecureSite = Site(redirectResource)\n\n multiService = service.MultiService()\n multiService.addService(\n strports.service('le:/certs:tcp:' + HTTPS_PORT, secureSite)\n )\n multiService.addService(\n strports.service(\"tcp:\" + HTTP_PORT, insecureSite)\n )\n return multiService", "def __call__(cls,\n service,\n gis,\n initialize=False):\n\n url = service._url\n if isinstance(service, FeatureLayer) or \\\n os.path.basename(url).isdigit():\n parent = Service(url=os.path.dirname(url), server=gis)\n return AdminServiceGen(parent, gis)\n elif isinstance(service, (NetworkDataset)):\n rd = {'naserver', 'MapServer'}\n url = _str_replace(url, rd)\n parent = Service(url=url, server=gis)\n return AdminServiceGen(parent, gis)\n else:\n rd = {'/rest/': '/admin/'}\n connection = service._con\n admin_url = \"%s.%s\" % (\n _str_replace(os.path.dirname(url), rd),\n os.path.basename(url))\n return AdminService(url=admin_url, gis=gis)\n return type.__call__(cls, service, gis, False)", "def build_service():\n creds = None\n\n # the file token.json stores the user's access and refresh tokens, and is \n # created automatically when the authorization flow completes for the first time\n \n if os.path.exists('../creds/token.json'):\n creds = Credentials.from_authorized_user_file('../creds/token.json', SCOPES)\n\n # if there are no (valid) credentials, ask the user to login\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../creds/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n with open('../creds/token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def CommandRecognitionService():\r\n\r\n # ensure an instance is created only the first time the factory function is called\r\n if _CommandRecognitionService._instance is None:\r\n _CommandRecognitionService._instance = _CommandRecognitionService()\r\n _CommandRecognitionService.model = tf.keras.models.load_model(MODEL_PATH)\r\n return _CommandRecognitionService._instance", "def create():\n pass", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def create(self, session):\n # Service expects a naked PUT. 
Omit properties.\n self.create_by_id(session, None, self.id, path_args=self)\n self._reset_dirty()\n return self", "def create_service(self):\n self.dlg = ServiceCreateDialog(iface=self.iface, backend=self.backend)\n self.dlg.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.dlg.show()", "def _create_compute_service(self, **kwargs):\n\n dic = {'binary': 'nova-compute', 'topic': 'compute',\n 'report_count': 0, 'availability_zone': 'dummyzone'}\n dic['host'] = kwargs.get('host', 'dummy')\n s_ref = db.service_create(self.context, dic)\n if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():\n t = utils.utcnow() - datetime.timedelta(0)\n dic['created_at'] = kwargs.get('created_at', t)\n dic['updated_at'] = kwargs.get('updated_at', t)\n db.service_update(self.context, s_ref['id'], dic)\n\n dic = {'service_id': s_ref['id'],\n 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,\n 'vcpus_used': 16, 'local_gb_used': 10,\n 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,\n 'cpu_info': ''}\n dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)\n dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')\n dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)\n db.compute_node_create(self.context, dic)\n return db.service_get(self.context, s_ref['id'])", "def create(self, validated_data, request: HttpRequest = None):\n # Writing of .get(\"xy\", None) or None makes sure that empty strings will be mapped to None\n user = user_helper.get_user(request=request)\n get_capabilities_uri = validated_data.get(\"uri\", None) or None\n registering_with_group = validated_data.get(\"group\", None) or None\n registering_for_org = validated_data.get(\"for-org\", None) or None\n has_ext_auth = validated_data.get(\"ext-auth\", False) or False\n ext_auth_username = validated_data.get(\"ext-username\", None) or None\n ext_auth_password = validated_data.get(\"ext-password\", None) or None\n ext_auth_type = validated_data.get(\"ext-auth-type\", None) or None\n\n # Split uri in components as it is done with RegisterNewServiceWizardPage1\n url_dict = service_helper.split_service_uri(get_capabilities_uri)\n ogc_request = url_dict[\"request\"]\n ogc_service = url_dict[\"service\"].value\n ogc_version = url_dict[\"version\"]\n uri = url_dict[\"base_uri\"]\n\n init_data = {\n \"ogc_request\": ogc_request,\n \"ogc_service\": ogc_service,\n \"ogc_version\": ogc_version,\n \"uri\": uri,\n \"registering_with_group\": registering_with_group,\n \"registering_for_other_organization\": registering_for_org,\n \"service_needs_authentication\": has_ext_auth,\n \"username\": ext_auth_username,\n \"password\": ext_auth_password,\n \"authentication_type\": ext_auth_type,\n }\n\n # Use RegisterNewResourceWizardPage2 workflow as for frontend registration\n form = RegisterNewResourceWizardPage2(\n data=init_data,\n request=request\n )\n if form.is_valid():\n pending_task = service_helper.create_new_service(form, user)\n return pending_task\n return form", "def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?", "def create_station(self, name, description=\"\", userids=None):\n return self._stations_service.create_station(name, description, userids)", "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def register_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_service_description()\n self._register(description, 
\"registering agent's service on the SOEF.\")" ]
[ "0.66257864", "0.6032868", "0.60326034", "0.6012139", "0.5914998", "0.5851863", "0.57294667", "0.57228637", "0.5708061", "0.5678534", "0.5656917", "0.56435645", "0.5626607", "0.5623124", "0.5580457", "0.55738074", "0.55718637", "0.55306184", "0.552144", "0.5487335", "0.54739475", "0.5470202", "0.5464204", "0.54504275", "0.54146594", "0.5401627", "0.53975636", "0.5385184", "0.5338696", "0.53134274", "0.5303821", "0.53011817", "0.53011817", "0.53003085", "0.528952", "0.525825", "0.5255525", "0.5246933", "0.52388304", "0.52289736", "0.5216661", "0.5196777", "0.5183559", "0.5174064", "0.5166602", "0.5159912", "0.51567006", "0.5155886", "0.5136153", "0.5124991", "0.51215005", "0.51182586", "0.51166296", "0.51081014", "0.5094856", "0.5093283", "0.50930095", "0.50930095", "0.50847137", "0.50815576", "0.50763863", "0.50719327", "0.5071415", "0.5066877", "0.5063504", "0.5062886", "0.5053826", "0.50537986", "0.50526404", "0.50505453", "0.505048", "0.50415814", "0.5039806", "0.50378335", "0.5032185", "0.5016066", "0.5014273", "0.50053364", "0.50033444", "0.49961233", "0.4994933", "0.49919045", "0.49868387", "0.49855018", "0.4985233", "0.49825823", "0.49796504", "0.49722108", "0.49708152", "0.49683324", "0.49661714", "0.49657318", "0.49646652", "0.4960803", "0.4958961", "0.49470112", "0.49381486", "0.4935965", "0.49332753", "0.4931086", "0.49289012" ]
0.0
-1
Checks if a token network is followed by the pathfinding service.
def follows_token_network(self, token_network_address: Address) -> bool:
    assert is_checksum_address(token_network_address)

    return token_network_address in self.token_networks.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_request_to_token_url(self, request):\n if not self.token_url:\n return False\n\n if self.token_url == request.path:\n return True\n\n request.match(self.token_url)\n\n if request.matchdict:\n return True\n\n return False", "def reach(self, from_symbol, to_symbol):\n # type: (Type[Nonterminal], Type[Nonterminal]) -> bool\n return len(self.path_rules(from_symbol, to_symbol)) > 0", "def has_route(self, network):\n try:\n distance = self.networks[network]\n if distance == 0:\n print('Router is an edge router for the network.')\n else:\n print(f'Network {network} is {distance} hops away')\n except KeyError:\n print('Route to the network is unknown.')", "def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")", "def __loop_detection(self, route: Route) -> bool:\n if self.node_id in route.path:\n return True\n return False", "def __loop_detection(self, route: Route) -> bool:\n if self.node_id in route.path:\n return True\n return False", "def check_inflight_already_running(self, user: Identifier) -> bool:\n with self._lock:\n for flow in self.in_flight:\n if flow.requestor == user:\n return True\n return False", "def is_driving(self, first: Waypoint, second: Waypoint) -> bool:\n dist = self.calc_distance(first, second)\n time_delta = (second.timestamp - first.timestamp).seconds\n if dist > GPS_DISTANCE_ACCURATE_METERS and time_delta < STOP_TIME_SECONDS:\n return True\n elif GPS_DISTANCE_ACCURATE_METERS < dist < CONNECTION_LOST_DISTANCE_THRESHOLD_METERS and \\\n time_delta < CONNECTION_LOST_TIMEOUT_SECONDS:\n return True\n else:\n return False", "def ensure_token_network_discovery(\n self, token: CustomToken, token_network_addresses: TokenNetworkAddress\n ) -> None:\n for node in self.node_controller: # type: ignore\n node_endpoint = API_URL_TOKEN_NETWORK_ADDRESS.format(\n protocol=self.protocol,\n target_host=node.base_url,\n token_address=to_checksum_address(token.address),\n )\n address = wait_for_token_network_discovery(\n node_endpoint, self.definition.settings, self.session\n )\n if to_canonical_address(address) != Address(token_network_addresses):\n raise RuntimeError(\n f\"Nodes diverged on the token network address, there should be \"\n f\"exactly one token network available for all nodes. 
Current \"\n f\"values : {to_hex(token_network_addresses)}\"\n )", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def paths_service_has_access(ctx, network, destination, source, port):\n source_service = get_service_for_cli(ctx, network, source)\n destination_service = get_service_for_cli(ctx, network, destination)\n if ctx.obj['CLIENT'].paths.has_access(source_service, destination_service, port):\n click.echo('Service %s has access to %s in network %s on port %s' % (\n source, destination, network, port))\n else:\n click.echo('Service %s does not have access to %s in network %s on port %s' % (\n source, destination, network, port))", "def verify_token(self, token):\n return False", "def is_authenticated(self):\n if not self.token:\n return False\n\n try:\n self.lookup_token()\n return True\n except Forbidden:\n return False\n except InvalidPath:\n return False\n except InvalidRequest:\n return False", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def has_providers(self, asn):\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n\n # node is a customer of neighbor\n if edge_data[\"relationship\"] == -1 and edge_data[\"as2\"] == asn:\n return True\n return False", "def check_ap(self, targetssid):\n dbg(f\"Scanning for access point [{targetssid}]...\")\n \n self.sta_if.active(True)\n networks = self.sta_if.scan()\n\n for ssid, bssid, channel, rssi, authmode, hidden in sorted(networks, key=lambda x: x[3], reverse=True):\n ssid = ssid.decode('utf-8')\n dbg(f\"Found {ssid}\")\n if ssid == targetssid:\n dbg(\"Found access point!\")\n return True \n \n dbg(\"Access point not found\")\n return False", "def CheckIfStationsAreVisitedInGivenOrder(ConnectionInfo, PathInfo, RouteConditions, OrderedStationList):\r\n\t# shortcuts\r\n\tif not PathInfo or len(PathInfo) < 2:\r\n\t\treturn True \r\n\tif not OrderedStationList or len(OrderedStationList) < 2:\r\n\t\treturn True \r\n\r\n\t# return true if next station is not in OrderedStationList\r\n\tNextStation = ConnectionInfo[ConnInfoInd['station_to']]\r\n\tif not NextStation in OrderedStationList:\r\n\t\treturn True \r\n\telse:\r\n\t\t# get last (highest-order) already visited station in OrderedStationList\r\n\t\tLastListedStation = None\r\n\t\tMaxInd = -1\r\n\t\tfor i in range(1, len(PathInfo)+1):\r\n\t\t\tstation = PathInfo[-i][ConnInfoInd['station_to']]\r\n\t\t\t\r\n\t\t\tif station in OrderedStationList:\r\n\t\t\t\tind = OrderedStationList.index(station)\r\n\t\t\t\tif ind > MaxInd:\r\n\t\t\t\t\tLastListedStation = station \r\n\t\t\t\t\tMaxInd = ind\r\n\r\n\t\t# check station orders (an equal or lower order station can be visited again)\r\n\t\tNextStationIND = OrderedStationList.index(NextStation) + 1\r\n\r\n\t\tLastStationIND = 0\r\n\t\tif LastListedStation:\r\n\t\t\tLastStationIND = OrderedStationList.index(LastListedStation) + 1\r\n\r\n\t\tif NextStationIND <= LastStationIND + 1:\r\n\t\t\treturn True \r\n\t\telse:\r\n\t\t\treturn False", "async def check_token_works(self) -> bool:\n async with self.web_session.get(url=self._user_endpoint, headers=self._headers) as resp:\n self._expired_token = not resp.status == 200\n return not self._expired_token", "def check_token(self):\n return config.outlook_token is not None", "def is_dialing(self) -> bool:", "def is_token_required(self):\n return 
any([self.app_id, self._login, self._password])", "def __check_registered(self, source_address: Address) -> bool:\n source_ip, source_port = source_address\n source_node = SemiNode(source_ip, source_port)\n return source_node in self.registered", "def verify_local_token(self, token):\n return token == self.master_local_token.get_token()", "def _is_follow_request(environ, result):\n r = Request(environ)\n if r.params.get(\"action\") == \"follow\":\n return True\n else:\n return False", "def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")", "def look_ahead(self, point):\n directions = [N(Point.make(point)), S(Point.make(point)), E(Point.make(point)), W(Point.make(point))]\n for point in directions:\n if not point in self.nodes:\n return True\n return False", "def requires_route(self) -> bool:\n return self.goal.is_specific()", "def check_segment_for_agent(self, segment, agent):\n mappings = agent['configurations'].get('interface_mappings', {})\n tunnel_types = agent['configurations'].get('tunnel_types', [])\n LOG.debug(\"Centec mech driver - Checking segment: %(segment)s \"\n \"for mappings: %(mappings)s \"\n \"with tunnel_types: %(tunnel_types)s\",\n {'segment': segment, 'mappings': mappings,\n 'tunnel_types': tunnel_types})\n network_type = segment[api.NETWORK_TYPE]\n if network_type == 'gre':\n return True\n if network_type == 'local':\n return True\n elif network_type in tunnel_types:\n return True\n elif network_type in 'flat':\n return True\n elif network_type in ['vlan']:\n return segment[api.PHYSICAL_NETWORK] in mappings\n else:\n return False", "def wifi_connectivity_verify(self):\n self.sendline(\"iw %s link\" % self.iface_wifi)\n matched = self.expect([\"Connected\", \"Not connected\", pexpect.TIMEOUT])\n if matched == 0:\n return True\n else:\n return False", "def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True", "async def authenticate(self, token) -> bool:\n return True", "def check_in(self, token: tokenize.TokenInfo) -> bool:\n if self._seen_for:\n if not self._seen_for_in_line:\n if not self._seen_if_in_line:\n self._reported = True\n return False\n return True", "def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False", "def has_token_in(status, token):\n if not hasattr(res, status):\n return False\n return token in getattr(res, status)", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return not result", "def validate(cls, token, user, service):\n expected = cls.generate(user, service)\n return token == expected", "def check_token(token):\n return conn.hget('login:', token)", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def checklan(ipaddr, 
network):\n return True", "def _can_ping_url(self, url, headers):\n try:\n self.http_request(url, \"GET\", \"\", headers, timeout=.75)\n return True\n except:\n return False", "def is_connected(self) -> bool:", "def checkToken( self ):\n\n if ( self.token == None ):\n return False\n else :\n d = {\n \"auth_token\" : str(self.token) ,\n \"method\" : \"flickr.auth.checkToken\",\n \"format\" : \"json\",\n \"nojsoncallback\" : \"1\"\n }\n sig = self.signCall( d )\n\n url = self.urlGen( api.rest, d, sig )\n try:\n res = self.getResponse( url )\n if ( self.isGood( res ) ):\n self.token = res['auth']['token']['_content']\n self.perms = res['auth']['perms']['_content']\n return True\n else :\n self.reportError( res )\n except:\n print(str(sys.exc_info()))\n return False", "async def validate_token(self, token):", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def check_token(token):\n token = db.session.query(Token).filter(Token.token==token).first()\n if token == None:\n return False\n #TODO token lifetime\n #if (datetime.datetime.now() - token.date >= datetime.timedelta(day=2)):\n # return False \n return True", "def _do_connectivity(self, tstep):\n return ((tstep > 0) and (tstep % self.overset_update_interval) == 0)", "def test_has_route():\n g = Graph()\n node_1 = Node({'A':['B','C']})\n g.add(node_1)\n node_2 = Node({'B':['C','D']})\n g.add(node_2)\n node_3 = Node({'C':['D']})\n g.add(node_3)\n node_4 = Node({'D':['C']})\n g.add(node_4)\n node_5 = Node({'E':['C']})\n g.add(node_5)\n\n # zero path between node_1 and node_5\n assert g.has_route(node_1, node_5) == False\n # only one path between node_5 and node_4\n assert g.has_route(node_5, node_4) == True\n # three paths between node_1 and node_3,\n assert g.has_route(node_1, node_3) == True", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def _hostOK(self, host):\n if os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/readiness'\n response = self.perform_get_request(endpoint)\n\n if response.status_code != 200:\n self.print_error_response(response, \"error\")\n return response.status_code == 200", "def _check_path(G, max_res, min_res, direction, algorithm):\n try:\n if not has_path(G, \"Source\", \"Sink\"):\n raise NetworkXException(\"Disconnected Graph\")\n except NetworkXException as e:\n raise Exception(\"An error occurred: {}\".format(e))", "def is_router(self):\n # @todo: Rewrite\n return self.address_set.count() > 1", "def is_incall_dialing(self) -> bool:", "def inPath(self, oth: 'StateNode') -> bool:\n if self == oth:\n return True\n if self.isSameState(oth):\n return True\n if self.previous is not None:\n return self.previous.inPath(oth)", "def token_relay(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"token_relay\")", "def isConnected():", "def gotConnectionWithServices(timeout):\n\n try:\n rospy.wait_for_service('fluid/take_off', timeout=timeout)\n rospy.wait_for_service('fluid/explore', 
timeout=timeout)\n rospy.wait_for_service('fluid/travel', timeout=timeout)\n rospy.wait_for_service('fluid/land', timeout=timeout)\n rospy.wait_for_service('fluid/interact', timeout=timeout)\n return True\n except rospy.ROSException:\n return False", "def is_incall_connected(self) -> bool:", "def paths_network_block_has_access(ctx, network, destination, source, port):\n source_block = cloudless.paths.CidrBlock(source)\n destination_service = get_service_for_cli(ctx, network, destination)\n if ctx.obj['CLIENT'].paths.has_access(source_block, destination_service, port):\n click.echo('Network %s has access to %s in network %s on port %s' % (\n source, destination, network, port))\n else:\n click.echo('Network %s does not have access to %s in network %s on port %s' % (\n source, destination, network, port))", "def check_token(self, user, token):\n try:\n data = signing.loads(token, max_age=properties.TOKEN_LOGIN_TIMEOUT)\n except signing.BadSignature:\n return False\n\n return (\n (\n (data['last_login'] is None and user.last_login is None) or\n data['last_login'] == user.last_login.strftime('%s')\n ) and\n data['user_id'] == user.pk\n )", "def is_route_throu(self):\n\n # VPR stores route-through LUTs as \"open\" blocks with mode set to\n # \"wire\".\n return self.is_leaf and self.name == \"open\" and self.mode == \"wire\"", "async def require_authentication(\n token: str = Depends(oauth2_scheme),\n) -> bool: # coverage: ignore\n\n try:\n google.oauth2.id_token.verify_firebase_token(\n token, requests.Request(), \"munro-leagues\"\n )\n except ValueError:\n raise HTTP_401(\"Invalid Authentication Credentials\")\n\n return True", "def wifi_connect_check(self, vap: VirtualAPHostapd) -> bool:\n for _ in range(5):\n self.wifi_connect(vap)\n self.expect(pexpect.TIMEOUT, timeout=10)\n verify_connect = self.wifi_connectivity_verify()\n if verify_connect:\n break\n else:\n self.wifi_disconnect()\n return verify_connect", "def isReachable(self):\n cmd = \"ping -c 1 %s\" % self.ip\n ping_output = commands.getoutput(cmd)\n logger.debug(cmd)\n logger.debug(ping_output)\n return re.search(\"1[\\s\\w]+received\", ping_output) is not None", "def isconnected(self) -> bool:", "def check_remote_pairing(ignore_errors):\n try:\n DeviceApi().get()\n return True\n except HTTPError as e:\n if e.response.status_code == 401:\n return False\n error = e\n except Exception as e:\n error = e\n\n LOG.warning('Could not get device info: {}'.format(repr(error)))\n\n if ignore_errors:\n return False\n\n if isinstance(error, HTTPError):\n if connected():\n raise BackendDown from error\n else:\n raise InternetDown from error\n else:\n raise error", "def check(self):\n # check forward\n self._check_impl(self.key_to_stat_fwd, \"forward\")\n\n # check backward\n self._check_impl(self.key_to_stat_bwd, \"backward\")", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def is_ringing(self) -> bool:", "def check_has_network_code_checkbox(self):\n self.click_element(self.has_network_code_checkbox_locator)", "def check_device_state(self):", "def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True", "def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < 
obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None", "def valid_spotify_token(token: str) -> bool:\n test_url = \"https://api.spotify.com/v1/tracks/11dFghVXANMlKmJXsNCbNl\"\n headers = {\"Authorization\": \"Bearer {}\".format(token)}\n response = requests.get(test_url, headers=headers)\n return response.status_code == 200", "def isConnectedTo(self, node):\n for arc in self._arcsFrom:\n if arc.getFinish() is node:\n return True\n return False", "def is_connected():\n sta_if = network.WLAN(network.STA_IF)\n return sta_if.isconnected()", "def route_is_contained_in_other_route(route,target):\n id_route = 0\n id_target = 0\n found = True\n while found and id_route < len(route) and id_target < len(target):\n found = False\n while not found and id_target < len(target):\n if route[id_route] == target[id_target]:\n found = True\n else:\n id_target += 1\n id_route += 1\n return found", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def erc20_checker(address, api_key):\n\n url = \"https://api.etherscan.io/api?module=stats&action=tokensupply&contractaddress=\" + address + \"&apikey=\" + api_key\n response = requests.get(url)\n\n if response.status_code != 200:\n print(response.text)\n\n address_content = response.json()\n result = address_content.get(\"result\")\n result_int = int(result)\n\n if result_int > 0:\n return True\n else:\n return False", "def check_directions(next_door, current_node, goal_node, chip, crossroad, travelled_path, colide): \n if next_door[2] < 0 or next_door[2] > 7:\n return crossroad\n\n # Check if the node is off the grid\n if next_door[0] < 0 or next_door[0] > chip.width - 1 or next_door[1] < 0 or next_door[1] > chip.height - 1:\n return crossroad\n\n (x, y, z) = current_node.position\n\n # Check whether a connection is already being used\n if chip.coordinates[z][y][x].connections[next_door].used:\n return crossroad\n\n next_node = chip.coordinates[next_door[2]][next_door[1]][next_door[0]]\n\n neighbour = nd.Node(next_door, current_node, next_node.cost, next_node.cost + next_node.distance_to_goal)\n\n if neighbour != goal_node and chip.coordinates[next_door[2]][next_door[1]][next_door[0]].gate is not None:\n return crossroad\n\n # Check whether the coordinate is already in the current path.\n if neighbour in travelled_path:\n return crossroad\n\n # Check whether neighbor is in open list and if it has a lower cost value\n if add_to_crossroad(neighbour, crossroad, colide):\n crossroad.append(neighbour)\n\n return crossroad", "def is_correct_route(network, route):\n id_actual = 0\n id_next = 1\n while id_next < len(route):\n road_id_actual = route[id_actual]\n road_id_next = route[id_next]\n if get_end(network, road_id_actual) != get_start(network, road_id_next):\n return False\n id_actual += 1\n id_next += 1\n return True", "def _is_correct_lti_request(self):\r\n lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)\r\n return lti_endpoint in self.path", "def _has_arrived(self, context) -> bool:\n return self._target[0] == context.x and 
self._target[1] == context.y", "def net_check():\n resp = None\n host = \"https://gitlab.manjaro.org\"\n # noinspection PyBroadException\n try:\n resp = urllib.request.urlopen(host, timeout=2)\n except Exception:\n pass\n return bool(resp)", "def check_inflight_flow_triggered(self, cmd: str, user: Identifier) -> Tuple[Optional[Flow], Optional[FlowNode]]:\n log.debug(\"Test if the command %s is a trigger for an inflight flow ...\", cmd)\n # TODO: What if 2 flows wait for the same command ?\n with self._lock:\n for flow in self.in_flight:\n if flow.check_identifier(user):\n log.debug(\"Requestor has a flow %s in flight\", flow.name)\n for next_step in flow.next_steps():\n if next_step.command == cmd:\n log.debug(\"Requestor has a flow in flight waiting for this command !\")\n return flow, next_step\n log.debug(\"None matched.\")\n return None, None", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def started_path(self):\n if self.ros_node.get_data('/diff_drive/path_achieved') is None:\n return False\n return not self.ros_node.get_data('/diff_drive/path_achieved')", "def IsLastRequire(self, token):\n return self._require_tokens and token == self._require_tokens[-1]", "def initialize_network_los() -> bool:\n return True", "def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n return response.status_code == 200", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n return response.status_code == 200", "def has_customers(self, asn):\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n\n # node is a provider of neighbor\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n return True\n return False", "def Check_Gateway(gateway):\n\n global STATUS\n logging.info(\"Pinging gateway\")\n if ping_machine(gateway) != 0:\n add_info(gateway, GATEWAY, \"unpingable\")\n STATUS = 1 # can't work w/out gateway\n return 1\n else:\n add_info(gateway, GATEWAY, \"OK\")\n return 0", "def ds_token_ok(buffer_min=10):\n required = {'ds_expiration', 'ds_access_token', 'ds_account_id'}\n session_keys = set(session.keys())\n ok = session_keys.intersection(required) == required\n if ok:\n token_expiration = session.get(\"ds_expiration\")\n buffer_starts = token_expiration - timedelta(minutes=buffer_min)\n ok = ok and buffer_starts > pytz.utc.localize(datetime.utcnow())\n return ok", "def _checking_path(self, node_name, first_name, path=0):\n if not self.successors[node_name]:\n return True\n for nd_in in self.successors[node_name]:\n if nd_in.name in self.max_paths[first_name].keys():\n # chose the maximum paths\n self.max_paths[first_name][nd_in.name] = max(\n self.max_paths[first_name][nd_in.name], path + 1\n )\n else:\n self.max_paths[first_name][nd_in.name] = path + 1\n 
self._checking_path(\n node_name=nd_in.name, first_name=first_name, path=path + 1\n )", "def check_nodes(self) -> bool:\n # check the input-output consistency\n for op_name in self.__ops:\n op = cast(Operator, self.__ops[op_name])\n inputs: Dict[str, Operator] = op.input_ops\n for i in inputs.values():\n if op not in i.output_op_list:\n return False\n\n return True" ]
[ "0.5835548", "0.5597924", "0.5571266", "0.5485773", "0.5480768", "0.5480768", "0.54687196", "0.5454214", "0.5441436", "0.5436674", "0.5424187", "0.5416221", "0.53875804", "0.5344155", "0.5312403", "0.529961", "0.5279912", "0.5251868", "0.52469206", "0.52468187", "0.5241061", "0.5233585", "0.52316785", "0.52150893", "0.5156514", "0.5144983", "0.5134208", "0.5131261", "0.5127431", "0.5118042", "0.511172", "0.5099853", "0.5095518", "0.5088308", "0.50866854", "0.5069114", "0.50674903", "0.5066935", "0.5052621", "0.50430816", "0.5028242", "0.5028057", "0.5025936", "0.50203615", "0.500546", "0.50004447", "0.49991727", "0.49823195", "0.49790108", "0.49734735", "0.49733606", "0.49730125", "0.49653676", "0.49445242", "0.49390176", "0.493074", "0.4925887", "0.49248752", "0.491906", "0.4918645", "0.4918134", "0.49178305", "0.49131328", "0.4910877", "0.49103838", "0.49089622", "0.49067017", "0.4906516", "0.4897836", "0.48968142", "0.48927355", "0.4892149", "0.4891443", "0.48858532", "0.487702", "0.48750842", "0.4872992", "0.48702452", "0.486973", "0.48624778", "0.48607826", "0.48550454", "0.48518372", "0.4844427", "0.4841893", "0.48331183", "0.48319945", "0.48283938", "0.4822353", "0.48181403", "0.48155853", "0.4808001", "0.48077565", "0.48024565", "0.48024565", "0.47991416", "0.4794922", "0.4794452", "0.47915393", "0.4788389" ]
0.67394125
0
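For context, the positive document in the row above is a plain membership check over a mapping of token networks keyed by checksummed address. The following is a minimal, self-contained sketch of that pattern; the class name, the stand-in checksum helper, and the toy addresses are assumptions for illustration and are not taken from the dataset.

from typing import Dict


def is_checksum_address(value: str) -> bool:
    # Stand-in for the real checksum validation; assumes a 0x-prefixed, 40-hex-digit address.
    return value.startswith("0x") and len(value) == 42


class PathfindingStateSketch:
    # Minimal holder mirroring the shape implied by follows_token_network.
    def __init__(self) -> None:
        self.token_networks: Dict[str, object] = {}

    def follows_token_network(self, token_network_address: str) -> bool:
        assert is_checksum_address(token_network_address)
        return token_network_address in self.token_networks


state = PathfindingStateSketch()
state.token_networks["0x" + "ab" * 20] = object()
print(state.follows_token_network("0x" + "ab" * 20))  # True
print(state.follows_token_network("0x" + "cd" * 20))  # False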
Returns the `TokenNetwork` for the given address or `None` for unknown networks.
def _get_token_network(self, token_network_address: Address) -> Optional[TokenNetwork]:
    assert is_checksum_address(token_network_address)

    if not self.follows_token_network(token_network_address):
        return None
    else:
        return self.token_networks[token_network_address]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_network(address: str, netmask: str) -> IPv4Network:\n net = IPv4Network(f\"{address}/{netmask}\", strict=False)\n return net", "def _get_network(name):\n\n if name not in _NAME_TO_NETS:\n raise ValueError('Network name [%s] not recognized.' % name)\n return _NAME_TO_NETS[name].model", "def get_network_with_name(self, name):\n for network in self.networks:\n if network.name == name:\n return network\n return None", "def IpNetwork(address, version=None):\n\n if version:\n if version == 4:\n return Ipv4Network(address)\n elif version == 6:\n return Ipv6Network(address)\n\n try:\n return Ipv4Network(address)\n except (ValueError):\n pass\n\n try:\n return Ipv6Network(address)\n except (ValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % address)", "def guess_network(self):\n # decide what sort of network we are going to use\n # return the actual type\n # right now we just use the first host only network and that's it\n host_only = list(HostOnlyNetwork.find_networks())\n if host_only:\n return host_only[0]\n else:\n return NewHostOnlyNetwork()", "def _get_network_by_identity(self, identity):\n for network in self.mobile_networks:\n identities = network.get(\"Operator Identities\")\n if identities is not None:\n if identity in identities:\n return network\n return None", "def from_str(cls, address: str) -> Optional[Address]:\n if len(address) < 26 or len(address) > 35:\n return None\n # decode\n data = base58_decode(address)\n if data is None or len(data) != 25:\n return None\n # check code\n prefix = data[:21]\n suffix = data[21:]\n if check_code(prefix) == suffix:\n network = ord(data[:1])\n return cls(address=address, network=network)", "def get_network_by_name(self, name: str) -> Network:\n for network in self._networks:\n if network.name == name:\n return network\n raise errors.NotFoundError(f\"there exists no network named {name!r}\")", "def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]", "def maybe_create_token_network(\n token_network_proxy: TokenNetworkRegistry, token_proxy: CustomToken\n) -> TokenNetworkAddress:\n block_identifier = token_network_proxy.rpc_client.get_confirmed_blockhash()\n token_address = token_proxy.address\n\n token_network_address = token_network_proxy.get_token_network(\n token_address=token_address, block_identifier=block_identifier\n )\n\n if token_network_address is None:\n _, new_token_network_address = token_network_proxy.add_token(\n token_address=token_address,\n channel_participant_deposit_limit=TokenAmount(UINT256_MAX),\n token_network_deposit_limit=TokenAmount(UINT256_MAX),\n given_block_identifier=block_identifier,\n )\n return new_token_network_address\n else:\n return token_network_address", "def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)", "def get_network(self):\n return self.get_ip_network()[-1]", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def from_str(cls, address: str) -> 
Optional[Address]:\n if is_eth(address=address):\n return cls(address=address)", "def get_network(name, pretrained_model):\n if name == 'fcn8_vgg':\n return networks.FCN8VGG(pretrained_model)\n else:\n print 'network `{:s}` is not supported'.format(name)\n sys.exit()", "def get_address(self, address=None):\n return self.__get_addr_grp('address', address)", "def get_network_on_vc(options):\n datacenter = get_datacenter(options)\n networks = datacenter.network\n\n name = get_network_name(options)\n for network in networks:\n if re.search(name, network.name):\n return network", "def _external_network(self):\n try:\n router = next(self._connection.network.routers.all())\n except StopIteration:\n raise errors.ImproperlyConfiguredError('Could not find tenancy router.')\n return self._connection.network.networks.get(router.external_gateway_info['network_id'])", "def get_network(self, name, disconnected=False):\n return self.get_networks(as_dict=True,\n disconnected=disconnected).get(name)", "def Detect_network(num):\n\tif num.isdigit():\n\t\tif num.startswith(\"077\") or num.startswith(\"078\"):\n\t\t\treturn \"MTN\"\n\t\tif num.startswith(\"075\") or num.startswith(\"070\"):\n\t\t\treturn\"Airtel\"\n\t\tif num.startswith(\"079\"):\n\t\t\treturn \"Africel\"\n\t\tif num.startswith(\"074\"):\n\t\t\treturn \"Smart\"\n\t\tif num.startswith(\"071\"):\n\t\t\treturn \"UTL\"\n\t\tif num.startswith(\"073\"):\n\t\t\treturn \"K2\"\t\n\t\telse:\n\t\t\treturn \"Others\"\t\n\telse:\n\t\treturn \"Invalid\"", "def _get_network_by(self, field, search):\n for network in self.mobile_networks:\n f = network.get(field)\n if f is not None:\n if f == search:\n return network\n return None", "def parse_address(self, address: str) -> Optional[Address]:\n raise NotImplemented", "def get_stored_network(cls):\n store = cls.get_store()\n try:\n network_dict = store['network']\n except KeyError:\n network_dict = {}\n network_name = network_dict.get(\n 'value', ChainID.MAINNET.name)\n network = ChainID[network_name]\n return network", "def get_network(networkToCheck):\r\n\tnet = networkToCheck.get()\r\n\tif netaddr.valid_ipv4(net[:-3]):\r\n\t\tnetworkInfo = f'''NETWORK: {IPNetwork(net).network}\r\nFIRST HOST: {get_first(net)}\r\nLAST HOST: {get_last(net)}\r\nBROADCAST: {IPNetwork(net).broadcast}\r\nNETMASK: {IPNetwork(net).netmask}\r\nNEXT NETWORK: {IPNetwork(net).next()}\\n'''\r\n\t\tnetworkVar.set(networkInfo)\r\n\telse:\r\n\t\tnetworkVar.set(f'**Error**: \"{net}\" is not a valid ip\\nExample: \"192.168.1.0/24\"')", "def get_net_id(self, net_name):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n\n if result is None:\n LOG_OBJ.error(\n \"No response from Server while trying to\"\n \" get networks of tenant: %s\" %\n self.project_info[\"project_id\"])\n return result\n\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network Failed with status %s \" % result.status)\n return result.status\n\n output = json.loads(result.data)\n LOG_OBJ.debug(\"Networks: %s\" % output['networks'])\n\n for nets in output['networks']:\n if nets['name'].lower() == net_name.lower() and \\\n net_name == config.extnet_name:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n if nets['name'].lower() == net_name.lower() and \\\n nets['tenant_id'] == self.project_info[\"project_id\"]:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n\n 
LOG_OBJ.debug(\"Net:%s Not Found\" % net_name)\n return", "def get_network(self):\n return self._network", "def GetNetwork(self, network, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n \"/%s/networks/%s\" % (GANETI_RAPI_VERSION, network),\n query, None)", "def _tenant_network(self):\n port = self._connection.network.ports.find_by_device_owner('network:router_interface')\n if port:\n return self._connection.network.networks.get(port.network_id)\n else:\n raise errors.ImproperlyConfiguredError('Could not find tenancy network')", "def get_network(self) -> EthereumNetwork:\n return EthereumNetwork(int(self.w3.net.version))", "def cosmo_find_external_net(self):\n nets = self.list_networks()['networks']\n ls = [net for net in nets if net.get('router:external')]\n if len(ls) == 1:\n return ls[0]\n if len(ls) != 1:\n raise RuntimeError(\n \"Expected exactly one external network but found {0}\".format(\n len(ls)))", "def getProfile(self, profile):\n for network in self.networks:\n if network.getProfileName() == profile:\n return network\n else:\n raise Exception('Network with profile name \"%s\" not found' % profile)", "def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id", "def get_by_address(self, address):\n assert len(address) == 20\n accounts = [account for account in self.accounts if account.address == address]\n if len(accounts) == 0:\n raise KeyError('account with address {} not found'.format(encode_hex(address)))\n elif len(accounts) > 1:\n log.warning('multiple accounts with same address found', address=encode_hex(address))\n return accounts[0]", "def __get_account(self, address):\n\t\tfor acct in self.wallet:\n\t\t\tif acct[\"address\"] == address:\n\t\t\t\treturn acct\n\t\traise ValueError(\"The given address does not exist in the bunkr-wallet\")", "def _address_type(self, address):\n parsed_type = None\n parsed = urlparse.urlparse(address)\n if parsed.scheme not in ('http', 'https', 'ipc', 'tcp'):\n raise ValueError('Invalid volttron central address.')\n\n return parsed.scheme", "def get_node_by_address(root_node, address):\n assert address[0] == 0\n if len(address) == 1:\n return root_node\n return get_descendant_by_address(root_node, address[1:])", "def get_network_id_by_name(name: str) -> str:\n networks_info = get_networks()\n\n for network in networks_info[\"networks\"]:\n if network[\"name\"] == name:\n return network[\"id\"]\n\n raise AttributeError(f\"No network named {name}\")", "def reverse(addr:str) -> Optional[str]:\n\t\t\n\t\ttry:\n\t\t\treturn _reverse(addr)[0]\n\t\texcept DNSError:\n\t\t\treturn None", "def get_contract_by_address(self, address: str):\n try:\n contract_records = self._registry.search(contract_address=address)\n except RuntimeError:\n raise self.InterfaceError('Corrupted Registrar') # TODO: Integrate with Registry\n else:\n if not contract_records:\n raise self.InterfaceError(\"No such contract with address {}\".format(address))\n return contract_records[0]", "def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")", "def ensure_token_network_discovery(\n self, token: CustomToken, token_network_addresses: TokenNetworkAddress\n ) -> None:\n for 
node in self.node_controller: # type: ignore\n node_endpoint = API_URL_TOKEN_NETWORK_ADDRESS.format(\n protocol=self.protocol,\n target_host=node.base_url,\n token_address=to_checksum_address(token.address),\n )\n address = wait_for_token_network_discovery(\n node_endpoint, self.definition.settings, self.session\n )\n if to_canonical_address(address) != Address(token_network_addresses):\n raise RuntimeError(\n f\"Nodes diverged on the token network address, there should be \"\n f\"exactly one token network available for all nodes. Current \"\n f\"values : {to_hex(token_network_addresses)}\"\n )", "def network_get(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.get_network(**kwargs)", "async def get_balance(sochain_url:str, network:str, address:str):\n try:\n balance = await sochain_api.get_balance(sochain_url, network, address)\n if balance == None:\n raise Exception(\"Invalid Address\")\n return balance\n except Exception as err:\n raise Exception(str(err))", "def _get_network_type(self, host):\n network_type = host.get(\"network\")\n default_network = self.config.get(\"default_network\")\n if network_type is None:\n network_type = self._metadata.get(\"network\", default_network)\n if not network_type:\n raise ProvisioningConfigError(\n \"No network type specified and project doesn't have default \"\n \"network type (property 'default_network') specified in \"\n \"provisioning config.\"\n )\n return network_type", "def canonicalize_network_info(name, vpc, region):\n return Network(name=name, network_id=vpc[\"VpcId\"], cidr_block=vpc[\"CidrBlock\"], region=region)", "def detect_network(card_number):\n if len(card_number) == 15 and card_number[:2] in (\"34\", \"37\"):\n return \"American Express\"\n elif len(card_number) == 14 and card_number[:2] in (\"38\", \"39\"):\n return \"Diners Club\"\n return None", "def getSSID(self, ssid):\n for network in self.networks:\n if network.getSSID() == ssid:\n return network\n else:\n raise Exception('Network with SSID \"%s\" not found' % profile)", "def get_nearest_operation(\n db: Redis[bytes], address: hash_t, subdag: Optional[str] = None\n) -> Optional[Operation]:\n root = \"root\"\n art = None\n try:\n node = Operation.grab(db, address)\n return node\n except RuntimeError:\n # one possibility is that address is an artefact...\n try:\n art = Artefact[Any].grab(db, address)\n except RuntimeError:\n raise RuntimeError(\n f\"address {address} neither a valid operation nor a valid artefact.\"\n )\n\n if art.parent == root:\n # We have basically just a single artefact as the network...\n return None\n else:\n node = Operation.grab(db, art.parent)\n return node", "def network_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_id\")", "def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error", "def _get_addr(self, protocol, address):\n if address:\n return address[0]\n else:\n return protocol.transport.getPeer().host", "def dial(address: str, network: Optional[str]=None):\n return NotImplementedError()", "def network(self) -> str:\n return pulumi.get(self, \"network\")", "def get_group(self, address):\n return self.groups[address]", "def _get_reverse_for_ipv4_address(cls, address):\n parts = list(reversed(address.split(\".\")))[1:]\n while parts:\n name = \"%s.in-addr.arpa\" % \".\".join(parts)\n zone = DNSZone.get_by_name(name)\n if zone:\n return zone\n parts.pop(0)\n 
return None", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")", "def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None", "def lookup(self, address):\n if self.noop:\n return self.default_val\n sock = self.socket_lib\n gsocket = sock.socket(socket.AF_INET, socket.SOCK_STREAM)\n gsocket.settimeout(self.timeout)\n gsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n with statsd.timer('z.geoip'):\n try:\n gsocket.connect((self.host, self.port))\n # Remember, we're using a timeout, so don't call makefile()!\n send = 'GET %s\\n' % address\n tsent = 0\n while tsent < len(send):\n sent = gsocket.send(send[tsent:])\n if sent == 0:\n raise IOError('Socket connection broken')\n tsent += sent\n rcv = ''\n while True:\n try:\n rcvm = gsocket.recv(1)\n if len(rcvm) == 0:\n break\n except StopIteration:\n # This is required for unit testing.\n break\n rcv += rcvm\n reply = json.loads(rcv)\n if 'error' in reply:\n return self.default_val\n else:\n return reply['success']['country_code'].lower()\n except socket.timeout:\n logging.warn('GeoIP server timeout. '\n 'Returning default')\n return self.default_val\n except IOError, e:\n logging.error('GeoIP server down or missing')\n return self.default_val\n except Exception, e:\n logging.error('Unknown exception: %s', str(e))\n return self.default_val\n finally:\n gsocket.close()", "def get_network_type(self):\n net_type = self._data['type']\n if net_type == 'Shared':\n return 'guest'\n elif net_type == 'Isolated':\n return 'isolated'", "def wait_for_network(container, timeout=30):\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if 'eth0' in container.state().network:\n addresses = container.state().network['eth0']['addresses']\n if len(addresses) > 0:\n if addresses[0]['family'] == 'inet':\n return addresses[0]\n return None", "def follows_token_network(self, token_network_address: Address) -> bool:\n assert is_checksum_address(token_network_address)\n\n return token_network_address in self.token_networks.keys()", "def network_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkMode']]]:\n return pulumi.get(self, \"network_mode\")", "def get_address(address: str) -> Tuple[str, str, str]:\n\n # Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. 
Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def get_network_profile(arn=None):\n pass", "def network_address(self):\n\n return self._network_address", "def extract_token(coin_name):\n\n cname = coin_name.lower()\n url = f\"https://coinmarketcap.com/currencies/{cname}/\"\n r = requests.get(url)\n if r.status_code != 200:\n raise ValueError(\"Unknown coin name. 
Please check the url \"\n \"on coinmarketcap.com\")\n else:\n html_page = r.text\n n = html_page.find(\"https://etherscan.io/token\")\n\n if n == -1:\n raise ValueError(\"Error : Coin not on Ethereum platform\")\n else:\n token = html_page[n+27:n+69]\n return token", "def create_address(self, address: str) -> Optional[Address]:\n raise NotImplemented", "def get_network_type(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetNetworkType', self.handle)", "def get_network(self, name_or_id, filters=None):\n if not filters:\n filters = {}\n return self.network.find_network(\n name_or_id=name_or_id, ignore_missing=True, **filters\n )", "def network(ip):\n ip, prefix = netParse(ip)\n return \"{}/{}\".format(\n ipStr(ip & (0xffffffff << (32 - prefix))),\n prefix\n )", "def _parse_inet(line):\n tokens = line.split()\n return netaddr.IPNetwork(tokens[1])", "def get_zone(cls, name):\n\n def get_closest(n):\n \"\"\"\n Return closest matching zone\n \"\"\"\n while n:\n try:\n return DNSZone.objects.get(name=n)\n except DNSZone.DoesNotExist:\n pass\n n = \".\".join(n.split(\".\")[1:])\n return None\n\n if not name:\n return None\n if is_ipv4(name):\n # IPv4 zone\n n = name.split(\".\")\n n.reverse()\n return get_closest(\"%s.in-addr.arpa\" % (\".\".join(n[1:])))\n elif is_ipv6(name):\n # IPv6 zone\n d = IPv6(name).digits\n d.reverse()\n c = \".\".join(d)\n return get_closest(\"%s.ip6.arpa\" % c) or get_closest(\"%s.ip6.int\" % c)\n else:\n return get_closest(name)", "def cidr_2_nwm(addr):\n if addr is None:\n return (None, None)\n nw_addr, nw_len = addr.split('/')\n nw_len = len_to_wild_mask(int(nw_len))\n return nw_addr, nw_len", "def getSymbolAt(self, address: ghidra.program.model.address.Address, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> ghidra.program.model.symbol.Symbol:\n ...", "def get_range(self, address):\n index = bisect.bisect(self.ranges, MemoryRange.from_addr(address))\n if index >= len(self.ranges):\n return None\n memrange = self.ranges[index-1]\n if address in memrange:\n return memrange\n return None", "def get_floating_ip_by_address(self, context, address):\n # NOTE(vish): This is no longer used but can't be removed until\n # we major version the network_rpcapi.\n return objects.FloatingIP.get_by_address(context, address)", "def network_policy(self) -> Optional[pulumi.Input[Union[str, 'NetworkPolicy']]]:\n return pulumi.get(self, \"network_policy\")", "def get(self, currency, address):\n check_inputs(address=address, currency=currency) # abort if fails\n addr = commonDAO.get_address(currency, address)\n if addr:\n addr['tags'] = commonDAO.list_address_tags(currency, address)\n return addr\n abort(404, \"Address {} not found in currency {}\".format(address,\n currency))", "def network_config(self) -> Optional[pulumi.Input['NodeNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def get_network(session, network):\n # type: (Session, str) -> Dict[str, Any]\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{network}\"\n return _get_dict(session, url_tail)", "def private_network(self) -> str:\n return pulumi.get(self, \"private_network\")", "def returnNetworkNode(self):\n\n networkNodes = cmds.ls(type=\"network\")\n for node in networkNodes:\n attrs = cmds.listAttr(node)\n if \"moduleName\" in attrs:\n if cmds.getAttr(node + \".moduleName\") == self.name:\n networkNode = node\n\n return networkNode", "def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net", "def get_name(self, address):\n our_beacon = 
self.format_beacon('connected', False)\n machine_name = re.compile('machine=(.*)\\n').search\n\n try:\n tsock = socket.socket()\n tsock.connect((address, 2190))\n self.send_packet(tsock, our_beacon)\n tivo_beacon = self.recv_packet(tsock)\n tsock.close()\n name = machine_name(tivo_beacon).groups()[0]\n except:\n name = address\n\n return name", "def get_default_network_policy(con):\n try:\n return con.network_policy_read(fq_name=conf.get('default_network_policy', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find default_network_policy')\n return None", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def network_profile(self) -> Optional[pulumi.Input['NetworkProfileArgs']]:\n return pulumi.get(self, \"network_profile\")", "def getSymbolAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...", "def get_normal(addr):\n try:\n return o_map[addr]\n except KeyError:\n return None", "def cloud_services_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cloud_services_network_name\")", "def find_stop_near(address):\n latlong = get_lat_long(address)\n latitude = latlong[0]\n longitude = latlong[1]\n nearStop = get_nearest_station(latitude, longitude)\n return nearStop", "def private_network(self, **kwargs):\r\n return self._get_network('backend', **kwargs)", "def network_config(self) -> Optional[pulumi.Input['PrivateCloudNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")", "def add_network(self, addr, netmask):\n\n if len(addr) == 4:\n return ipset.ipset_ipv4_add_network(self.set, addr, netmask)\n\n elif len(addr) == 16:\n return ipset.ipset_ipv6_add_network(self.set, addr, netmask)\n\n else:\n raise ValueError(\"Invalid address\")", "def get_balance_response(address):\n call = Address(address=address)\n response = call.get_address_info()\n if response:\n return response\n else:\n return None", "def get(cls, address):\r\n def lookup():\r\n return cls._targets_by_address.get(address, None)\r\n\r\n target = lookup()\r\n if target:\r\n return target\r\n else:\r\n ParseContext(address.buildfile).parse()\r\n return lookup()" ]
[ "0.63604003", "0.61137646", "0.610317", "0.59229994", "0.57684386", "0.5742403", "0.5735001", "0.5647258", "0.5518795", "0.55021477", "0.5481944", "0.539838", "0.53970176", "0.5341332", "0.52976215", "0.5289916", "0.5274275", "0.52362585", "0.5219444", "0.52164847", "0.52133673", "0.51697457", "0.5158092", "0.5134033", "0.5128291", "0.5112103", "0.5102305", "0.507037", "0.5046694", "0.5031843", "0.5029762", "0.5029646", "0.50269943", "0.5026648", "0.502528", "0.49460715", "0.49423674", "0.4930759", "0.49294895", "0.49238625", "0.4922752", "0.4911804", "0.49092147", "0.49065945", "0.48988816", "0.48886195", "0.48656937", "0.48467195", "0.48407838", "0.4832399", "0.4831196", "0.48295304", "0.48244935", "0.48232782", "0.48149624", "0.4813956", "0.48048168", "0.4788644", "0.47627166", "0.47443345", "0.4738187", "0.47335503", "0.4731636", "0.4710435", "0.47078702", "0.47058094", "0.47013137", "0.4684779", "0.4683347", "0.468246", "0.46820822", "0.46798772", "0.46758544", "0.46527693", "0.46514097", "0.46495852", "0.46495444", "0.4643919", "0.46235308", "0.46206325", "0.4619626", "0.46195287", "0.46163744", "0.46151674", "0.46052074", "0.4599854", "0.4598451", "0.45796084", "0.4579576", "0.45743012", "0.45743012", "0.45718688", "0.45589417", "0.45588753", "0.45498163", "0.45352492", "0.4534043", "0.45294324", "0.4528442", "0.4528213" ]
0.8030924
0
Return the URL for the online feedback service
def get_feedback_url(self): return self.get_setting('service_feedback_url')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def support_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"support_url\")", "def Url(self) -> str:", "def feedback():\n return render_template(\"feedback.html\")", "def get_url():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n region = config.get('auth', 'region')\n host = REGION_MAP[region]\n return (\n f\"wss://{host}/speech-to-text/api/v1/recognize\"\n \"?model=en-US_BroadbandModel&x-watson-learning-opt-out=true\"\n )", "def support_url(self) -> str:\n return self._support_url", "async def feedback(self, ctx, *, feedback):\n url = os.environ.get(\"FEEDBACK_WEBHOOK\", None)\n if url:\n webhook = Webhook.from_url(url, adapter=RequestsWebhookAdapter())\n embed = discord.Embed(description=feedback, colour=discord.Colour.teal())\n embed.set_author(name=f\"{ctx.author.name}#{ctx.author.discriminator}\", icon_url=ctx.author.avatar_url)\n embed.set_footer(text=f\"User id: {ctx.author.id}\")\n webhook.send(embed=embed)\n await ctx.send(embed=embeds.success(\"Sent the feedback!\"))\n else:\n await ctx.send(embed=embeds.error(\"This command is disabled.\"))", "def support_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"support_url\")", "def prog_url(self):\n # type: () -> string_types\n return self._prog_url", "def link(self):\n return 'http://{}:{}'.format(self.basic_url, self.port)", "def get_question(url):\n return EXTERNAL_URL_QUESTION.format(url)", "def get_url(self, cfg, option, default):\n if cfg.has_option('kattis', option):\n return cfg.get('kattis', option)\n else:\n return 'https://%s/%s' % (cfg.get('kattis', 'hostname'), default)", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def _url(self):\n return 'contact/{email}'.format(email=self.email)", "def insta_url_dialogue(update: Update, _: CallbackContext) -> int:\n\n update.message.reply_text(\n 'Send url to download',\n reply_markup=ReplyKeyboardRemove(),\n )\n return INSTAGRAM_URL", "def get_api_url() -> str:\n\n\tsite = pywikibot.Site()\n\turl = site.protocol() + \"://\" + site.hostname() + site.apipath()\n\treturn url", "def full_url(self):\n return self.url + \"?channel_id=\" + self.external_id", "def getBuildbotURL():", "def url():\n ...", "def end_user_support_help_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"end_user_support_help_url\")", "def end_user_support_help_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"end_user_support_help_url\")", "def end_user_support_help_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"end_user_support_help_url\")", "def ticket_url(request):\n return custom_message(request, _(\"Permesso negato\"))", "def url(self):\n\n # We run the preset specific to this method.\n self.preset.simple_url()\n # We print the header if it was not done yet.\n PyFunceble.CLICore.print_header()\n\n if self.subject:\n if PyFunceble.CONFIGURATION[\"syntax\"]:\n # The syntax mode is activated.\n\n # We get the status from SyntaxStatus.\n status = SyntaxStatus(self.subject, subject_type=\"url\").get()[\"status\"]\n else:\n # We test and get the status of the domain.\n status = URLStatus(self.subject, subject_type=\"url\").get()[\"status\"]\n\n if PyFunceble.CONFIGURATION[\"simple\"]:\n # The simple mode is activated.\n\n # We print the domain and the status.\n print(\n \"{0} {1}\".format(\n FileCore.get_simple_coloration(status) + self.subject, status\n )\n )\n else:\n PyFunceble.CLICore.print_nothing_to_test()", "def resource_url(self):\n return 
self.portal_url + \"/\" + \"++resource++plonecommunity.app\"", "def get_api_url() -> str:\n\n site = pywikibot.Site()\n url = site.protocol() + \"://\" + site.hostname() + site.apipath()\n return url", "def _getURL(serviceName, options):\n system = options['System']\n port = options['Port']\n host = socket.gethostname()\n url = 'dips://%s:%s/%s/%s' % (host, port, system, serviceName)\n return url", "def support_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"support_url\")", "def support_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"support_url\")", "def support_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"support_url\")", "def support_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"support_url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def getFlixConnectURL(self):\n output = \"http://\" + Mode().get(\"[flixConnectServer]\") + \":\" + Mode().get(\"[flixConnectPort]\") + \"/\"\n\n# print \"ServerFlixFunctions - getFlixConnectURL url:\", output\n\n return output", "def getUrl(self): #$NON-NLS-1$\r", "def getUrl(self): #$NON-NLS-1$\r", "def url(self):\n return self._client.url", "def _get_uri(plex_server):\n return plex_server.url(\n \"/:/websockets/notifications\", includeToken=True\n ).replace(\"http\", \"ws\")", "def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'", "def url(request):\n return request.config.getoption(\"--url\")", "def api_url(self):\n return self.get_api_url()", "def channelURL(self):\n bytes = self.radioConfig.channel_settings.SerializeToString()\n s = base64.urlsafe_b64encode(bytes).decode('ascii')\n return f\"https://www.meshtastic.org/c/#{s}\"", "def web_url(self) -> str:\n return pulumi.get(self, \"web_url\")", "def get_success_url(self):\n url = reverse(\n \"qa:question-create\"\n ) + \"?success=true\"\n return url", "def feedback(request):\n feedback = request.POST.get('feedback', None)\n if not feedback:\n return {\n 'res': 'failed'\n }\n feedback.replace('\\n', '<br>')\n user = request.user\n subject = email_templates.feedback['subject']\n content = email_templates.feedback['content'] % \\\n (user.username, feedback)\n admin_emails = [admin[1] for admin in ADMINS]\n email_users(subject, content, admin_emails,\n from_email=user.email)\n return {\n 'res': 'success'\n }", "def url(self):\n return 'http://%s:%d' % (self._host, self._port)", "def url(self):\n endpoint = 'taskinfo?taskID=%d' % self.id\n return posixpath.join(self.connection.weburl, endpoint)", "def TrackerURL(issue):\n # make the server/project customizable?\n return 'http://code.google.com/p/lilypond/issues/detail?id=%s' % issue", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.dailymotion.com/%s' % self.get_video_id()", "def _get_api_url(self):\n\n return f'https://api.telegram.org/bot{self.api_key}/sendMessage'", "def GetURL(self, rel_url):\n return 
'http://localhost:%d/%s' % (self.port, rel_url)", "def get_scraper_url(self):\r\n \r\n return self.reformat_scraper_url()", "def url(self):\n return self.full()", "def get_user_url(self):\n print(\"Please enter the a website you wish to monitor\")\n url = input().lower()\n return url", "def url (self):\n return Links.createURL('/')", "def service_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_url\")", "def parse_url(feedback):\n data = {}\n if 'feedback' in feedback.url or '?' not in feedback.url:\n return data\n split_fields = feedback.url.split('?')[1].split('&')\n for field in split_fields:\n pair = field.split('=')\n data[pair[0]] = pair[1]\n return data", "def getCommentsURL(self, source):\n return \"http://news.ycombinator.com/item?id=\" + str(self.getHNID(source))", "def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/event-notification/kafka/server\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)", "def get_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://www.livestream.com/%s/video?clipId=%s' % (self.get_username(), self.get_video_id())", "def url(self):\n return app.settings.cherrypy.url()", "def url(self):\n ...", "def web_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"web_url\")", "def feed_link(self):\n return self.url.replace(\"http://\", \"feed://\")", "def get_service_url():\n return get_config_handler().get_service_url()", "def marketing_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"marketing_url\")", "def url_shortner(self):", "def openContactUrl(self):\r\n url = QUrl(\"http://freeseer.readthedocs.org/en/latest/contact.html\")\r\n QDesktopServices.openUrl(url)", "def demo_url():\n return render_template('demo_page.html')", "def _get_client_url(self, args):\n if args.os_nal_url:\n return args.os_nal_url\n else:\n return None", "def url(self) -> str:\n if \"main\" not in self._resources:\n self._initialize()\n return self._resources[\"main\"].url", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def url(self):\n url = self.url\n return url", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def discovery_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"discovery_url\")", "def _createSiteUrl(self,code):\n try:\n url='http://boardreader.com/index.php?a=rss&q=%s&p=%s&format=RSS2.0' %\\\n (code,tg.config.get(path='Connector', key='boardreader_numresults'))\n log.debug(self.log_msg(\"seed url : %s\" %(url)))\n return url\n except:\n log.exception(self.log_msg(\"Exception occured while creating url\"))", "def url(self):\r\n return self.urlparts.geturl()", "def url(i, extension='.com'):\r\n\r\n return email(i, extension)", "def tracking_url(self) -> str:\n return pulumi.get(self, \"tracking_url\")", "def get_remote_url(self, alias):\n url = self.url_base + 'download/current/'\n if 'interactions' in alias:\n url += \"interactors/\" + alias + '.txt'\n else:\n url += alias + '.txt'\n return url", "def get_buildbot_url():\n return \"http://10.45.4.98:8001/\"", "def apiurl(self):\n return self._apiurl", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[Optional[str]]:\n return 
pulumi.get(self, \"url\")", "def primary_channel_url(self) -> str:\n return pulumi.get(self, \"primary_channel_url\")", "def marketing_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"marketing_url\")", "def marketing_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"marketing_url\")", "def embed_url(self):\n\n ref_number = self.ID\n embed_link = \"\".join(('https://embeds.datpiff.com/mixtape/', \n str(ref_number),\n '?trackid=1&platform=desktop'))\n return embed_link", "def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/event-notification/kafka/server\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)", "def url(self) -> str:\n return self._url" ]
[ "0.61752695", "0.61325103", "0.6079804", "0.6072169", "0.6037325", "0.59497833", "0.5917915", "0.5912349", "0.5900737", "0.58638495", "0.5862365", "0.58503306", "0.583637", "0.58221316", "0.5819276", "0.580274", "0.5801738", "0.5754639", "0.57428956", "0.57399815", "0.57399815", "0.5737113", "0.5730264", "0.57150906", "0.57073337", "0.57039386", "0.56818527", "0.56818527", "0.56818527", "0.56818527", "0.5678268", "0.5678268", "0.56721175", "0.5665693", "0.5665693", "0.56627524", "0.56487864", "0.5629891", "0.5612051", "0.56077725", "0.55862033", "0.5573215", "0.5560977", "0.55495214", "0.5534975", "0.55338323", "0.5527581", "0.55247724", "0.55247724", "0.55247724", "0.55247724", "0.55247724", "0.55247724", "0.55247724", "0.55244786", "0.5523201", "0.5504216", "0.5493262", "0.5486485", "0.54859", "0.54835933", "0.54792726", "0.54778457", "0.5474142", "0.5471659", "0.5466386", "0.5432691", "0.54314876", "0.5429618", "0.5429247", "0.54285115", "0.5427835", "0.5423893", "0.54224384", "0.54114765", "0.5408705", "0.54051536", "0.5384198", "0.5384198", "0.5384198", "0.53841466", "0.53841466", "0.53808427", "0.5379695", "0.5374139", "0.5368698", "0.53651774", "0.53578734", "0.535267", "0.53435385", "0.5339966", "0.533344", "0.5328356", "0.5328356", "0.53243566", "0.53242624", "0.53242624", "0.53225064", "0.53203064", "0.53202814" ]
0.78211945
0
Load CliMAF standard operators. Invoked by the standard CliMAF setup. The operators list also shows in variable 'cscripts'. They are documented elsewhere
def load_standard_operators(): # # Compute scripts # cscript('select' ,scriptpath+'mcdo.sh "${operator}" "${out}" "${var}" "${period_iso}" "${domain}" "${alias}" "${units}" "${missing}" ${ins} ', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('ccdo', scriptpath+'mcdo.sh "${operator}" "${out}" "${var}" "${period_iso}" "${domain}" "${alias}" "${units}" "${missing}" ${ins}') # cscript('minus', 'cdo sub ${in_1} ${in_2} ${out}', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('plus', 'cdo add ${in_1} ${in_2} ${out}', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('space_average', scriptpath+'mcdo.sh fldmean "${out}" "${var}" "${period_iso}" "${domain}" "${alias}" "${units}" "${missing}" ${ins}', commuteWithTimeConcatenation=True) # cscript('time_average' , scriptpath+'mcdo.sh timmean "${out}" "${var}" "${period_iso}" "${domain}" "${alias}" "${units}" "${missing}" ${ins}' , commuteWithSpaceConcatenation=True) # cscript('llbox' , scriptpath+'mcdo.sh "" "${out}" "${var}" "${period_iso}" "${latmin},${latmax},${lonmin},${lonmax}" "${alias}" "${units}" "${missing}" ${ins}', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('regrid' , scriptpath+'regrid.sh ${in} ${in_2} ${out} ${option}', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('regridn' , scriptpath+'regrid.sh ${in} ${cdogrid} ${out} ${option}', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('rescale' , 'cdo expr,\"${var}=${scale}*${var}+${offset};\" ${in} ${out}', commuteWithTimeConcatenation=True, commuteWithSpaceConcatenation=True) # cscript('mean_and_std', scriptpath+'mean_and_std.sh ${in} ${var} ${out} ${out_sdev}', # This tells CliMAF how to compute varname for name output 'sdev' # using input varname sdev_var="std(%s)" , commuteWithTimeConcatenation=True) # # Declare plot scripts cscript('ncview' ,'ncview ${in} 1>/dev/null 2>&1&' ) # cscript('timeplot', 'ncl '+scriptpath+'timeplot.ncl infile=\'\"${in}\"\' outfile=\'\"${out}\"\' ' 'var=\'\"${var}\"\' title=\'\"${title}\"\'',format="png") # # plot: main field (main_file) + auxiliary field (aux_file, optional) + vectors (u_file & v_file, optionals) # cscript('plot' , '(ncl -Q '+ scriptpath +'gplot.ncl main_file=\'\"${in}\"\' aux_file=\'\"${in_2}\"\' ' 'u_file=\'\"${in_3}\"\' v_file=\'\"${in_4}\"\' rotation=${rotation} ' 'plotname=\'\"${out}\"\' cmap=\'\"${color}\"\' vmin=${min} vmax=${max} vdelta=${delta} ' 'main_var=\'\"${var}\"\' aux_var=\'\"${var_2}\"\' u_var=\'\"${var_3}\"\' v_var=\'\"${var_4}\"\' ' 'title=\'\"${title}\"\' scale=${scale} offset=${offset} mpCenterLonF=${mpCenterLonF} ' 'vcRefMagnitudeF=${vcRefMagnitudeF} vcRefLengthF=${vcRefLengthF} vcMinDistanceF=${vcMinDistanceF} ' 'vcGlyphStyle=\'\"${vcGlyphStyle}\"\' vcLineArrowColor=\'\"${vcLineArrowColor}\"\' ' 'units=\'\"${units}\"\' linp=${linp} colors=\'\"${colors}\"\' level=${level} time=${time} ' 'proj=\'\"${proj}\"\' contours=\'\"${contours}\"\' focus=\'\"${focus}\"\' ' 'type=\'\"${format}\"\' resolution=\'\"${resolution}\"\' trim=${trim} ' 'vcb=${vcb} lbLabelFontHeightF=${lbLabelFontHeightF} invXY=${invXY} ' 'tmYLLabelFontHeightF=${tmYLLabelFontHeightF} tmXBLabelFontHeightF=${tmXBLabelFontHeightF} ' 'tmYRLabelFontHeightF=${tmYRLabelFontHeightF} tiXAxisFontHeightF=${tiXAxisFontHeightF} ' 'tiYAxisFontHeightF=${tiYAxisFontHeightF} gsnPolarLabelFontHeightF=${gsnPolarLabelFontHeightF} ' 'tiMainFont=\'\"${tiMainFont}\"\' 
tiMainFontHeightF=${tiMainFontHeightF} ' 'tiMainPosition=\'\"${tiMainPosition}\"\' gsnLeftString=\'\"${gsnLeftString}\"\' ' 'gsnRightString=\'\"${gsnRightString}\"\' gsnCenterString=\'\"${gsnCenterString}\"\' ' 'gsnStringFont=\'\"${gsnStringFont}\"\' gsnStringFontHeightF=${gsnStringFontHeightF} )', format="graph") # cscript('lines' , '(ncl -Q '+ scriptpath +'lineplot.ncl infile=\'\"${mmin}\"\' ' 'plotname=\'\"${out}\"\' var=\'\"${var}\"\' title=\'\"${title}\"\' ' 'linp=${linp} labels=\'\"${labels}\"\' colors=\'\"${colors}\"\' thickness=${thickness}' 'T_axis=\'\"${T_axis}\"\' fmt=\'\"${fmt}\"\' && ' 'convert ${out} -trim ${out}) ', format="png") # cscript('curves' , '(ncl -Q '+ scriptpath +'curves.ncl infile=\'\"${mmin}\"\' ' 'plotname=\'\"${out}\"\' var=\'\"${var}\"\' title=\'\"${title}\"\' ' 'labels=\'\"${labels}\"\' colors=\'\"${colors}\"\' thickness=${thickness} && ' 'convert ${out} -trim ${out}) ', format="png") # # cpdfcrop : pdfcrop by preserving metadata # cscript('cpdfcrop' , 'pdfcrop ${in} ${out} ', format="pdf") # cscript('ncdump' , 'ncdump -h ${in} ', format="txt") # if (os.system("type cdfmean >/dev/null 2>&1")== 0 ) : load_cdftools_operators() else : clogger.warning("No Cdftool available")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _basic_operators_init():\n global BASIC_OPERATORS\n\n BASIC_OPERATORS = {\n \"angle_between\": {\n \"node\": \"angleBetween\",\n \"inputs\": [\n [\"vector1X\", \"vector1Y\", \"vector1Z\"],\n [\"vector2X\", \"vector2Y\", \"vector2Z\"],\n ],\n \"outputs\": [\n [\"angle\"],\n ],\n },\n\n \"average\": {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": 3,\n },\n\n \"blend\": {\n \"node\": \"blendColors\",\n \"inputs\": [\n [\"color1R\", \"color1G\", \"color1B\"],\n [\"color2R\", \"color2G\", \"color2B\"],\n [\"blender\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"choice\": {\n \"node\": \"choice\",\n \"inputs\": [\n [\"input[{array}]\"],\n [\"selector\"],\n ],\n \"outputs\": [\n [\"output\"],\n ],\n },\n\n \"clamp\": {\n \"node\": \"clamp\",\n \"inputs\": [\n [\"inputR\", \"inputG\", \"inputB\"],\n [\"minR\", \"minG\", \"minB\"],\n [\"maxR\", \"maxG\", \"maxB\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"compose_matrix\": {\n \"node\": \"composeMatrix\",\n \"inputs\": [\n [\"inputTranslateX\", \"inputTranslateY\", \"inputTranslateZ\"],\n [\"inputRotateX\", \"inputRotateY\", \"inputRotateZ\"],\n [\"inputScaleX\", \"inputScaleY\", \"inputScaleZ\"],\n [\"inputShearX\", \"inputShearY\", \"inputShearZ\"],\n [\"inputRotateOrder\"],\n [\"useEulerRotation\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"decompose_matrix\": {\n \"node\": \"decomposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputTranslateX\", \"outputTranslateY\", \"outputTranslateZ\"],\n [\"outputRotateX\", \"outputRotateY\", \"outputRotateZ\"],\n [\"outputScaleX\", \"outputScaleY\", \"outputScaleZ\"],\n [\"outputShearX\", \"outputShearY\", \"outputShearZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"inverse_matrix\": {\n \"node\": \"inverseMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"length\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"point1X\", \"point1Y\", \"point1Z\"],\n [\"point2X\", \"point2Y\", \"point2Z\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"matrix_distance\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"inMatrix1\"],\n [\"inMatrix2\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"mult_matrix\": {\n \"node\": \"multMatrix\",\n \"inputs\": [\n [\n \"matrixIn[{array}]\"\n ],\n ],\n \"outputs\": [\n [\"matrixSum\"],\n ],\n },\n\n \"normalize_vector\": {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": 0,\n },\n\n \"pair_blend\": {\n \"node\": \"pairBlend\",\n \"inputs\": [\n [\"inTranslateX1\", \"inTranslateY1\", \"inTranslateZ1\"],\n [\"inRotateX1\", \"inRotateY1\", \"inRotateZ1\"],\n [\"inTranslateX2\", \"inTranslateY2\", \"inTranslateZ2\"],\n [\"inRotateX2\", \"inRotateY2\", \"inRotateZ2\"],\n [\"weight\"],\n [\"rotInterpolation\"],\n ],\n \"outputs\": [\n [\"outTranslateX\", \"outTranslateY\", \"outTranslateZ\"],\n [\"outRotateX\", \"outRotateY\", \"outRotateZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"point_matrix_mult\": {\n \"node\": \"pointMatrixMult\",\n \"inputs\": [\n [\"inPointX\", \"inPointY\", 
\"inPointZ\"],\n [\"inMatrix\"],\n [\"vectorMultiply\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n },\n\n \"remap_value\": {\n \"node\": \"remapValue\",\n \"inputs\": [\n [\"inputValue\"],\n [\"outputMin\"],\n [\"outputMax\"],\n [\"inputMin\"],\n [\"inputMax\"],\n ],\n \"outputs\": [\n [\"outValue\"],\n ],\n },\n\n \"set_range\": {\n \"node\": \"setRange\",\n \"inputs\": [\n [\"valueX\", \"valueY\", \"valueZ\"],\n [\"minX\", \"minY\", \"minZ\"],\n [\"maxX\", \"maxY\", \"maxZ\"],\n [\"oldMinX\", \"oldMinY\", \"oldMinZ\"],\n [\"oldMaxX\", \"oldMaxY\", \"oldMaxZ\"],\n ],\n \"outputs\": [\n [\"outValueX\", \"outValueY\", \"outValueZ\"],\n ],\n },\n\n \"transpose_matrix\": {\n \"node\": \"transposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n }\n\n # Fill BASIC_OPERATORS with condition operations\n cond_operators = [\"eq\", \"ne\", \"gt\", \"ge\", \"lt\", \"le\"]\n for i, condition_operator in enumerate(cond_operators):\n BASIC_OPERATORS[condition_operator] = {\n \"node\": \"condition\",\n \"inputs\": [\n [\"firstTerm\"],\n [\"secondTerm\"],\n ],\n # The condition node is a special case! It gets created during\n # the magic-method-comparison and fully connected after being\n # passed on to the condition()-method in this OperatorMetaClass\n \"outputs\": [\n [None],\n ],\n \"operation\": i,\n }\n\n # Fill BASIC_OPERATORS with +,- operations\n for i, add_sub_operator in enumerate([\"add\", \"sub\"]):\n BASIC_OPERATORS[add_sub_operator] = {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with *,/,** operations\n for i, mult_div_operator in enumerate([\"mul\", \"div\", \"pow\"]):\n BASIC_OPERATORS[mult_div_operator] = {\n \"node\": \"multiplyDivide\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with vectorProduct operations\n for i, vector_product_operator in enumerate([\"dot\", \"cross\"]):\n BASIC_OPERATORS[vector_product_operator] = {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }", "def RunOperators(ops_def):\n RunOperatorsCC([_stringify_proto(op_def) for op_def in ops_def])", "def __init__(self):\n \n self.label = \"ArcSDM Tools\"\n self.alias = \"ArcSDM\" \n\n # List of tool classes associated with this toolbox\n self.tools = [PartitionNNInputFiles, CombineNNOutputFiles, NeuralNetworkOutputFiles, NeuralNetworkInputFiles, \n CalculateWeightsTool,SiteReductionTool,CategoricalMembershipToool,\n CategoricalAndReclassTool, TOCFuzzificationTool, CalculateResponse, LogisticRegressionTool, Symbolize, \n ROCTool, AgterbergChengCITest, AreaFrequencyTable, GetSDMValues, GrandWofe]", "def __init__(self):\r\n self.label = \"Toolbox\"\r\n self.alias = \"Geodesic Densification using arcpy\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [GeodesicDensification_arcpy]", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n 
formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def set_linker_script(self, op):\n self.__linker_script = [\"-T\", op]", "def cmdline(self, executable, options, task, rlimits):\n data_model_param = get_data_model_from_task(task, {ILP32: \"-m32\", LP64: \"-m64\"})\n print(options)\n if data_model_param and not any(\n option.startswith(\"--clang-options=\") for option in options\n ):\n options += [\"--clang-options=\" + data_model_param]\n\n if task.property_file:\n options += [\"--svcomp-property\", task.property_file]\n else:\n raise UnsupportedFeatureException(\n \"SMACK can't execute without a property file.\"\n )\n\n options += [task.single_input_file]\n\n return [executable] + options", "def modulecmds():\n\n class Commands:\n @staticmethod\n def setenv(key, val=None):\n val = val or key\n return \"setenv({0!r}, {1!r})\\n\".format(key, val)\n\n @staticmethod\n def unsetenv(key):\n return \"unsetenv({0!r})\\n\".format(key)\n\n @staticmethod\n def load(x):\n return \"load({0!r})\\n\".format(x)\n\n @staticmethod\n def load_first(*x):\n x = \",\".join(\"{0!r}\".format(_) for _ in x)\n return \"load_first({0})\\n\".format(x)\n\n @staticmethod\n def unload(x):\n return \"unload({0!r})\\n\".format(x)\n\n @staticmethod\n def prepend_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"prepend_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def append_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"append_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, 
sep)\n\n @staticmethod\n def remove_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"remove_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def set_alias(key, val):\n return \"set_alias({0!r},{1!r})\\n\".format(key, val)\n\n @staticmethod\n def unset_alias(key):\n return \"unset_alias({0!r})\\n\".format(key)\n\n @staticmethod\n def set_shell_function(key, val):\n return \"set_shell_function({0!r},{1!r})\\n\".format(key, val)\n\n @staticmethod\n def unset_shell_function(key):\n return \"unset_shell_function({0!r})\\n\".format(key)\n\n @staticmethod\n def use(path):\n return \"use({0!r})\\n\".format(path)\n\n @staticmethod\n def unuse(path):\n return \"unuse({0!r})\\n\".format(path)\n\n @staticmethod\n def swap(a, b):\n return \"swap({0!r}, {1!r})\\n\".format(a, b)\n\n @staticmethod\n def family(x):\n return \"family({0!r})\\n\".format(x)\n\n @staticmethod\n def conflict(x):\n return \"conflict({0!r})\\n\".format(x)\n\n @staticmethod\n def prereq(x):\n return \"prereq({0!r})\\n\".format(x)\n\n @staticmethod\n def prereq_any(*x):\n x = \",\".join(\"{0!r}\".format(_) for _ in x)\n return \"prereq_any({0})\\n\".format(x)\n\n @staticmethod\n def source(f):\n return \"source({0!r})\\n\".format(f)\n\n @staticmethod\n def help(x):\n return \"help({0!r})\\n\".format(x)\n\n @staticmethod\n def whatis(x):\n return \"whatis({0!r})\\n\".format(x)\n\n @staticmethod\n def isloaded(x):\n return \"is_loaded({0!r})\\n\".format(x)\n\n return Commands()", "def cmdline(self, args=()):\r\n cmds = [self._interpreter.binary]\r\n cmds.append(self._pex)\r\n cmds.extend(args)\r\n return cmds", "def __init__(self):\n Cmd.__init__(self)\n self.calc = ReversePolishCalc()", "def cmdline(self, args=()):\n cmds = [self._interpreter.binary]\n cmds.append(self._pex)\n cmds.extend(args)\n return cmds", "def load_operator(file_name=None, data_directory=None, plain_text=False):\n file_path = get_file_path(file_name, data_directory)\n\n if plain_text:\n with open(file_path, 'r') as f:\n data = f.read()\n operator_type, operator_terms = data.split(\":\\n\")\n\n if operator_type == 'FermionOperator':\n operator = FermionOperator(operator_terms)\n elif operator_type == 'BosonOperator':\n operator = BosonOperator(operator_terms)\n elif operator_type == 'QubitOperator':\n operator = QubitOperator(operator_terms)\n elif operator_type == 'QuadOperator':\n operator = QuadOperator(operator_terms)\n else:\n raise TypeError('Operator of invalid type.')\n else:\n with open(file_path, 'rb') as f:\n data = marshal.load(f)\n operator_type = data[0]\n operator_terms = data[1]\n\n if operator_type == 'FermionOperator':\n operator = FermionOperator()\n for term in operator_terms:\n operator += FermionOperator(term, operator_terms[term])\n elif operator_type == 'BosonOperator':\n operator = BosonOperator()\n for term in operator_terms:\n operator += BosonOperator(term, operator_terms[term])\n elif operator_type == 'QubitOperator':\n operator = QubitOperator()\n for term in operator_terms:\n operator += QubitOperator(term, operator_terms[term])\n elif operator_type == 'QuadOperator':\n operator = QuadOperator()\n for term in operator_terms:\n operator += QuadOperator(term, operator_terms[term])\n else:\n raise TypeError('Operator of invalid type.')\n\n return operator", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( 
*arg )", "def load_cli():\n args=IO()\n\n if(args.command is None):\n logging.error('Please provide the appropriate input. Enter \"python -m packman -h\" for more details.')\n exit()\n\n logging.basicConfig(stream=args.logfile)\n\n if(args.pdbid is not None):\n molecule.download_structure(args.pdbid, save_name=args.filename.split('.')[0], ftype=args.filename.split('.')[1])\n\n try:\n extension = args.filename.split('.')[-1]\n mol = molecule.load_structure(args.filename,ftype=extension)\n except:\n logging.warning(\"The filename provided does not appear to have a format extension.\")\n mol = molecule.load_structure(args.filename)\n \n if(args.command == 'hinge'):\n hinge_cli(args,mol)\n elif(args.command == 'hdanm'):\n hdanm_cli(args,mol)\n elif(args.command == 'entropy'):\n entropy_cli(args,mol)\n elif(args.command == 'dci'):\n dci_cli(args,mol)\n\n return True", "def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n self.operation = eval(self._get_op_str())\n self.args = {'operation': self.operation.__name__, 'constargs': constargs, 'randomargs': randomargs}", "def preprocess(self, op):\n args = [self.get_command(), op] + self.__compiler_flags_extra + self.__definitions + self.__include_directories\n if self.command_basename_startswith(\"cl.\"):\n args += [\"/E\"]\n else:\n args += [\"-E\"]\n (so, se) = run_command(args)\n if 0 < len(se) and is_verbose():\n print(se)\n return so", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def set_builtin(self, builtin):\n self.options['builtin'] = builtin", "def test_c_extensions_import():\n import storm_analysis.dbscan.dbscan_c\n \n import storm_analysis.fista.fista_fft_c\n \n import storm_analysis.frc.frc_c\n \n import storm_analysis.L1H.homotopy_imagea_c\n\n import storm_analysis.rolling_ball_bgr.rolling_ball_lib_c\n\n import storm_analysis.sa_library.cs_decon_utilities_c\n import storm_analysis.sa_library.dao_fit_c\n import storm_analysis.sa_library.grid_c\n import storm_analysis.sa_library.ia_utilities_c\n import storm_analysis.sa_library.matched_filter_c\n\n import storm_analysis.sa_utilities.fitz_c\n\n import storm_analysis.simulator.pf_math_c\n import storm_analysis.simulator.draw_gaussians_c\n \n import storm_analysis.spliner.cubic_spline_c\n import storm_analysis.spliner.cubic_fit_c", "def __init__(self):\n self.label = \"Data Assistant\"\n self.alias = \"dla\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Append, Stage, NewFile, Preview, Replace]", "def __init__(self):\r\n self.label = \"OVL Tools\"\r\n self.alias = \"\"\r\n\r\n # List of tool classes associated with this toolbox\r\n self.tools = [OVLtoFeature, BatchOVLtoFeature]", "def ops():\n\tret = open(os.path.join(SERVER_DIR, 'ops.txt')).read().strip().split('\\n')\n\tret = [unicode(name.lower()) for name in ret]\n\treturn ret", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n 
logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def loadOlcCommands(self, player):\n player.addCommand('newzone', self.commands['newzone']())\n player.addCommand('delzone', self.commands['delzone']())\n player.addCommand('listzone', self.commands['listzone']())\n player.addCommand('newroom', self.commands['newroom']())\n player.addCommand('redit', self.commands['redit']())\n player.addCommand('delroom', self.commands['delroom']())\n player.addCommand('newportal', self.commands['newportal']())\n player.addCommand('delportal', self.commands['delportal']())\n player.addCommand('zedit', self.commands['zedit']())\n player.addCommand('pedit', self.commands['pedit']())\n player.addCommand('newtemplate', self.commands['newtemplate']())", "def loadStdCommands(self, player):\n player.addCommand('spawn', self.commands['spawn']())\n player.addCommand('edit', self.commands['edit']())\n player.addCommand('search', self.commands['search']())\n player.addCommand('warp', self.commands['warp']())\n player.addCommand('addstat', self.commands['addstat']())\n player.addCommand('delstat', self.commands['delstat']())\n player.addCommand('savezone', self.commands['savezone']())\n player.addCommand('obliterate', self.commands['obliterate']())", "def __init_builtins(self) -> None:\n\n self.add_token(BuiltinTypeSymbol(\"INTEGER\"))\n self.add_token(BuiltinTypeSymbol(\"REAL\"))\n self.add_token(BuiltinTypeSymbol(\"STRING\"))\n self.add_token(BuiltinTypeSymbol(\"BOOLEAN\"))", "def test_operation_mode_expressions(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int m = 4\\nMZgate(0, 1) | [m*2, -1+m]\"\n )\n assert bb.operations == [{'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [8, 3]}]", "def commands():", "def main():\n\tcli = Cli()\n\tcli.run()", "def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)", "def main():\n\n BASIC.run(PROGRAM)", "def main(args):\n if isinstance(args, list):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(f\"Starting ({name}) operation...\")\n answer = reduction(args.operands)\n print(f\"{answer}\")\n _logger.info(f\"End climath ({name}).\")", "def cli():\r\n pass", "def declare_operators(*op_list):\n operators.update({op.__name__:op for op in op_list})\n return operators", "def a_action(option,opt_str,value,parser):\n auto_import_commands()", "def __init__(self, orbital_operators, orbital_labels, op_type, prefactor=1.0):\n\n self.orbital_operators = np.array(orbital_operators, dtype=str)\n self.orbital_labels = np.array(orbital_labels, dtype=int)\n self.op_type = op_type\n\n if len(self.orbital_operators) != len(self.orbital_labels):\n ValueError('The number of orbital operators and labels is inconsistent for the OperatorString: {} {}'.format(len(self.orbital_operators), len(self.orbital_labels)))\n\n self.prefactor = prefactor\n\n # Stored for use in computing commutators.\n # A dictionary of the labels to their index in the operator string.\n self._indices_orbital_labels = dict()\n for ind_orbital in range(len(self.orbital_labels)):\n 
self._indices_orbital_labels[self.orbital_labels[ind_orbital]] = ind_orbital\n \n # Compute the prefactor automatically if a Majorana operator.\n if self.op_type == 'Majorana':\n # Stored for use in computing commutators.\n # The labels of orbital operators that are 'A' or 'B'.\n self._labels_ab_operators = np.array([self.orbital_labels[ind] for ind in range(len(self.orbital_labels)) if self.orbital_operators[ind] in ['A', 'B']], dtype=int)\n num_ab = len(self._labels_ab_operators)\n\n # The prefactor is 1 or 1j, depending\n # on whether reversing the order of operators creates\n # a +1 or -1 sign due to anti-commutation operators.\n num_swaps_to_reorder = (num_ab*(num_ab-1))/2\n if num_swaps_to_reorder % 2 == 1:\n self.prefactor = 1j\n\n if (self.op_type == 'Pauli' and self.prefactor != 1) \\\n or (self.op_type == 'Majorana' and self.prefactor not in [1, 1j]) \\\n or (self.op_type == 'Fermion' and self.prefactor not in [1, 1j]):\n raise ValueError('Invalid prefactor {} for operator string of op_type {}'.format(self.prefactor, self.op_type))\n \n name_list = [str(self.prefactor),' ']\n for (op, la) in zip(self.orbital_operators, self.orbital_labels):\n name_list.extend([op, ' ', str(la), ' '])\n\n self.name = ''.join(name_list)", "def __init__(self):\n\n self._options = docopt(__doc__, version=__version__)\n self._arguments = {\n k: v for k, v in self._options.items()\n if not isinstance(v, bool)\n }\n\n commands_json = json.loads(read_file(COMMANDS_JSON))\n command = list(filter(lambda x: self._is_command(x[\"Conditions\"]), commands_json))[0]\n\n getattr(\n import_module(\"qasm.commands.{0}\".format(command[\"Module Identifier\"])),\n command[\"Class Identifier\"]\n )(self._arguments).run()", "def load(opts, args):\n\n t_lines = codecs.open(args[0], encoding=\"utf-8\").readlines()\n\n data = []\n set_name = \"-\"\n function = None\n\n for line in t_lines:\n l = line.strip()\n if l.startswith(\"#\"): # comment line\n pass\n elif l.startswith(\"%\"): # set name\n set_name = l[1:]\n elif l == \"\" and not function is None: # end of function description\n data.append(function.__dict__)\n function = None\n elif l == \"\" and function is None: # first empty line\n pass\n elif function is None: # function name\n function = Function(l, set_name)\n else: # function attribute\n function.process_attribute(l)\n\n with codecs.open(args[1], \"w\", encoding=\"utf-8\") as f:\n f.write(unicode(json.dumps(data, ensure_ascii=False)))", "def add_standard_builtins(engine, b=None, s=None, sp=None):\n\n # SPECIAL CASES NEED TO BE IN ORDER\n engine.add_builtin('true', 0, b(_builtin_true)) # -1\n engine.add_builtin('fail', 0, b(_builtin_fail)) # -2\n engine.add_builtin('false', 0, b(_builtin_fail)) # -3\n\n engine.add_builtin('=', 2, s(_builtin_eq)) # -4\n engine.add_builtin('\\=', 2, b(_builtin_neq)) # -5\n\n engine.add_builtin('findall', 3, sp(_builtin_findall)) # -6\n engine.add_builtin('all', 3, sp(_builtin_all)) # -7\n engine.add_builtin('all_or_none', 3, sp(_builtin_all_or_none)) # -8\n\n engine.add_builtin('==', 2, b(_builtin_same))\n engine.add_builtin('\\==', 2, b(_builtin_notsame))\n\n engine.add_builtin('is', 2, s(_builtin_is))\n\n engine.add_builtin('>', 2, b(_builtin_gt))\n engine.add_builtin('<', 2, b(_builtin_lt))\n engine.add_builtin('=<', 2, b(_builtin_le))\n engine.add_builtin('>=', 2, b(_builtin_ge))\n engine.add_builtin('=\\=', 2, b(_builtin_val_neq))\n engine.add_builtin('=:=', 2, b(_builtin_val_eq))\n\n engine.add_builtin('var', 1, b(_builtin_var))\n engine.add_builtin('atom', 1, 
b(_builtin_atom))\n engine.add_builtin('atomic', 1, b(_builtin_atomic))\n engine.add_builtin('compound', 1, b(_builtin_compound))\n engine.add_builtin('float', 1, b(_builtin_float))\n engine.add_builtin('rational', 1, b(_builtin_rational))\n engine.add_builtin('integer', 1, b(_builtin_integer))\n engine.add_builtin('nonvar', 1, b(_builtin_nonvar))\n engine.add_builtin('number', 1, b(_builtin_number))\n engine.add_builtin('simple', 1, b(_builtin_simple))\n engine.add_builtin('callable', 1, b(_builtin_callable))\n engine.add_builtin('dbreference', 1, b(_builtin_dbreference))\n engine.add_builtin('primitive', 1, b(_builtin_primitive))\n engine.add_builtin('ground', 1, b(_builtin_ground))\n engine.add_builtin('is_list', 1, b(_builtin_is_list))\n\n engine.add_builtin('=..', 2, s(_builtin_split_call))\n engine.add_builtin('arg', 3, s(_builtin_arg))\n engine.add_builtin('functor', 3, s(_builtin_functor))\n\n engine.add_builtin('@>', 2, b(_builtin_struct_gt))\n engine.add_builtin('@<', 2, b(_builtin_struct_lt))\n engine.add_builtin('@>=', 2, b(_builtin_struct_ge))\n engine.add_builtin('@=<', 2, b(_builtin_struct_le))\n engine.add_builtin('compare', 3, s(_builtin_compare))\n\n engine.add_builtin('length', 2, s(_builtin_length))\n # engine.add_builtin('call_external', 2, s(_builtin_call_external))\n\n engine.add_builtin('sort', 2, s(_builtin_sort))\n engine.add_builtin('between', 3, s(_builtin_between))\n engine.add_builtin('succ', 2, s(_builtin_succ))\n engine.add_builtin('plus', 3, s(_builtin_plus))\n\n engine.add_builtin('consult', 1, b(_builtin_consult))\n engine.add_builtin('.', 2, b(_builtin_consult_as_list))\n # engine.add_builtin('load_external', 1, b(_builtin_load_external))\n engine.add_builtin('unknown', 1, b(_builtin_unknown))\n\n engine.add_builtin('use_module', 1, b(_builtin_use_module))\n engine.add_builtin('use_module', 2, b(_builtin_use_module2))\n engine.add_builtin('module', 2, b(_builtin_module))\n\n engine.add_builtin('once', 1, _builtin_call)\n engine.add_builtin('call', 1, _builtin_call)\n engine.add_builtin('call_nc', 1, _builtin_call_nc)\n engine.add_builtin('try_call', 1, _builtin_try_call)\n for i in range(2, 10):\n engine.add_builtin('call', i, _builtin_calln)\n engine.add_builtin('call_nc', i, _builtin_calln_nc)\n engine.add_builtin('try_call', i, _builtin_try_calln)\n\n engine.add_builtin('subquery', 2, s(_builtin_subquery))\n engine.add_builtin('subquery', 3, s(_builtin_subquery))\n\n engine.add_builtin('sample_uniform1', 3, sp(_builtin_sample_uniform))\n\n for i in range(1, 10):\n engine.add_builtin('debugprint', i, b(_builtin_debugprint))\n\n for i in range(1, 10):\n engine.add_builtin('write', i, b(_builtin_write))\n\n for i in range(1, 10):\n engine.add_builtin('writenl', i, b(_builtin_writenl))\n engine.add_builtin('writeln', i, b(_builtin_writenl))\n\n for i in range(1, 10):\n engine.add_builtin('error', i, b(_builtin_error))\n\n engine.add_builtin('nl', 0, b(_builtin_nl))\n engine.add_builtin('cmd_args', 1, s(_builtin_cmdargs))\n engine.add_builtin('atom_number', 2, s(_builtin_atom_number))\n engine.add_builtin('nocache', 2, b(_builtin_nocache))\n\n engine.add_builtin('numbervars', 2, s(_builtin_numbervars_0))\n engine.add_builtin('numbervars', 3, s(_builtin_numbervars))\n engine.add_builtin('varnumbers', 2, s(_builtin_varnumbers))\n\n engine.add_builtin('subsumes_term', 2, b(_builtin_subsumes_term))\n engine.add_builtin('subsumes_chk', 2, b(_builtin_subsumes_term))\n\n engine.add_builtin('possible', 1, s(_builtin_possible))\n engine.add_builtin('clause', 2, 
s(_builtin_clause))\n engine.add_builtin('clause', 3, s(_builtin_clause3))\n\n engine.add_builtin('create_scope', 2, s(_builtin_create_scope))\n\n engine.add_builtin('subquery_in_scope', 3, s(_builtin_subquery_in_scope))\n engine.add_builtin('subquery_in_scope', 4, s(_builtin_subquery_in_scope))\n\n engine.add_builtin('call_in_scope', 2, _builtin_call_in_scope)\n for i in range(2, 10):\n engine.add_builtin('call_in_scope', i + 1, _builtin_calln_in_scope)\n\n engine.add_builtin('find_scope', 2, s(_builtin_find_scope))\n\n builtin.add_builtins(engine, b, s, sp)", "def __init__(self):\r\n\t\tself.label = \"Toolbox\"\r\n\t\tself.alias = \"\"\r\n\r\n\t\t# List of tool classes associated with this toolbox\r\n\t\tself.tools = [LinkedDataSpatialQuery, LinkedDataPropertyEnrich, MergeBatchNoFunctionalProperty, MergeSingleNoFunctionalProperty, LocationPropertyPath, RelFinder]", "def __init__(self):\n self.label = \"Toolbox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [FilesWithin, UpdateAiracInfo, CalculatePolygonRotationUTM33, CalculatePolygonRotationLCC10E, SetLayoutsNorAirac, SetLayoutsSweAirac, SetLayoutsFinDnkAirac, Export330charts]", "def __init__(self):\r\n\t\tself.label = \"smart_move\"\r\n\t\tself.alias = \"Smart Move Operations\"\r\n\r\n\t\t# List of tool classes associated with this toolbox\r\n\t\tself.tools = [SmartMoveFeatures]", "def airflow_commands():\n pass", "def _init_builtins(self):\n for k, rexp in self.expressions.items():\n func = getattr(self, \"%s_processor\"%k)()\n yield (rexp, [func] + self._extra_rules.get(k, []))", "def import_ruleset(command):\n namespace = app.main(command)\n assert namespace.command == 'ir' or namespace.command == \"importruleset\"\n assert namespace.path == \"test\"", "def cli():\n pass", "def __init__(self):\n self.label = \"Surface Generation\"\n self.alias = \"far77\"\n\n # List of tool classes associated with this toolbox\n self.tools = [LineToFar77]", "def __init__(self, filename):\n\n self.filename = filename\n self.arithmetic_two_operands = ['add', 'sub', 'and', 'or']\n self.arithmetic_one_operands = ['neg', 'not']\n self.arithmetic_booleans = ['eq', 'gt', 'lt']", "def fslmaths(source1, target, operator=\"bin\", source2=None):\n if source2 is None:\n cmd = \"fslmaths {} -{} {} \".format(source1, operator, target)\n else:\n cmd = \"fslmaths {} -{} {} {}\".format(source1, operator, source2, target)\n\n result = util.launchCommand(cmd)\n #Spm do not support .gz format, so uncompress nifty file\n util.gunzip(\"{}.gz\".format(target))\n return result", "def builtin(self) :\n\t\ttry :\n\t\t\treturn self._builtin\n\t\texcept Exception as e:\n\t\t\traise e", "def load_commands():\n return [AddBook, FindBook, FindBooks, EditBook, RemoveBook, ReviewBook]", "def cli() -> object:\n parser = argparse.ArgumentParser(description=\"Expression Compiler\")\n parser.add_argument(\"sourcefile\", type=argparse.FileType('r'),\n help=\"Source program text\")\n parser.add_argument(\"outfile\", type=argparse.FileType('w'),\n nargs=\"?\", default=sys.stdout,\n help=\"Output file for assembly code\")\n args = parser.parse_args()\n return args", "def run_cli(cli_config: config.MaukaConfig):\n zmq_context = zmq.Context()\n # noinspection PyUnresolvedReferences\n # pylint: disable=E1101\n zmq_request_socket = zmq_context.socket(zmq.REQ)\n zmq_request_socket.connect(cli_config.get(\"zmq.mauka.plugin.management.req.interface\"))\n prompt = \"opq-mauka> \"\n\n try:\n zmq_request_socket.send_string(\"completions\")\n completions = 
zmq_request_socket.recv_string()\n vocabulary = set(completions.split(\",\"))\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(make_completer(vocabulary))\n while True:\n cmd = input(prompt).strip()\n\n if cmd == \"exit\":\n logger.info(\"Exiting mauka-cli\")\n sys.exit(0)\n\n if cmd == \"completions\":\n zmq_request_socket.send_string(\"completions\")\n completions = zmq_request_socket.recv_string()\n vocabulary = set(completions.split(\",\"))\n readline.set_completer(make_completer(vocabulary))\n logger.debug(ok(\"Completions updated\"))\n continue\n\n zmq_request_socket.send_string(cmd.strip())\n logger.debug(zmq_request_socket.recv_string())\n except (EOFError, KeyboardInterrupt):\n logger.info(\"Exiting mauka-cli\")\n sys.exit(0)", "def operands(app):\n return cdr(app)", "def test_include_program(self, parse_input, tmpdir):\n program = textwrap.dedent(\n \"\"\"\n name CustomOperation\n version 0.0\n float alpha = 0.3423\n Coherent(alpha, sqrt(pi)) | 0\n MeasureFock() | 0\n \"\"\"\n )\n\n filename = tmpdir.join(\"test.xbb\")\n\n with open(filename, \"w\") as f:\n f.write(program)\n\n test_include = textwrap.dedent(\n \"\"\"\n name test_include\n version 0.0\n include \"{}\"\n CustomOperation | 1\n \"\"\"\n ).format(filename)\n\n bb = parse_input(test_include, cwd=tmpdir)\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [1]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [1]},\n ]\n\n assert bb.operations == expected", "def main():\n model = Calculator()", "def uCSIsMathematicalOperators(code):\n ret = libxml2mod.xmlUCSIsMathematicalOperators(code)\n return ret", "def FbcToCobraConverter_init():\n return _libsbml.FbcToCobraConverter_init()", "def main():\n results_dir = 'results/macros/cube/'\n filename = glob.glob(results_dir+'macro-results.pickle')[-1]\n with open(filename, 'rb') as file:\n search_results = pickle.load(file)\n best_n = search_results[-1]\n best_n = [(score, [a[0] for a in macro]) for score, macro in best_n]\n\n n_macros = len(expert.macros)\n\n clean_macros = []\n for _, macro in best_n:\n if macro != [] and ' '.join(macro) not in primitive_actions:\n clean_macros.append(macro)\n clean_macros = clean_macros[-n_macros:]\n\n #%% Save the results\n os.makedirs('results/macros/cube', exist_ok=True)\n with open('results/macros/cube/clean_macros.pickle', 'wb') as file:\n pickle.dump(clean_macros, file)", "def install_syntax_functions(self):\n self.syntax_functions[':head'] = head_prediction_generator\n self.syntax_functions[':optional'] = optional_prediction_generator\n self.syntax_functions[':sequence'] = sequence_prediction_generator\n self.syntax_functions[':any'] = any_prediction_generator", "def __init__(self):\n self.label = \"PFRR Tools\"\n self.alias = \"PFRR Tools\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Ending_Point, Range_Distance, PFRR]", "def cli():\n\n pass", "def RunOperator(op_def):\n RunOperatorCC(_stringify_proto(op_def))", "def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = 
parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass" ]
[ "0.5800942", "0.54121894", "0.5350327", "0.52697974", "0.5171791", "0.5129815", "0.511153", "0.5089675", "0.5077119", "0.50660294", "0.50468045", "0.5024355", "0.5013382", "0.5007174", "0.5001452", "0.49958974", "0.49821275", "0.497245", "0.49616897", "0.49017334", "0.4900833", "0.48955673", "0.48875812", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48843795", "0.48713744", "0.48675248", "0.48671174", "0.4856473", "0.48522592", "0.4848732", "0.48357466", "0.48291165", "0.48111618", "0.4810476", "0.48076922", "0.4801472", "0.4799016", "0.4792926", "0.47794354", "0.47751093", "0.4769218", "0.47585234", "0.47519514", "0.47417375", "0.47321117", "0.47215065", "0.47200426", "0.47173253", "0.47119236", "0.470476", "0.46981227", "0.46971288", "0.46917427", "0.46910715", "0.4687722", "0.4685183", "0.46792263", "0.4675985", "0.4673505", "0.466675", "0.4664462", "0.46626806", "0.46606472", "0.4657242", "0.46561304", "0.46524826", "0.46524826", "0.46524826", "0.46524826", "0.46524826", "0.46524826", "0.46524826", "0.46524826" ]
0.7239035
0
Return the query list as a DataFrame.
def get_query_list(): prov_list = QueryProvider.list_data_environments() print("Generating documentation for for the following providers") print(", ".join(list(PROVIDERS))) print("Skipping the following providers") print(", ".join(list(set(prov_list) - set(PROVIDERS)))) env_providers = {prov: QueryProvider(prov) for prov in tqdm.tqdm(PROVIDERS)} query_series = [] for env, env_queries in env_providers.items(): query_names = env_queries.list_queries() for query_name in query_names: q_group, q_name = query_name.split(".") qry = env_queries.query_store.get_query(q_group + "." + q_name) if "table" in qry.default_params: q_table = qry.default_params["table"].get("default", "na").split()[0] elif "table" in qry.required_params: q_table = qry.required_params["table"].get("default", "na").split()[0] else: q_table = "-" q_dict = { "Environment": env, "QueryGroup": q_group, "Query": q_name, "Description": qry.description, "Req-Params": ", ".join( sorted( [ f"{param} ({p_data.get('type')})" for param, p_data in qry.required_params.items() ] ) ), # "OtherParams": ", ".join([f"{param}" for param in qry.default_params]), "Table": q_table, } query_series.append(pd.Series(q_dict)) print() return pd.DataFrame(query_series).sort_values( ["Environment", "QueryGroup", "Query"] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_to_df(query):\n df = pd.DataFrame(query.all())\n df.columns = [x['name'] for x in query.column_descriptions]\n return df", "def query2df(query):\n df = pd.DataFrame(data = list(itertools.product([0, 1], repeat=len(query.variables))), columns=query.variables)\n df['p'] = query.values.flatten()\n return df", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append = []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n ret_df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return ret_df", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def pd(self, *args, **kwargs):\n return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)", "def to_DataFrame(cls, qs):\n dates = [pd.to_datetime(x[0]) for x in qs.values_list('date')]\n data = qs.values('open', 'close', 'high', 'low', 'volume')\n df = pd.DataFrame.from_records(data, index=dates)\n return df", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def do_query(self) -> pd.DataFrame:\n if self.resultSize > self.step:\n query: str = self.query + f\" LIMIT {self.step}\"\n return pd.concat(\n [self.get_sparql_dataframe(query + f\" OFFSET {value}\", f\"{value} sur {self.resultSize}\") for value in\n range(0, self.resultSize, self.step)])\n return self.get_sparql_dataframe(self.query)", "def to_df(query, cols=None):\n # Try to get column names\n if cols is None:\n cols = [x['name'] for x in query.column_descriptions]\n data = [{k: v for k, v in zip(cols, x)} for x in query]\n if len(data) == 0:\n return pd.DataFrame()\n return pd.DataFrame(data).loc[:, cols]", "def sql(q, database_url):\r\n output, cur_description = Q(q, database_url, out=True, description=True)\r\n # print(cur_description)\r\n cols = [i[0] for i in cur_description]\r\n return pd.DataFrame(output, columns=cols)", "def get_sparql_dataframe(service, query):\n sparql = SPARQLWrapper(service)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)", "def qset_to_df(qset, datatype='object'):\n df = pd.DataFrame(list(qset.values()), dtype=datatype)\n return df", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n 
cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df", "def multi_query(db, queries):\n return pd.concat((query_to_df(db, q) for q in queries), ignore_index=True)", "def get_frame_from_query(the_query, colnames):\n df = DataFrame.from_records(list(the_query), columns=colnames)\n return df", "def get_sparql_dataframe(query, service = \"https://query.wikidata.org/sparql\"):\n sparql = SPARQLWrapper(service)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def query(self, query, **params):\n chunksize = params.pop(\"chunksize\", 100000)\n to_pandas = params.pop(\"to_pandas\", True)\n with self._cursor() as cursor:\n params = {k: v for k, v in params.items() if k in getargs(cursor.execute).args}\n cursor.execute(query, **params)\n fields = [i[0] for i in cursor.description]\n res = []\n while True:\n result = cursor.fetchmany(chunksize)\n if not result:\n break\n res.append(Frame(result))\n frame = rbind(res, bynames=False)\n if frame.shape == (0, 0):\n frame = Frame({n: [] for n in fields})\n else:\n frame.names = fields\n if to_pandas:\n frame = frame.to_pandas()\n return frame", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def get_dataframe(q):\n cnx = create_engine(postgres_str)\n query = q\n return pd.read_sql_query(query, cnx)", "def as_dataframe(self, keys=None) -> pd.DataFrame:\n lst = self.as_list(keys)\n\n df = pd.DataFrame(lst)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def get_query_result_to_df(self, query):\r\n try:\r\n return pd.read_sql_query(query, self.conn)\r\n except pd.pandas.io.sql.DatabaseError:\r\n print('Execution failed. 
Database error')", "def sql_return_df(query, params, date_cols):\n conn = sqlite3.connect(db_filepath)\n df = pd.read_sql(query, conn, params=params, parse_dates=date_cols)\n conn.close()\n return df", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def to_df(self):\r\n return pd.DataFrame([dict(self)])", "def query(self, sql):\n df = pd.read_sql(sql, self.conn)\n return df", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def df(self) -> \"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n return pandas.DataFrame(\n index=list(self.keys()),\n data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n },\n )", "def dataframe(self):\n dictionary = OrderedDict(zip(self.keys, [[value] for value in self.values]))\n dataframe = pd.DataFrame(dictionary)\n return dataframe", "def get_dataframe(self, params=None, chunksize=None):\n if chunksize:\n raise NotImplementedError(\"Buffered reading not supported yet\")\n # the resulting `rows` of a query provides a nice way to do this, though\n\n query = self.config[\"query\"]\n params = params or {}\n\n logger.debug(\n \"Fetching query {} with params {}...\".format(\n query, params\n )\n )\n rows = self.db.query(query, fetchall=True, **params)\n df = rows.export(\"df\")\n\n return df", "def run_query(query):\n db.query(query)\n dbResult = db.store_result()\n dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n df = pd.DataFrame.from_records(dbFetched)\n return df", "def create_query_df(self):\n\n # display output message for timeframe\n print(\n f'{Fore.GREEN}\\nQuerying database for tags between the timeframe: '\n f'{Fore.LIGHTGREEN_EX}{str(self._start)}{Fore.GREEN} and {Fore.LIGHTGREEN_EX}{str(self._end)}'\n f'{Style.RESET_ALL}')\n print(\n f'{Fore.GREEN}\\nTIMESPAN: '\n f'{Fore.LIGHTGREEN_EX}{self.time_span} hours'\n f'{Style.RESET_ALL}')\n\n engine = get_db_engine()\n offset = 0\n chunk_size = 100000\n\n dfs = []\n while True:\n sa_select = sa.select(\n [self.data_table],\n whereclause=sa.and_(\n self.data_table.c._TIMESTAMP > '{}'.format(self._start),\n self.data_table.c._TIMESTAMP <= '{}'.format(self._end)),\n limit=chunk_size,\n offset=offset,\n order_by=self.data_table.c._NUMERICID\n )\n dfs.append(pd.read_sql(sa_select, engine))\n offset += chunk_size\n if len(dfs[-1]) < chunk_size:\n break\n\n self.query_df = pd.concat(dfs)", "def get_dataframe(self):\n # Using a list here appears faster than using a generator expression\n df = pd.DataFrame.from_records(\n [{'event_id' : x.event_id,\n 'time_delta' : x.time_delta,\n 'src_id' : x.src_id,\n 't' : x.cur_time,\n 'sink_id' : y}\n for x in self.events\n for y in x.sink_ids]\n )\n return df", "def to_dataframe(self):\n return 
df_util.to_dataframe(requests.get(self.__url).json())", "def _list_to_df(self, data):\n indices = pd.tseries.index.DatetimeIndex(\n [data[x]['time'] for x in range(0, len(data))])\n outData = pd.DataFrame(data, index=indices)\n outData.columns = ['Ask_close', 'Bid_close', 'complete',\n 'Ask_high', 'Bid_high', 'Ask_low',\n 'Bid_low', 'Ask_open', 'Bid_open',\n 'time', 'volume']\n return outData", "def db_to_df(query):\n conn = loader.database._connection\n return sql.read_frame(query, conn)", "def get_sparql_dataframe(self, query: str, text: str = \"\") -> pd.DataFrame:\n\n if self.verbose:\n print(tm.strftime(f\"[%H:%M:%S] Transmission {text} en cours...\"), end='')\n\n self.sparql.setQuery(query)\n\n processed_results: Wrapper.QueryResult = self.sparql.query()\n\n # We will check if the results are incomplete due to server limitations\n if 'x-sparql-maxrows' in processed_results.info():\n max_size: int = int(processed_results.info()['x-sparql-maxrows'])\n warnings.warn(f\"Warning: The server has limited the number of rows to {max_size}: result incomplete.\")\n\n if 'x-sql-state' in processed_results.info():\n warnings.warn(\"Warning: The server has limited the time of queries: partial result for a timed out query\")\n\n processed_results: dict = processed_results.convert()\n\n if self.verbose:\n print(tm.strftime(f\"\\r[%H:%M:%S] Transmission {text} réussi, conversion en Data Frame...\"), end='')\n\n cols: list[str] = processed_results['head']['vars']\n\n out: list[list[str]] = [[row.get(c, {}).get('value') for c in cols] for row in\n processed_results['results']['bindings']]\n\n if self.verbose:\n print(tm.strftime(f\" Effectué\"))\n\n return pd.DataFrame(out, columns=cols)", "def get_ts_df(self):\n df = pd.DataFrame(self.ts_list)\n df.columns = self.col_names\n df.sort_values(by=self.col_names[0], inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return df", "def query_api_df(query_params: APIQueryParams,\n timeout: Optional[float] = None,\n headers: Optional[Dict[str, str]] = None) -> pandas.DataFrame:\n if timeout is None:\n timeout = api_configuration['timeout']\n\n json_data = _submit_post_request(\n json_dict=dict(token=get_api_token(), query=query_params.to_api_struct()),\n headers=headers,\n timeout=timeout)\n\n df_ = pandas.DataFrame(json_data['data'])\n df_.columns = [c.upper() for c in df_.columns]\n\n return df_", "def create_dataframe(result):\n # List of elements in the search result\n names = []\n snippet = []\n url = []\n \n # Append search results to list\n for j,item in enumerate(result):\n for i,element in enumerate(result[j]['items']):\n names.append(result[j]['items'][i]['title'])\n snippet.append(result[j]['items'][i]['snippet'])\n url.append(result[j]['items'][i]['link'])\n \n # Create a dataframe\n df = pd.DataFrame(list(zip(names, snippet,url)), \n columns =['name', 'snippet','url']) \n \n return df", "def data_frame(records: list) -> pandas.DataFrame:\n return pandas.DataFrame(records, columns=lciafmt_cols)", "def as_named_DataFrame (self):\n if self.sql == \"\":\n return DataFrame([])\n flag, values = self.parse_sql()\n try:\n if flag == 'EXPLICIT':\n return DataFrame(self.table, columns = values)\n elif flag == 'IMPLICIT':\n schema = \"'\" + values[0] + \"'\"\n table = \"'\" + values[1] + \"'\"\n return DataFrame(self.table,columns=self.get_headers(table,schema))\n else:\n return self.as_DataFrame()\n except AssertionError:\n return self.as_DataFrame()", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = 
[row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())", "def create_dataframe(self):\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists", "def to_dataframe(self, attrs_as_columns=False):\n\n # Set up empty dict for dataframe\n ds = {}\n\n # Add every key containing a list into the dict\n keys = [k for k in self.dict.keys()]\n for key in keys:\n if isinstance(self.dict[key], list):\n ds[key] = self.dict[key]\n else:\n if attrs_as_columns:\n ds[key] = self.dict[key]\n\n # Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n # Return dataset\n return ds", "def as_DataFrame (self):\n return DataFrame(self.table)", "def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:\n data = client.read_sparql(query)\n df = None\n if \"results\" in data and \"bindings\" in data[\"results\"]:\n df = pd.DataFrame(data[\"results\"][\"bindings\"])\n df.applymap(lambda x: x[\"value\"])\n else:\n df = pd.DataFrame(data)\n\n return df", "def GetDataFrame(self, q_string, var_tup=None):\n def map_to_dict( results, field_names):\n res_dict = {}\n for fn in field_names:\n res_dict[fn] = []\n for res in results:\n for fn, f in zip(field_names, res):\n res_dict[fn].append(f)\n return res_dict\n def map_to_df( results, field_names):\n return pandas.DataFrame.from_dict(map_to_dict( results, field_names ))\n cursor = self.GetCursor()\n l_logger.debug(\"Query: %s, %r\" % (q_string,var_tup))\n cursor.execute(q_string,var_tup)\n results = cursor.fetchall()\n field_names = [i[0] for i in cursor.description]\n if len(results) == 0:\n return None\n else:\n return map_to_df( results, field_names )", "def data_frame_creator(self):\n\n return pd.DataFrame()", "def to_df(self) -> pd.DataFrame:\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]", "def df(self):\n data = {\"sites\": self.sites, \"values\": self.values,\n \"stdeviations\": self.stdeviations}\n return pd.DataFrame(data, columns=[\"sites\", \"values\", \"stdeviations\"])", "def get_data(self):\n\n return pd.read_sql_query(\"Select * from {table}\".format(table=self.table_name), con=self.con)", "def result_df(self, regex=None) -> pd.DataFrame:\n if regex:\n # get one random item from dict, and get keys from this random (dict) item\n # FIXME: how to do this better? 
- this is not efficient...\n keys = self.result[next(iter(self.result))].keys()\n\n if type(regex) == str:\n comp_regexe = re.compile(regex)\n columns = list(filter(comp_regexe.search, keys))\n else:\n columns = list(filter(regex.search, keys))\n\n df = pd.DataFrame.from_dict(self.result, orient='index')\n return df[columns]\n else:\n return pd.DataFrame.from_dict(self.result, orient='index')", "def get(self):\n df = pd.DataFrame.from_dict(self.list_values[0], orient=\"index\").T\n\n for i in range(len(self.list_values)):\n df.loc[i] = list(self.list_values[i].values())\n\n return df", "def get_df(self):\n data = self.load_data()\n userID, itemID = self.get_user_and_item_ids(data)\n rating = data[:, 1]\n data_np = np.stack((userID, itemID, rating), axis=-1)\n df = pd.DataFrame(data_np)\n df.columns = [\"userID\", \"itemID\", \"rating\"]\n return df", "def to_df(self):\n from ..df import DataFrame\n\n return DataFrame(self.table).filter_parts(self)", "def dataframe(self, *args, **kwargs):\n\n from pandas import DataFrame\n\n # Just normal data, so use the iterator in this object.\n headers = next(islice(self, 0, 1))\n data = islice(self, 1, None)\n\n return DataFrame(list(data), columns=headers)", "def df(self) -> \"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n children = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n children.append(\n tuple(tuple(sorted(c.codes[0] for c in cs)) for cs in cat.children)\n )\n return pandas.DataFrame(\n index=self.keys(),\n data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n \"children\": children,\n },\n )", "def to_frame(self) -> pd.DataFrame:\n df = pd.DataFrame(data={\n 'Name': [p.name for p in self],\n 'Description': [p.desc for p in self],\n 'Value': [p.value for p in self],\n 'Hyper-Space': [p.hyper_space for p in self]\n }, columns=['Name', 'Description', 'Value', 'Hyper-Space'])\n return df", "def to_pandas_dataframe(self):\n pd_index = self.index().to_pandas_index()\n return pd.DataFrame.from_items(self.collect()).set_index(pd_index)", "def get_dataframe(self):\n self.logger.info('Fetching movie records...')\n session = connect()\n\n cols = [\n Movie.movie_id,\n Movie.title,\n Movie.start_year,\n Movie.genres,\n Movie.description,\n Movie.kind,\n ]\n\n filters = [\n Movie.description.isnot(None),\n Movie.genres.isnot(None),\n ]\n\n query = session.query(*cols).filter(*filters).order_by(Movie.start_year.desc())\n\n try:\n return pd.read_sql(query.statement, session.bind)\n finally:\n session.close()", "def df_from_table(query, carto_sql_client, index=None):\n resp = carto_sql_client.send(query)\n schema = transform_schema(resp['fields'])\n if index:\n return pd.DataFrame(resp['rows']).set_index('cartodb_id').astype(schema)\n else:\n return pd.DataFrame(resp['rows']).astype(schema)", "def get_documents_with_q(self, index, query=Q(), source=None, add_index_name = False):\n \n s = Search(using=self.es, index=index)\n if source:\n s = s.source(source)\n # Dotted fields, replace . 
by __\n q = s.query(query)\n #print(str(q.to_dict()).replace(\"'\",'\"'))\n results = s.query(query).scan()\n \n if add_index_name:\n all_dicts = []\n for hit in results:\n result_dict = hit.to_dict()\n result_dict['_index'] = hit.meta.index\n all_dicts.append(result_dict)\n \n fa = pd.DataFrame.from_dict(all_dicts)\n else:\n fa = pd.DataFrame([hit.to_dict() for hit in results])\n \n return fa", "def targets_to_dataframe(conn):\n return connect_database.get_table_into_pandas('target_info',conn)", "def as_DF(self):\n\n hc_df = pd.DataFrame(self.s, index=self.s_names)\n hc_df.columns.name = 'type'\n hc_df.index.name = 's'\n\n return hc_df", "def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)", "def as_df(self):\r\n return pd.DataFrame(self.vectors).set_index(self.words)", "def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def cursor_to_dataframe(cur):\n description = cur.description\n column_names = [item.name for item in description]\n data = cur.fetchall()\n df = pandas.DataFrame(data, columns=column_names)\n cur.close()\n return df", "def getDataFrame(self):\n return self.df", "def bc_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.bc_records(run_idxs))", "def extract_data():\n client = MongoClient(HOST, PORT)\n collection = client[DB][COLLECTION]\n df = pd.DataFrame(collection.find().limit(10))\n return df", "def _summary_dataframe(self):\n df = pd.DataFrame(columns=self[0].entry.axes)\n for i, entry in enumerate(self):\n if entry is not None:\n df.loc[i] = list(entry.entry)\n return df", "def to_dataframe(self, timeout_sec: int = DEFAULT_TIMEOUT_SEC) -> pd.DataFrame:\n records = [r for r in self.result(timeout_sec=timeout_sec)]\n return pd.DataFrame.from_records(records)", "def query_to_df(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n return psql.read_sql(sql, conn)", "def to_pandas_df(self):\n data = self._get_data(pd=True)\n return data", "def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n pd_df[name] = np.copy(self[name])\n return pd_df", "def run_analytics_queries(cur, conn):\n \n output = []\n\n for query in analytics_queries:\n cur.execute(query)\n records = cur.fetchall()\n column_names = list(map(lambda x: x[0], cur.description))\n output.append(pd.DataFrame(records, columns=column_names))\n \n for table in output:\n print(table, end='\\n\\n')", "def query_into_pandas(self, query, fields=None, parameters=None, names=None):\n target_url = self.build_query(query, fields=fields, parameters=parameters)\n\n col_id = 'columns'\n col_names = None\n if names is None:\n # If the columns of the query are specified (used for 'tab' or 'txt' value of\n # parameters['format'] only), then we use the same for the DataFrame\n if col_id in parameters:\n col_names = parameters[col_id].split(',')\n else:\n col_names = names\n\n db = pd.read_csv(\n target_url,\n delimiter=\"\\t\",\n skiprows=1,\n header=None,\n names=col_names\n )\n return db", "def query(self, **columns) -> pd.DataFrame:\n conditions = [\"{0} == '{1}'\".format(col, val) for col, val in columns.items()]\n 
results = self._table.query(\" & \".join(conditions))\n\n return results", "def to_df(self):\n # check read only\n if self.__read_only:\n raise IOError(\"Table is for read only.\")\n\n # convert data to dicts\n data = dict(record.to_id_dict()\n for record in self.__data.values())\n\n # make data frame\n df = pd.DataFrame(data).T\n df.index.name = \"_id\"\n return df", "def query_save_data_frame(self, query):\n self.recordset_df = pd.read_sql_query(query, self.con)\n return self", "def dataframe(self):\n df = pd.DataFrame({'x':self.x, 'y':self.y, 'd':self.d})\n\n if self.z is not None:\n for k, v in self.z.items():\n df[k] = v\n\n return df", "def get_task_df(self, all_data: bool = False) -> pd.DataFrame:\n df = pd.DataFrame([{\n 'task_id': k,\n 'task_name': v['task_name'],\n 'last_modified': v['last_modified'],\n 'task_params': v['task_params'],\n 'task_hash': v['task_hash'],\n 'task_log': v['task_log']\n } for k, v in self.tasks.items()])\n if all_data:\n return df\n return df[['task_id', 'task_name', 'last_modified', 'task_params']]", "def to_df(self):\n from ..df import DataFrame\n\n return DataFrame(self)", "def get_endpoints():\n cmd = \"SELECT * FROM endpoint_id_map;\"\n db_cursor.execute(cmd)\n record = db_cursor.fetchall()\n df_end = pd.DataFrame(record, columns=[\"row\",\"endpoint_name\",\"data_type\"])\n print(df_end)", "def create_dataframe(connection: sqlite3.Connection) -> pd.DataFrame:\n dataframe = pd.read_sql_query(f\"\"\"\n SELECT\n combined_jobs.id, combined_jobs.company, combined_jobs.link, combined_jobs.location,\n combined_jobs.date, combined_jobs.content, combined_jobs.title, location_cache.location,\n location_cache.latitude, location_cache.longitude\n FROM\n combined_jobs\n LEFT OUTER JOIN\n location_cache on (combined_jobs.location = location_cache.location)\"\"\",\n connection)\n print(dataframe)\n return dataframe", "def get_df(self):\n return pd.DataFrame(self.points)", "def __init__(self):\n BDLQuery.__init__(self)\n self.pandas_df = []", "def to_df(self):\n if self.shape > 1:\n range_str = [s for s in range(self.shape)]\n iterables = [self.columns, range_str]\n multiindex = pd.MultiIndex.from_product(iterables, names=['song', 'frame'])\n # multiindex = [i for i in itertools.product(self.columns, range_str, repeat=1)]\n df = pd.DataFrame(columns=multiindex, index=self.columns, dtype=np.float64)\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n for s in range_str:\n df.loc[c_1][c_2, s] = self.dict_[c_1][c_2][s]\n df = df.T\n else:\n df = pd.DataFrame(columns=self.columns + ['song'], dtype=np.float64)\n df['song'] = self.columns\n df = df.set_index('song')\n\n for c_1 in self.columns:\n for c_2 in self.columns:\n df.loc[c_1, c_2] = self.max_diff(c_1, c_2)\n\n return df", "def get(self, **kwargs) -> pd.DataFrame:\n\n addr = kwargs.pop(\"address\", [])\n prefix = kwargs.pop(\"prefix\", [])\n columns = kwargs.pop(\"columns\", [])\n ipvers = kwargs.pop(\"ipvers\", \"\")\n user_query = kwargs.pop(\"query_str\", \"\")\n\n if user_query:\n if user_query.startswith('\"') and user_query.endswith('\"'):\n user_query = user_query[1:-1]\n\n vrf = kwargs.pop(\"vrf\", \"\")\n\n addnl_fields = []\n fields = self.schema.get_display_fields(columns)\n self._add_active_to_fields(kwargs.get('view', self.iobj.view),\n fields, addnl_fields)\n\n if prefix:\n addr_types = self.addr_type(prefix)\n else:\n addr_types = self.addr_type(addr)\n\n # Always include ip or mac addresses in the dataframe\n # if there is a filter on them\n\n for x in ['ipAddressList', 'ip6AddressList', 
'macaddr']:\n if x not in fields:\n addnl_fields.append(x)\n\n user_query_cols = self._get_user_query_cols(user_query)\n addnl_fields += [x for x in user_query_cols if x not in addnl_fields]\n\n df = super().get(addnl_fields=addnl_fields, columns=fields,\n **kwargs)\n\n if df.empty:\n return df\n\n if 'master' in df.columns:\n df = df.rename({'master': 'vrf'}, axis=1) \\\n .replace({'vrf': {'': 'default'}})\n df.loc[(df.vrf == 'bridge') |\n ((df.ipAddressList.str.len() == 0)\n & (df.ip6AddressList.str.len() == 0)),\n 'vrf'] = ''\n\n query_str = build_query_str([], self.schema, vrf=vrf)\n\n addrcols = []\n if 4 in addr_types or ipvers in [\"v4\", \"\"]:\n # df = df.explode('ipAddressList').fillna({'ipAddressList': ''})\n addrcols.append('ipAddressList')\n\n if 6 in addr_types or ipvers in [\"v6\", \"\"]:\n # df = df.explode('ip6AddressList').fillna({'ip6AddressList': ''})\n addrcols.append('ip6AddressList')\n\n if ('ipAddress' in columns or (columns == ['*'])) and not ipvers:\n ndf = pd.DataFrame(df[addrcols].agg(\n self._merge_address_cols, axis=1),\n columns=['ipAddress'])\n df = pd.concat([df, ndf], axis=1)\n\n v4addr = []\n v6addr = []\n filter_prefix = ''\n\n # Address and prefix filtering are mutual exclusive\n if addr:\n macaddr = []\n for i, a in enumerate(addr):\n if addr_types[i] == 0:\n # convert the macaddr format to internal format\n a = convert_macaddr_format_to_colon(a)\n macaddr.append(a)\n elif addr_types[i] == 4:\n if '/' not in a:\n a += '/'\n v4addr.append(a)\n elif addr_types[i] == 6:\n if '/' not in a:\n a += '/'\n v6addr.append(a)\n\n # IMPORTANT: Don't mess with this order of query.\n # Some bug in pandas prevents it from working if\n # macaddr isn't first and your query\n # contains both a macaddr and an IP address.\n dfmac = dfv4 = dfv6 = pd.DataFrame()\n\n if macaddr:\n dfmac = df[df.macaddr.isin(macaddr)]\n\n if v4addr:\n dfv4 = df[df.ipAddressList.apply(\n lambda x, addrs: any(a.startswith(tuple(addrs))\n for a in x), args=(v4addr,))]\n if v6addr:\n dfv6 = df[df.ip6AddressList.apply(\n lambda x, addrs: any(a.startswith(tuple(addrs))\n for a in x), args=(v6addr,))]\n if v4addr or v6addr or macaddr:\n df = pd.concat([dfv4, dfv6, dfmac])\n elif prefix:\n for i, a in enumerate(prefix):\n if addr_types[i] == 4:\n v4addr.append(a)\n elif addr_types[i] == 6:\n v6addr.append(a)\n\n if v4addr:\n for a in v4addr:\n query_str += (f'{filter_prefix} '\n f'@self._is_in_subnet(ipAddressList,\"{a}\")')\n filter_prefix = 'or'\n if v6addr:\n for a in v6addr:\n query_str += (f'{filter_prefix} '\n f'@self._is_in_subnet(ip6AddressList,\"{a}\")')\n filter_prefix = 'or'\n\n if not query_str:\n if ipvers == \"v4\":\n query_str = 'ipAddressList.str.len() != 0'\n elif ipvers == \"v6\":\n query_str = 'ip6AddressList.str.len() != 0'\n elif ipvers == \"l2\":\n query_str = 'macaddr.str.len() != 0'\n\n if query_str:\n df = df.query(query_str)\n\n df = self._handle_user_query_str(df, user_query)\n return df.reset_index(drop=True)[fields]" ]
[ "0.7733995", "0.7497827", "0.73814756", "0.7326154", "0.7304706", "0.7227527", "0.7195444", "0.71918434", "0.71499586", "0.7113315", "0.70896244", "0.7081603", "0.70788634", "0.70317006", "0.70228714", "0.7020063", "0.7013619", "0.6978598", "0.6976442", "0.68564945", "0.68412757", "0.6794956", "0.6754321", "0.67391896", "0.67391896", "0.67374355", "0.6722536", "0.668966", "0.6687398", "0.66679794", "0.66584176", "0.66584176", "0.66584176", "0.66584176", "0.66584176", "0.6656672", "0.66371983", "0.6622078", "0.66157955", "0.6611857", "0.6605845", "0.65984845", "0.6593902", "0.6591007", "0.65905356", "0.65396523", "0.6536015", "0.65114284", "0.6507385", "0.65025485", "0.64787924", "0.64398915", "0.6427104", "0.64163834", "0.639974", "0.6380213", "0.6359909", "0.6353335", "0.6343662", "0.63418627", "0.6323812", "0.6319823", "0.6316751", "0.63166827", "0.6306835", "0.6306089", "0.6297889", "0.6287762", "0.6287752", "0.6281929", "0.6272974", "0.6265425", "0.62518775", "0.6244322", "0.6229239", "0.62142813", "0.62052184", "0.6203249", "0.619887", "0.61904675", "0.6190431", "0.6184727", "0.61777455", "0.61731267", "0.61534303", "0.6117147", "0.61138", "0.6110524", "0.6109234", "0.6109131", "0.60993534", "0.6098848", "0.60953367", "0.6094942", "0.60942465", "0.6093344", "0.6087814", "0.60819834", "0.6069921", "0.60696155" ]
0.6495787
50
Generate query list document.
def generate_document(query_df):  # sourcery skip: identity-comprehension
    doc_lines = [
        "Data Queries Reference",
        "=" * len("Data Queries Reference"),
        "",
        "",
    ]
    # This line fails if re-written as dict(query_df.groupby("Environment"))
    # pylint: disable=unnecessary-comprehension
    group_dict = {name: group for name, group in query_df.groupby("Environment")}
    for name, friendly_name in PROVIDERS.items():
        if name not in group_dict:
            continue
        grp = group_dict[name]
        doc_lines.append(f"Queries for {friendly_name}")
        doc_lines.append("-" * len(f"Queries for {friendly_name}"))
        doc_lines.append(f"\nData Environment identifier: {name}\n")
        tbl_txt = tabulate(
            grp.drop(columns="Environment"),
            headers="keys",
            showindex="never",
            tablefmt="rst",
        )
        tbl_txt = [line.strip() for line in tbl_txt.split("\n")]
        doc_lines.extend(tbl_txt)
        doc_lines.append("\n\n")
    return "\n".join(doc_lines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_query(self):\n return", "def get_query_list():\n prov_list = QueryProvider.list_data_environments()\n\n print(\"Generating documentation for for the following providers\")\n print(\", \".join(list(PROVIDERS)))\n print(\"Skipping the following providers\")\n print(\", \".join(list(set(prov_list) - set(PROVIDERS))))\n env_providers = {prov: QueryProvider(prov) for prov in tqdm.tqdm(PROVIDERS)}\n\n query_series = []\n for env, env_queries in env_providers.items():\n query_names = env_queries.list_queries()\n for query_name in query_names:\n q_group, q_name = query_name.split(\".\")\n qry = env_queries.query_store.get_query(q_group + \".\" + q_name)\n if \"table\" in qry.default_params:\n q_table = qry.default_params[\"table\"].get(\"default\", \"na\").split()[0]\n elif \"table\" in qry.required_params:\n q_table = qry.required_params[\"table\"].get(\"default\", \"na\").split()[0]\n else:\n q_table = \"-\"\n q_dict = {\n \"Environment\": env,\n \"QueryGroup\": q_group,\n \"Query\": q_name,\n \"Description\": qry.description,\n \"Req-Params\": \", \".join(\n sorted(\n [\n f\"{param} ({p_data.get('type')})\"\n for param, p_data in qry.required_params.items()\n ]\n )\n ),\n # \"OtherParams\": \", \".join([f\"{param}\" for param in qry.default_params]),\n \"Table\": q_table,\n }\n query_series.append(pd.Series(q_dict))\n print()\n return pd.DataFrame(query_series).sort_values(\n [\"Environment\", \"QueryGroup\", \"Query\"]\n )", "def fetch_document_contents(query, doc_list):\n\n output_str = 'Query [{0}] fetched {1} results:\\n'.format(query, len(doc_list))\n flag = False\n doc_list.sort(key=lambda x: x[1], reverse=True)\n contents = {}\n\n with open(DATA_FILE, \"r\") as file:\n for line in file:\n if re.match(\"<DOC .*\", line): # New Document starts\n doc_id = int(re.search(r'\\d+', line).group())\n contents[doc_id] = ''\n flag = True\n\n if flag:\n contents[doc_id] += line\n if re.match(\"</DOC.*\", line):\n flag = False\n\n file.close()\n\n for item in doc_list:\n output_str += '--------------------------------------------------------------\\n'\n output_str += 'Document= {0} (Score= {1})\\n'.format(item[0], item[1])\n output_str += contents[item[0]]\n\n return output_str", "def command_list(self, query):\n return query", "def getQueries(self):\n return sorted( self.qryDocs.keys() )", "def __generateQuery(self, query):\n if query == None:\n return [\"1=1\"]\n elif type(query) is not list:\n return [query]\n else:\n return query", "def _build_queries_and_headers(self):\n\n dict_list = []\n rendered_headers = []\n pattern = re.compile('[\\W_]+')\n\n headers = Counter([q.title.split(' - ')[0] for q in self.object_list])\n\n for q in self.object_list:\n model_dict = model_to_dict(q)\n header = q.title.split(' - ')[0]\n collapse_target = pattern.sub('', header)\n\n if headers[header] > 1 and header not in rendered_headers:\n dict_list.append({'title': header,\n 'is_header': True,\n 'is_in_category': False,\n 'collapse_target': collapse_target,\n 'count': headers[header]})\n rendered_headers.append(header)\n\n model_dict.update({'is_in_category': headers[header] > 1,\n 'collapse_target': collapse_target,\n 'created_at': q.created_at,\n 'is_header': False,\n 'run_count': q.run_count,\n 'created_by_user': six.text_type(q.created_by_user) if q.created_by_user else None})\n dict_list.append(model_dict)\n return dict_list", "def make_query(self):", "def action_gen():\n for n, doc in enumerate(cursor):\n # print fields\n did = doc.pop('_id')\n if doc == {}:\n print \"Empty document, 
skipping\"\n continue\n op_dict = {\n '_index': db.lower(),\n '_type': collection,\n '_id': int('0x' + str(did), 16),\n '_source': doc\n }\n #op_dict['doc'] = doc\n yield op_dict", "def buildReport(cls, queryList):\n boxList = list()\n for dslString,filterList in queryList:\n data = cls.__dataRequest(dslString[0])\n if data != '{}':\n for filter in filterList:\n try:\n if filter:\n filterObj = filter()\n filterObj.loadData(data)\n boxList.extend(filterObj.createBoxList())\n except Exception as e:\n devLogger.error(\"Could not create Filter object: \" + str(e))\n return boxList", "def list(self, request, *args, **kwargs):\n self._process_settings(request)\n query_name = None\n if len(args) == 1:\n query_name = args[0]\n # get query DSL from query container\n query_dsl = {}\n es_response_raw = req_session.get(\n '{}/{}/_{document_type}/_search'.format(\n settings.ELASTIC_SEARCH_HOST,\n '{site}__{app}'.format(site=settings.SITE, app=self.app),\n document_type=self.document_type),\n data=json.dumps(query_dsl))\n else:\n es_response_raw = req_session.get(\n '{}/{}/_{document_type}/_search'.format(\n settings.ELASTIC_SEARCH_HOST,\n '{site}__{app}'.format(site=settings.SITE, app=self.app),\n document_type=self.document_type))\n if es_response_raw.status_code != 200:\n exceptions.XimpiaAPIException(_(u'Could not search collection'))\n es_response = es_response_raw.json()\n logger.info(u'DocumentViewSet.list :: Performed search \"{query_name}\" '\n u'for document {document_type}'.format(\n query_name=query_name,\n document_type=self.document_type))\n # make output of logical documents from physical ones\n return Response(es_response['hits']['hits'])", "def obj_get_list(self, request=None, **kwargs):\n filter_object = self.get_filter_object(request)\n list = self.get_collection(request).find(filter_object)\n order_field, direction = self.get_order_field_and_direction(request)\n \n if (order_field is not None):\n list.sort(order_field, direction)\n \n return map(Document, list)", "def documents_dslquery(dsl_dict, **kwargs):\n return _dslquery('documents', dsl_dict, **kwargs)", "def build_docs():\n docs = []\n for base_id in range(DOCUMENTS_PER_LEVEL):\n d = jina_pb2.Document()\n d.granularity = 0\n d.adjacency = 0\n d.id = base_id\n docs.append(d)\n iterate_build(d, 0, 2, 0, 2)\n return docs", "def docs():", "def _generate_recommendation(self,\n query_analysis,\n db_name,\n collection_name):\n index_rec = '{'\n for query_field in query_analysis['analyzedFields']:\n if query_field['fieldType'] is EQUIV_TYPE:\n if len(index_rec) is not 1:\n index_rec += ', '\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\n for query_field in query_analysis['analyzedFields']:\n if query_field['fieldType'] is SORT_TYPE:\n if len(index_rec) is not 1:\n index_rec += ', '\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\n for query_field in query_analysis['analyzedFields']:\n if query_field['fieldType'] is RANGE_TYPE:\n if len(index_rec) is not 1:\n index_rec += ', '\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\n index_rec += '}'\n\n # RECOMMENDATION\n return OrderedDict([('index',index_rec),\n ('shellCommand', self.generate_shell_command(collection_name, index_rec))])", "def _generate_recommendation(self,\r\n query_analysis,\r\n db_name,\r\n collection_name):\r\n index_rec = '{'\r\n for query_field in query_analysis['analyzedFields']:\r\n if query_field['fieldType'] is EQUIV_TYPE:\r\n if len(index_rec) is not 1:\r\n index_rec += ', '\r\n index_rec += '\"' + query_field['fieldName'] + '\": 
1'\r\n for query_field in query_analysis['analyzedFields']:\r\n if query_field['fieldType'] is SORT_TYPE:\r\n if len(index_rec) is not 1:\r\n index_rec += ', '\r\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\r\n for query_field in query_analysis['analyzedFields']:\r\n if query_field['fieldType'] is RANGE_TYPE:\r\n if len(index_rec) is not 1:\r\n index_rec += ', '\r\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\r\n index_rec += '}'\r\n\r\n # RECOMMENDATION\r\n return OrderedDict([('index',index_rec),\r\n ('shellCommand', self.generate_shell_command(collection_name, index_rec))])", "def __iter__(self):\n for document in self.query:\n yield self._to_document(document)", "def build_listing(self, num, after, reverse, count):\r\n self.num = num\r\n self.count = count\r\n self.after = after\r\n self.reverse = reverse\r\n\r\n if after is not None:\r\n self.robots = \"noindex,follow\"\r\n\r\n self.query_obj = self.query()\r\n self.builder_obj = self.builder()\r\n self.listing_obj = self.listing()\r\n content = self.content()\r\n res = self.render_cls(content = content,\r\n show_sidebar = self.show_sidebar,\r\n nav_menus = self.menus,\r\n title = self.title(),\r\n infotext = self.infotext,\r\n robots = self.robots,\r\n top_filter = self.top_filter,\r\n header_sub_nav = self.header_sub_nav,\r\n **self.render_params).render()\r\n return res", "def getDocsList(self):\n return self.docs_list", "def build_document(self):\n pass", "def show_documents():\n\n document = Document(connection=connection, cursor=cursor)\n\n all_documents = document.get_all_documents()\n\n context = {\n 'all_documents': all_documents\n }\n\n return render_template('pages/tables/documents.html', **context)", "def query(self):", "def cli_saved_queries_list():\n query_list = None\n try:\n query_list = api.saved_queries_list()\n except NoRecordsFound as error:\n print \"%(error)s\" % locals()\n return \n\n for saved_query in query_list:\n print \"%s: %s\" % (saved_query['name'], ' '.join(saved_query['query_data']))", "def build_query_structure(self):\n query_list = list()\n filter_list = list()\n for key, val in self.q_dict.items():\n if key in self.es_query_keys:\n query_list.append(\n {\"match\": {\".\".join(key.split(\"_\")): val[0]}})\n elif key in self.es_date_keys:\n filter_list.append(\n {\"range\": {\".\".join(key.split(\"_\")): val}})\n elif \":\" in val[0]:\n #for handling queries like dd_dct=gte:1\n range_val = val[0].split(\":\")\n filter_list.append({\"range\": {\".\".join(key.split(\"_\")): {\n range_val[0]: int(range_val[1])}}})\n else:\n filter_list.append(\n {\"terms\": {\".\".join(key.split(\"_\")): val}})\n return query_list, filter_list", "def query(self, query):", "def get_list(self):\n list_id = self.request.path[1:]\n if len(list_id) <= 0:\n list_id = INDEX_LIST_ID\n lst = List.gql(\"where id=:1\", list_id).get()\n if lst is not None:\n q = Page.all()\n q.filter('list_id =', list_id)\n if not users.is_current_user_admin():\n q.filter('is_public',True)\n q.order('-precedence')\n pages = q.fetch(100)\n for page in pages:\n hydrate(page)\n return ModelAndView(view='list.html',\n model={'list': lst,\n 'pages': pages,\n 'syntax_list': get_syntax_list(pages)})\n else:\n return self.get_list_fs(list_id)", "def setup(query):\n\n results = []\n return results", "def setup(query):\n\n results = []\n return results", "def setup(query):\n\n results = []\n return results", "def setup(query):\n\n results = []\n return results", "def query(self):\n query_url = self.get_query_url()\n 
logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def documentlist_sponsor():\n # get the current user id\n user_id = current_user.id\n\n # Document objects list which includes editors for all objects\n # this logic will only work if document_objects.count() = editor_objects.count()\n # get document objects filtered by the current user\n # attach autodocs based upon revision helper table\n document_objects=db.session.query(Retention.sponsor_id,User.id,Retention.editor_id,Retention.document_id,User.name,Document.document_name,Document.document_body,Autodoc.autodoc_body).\\\n join(Retention, User.id==Retention.editor_id).\\\n join(Document, Document.id==Retention.document_id).\\\n order_by(Retention.sponsor_id).\\\n filter(Retention.sponsor_id == user_id).\\\n join(Revision,Revision.document_id==Document.id).\\\n join(Autodoc,Autodoc.id==Revision.autodoc_id)\n\n # get a count of the document objects\n document_count = document_objects.count()\n\n # blank list to append to for documents and editors\n document_list=[]\n\n # loop through document objects\n for counter in range(0,document_count):\n document_list.append(document_objects[counter])\n\n # show list of document names\n documents = document_list\n\n return render_template(\n 'documentlist_sponsor.jinja2',\n documents=documents,\n )", "def listQueries():\n for query_name in elasticsearch_queries.queries.keys():\n logger.info(\"Query name: %s\" % query_name)", "def getDocumentAll(self, query = {}, keys = []):\n query = query or {}\n if \"include_docs\" not in query:\n query[\"include_docs\"] = True\n\n if not keys:\n return self.client.get(self.name +\"/_all_docs\", query)\n else:\n return self.client.post(self.name +\"/_all_docs\", query,\n {\"keys\": keys}).getBodyData()", "def oparl_documentsss():\n start_time = time.time()\n jsonp_callback = request.args.get('callback', None)\n ref = request.args.get('reference', '')\n references = ref.split(',')\n if references == ['']:\n references = None\n output = request.args.get('output', '').split(',')\n rs = util.get_rs()\n q = request.args.get('q', '*:*')\n fq = request.args.get('fq', '')\n sort = request.args.get('sort', 'score desc')\n start = int(request.args.get('start', '0'))\n numdocs = int(request.args.get('docs', '10'))\n date_param = request.args.get('date', '')\n get_attachments = 'attachments' in output\n get_thumbnails = 'thumbnails' in output and get_attachments\n get_consultations = 'consultations' in output\n get_facets = 'facets' in output\n #get_relations = 'relations' in output\n request_info = {} # Info über die Anfrage\n query = False\n docs = False\n submission_ids = []\n # TODO: entscheiden, was mit get_relations passiert\n \"\"\"\n Anhand der übergebenen Parameter wird entschieden, ob eine ES-Suche\n durchgeführt wird, oder ob die Abfrage direkt anhand von Kennungen\n (references) erfolgen kann.\n \"\"\"\n \n if references is None:\n # Suche wird durchgeführt\n # (References-Liste via Suchmaschine füllen)\n query = db.query_submissions(rs=rs, q=q, fq=fq, sort=sort, start=start,\n docs=numdocs, date=date_param, facets=get_facets)\n if query['numhits'] > 0:\n submission_ids = [x['_id'] for x in query['result']]\n else:\n docs = []\n else:\n # Direkte Abfrage\n request_info = {\n 'references': 
references\n }\n request_info['output'] = output\n\n # Abrufen der benötigten Dokumente aus der Datenbank\n if references is not None:\n docs = db.get_submissions(rs=rs, references=references,\n get_attachments=get_attachments,\n get_consultations=get_consultations,\n get_thumbnails=get_thumbnails)\n elif len(submission_ids) > 0:\n docs = db.get_submissions(rs=rs, submission_ids=submission_ids,\n get_attachments=get_attachments,\n get_consultations=get_consultations,\n get_thumbnails=get_thumbnails)\n\n ret = {\n 'status': 0,\n 'duration': int((time.time() - start_time) * 1000),\n 'request': request_info,\n 'response': {}\n }\n if docs:\n ret['response']['documents'] = docs\n ret['response']['numdocs'] = len(docs)\n if query and 'maxscore' in query:\n ret['response']['maxscore'] = query['maxscore']\n for n in range(len(docs)):\n docs[n]['reference'] = docs[n]['identifier']\n del docs[n]['identifier']\n\n if query:\n ret['response']['numhits'] = query['numhits']\n if get_facets and 'facets' in query:\n ret['response']['facets'] = query['facets']\n \n ret['response']['start'] = start\n ret['request']['sort'] = sort\n ret['request']['fq'] = fq\n\n json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)\n if jsonp_callback is not None:\n json_output = jsonp_callback + '(' + json_output + ')'\n response = make_response(json_output, 200)\n response.mimetype = 'application/json'\n response.headers['Expires'] = util.expires_date(hours=24)\n response.headers['Cache-Control'] = util.cache_max_age(hours=24)\n return response", "def queries(self):\n request = Request(method=\"get\", endpoint=\"/query/current\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)", "def list(self,**kwargs):\n # import pdb;pdb.set_trace()\n g.title = \"{} Record List\".format(g.title)\n \n self.select_recs(**kwargs)\n \n # ensure that the field list is complete\n self.has_search_fields = False #default state\n self.set_list_fields(self.list_fields)\n \n if self._ajax_request:\n self.list_template = self.list_table_template\n \n return render_template(self.list_template,\n data = self,\n session_fields = ListFilter(), # provides the session field constants\n **kwargs,\n )", "def showQuery(query):\n kind = query._model_class.kind()\n ancestor = query._Query__ancestor\n filters = query._Query__query_set\n orderings = query._Query__orderings\n hint = None\n limit = None\n offset = None\n\n res = [\"%s.all()\" % kind]\n if ancestor is not None:\n res.append(\"ancestor(%r)\" % ancestor)\n for k in sorted(filters):\n res.append(\"filter(%r, %r)\" % (k, filters[k]))\n for p, o in orderings:\n if o==datastore.Query.DESCENDING:\n p = '-'+p\n res.append(\"order(%r)\" % p)\n\n return '.'.join(res)", "def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):\r\n index_analysis = None\r\n recommendation = None\r\n namespace = parsed_query['ns']\r\n indexStatus = \"unknown\"\r\n\r\n index_cache_entry = self._ensure_index_cache(db_uri,\r\n db_name,\r\n collection_name)\r\n\r\n\r\n query_analysis = self._generate_query_analysis(parsed_query,\r\n db_name,\r\n collection_name)\r\n if ((query_analysis['analyzedFields'] != []) and\r\n query_analysis['supported']):\r\n index_analysis = self._generate_index_analysis(query_analysis,\r\n index_cache_entry['indexes'])\r\n indexStatus = index_analysis['indexStatus']\r\n if index_analysis['indexStatus'] != 'full':\r\n 
recommendation = self._generate_recommendation(query_analysis,\r\n db_name,\r\n collection_name)\r\n # a temporary fix to suppress faulty parsing of $regexes.\r\n # if the recommendation cannot be re-parsed into yaml, we assume\r\n # it is invalid.\r\n if not validate_yaml(recommendation['index']):\r\n recommendation = None\r\n query_analysis['supported'] = False\r\n\r\n\r\n # QUERY REPORT\r\n return OrderedDict({\r\n 'queryMask': parsed_query['queryMask'],\r\n 'indexStatus': indexStatus,\r\n 'parsed': parsed_query,\r\n 'namespace': namespace,\r\n 'queryAnalysis': query_analysis,\r\n 'indexAnalysis': index_analysis,\r\n 'recommendation': recommendation\r\n })", "def run_queries(self, query_list, random_command):\n output_dict = {'postingsList': {},\n 'postingsListSkip': {},\n 'daatAnd': {},\n 'daatAndSkip': {},\n 'daatAndTfIdf': {},\n 'daatAndSkipTfIdf': {},\n 'sanity': self.sanity_checker(random_command)}\n\n for query in tqdm(query_list):\n \"\"\" Run each query against the index. You should do the following for each query:\n 1. Pre-process & tokenize the query.\n 2. For each query token, get the postings list & postings list with skip pointers.\n 3. Get the DAAT AND query results & number of comparisons with & without skip pointers.\n 4. Get the DAAT AND query results & number of comparisons with & without skip pointers, \n along with sorting by tf-idf scores.\"\"\"\n raise NotImplementedError\n\n input_term_arr = [] # Tokenized query. To be implemented.\n\n for term in input_term_arr:\n postings, skip_postings = None, None\n\n \"\"\" Implement logic to populate initialize the above variables.\n The below code formats your result to the required format.\n To be implemented.\"\"\"\n\n output_dict['postingsList'][term] = postings\n output_dict['postingsListSkip'][term] = skip_postings\n\n and_op_no_skip, and_op_skip, and_op_no_skip_sorted, and_op_skip_sorted = None, None, None, None\n and_comparisons_no_skip, and_comparisons_skip, \\\n and_comparisons_no_skip_sorted, and_comparisons_skip_sorted = None, None, None, None\n \"\"\" Implement logic to populate initialize the above variables.\n The below code formats your result to the required format.\n To be implemented.\"\"\"\n and_op_no_score_no_skip, and_results_cnt_no_skip = self._output_formatter(and_op_no_skip)\n and_op_no_score_skip, and_results_cnt_skip = self._output_formatter(and_op_skip)\n and_op_no_score_no_skip_sorted, and_results_cnt_no_skip_sorted = self._output_formatter(and_op_no_skip_sorted)\n and_op_no_score_skip_sorted, and_results_cnt_skip_sorted = self._output_formatter(and_op_skip_sorted)\n\n output_dict['daatAnd'][query.strip()] = {}\n output_dict['daatAnd'][query.strip()]['results'] = and_op_no_score_no_skip\n output_dict['daatAnd'][query.strip()]['num_docs'] = and_results_cnt_no_skip\n output_dict['daatAnd'][query.strip()]['num_comparisons'] = and_comparisons_no_skip\n\n output_dict['daatAndSkip'][query.strip()] = {}\n output_dict['daatAndSkip'][query.strip()]['results'] = and_op_no_score_skip\n output_dict['daatAndSkip'][query.strip()]['num_docs'] = and_results_cnt_skip\n output_dict['daatAndSkip'][query.strip()]['num_comparisons'] = and_comparisons_skip\n\n output_dict['daatAndTfIdf'][query.strip()] = {}\n output_dict['daatAndTfIdf'][query.strip()]['results'] = and_op_no_score_no_skip_sorted\n output_dict['daatAndTfIdf'][query.strip()]['num_docs'] = and_results_cnt_no_skip_sorted\n output_dict['daatAndTfIdf'][query.strip()]['num_comparisons'] = and_comparisons_no_skip_sorted\n\n 
output_dict['daatAndSkipTfIdf'][query.strip()] = {}\n output_dict['daatAndSkipTfIdf'][query.strip()]['results'] = and_op_no_score_skip_sorted\n output_dict['daatAndSkipTfIdf'][query.strip()]['num_docs'] = and_results_cnt_skip_sorted\n output_dict['daatAndSkipTfIdf'][query.strip()]['num_comparisons'] = and_comparisons_skip_sorted\n\n return output_dict", "def queries_to_dict(queries: List) -> List:\n return [\n {\n \"unannotated_text\": query.query.text,\n \"annotated_text\": dump_query(query),\n \"domain\": query.domain,\n \"intent\": query.intent,\n }\n for query in queries\n ]", "def generate_query_report(self, db_uri, query, db_name, collection_name):\r\n return self._query_analyzer.generate_query_report(db_uri,\r\n query,\r\n db_name,\r\n collection_name)", "def List(cls):\n return WordList.query().fetch_async()", "def rocchio_doc_list(query_vector, corpus, topic):\n #create dict of vectors for each docid that contains\n #at least one non-zero term in query_vector\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n doc_shortlist = dict()\n vector_len = len(query_vector)\n word_list = list(inv_index.keys())\n if corpus == cg.REUTERS:\n topic_docs = list(map(int, text_categorization.get_topic_dict()[topic]))\n else:\n topic_docs = list(range(0, 663))\n for index, weight in enumerate(query_vector):\n word = word_list[index]\n for doc_id in set(inv_index[word]).intersection(set(topic_docs)):\n if doc_id in doc_shortlist:\n #doc already added, just update weight entry for this word\n doc_shortlist[doc_id][index] = inv_index[word][doc_id]['weight']\n else:\n #doc not added yet add doc_id to shortlist,\n #initialize list to 0s for all words in query\n #update weight entry for current word\n entry = np.zeros(vector_len)\n entry[index] = inv_index[word][doc_id]['weight']\n doc_shortlist[doc_id] = entry\n\n return doc_shortlist", "def _make_query(self):\r\n raise NotImplementedError()", "def queryList():\n #f = open(\"/var/log/scidbpy_log.txt\",\"w+\")\n #f.write(\"starting queryList\")\n\n header, rows = querySciDB(\"list('arrays')\")\n names = [row[1].translate(None, \"\\\"\") for row in rows]\n\n return names", "def gen_html_output(strs,q):\n res = []\n res.append('<html>\\n')\n res.append('<head><title>SecPoint.com GoogleDB queries strings</title></head>\\n')\n res.append('<body>\\n')\n res.append('<p>Generated by: <a href=\"http://www.secpoint.com/\">SecPoint.com</a> GoogleDB tool</p>\\n')\n res.append('\\t<ul>\\n')\n for (x,v) in zip(strs,q):\n res.append('\\t\\t<li><a href=\"%s\">%s</a></li>\\n'%(v,x))\n res.append('\\t</ul>\\n')\n res.append('</body>\\n</html>')\n return res", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def document_index_load():\n\n return render_template('components/item-list.html', type='document', headless=True,\n items=loading_list(Document.query))", "def _expand_query_list(session, queries, recursive=False, verbose=False):\n results = []\n\n # If no queries are supplied by the user, default to a query for the\n # current working directory\n if len(queries) == 0:\n queries = [get_cwd()]\n\n # Wildcard expansion is 
performed first, so it can be combined with other types\n # of expansion, such as recursive expansion of subcollections later. Each collection\n # or data object is expanded only once.\n preprocessed_queries = []\n already_expanded = {}\n for query in queries:\n # Currently only wildcards without a collection path are supported\n # e.g. \"*.dat\", but not \"../*.dat\" or \"*/data.dat\".\n if \"/\" not in query and (\"?\" in query or \"*\" in query):\n for d in get_dataobjects_in_collection(session, get_cwd()):\n if fnmatch(d[\"name\"],\n query) and d[\"full_name\"] not in already_expanded:\n preprocessed_queries.append(d[\"full_name\"])\n already_expanded[d[\"full_name\"]] = 1\n for c in get_direct_subcollections(session, get_cwd()):\n parent, coll = os.path.split(c[\"name\"])\n if fnmatch(coll, query) and d[\"name\"] not in already_expanded:\n preprocessed_queries.append(c[\"name\"])\n already_expanded[d[\"name\"]] = 1\n else:\n preprocessed_queries.append(query)\n\n for query in preprocessed_queries:\n absquery = convert_to_absolute_path(query)\n if collection_exists(session, absquery):\n results.append({\"original_query\": query, \"expanded_query\": absquery,\n \"expanded_query_type\": \"collection\"})\n if verbose:\n print_debug(\"Argument \\\"{}\\\" is a collection.\".format(query))\n if recursive:\n for subcollection in get_subcollections(session, absquery):\n if verbose:\n print_debug(\"Recursively adding subcollection \" +\n subcollection + \" to queries.\")\n results.append({\"original_query\": query,\n \"expanded_query\": subcollection,\n \"expanded_query_type\": \"collection\"})\n elif dataobject_exists(session, absquery):\n results.append({\"original_query\": query, \"expanded_query\": absquery,\n \"expanded_query_type\": \"dataobject\"})\n if verbose:\n print_debug(\"Argument \\\"{}\\\" is a data object.\".format(query))\n else:\n print_error(\n \"Query \\\"{}\\\" could not be resolved. Ignoring ... 
\".format(query))\n\n return results", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def __str__(self):\n query = []\n\n if self.root:\n query.append('query')\n\n if self.alias is not None:\n query.append(self.alias + ':')\n\n if self.name is not None:\n query.append(str(self.name))\n\n if self.filters:\n query.append('(')\n for k,v in self.filters.items():\n query.append('{k}:\"{v}\"'.format(k=k, v=v))\n query.append(')')\n\n if self.children:\n query.append('{')\n if self.children:\n for idx, f in enumerate(self.children.values()):\n query.append(str(f))\n if idx < len(self.children) - 1:\n query.append(',')\n\n query.append('}')\n\n return ''.join(query)", "def list_documents(\n self, index: str, query: Dict[str, Any] = None\n ) -> Iterable[Dict[str, Any]]:\n return es_scan(self.__client__, query=query or {}, index=index)", "def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):\n index_analysis = None\n recommendation = None\n namespace = parsed_query['ns']\n indexStatus = \"unknown\"\n\n index_cache_entry = self._ensure_index_cache(db_uri,\n db_name,\n collection_name)\n\n\n query_analysis = self._generate_query_analysis(parsed_query,\n db_name,\n collection_name)\n if ((query_analysis['analyzedFields'] != []) and\n query_analysis['supported']):\n index_analysis = self._generate_index_analysis(query_analysis,\n index_cache_entry['indexes'])\n indexStatus = index_analysis['indexStatus']\n if index_analysis['indexStatus'] != 'full':\n recommendation = self._generate_recommendation(query_analysis,\n db_name,\n collection_name)\n # a temporary fix to suppress faulty parsing of $regexes.\n # if the recommendation cannot be re-parsed into yaml, we assume\n # it is invalid.\n if not validate_yaml(recommendation['index']):\n recommendation = None\n query_analysis['supported'] = False\n\n\n # QUERY REPORT\n return OrderedDict({\n 'queryMask': parsed_query['queryMask'],\n 'indexStatus': indexStatus,\n 'parsed': parsed_query,\n 'namespace': namespace,\n 'queryAnalysis': query_analysis,\n 'indexAnalysis': index_analysis,\n 'recommendation': recommendation\n })", "def list():", "def list():", "def _getListing(self):\n\n # lets assure consistent litsting order\n items = self._items.items()\n items.sort()\n return [ \"%s%s%s: %s\" % (_def_sep, str(x[1]), _def_sep, x[1].__doc__)\n for x in items ]", "def createlistquery(filtered=1):\n if filtered == '1':\n query_obj = session.query(Components.ID, Components.Name, Components.CurrentStock,\n Components.ReorderLevel, Components.UnitPrice, Suppliers.Name,\n Locations.Name, Components.Datasheet). \\\n outerjoin(Suppliers, Components.SuppliersID == Suppliers.ID). \\\n outerjoin(Locations, Components.LocationsID == Locations.ID). \\\n filter(Components.CurrentStock <= Components.ReorderLevel). \\\n filter(Components.ReorderLevel != \"\"). \\\n order_by(Components.Name)\n else:\n query_obj = session.query(Components.ID, Components.Name, Components.CurrentStock,\n Components.ReorderLevel, Components.UnitPrice, Suppliers.Name,\n Locations.Name, Components.Datasheet). \\\n outerjoin(Suppliers, Components.SuppliersID == Suppliers.ID). \\\n outerjoin(Locations, Components.LocationsID == Locations.ID). 
\\\n order_by(Components.Name)\n return query_obj", "def generate_list(self):\r\n\t\tcon = sqlite3.connect(self.bd)\r\n\t\tcursor = con.cursor()\r\n\t\tsql = \"\"\"\r\n\t\t\tSELECT fileID, domain, relativePath\r\n\t\t\tFROM Files\r\n\t\t\tORDER BY domain, relativePath\r\n\t\t\"\"\"\r\n\t\tcursor.execute(sql)\r\n\t\tfor i in cursor:\r\n\t\t\tyield i\r\n\t\t\t#print (i)\r", "def pp_query(query):\n print(format_query(query))", "def test_create_doc(self):\r\n function_name = sys._getframe().f_code.co_name\r\n db_name = \"{}_{}\".format(function_name, \"db\")\r\n db_name_illegal_by_rdb = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n db_name_illegal_by_this_program = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n table_name = \"{}_{}\".format(function_name, \"table\")\r\n table_name_illegal_by_rdb = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n table_name_illegal_by_this_program = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n doc_1 = {\"name\": \"alpha\", \"no\":\"1\"}\r\n doc_2 = {\"name\": \"beta\", \"no\":\"2\"}\r\n doc_3 = {\"name\": \"charlie\", \"no\":\"1\"}\r\n doc_4 = {\"name\": \"charlie\", \"no\":\"3\"}\r\n\r\n test_list_1 = [\r\n db_name,\r\n table_name,\r\n doc_1,\r\n [\"no\"],\r\n False,\r\n None,\r\n None,\r\n False\r\n ]\r\n test_list_2 = [\r\n test_list_1[0],\r\n test_list_1[1],\r\n doc_2,\r\n [\"no\"],\r\n None\r\n ]\r\n test_list_3 = [\r\n test_list_1[0],\r\n test_list_1[1],\r\n doc_3,\r\n [\"no\"],\r\n None\r\n ]\r\n test_list_4 = [\r\n test_list_1[0],\r\n test_list_1[1],\r\n doc_4,\r\n [\"name\", \"no\"],\r\n None\r\n ]\r\n test_list_5 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_rdb,\r\n doc_1,\r\n None\r\n ]\r\n test_list_6 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_this_program,\r\n doc_1,\r\n None\r\n ]\r\n test_list_7 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_rdb,\r\n doc_1,\r\n None\r\n ]\r\n test_list_8 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_this_program,\r\n doc_1,\r\n None\r\n ]\r\n\r\n crd(self.c, test_list_1[0])\r\n crt(self.c, test_list_1[1], test_list_1[0])\r\n test_list_1[len(test_list_1) - 1] = isinstance(\r\n crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0],\r\n _expr=True\r\n ),\r\n r.ast.Insert\r\n )\r\n test_list_1[len(test_list_1) - 2] = crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0]\r\n )\r\n test_list_1[len(test_list_1) - 3] = crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0],\r\n test_list_1[3]\r\n )\r\n test_list_1[len(test_list_1) - 4] = isinstance(\r\n crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0],\r\n [\"name\", \"no\"],\r\n True\r\n ),\r\n r.ast.Insert\r\n )\r\n test_list_2[len(test_list_2) - 1] = crdoc(\r\n self.c,\r\n test_list_2[2],\r\n test_list_2[1],\r\n test_list_2[0],\r\n test_list_2[3]\r\n )\r\n crt(self.c, test_list_3[1], test_list_3[0])\r\n test_list_3[len(test_list_3) - 1] = crdoc(\r\n self.c,\r\n test_list_3[2],\r\n test_list_3[1],\r\n test_list_3[0],\r\n test_list_3[3]\r\n )\r\n test_list_4[len(test_list_4) - 1] = crdoc(\r\n self.c,\r\n test_list_4[2],\r\n test_list_4[1],\r\n test_list_4[0],\r\n test_list_4[3]\r\n )\r\n dd(self.c, test_list_1[0])\r\n\r\n \"\"\"Test 1.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_5[len(test_list_5) - 1] = crdoc(\r\n self.c,\r\n test_list_5[2],\r\n test_list_5[1],\r\n test_list_5[0]\r\n )\r\n\r\n 
\"\"\"Test 2.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_6[len(test_list_6) - 1] = crdoc(\r\n self.c,\r\n test_list_6[2],\r\n test_list_6[1],\r\n test_list_6[0]\r\n )\r\n\r\n r.db_create(test_list_7[0]).run(self.c)\r\n \"\"\"Test 3.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_7[len(test_list_7) - 1] = crdoc(\r\n self.c,\r\n test_list_7[2],\r\n test_list_7[1],\r\n test_list_7[0]\r\n )\r\n r.db_drop(test_list_7[0]).run(self.c)\r\n\r\n r.db_create(test_list_8[0]).run(self.c)\r\n r.db(test_list_8[0]).table_create(test_list_8[0]).run(self.c)\r\n \"\"\"Test 4.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_8[len(test_list_8) - 1] = crdoc(\r\n self.c,\r\n test_list_8[2],\r\n test_list_8[1],\r\n test_list_8[0]\r\n )\r\n r.db_drop(test_list_8[0]).run(self.c)\r\n\r\n self.assertTrue(test_list_1[len(test_list_1) - 1]) # Test 5.\r\n self.assertIsNotNone(test_list_1[len(test_list_1) - 2]) # Test 6.\r\n self.assertIsNone(test_list_1[len(test_list_1) - 3]) # Test 7.\r\n self.assertFalse(test_list_1[len(test_list_1) - 4]) # Test 8\r\n self.assertIsNotNone(test_list_2[len(test_list_2) - 1]) # Test 9.\r\n self.assertIsNone(test_list_3[len(test_list_3) - 1]) # Test 10.\r\n self.assertIsNotNone(test_list_4[len(test_list_4) - 1]) # Test 11.\r\n self.assertIsNone(test_list_5[len(test_list_5) - 1]) # Test 12.\r\n self.assertIsNone(test_list_6[len(test_list_6) - 1]) # Test 13.\r\n self.assertIsNone(test_list_7[len(test_list_7) - 1]) # Test 14.\r\n self.assertIsNone(test_list_8[len(test_list_8) - 1]) # Test 15.\r", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n program_id = self.request.META.get('HTTP_X_SVMS_PROGRAM_ID')\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n level = self.request.GET.get(\"level\")\n description = self.request.GET.get(\"description\")\n status = self.request.GET.get(\"status\")\n job_tag = self.request.GET.get(\"job_tag\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(program_id=query) |\n Q(category=query) |\n Q(title__icontains=query) |\n #Q(category__category_name__icontains=query) |\n Q(description__icontains=query) |\n Q(job_tag__tag__in=str(query).split(\",\"))\n ), Q.OR)\n\n if query.isnumeric():\n q_object.add(\n Q(level__icontains=int(query)), Q.OR)\n\n q_object.add(Q(status=strtobool(query)), Q.OR) if query in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n else:\n if program_id:\n q_object.add(\n Q(program_id=program_id),\n Q.AND)\n\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n if description:\n q_object.add(\n Q(description__icontains=description), Q.AND)\n\n if job_tag:\n q_object.add(\n Q(job_tag__tag__in=str(job_tag).split(\",\")),\n Q.AND)\n\n if level:\n if level.isnumeric():\n q_object.add(\n Q(level__icontains=int(level)),\n Q.AND)\n else:\n raise Exception(\n ErrorMessage.WRONG_FIELD_TYPE.value.format(\"level\",\n \"numeric\"))\n\n q_object.add(Q(status=strtobool(status)), Q.AND) if status in [\n \"true\", \"True\", \"False\", \"false\"] else None\n\n return q_object", "def list(self):", "def show_list():\n\n response = []\n docs = SUPERHEROES.stream()\n for doc in docs:\n response.append(doc.to_dict())\n return jsonify(response), 201", "def setupQuery(self, file):\n file.write(\"QUERY(FALSE);\\n\")\n 
file.write(\"COUNTEREXAMPLE;\\n\")\n return", "def do_list(self, args):\n if args.option == 'config':\n print(list_config())\n if args.option == 'queries':\n for k,v in list_queries().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'jobs':\n update_jobs(CLI_GLOBALS.ENGAGEMENT)\n for k,v in list_jobs().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'results':\n for i in list_results():\n print(i)\n if args.option == 'key':\n for k,v in list_key().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'engagement':\n print(list_engagement())", "def test_client_document_list(self):\n pass", "def test_to_query_string(self):\n self.assertEqual(\n ClassicAPIQuery(id_list=[]).to_query_string(),\n \"search_query=&id_list=&start=0&max_results=10\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n search_query=\"all:electron\", id_list=[]\n ).to_query_string(),\n \"search_query=all:electron&id_list=&start=0&max_results=10\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n search_query=\"all:electron\",\n id_list=[\"1705.09169v3\", \"1705.09129v3\"],\n ).to_query_string(),\n \"search_query=all:electron&id_list=1705.09169v3,1705.09129v3\"\n \"&start=0&max_results=10\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n search_query=\"all:electron\",\n id_list=[\"1705.09169v3\", \"1705.09129v3\"],\n page_start=3,\n ).to_query_string(),\n \"search_query=all:electron&id_list=1705.09169v3,1705.09129v3\"\n \"&start=3&max_results=10\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n search_query=\"all:electron\",\n id_list=[\"1705.09169v3\", \"1705.09129v3\"],\n page_start=3,\n size=50,\n ).to_query_string(),\n \"search_query=all:electron&id_list=1705.09169v3,1705.09129v3\"\n \"&start=3&max_results=50\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n search_query=\"all:electron\", page_start=3, size=50\n ).to_query_string(),\n \"search_query=all:electron&id_list=&start=3&max_results=50\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n id_list=[\"1705.09169v3\", \"1705.09129v3\"], page_start=3, size=50\n ).to_query_string(),\n \"search_query=&id_list=1705.09169v3,1705.09129v3\"\n \"&start=3&max_results=50\",\n )\n self.assertEqual(\n ClassicAPIQuery(\n search_query=\"all:electron\", size=50\n ).to_query_string(),\n \"search_query=all:electron&id_list=&start=0&max_results=50\",\n )", "def snippetList(requeset, format = None):", "def documentlist_editor():\n # get the current user id\n user_id = current_user.id\n\n # Document objects and list, as well as Editor objects and list\n # this logic will only work if document_objects.count() = editor_objects.count()\n # get document objects filtered by the current user\n document_objects=db.session.query(Retention.sponsor_id,User.id,Retention.editor_id,Retention.document_id,User.name,Document.document_name,Document.document_body,Autodoc.autodoc_body).\\\n join(Retention, User.id==Retention.editor_id).\\\n join(Document, Document.id==Retention.document_id).\\\n order_by(Retention.sponsor_id).\\\n filter(Retention.editor_id == user_id).\\\n join(Revision,Revision.document_id==Document.id).\\\n join(Autodoc,Autodoc.id==Revision.autodoc_id)\n\n # get a count of the document objects\n document_count = document_objects.count()\n\n # blank list to append to for documents and editors\n document_list=[]\n\n # loop through document objects\n for counter in range(0,document_count):\n document_list.append(document_objects[counter])\n\n # show list of document names\n documents = document_list\n\n\n return render_template(\n 
'documentlist_editor.jinja2',\n documents=documents,\n )", "def query(self):\n pass", "def _list(self, **kwargs):\n\n return self._make_request(**kwargs)", "def do_query(documents, config_file=None, logger=None, context=None):\n num_documents = documents.count()\n return {\"num_documents\": num_documents}", "def document(self):\n ...", "def _list(\n self, table, _filter=None, idx=None, reverse=None, ids_only=None, limit=None\n ):\n data = {}\n if _filter:\n data[\"KeyConditionExpression\"] = Key(_filter.get(\"key\")).eq(\n _filter.get(\"value\")\n )\n\n if ids_only:\n data[\"ProjectionExpression\"] = \"resource_id\"\n\n if idx:\n data[\"IndexName\"] = idx\n\n if limit:\n data[\"Limit\"] = limit\n\n # reverse_direction. Used when sorting descending\n if reverse:\n data[\"ScanIndexForward\"] = False\n\n return self._response_handler(table, \"list\", data)", "def _get_queries(args):\n if args.mode == '2DSEQ':\n queries = [\"@type=='2dseq'\", \"@is_spectroscopy==True\", \"@is_complex==True\"]\n elif args.mode == 'FID':\n queries = [\"@type=='fid'\", \"@is_spectroscopy==True\"]\n return queries + args.query", "def _query_builder(self,\n start_date=None,\n end_date=None,\n clipper=None\n ):\n search_string='format=json&lang=fr&q='\n if (start_date is not None):\n if(end_date is not None):\n search_string+='entre+'+start_date+'+et+'+end_date\n else:\n search_string+=start_date\n elif(end_date is not None): \n search_string+=end_date\n \n if(clipper.query is not None):\n query=clipper.query.replace(' ','+')\n search_string+='+'+query\n if(clipper.bbox is not None):\n search_string+='&box='+clipper.bbox\n\n return search_string", "def search_listings(query, offset):\n # Query by tags that contain the query\n tag_subquery = db.session.query(Tag).filter(Tag.name.contains(query)).subquery()\n tag_query_items = db.session.query(Item).join(tag_subquery, Item.tags).all()\n # Query for items that contain the query in the title\n name_query = db.session.query(Item).filter(Item.item_name.contains(query)).all()\n # Query for items that contain the query in the description\n description_query = db.session.query(Item).filter(Item.item_description.contains(query)).all()\n\n # Merge the 3 lists\n complete_list = list(set(tag_query_items + name_query + description_query))\n\n # Sort the 3 lists\n complete_list.sort(key=lambda item: item.date_listed, reverse=True)\n\n # Select by offset and limit\n limit_amount = int(offset) + LIMIT\n if limit_amount > len(complete_list):\n limit_amount = len(complete_list)\n complete_list = complete_list[int(offset):limit_amount]\n\n return jsonify(data=[item.serialize for item in complete_list])", "def list():\n\n page_limit = app.config['PAGINATION_LIMIT']\n page = request.args.get('page') if 'page' in request.args else 1\n per_page = request.args.get('per_page') if 'per_page' in request.args else page_limit\n\n # TODO: Can be done in much more elegant way\n try:\n page = int(page)\n except:\n page = 1\n\n try:\n per_page = int(per_page)\n except:\n per_page = page_limit\n if per_page > page_limit:\n per_page = page_limit\n\n # Get all rows and order by published datetime and paginate by page count and per_page\n posts = YTSearch.query.order_by(desc(YTSearch.published_at)) \\\n .paginate(page, per_page, error_out=True)\n\n # Get JSON data from list of objects\n result = [i.serialize() for i in posts.items]\n return jsonify({'data': result, 'has_next': posts.has_next, 'next_page': posts.next_num,\n 'has_prev': posts.has_prev, 'prev_page': posts.prev_num, 'length': len(result)}), 
200", "def document_numbers(self, **kw):\r\n \r\n q = query.And([query.Term(k, v) for k, v in kw.iteritems()])\r\n return q.docs(self)", "def _list(self):\n raise NotImplementedError", "def index():\n response = \"\"\n for table in config.TABLE_SCHEMA.keys():\n response = response + disco.examples(table)\n return response", "def get_list(self, method=\"search\", **kwargs):\r\n\r\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\r\n lg.setLevel(self.log_level)\r\n\r\n args = inspect.getargvalues(inspect.currentframe())[3]\r\n lg.debug(\"\\n****** Args *****:\\n%s\",\r\n pp.pformat(args))\r\n\r\n es = kwargs.get(\"es\",self.es)\r\n doc_type = get2(kwargs, \"doc_type\", self.doc_type)\r\n fields = get2(kwargs, \"fields\")\r\n search_flds = kwargs.get(\"search_flds\")\r\n sort_dir = get2(kwargs,\"sort_dir\", \"asc\")\r\n sort_fields = get2(kwargs,\"sort_fields\", get2(kwargs, \"fields\", []))\r\n size = get2(kwargs,\"size\",10)\r\n term = get2(kwargs,\"term\",'').replace(\"/\",\"//\")\r\n filter_field = kwargs.get('filter_field')\r\n filter_value = kwargs.get('filter_value')\r\n highlight = kwargs.get('highlight',False)\r\n from_ = kwargs.get('from_')\r\n dsl = {}\r\n # set retutn to only return the fields specified or return the whole\r\n # document if not specified\r\n if fields is not None:\r\n dsl[\"_source\"] = fields\r\n else:\r\n fields = []\r\n # set query parameters based on the return method \"list\" or \"search\"\r\n if sort_dir != \"none\" and method == \"list\":\r\n dsl[\"sort\"] = []\r\n for fld in sort_fields:\r\n if fld is not None:\r\n dsl[\"sort\"].append({ fld: sort_dir })\r\n if method == \"search\":\r\n # query in elasticsearch breaks if the is a single open parenthesis\r\n # remove a single parenthesis from the search term\r\n if \"(\" in term and \")\" not in term:\r\n search_term = \"*%s*\" % term.replace(\"(\", \"\")\r\n elif term.startswith(\"<\") and term.endswith(\">\"):\r\n search_term = term\r\n else:\r\n search_term = \"*%s*\" % term\r\n if search_flds is not None and len(search_flds) > 0:\r\n fields_to_search = search_flds\r\n elif len(fields) > 0:\r\n fields_to_search = fields\r\n else:\r\n fields_to_search = []\r\n # dsl['query'] = {\r\n # \"bool\": {\r\n # \"should\": [\r\n # {\r\n # \"query_string\" : {\r\n # \"query\": search_term,\r\n # \"analyzer\": \"default\",\r\n # \"analyze_wildcard\": True\r\n # }\r\n # },\r\n # {\r\n # \"query_string\" : {\r\n # \"query\": search_term,\r\n # \"analyzer\": \"default\",\r\n # \"analyze_wildcard\": True,\r\n # \"fields\": fields_to_search,\r\n # \"boost\": 10\r\n # }\r\n # }\r\n # ]\r\n # }\r\n # }\r\n # dsl['query'] = {\r\n # \"query_string\" : {\r\n # \"query\": search_term,\r\n # \"analyzer\": \"default\",\r\n # \"analyze_wildcard\": True\r\n # }\r\n # }\r\n dsl['query'] = {\r\n \"query_string\" : {\r\n \"query\": search_term\r\n }\r\n }\r\n else:\r\n dsl['query'] = {'bool':{}}\r\n if filter_value:\r\n maps = mapping_ref(self.es_url)\r\n path = '%s/%s' % (self.es_index, doc_type)\r\n filter_types = make_list(maps[path].get(filter_field))\r\n fld_filterable = \\\r\n len(set(['keyword','lower']).intersection(set(filter_types)))\\\r\n > 0\r\n if fld_filterable:\r\n if filter_types[0] == 'text':\r\n filter_field = \"%s.%s\" % (filter_field, filter_types[1])\r\n else:\r\n return {'error':\r\n \"Field %s is not filterable. 
Use a field that has 'keyword' or 'lower' as a mapping\" % filter_field}\r\n dsl['query']['bool']['filter'] = {\r\n \"term\": { filter_field: filter_value }\r\n }\r\n # if highlight:\r\n # dsl['highlight'] = {\"fields\": {\"bf_contribution.rdf_type\":{}}}\r\n lg.info(\"\\n-------- size: %s\\ndsl:\\n%s\", size, json.dumps(dsl,indent=4))\r\n result = es.search(index=self.es_index,\r\n size=size,\r\n from_=from_,\r\n doc_type=doc_type,\r\n body=dsl)\r\n if kwargs.get(\"calc\"):\r\n result = self._calc_result(result, kwargs['calc'])\r\n lg.debug(pp.pformat(result))\r\n return result", "def build(self) -> str:\n query = \"{Get{\"\n\n for get in self.get_builder:\n query += get.build(wrap_get=False)\n return query + \"}}\"", "def query(self):\r\n raise NotImplementedError", "def produce_query_batches(self):\n pass", "def _serialize_list(query_list, backrefs=None):\n\treturn [_serialize_model(m, backrefs) for m in query_list]", "def get_documents_with_q(self, index, query=Q(), source=None, add_index_name = False):\n \n s = Search(using=self.es, index=index)\n if source:\n s = s.source(source)\n # Dotted fields, replace . by __\n q = s.query(query)\n #print(str(q.to_dict()).replace(\"'\",'\"'))\n results = s.query(query).scan()\n \n if add_index_name:\n all_dicts = []\n for hit in results:\n result_dict = hit.to_dict()\n result_dict['_index'] = hit.meta.index\n all_dicts.append(result_dict)\n \n fa = pd.DataFrame.from_dict(all_dicts)\n else:\n fa = pd.DataFrame([hit.to_dict() for hit in results])\n \n return fa", "def document(self, connection):\n doc = xappy.UnprocessedDocument()\n for iface in providedBy(self.context):\n for field in schema.getFields(iface).values():\n if not isinstance(field, (schema.Text, schema.ASCII)):\n continue\n value = field.query(self.context)\n if value is None:\n value = u''\n if not isinstance(value, (str, unicode)):\n value = unicode(value)\n doc.fields.append(xappy.Field(field.__name__, value))\n return doc", "def all_query() -> list:\n data = []\n posts = Posts.query.all()\n for post in posts:\n x = {\n \"title\": post.title,\n \"body\": post.body,\n \"timestamp\": post.timestamp,\n \"id\": post.id,\n \"url\": make_url_from_title(post.title),\n }\n data.append(x)\n return data", "def list(self, request, *args, **kwargs):\n question_list = self.get_queryset().values_list('question', flat=True)\n queryset = Question.objects.filter(id__in=question_list).order_by('key')\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer_class()(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer_class()(queryset, many=True)\n return Response(serializer.data)", "def _create_queries(self, mode: QueryMode) -> List[Query]:\n queries: List[Query] = []\n for model in self.project.models:\n for explore in model.explores:\n if mode == \"batch\" or (mode == \"hybrid\" and not explore.queried):\n query = self._create_explore_query(explore, model.name)\n queries.append(query)\n elif mode == \"single\" or (mode == \"hybrid\" and explore.errored):\n explore_queries = self._create_dimension_queries(\n explore, model.name\n )\n queries.extend(explore_queries)\n return queries", "def get_all_shopping_list():\n all_shopping_lists = ShoppingList.query.all()\n return create_shopping_list_output(all_shopping_lists)", "def output_results(self, model, limit):\n out = []\n for key in self.results[model].keys():\n qId = key\n count = 1\n sorted_docs = sorted(self.results[model][key].items(), 
key=operator.itemgetter(1), reverse=True)[:limit] \n for val in sorted_docs:\n dId = val[0]\n dScore = val[1]\n dRank = count\n out.append(str(qId) + \" \" + \"Q0\" + \" \" + str(dId) + \" \" + str(dRank) + \" \" + str(dScore) + \" \" + \"Exp\")\n count += 1\n return out" ]
[ "0.64937806", "0.6459933", "0.60802174", "0.60519475", "0.59186524", "0.57771593", "0.5772095", "0.57147473", "0.5705186", "0.5635158", "0.5617996", "0.5612918", "0.55703384", "0.55453086", "0.5526077", "0.5504283", "0.54998153", "0.54894817", "0.5426078", "0.54242843", "0.54236805", "0.54235244", "0.53957677", "0.53924906", "0.53898674", "0.538541", "0.53734756", "0.5373336", "0.5373336", "0.5373336", "0.5373336", "0.5348192", "0.53404886", "0.5323995", "0.5321392", "0.5312072", "0.5301207", "0.5298224", "0.52946997", "0.52945364", "0.5285112", "0.52843523", "0.5263651", "0.5261771", "0.5258686", "0.52470005", "0.5245313", "0.524216", "0.5239625", "0.5238579", "0.5238579", "0.5238579", "0.52319765", "0.5230035", "0.5224752", "0.5223668", "0.52130216", "0.52110183", "0.5210981", "0.52030516", "0.52030516", "0.52021253", "0.51968485", "0.5182732", "0.51807964", "0.5178374", "0.5164401", "0.5162726", "0.5161121", "0.51594377", "0.5145691", "0.51450235", "0.5144578", "0.51403373", "0.5133729", "0.5131156", "0.5113975", "0.5111951", "0.5101022", "0.5092875", "0.50911903", "0.5074863", "0.5070183", "0.5068955", "0.50684106", "0.5067375", "0.50643057", "0.505592", "0.5055607", "0.50451195", "0.5043324", "0.5042064", "0.50410146", "0.5035105", "0.50312054", "0.5020552", "0.5010413", "0.50065315", "0.50030893", "0.49985036" ]
0.63552195
2
Fit the model from data in X.
def fit(self, X, y=None): if self.n_rows is None: n_rows = X.shape[0] else: n_rows = self.n_rows self.shape_ = n_rows, X.shape[1] self.scaler_ = MinMaxScaler().fit(X) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, X):\n raise NotImplementedError", "def fit(self, X):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X,y):\n pass", "def fit(self, X):\n self._fit_X = X", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X):\n\n return self._fit(X)", "def fit(self, X, Y):\n ...", "def fit(self, x):\n pass", "def fit(self, X):\n \n self._fit(X)\n return self", "def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)", "def fit(self, x):\n raise NotImplementedError()", "def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()", "def fit(self, X, y):\n self.model_x = X\n self.model_y = y", "def fit(self, X, y):\n self.X_data = X\n self.y = y", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def fit(self, X):\n self.fit_transform(X)\n return self", "def fit(self, X):\n self.fit_transform(X)\n return self", "def fit(self, X):\n raise NotImplementedError('Abstract method \"fit\" must be '\n 'specialised!')", "def fit(self, X, y, **fit_params):\n ...", "def fit(self, X, y=None):\n #import pdb\n #pdb.set_trace()\n return self.partial_fit(X, y)", "def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True", "def fit(self, X, y=None, **kwargs):\n raise NotImplementedError()", "def fit(self, X):\n self.rel_to_idx, self.ent_to_idx = create_mappings(X)\n self.is_fitted = True", "def fit(self, X, y=None):\n return self", "def fit(self, X):\n self._reset_state()\n # total number of samples\n total_sample_num = X.shape[0]\n # train in an online form\n for i in range(total_sample_num):\n # input sample\n x = X[i, :]\n self.input_signal(x)\n # self.labels_ = self.__label_samples(X)\n self.__classify()\n # plt.show()\n return self", "def fit(self, X, y=None):\n \n if not self.variables:\n self.X = X.copy()\n self.variables = [x for x in X.columns]\n else:\n self.X = X[self.variables].copy()\n \n self.input_shape_ = X.shape \n \n return self", "def fit(self, X, y):\n\n return self", "def fit(self, X):\n X_ = list(X)\n n = len(X_)\n\n W = [np.zeros((X_[i].shape[1], self.K)) for i in range(n)]\n T = [np.zeros((X_[i].shape[0], self.K)) for i in range(n)]\n P = [np.zeros((X_[i].shape[1], self.K)) for i in range(n)]\n\n # Find model\n func_vals = []\n for k in range(self.K):\n w, t, p, func_val = self._fit(X_)\n\n # Deflate for next component, but do not deflate for last component\n for i in range(n):\n W[i][:, k] = w[i].ravel()\n T[i][:, k] = t[i].ravel()\n P[i][:, k] = p[i].ravel()\n\n X_[i] = X_[i] - np.dot(t[i], p[i].T) # Deflate\n\n func_vals.append(func_val)\n\n self.func_val = func_vals\n self.W = W\n self.T = T\n self.P = P\n\n return self", "def fit(self, X, Y, **kwargs):\n raise NotImplementedError", "def fit(self, X, y=None):\n return self", "def fit(self, X, y=None):\n return self", "def fit(self, X, y=None):\n return self", "def fit(self, X, y=None):\n return self", "def fit(self, X, y=None):\n return self", "def fit(self, X, y=None):\n return self", 
"def fit(self, X, y=None):\n return self", "def fit(self, X, y=None):\n return self", "def fit ( self, X: np.ndarray, y: np.ndarray ):\n \n self.X = X\n self.y = y", "def fit(self, X, Y, **fit_params):\n ...", "def fit(self, X, Y, **fit_params):\n ...", "def fit(self, X, y=None):\n\n return self", "def fit(self, X, y=None):\n\n return self", "def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))", "def fit(self, X, y=None):\n if not self.variables:\n self.variables = [var for var in X.columns]\n\n self.input_shape_ = X.shape\n \n return self", "def fit(self, X, y):\n\n # retain columns incase encoding occurs\n self.fit_X_columns = X.columns.tolist()\n\n # generate the imputation datasets from multiple imputation\n # then fit the analysis models on each of the imputed datasets\n self.models_ = self._apply_models_to_mi_data(\n self.linear_models, X, y\n )\n\n # generate the fit statistics from each of the m models\n self.statistics_ = self._get_stats_from_models(self.models_)\n\n # still return an instance of the class\n return self", "def fit(self, X, y=None):\n self._validate_X(X)\n return self", "def fit(self, Y):\n raise NotImplementedError", "def fit(self, X, Y=None):\n return self", "def fit(self, X, y, verbose=None):\n self.__cls.fit(X, y)", "def fit(self, X, **params):\n self.fit_transform(X, **params)\n return self", "def fit(self, x):\n x = np.asarray(x)\n _ = self.fit_transform(x)", "def fit(self, X, y=..., **fit_params):\n ...", "def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass", "def fit(self, X, y=None):\n self.fit_transform(X)\n return self", "def fit(self, X, y=None):\n self.fit_transform(X)\n return self", "def fit(self, X, y=None, **fit_params):\n return self", "def fit(self, X, y=None):\n X = self._validate_data(X, ensure_min_samples=2)\n return self._fit(X)", "def fit(self,X,y):\n self.X = X\n self.y = y\n return self", "def fit(self, X, y):\n self.X_train = X\n self.y_train = y", "def fit(self, X, y=None):\n self.X = X\n self.y = y\n return self", "def fit(self, X, y=None):\n self.X = X\n self.y = y\n return self", "def fit(self, X, y=None):\n self.X = X\n self.y = y\n return self", "def fit(self,\n X, # variable names chosen to correspond to sklearn when possible\n y=None, # y is the confound variables here, not the target!\n ):\n\n return self._fit(X, y) # which itself must return self", "def fit(self,\n X, # variable names chosen to correspond to sklearn when possible\n y=None, # y is the confound variables here, not the target!\n ):\n\n return self._fit(X, y) # which itself must return self", "def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)", "def fit(self, X, y=None, **fit_params):\n return self", "def fit(self, X, y):\n \n # Create a copy of X that has a column for the intercept if the user\n # wants one.\n X_copy = self._add_intercept(X)\n \n # Fit the model coefficients using SVD.\n self._fit_svd(X_copy, y, alpha=0.0)\n \n # Calculate model statistics.\n self._calculate_model_stats_ols(X, y)\n \n return", "def fit(self, X=None, y=None):\n self.X = X\n self.y = y\n return self", "def fit(self, X=None, y=None):\n self.X = X\n self.y = y\n return self", "def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n 
self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model", "def fit(self, X, y=None):\n self._fit_transform(X, compute_sources=False)\n return self", "def fit(self, X, y=None):\n # X is the featurized calls dataframe\n self.X = X\n self.y = y\n return self", "def fit(self, x, y):\n raise NotImplementedError('Subclass of LinearModel must implement fit method.')", "def fit(self,X,y=None,**fit_params):\n return self", "def fit(self, X, y):\n # Code to fit the model.\n\n train_stuff = self._vectorizer.fit_transform(X, y)\n\n self._classifier.fit(train_stuff, y = y)\n\n\n return self", "def fit(self, X, y=None): # noqa: E501\n return self", "def fit(self, X, y=None):\n X = infer_feature_types(X)\n self._check_input_for_columns(X)\n return self", "def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True", "def _fit(self, X):\n X = self._check_array(X)\n n_samples, n_features = X.shape\n loadings = np.zeros((n_samples, n_features), dtype=np.double, order='F')\n components = np.copy(loadings)\n E = np.copy(loadings)\n s1, s2 = self.sampling\n self.rank_ = self._pyrosl(X, loadings, components, E, n_samples, n_features, self.rank, self.reg, self.tol, self.iters, self._mode, s1, s2, self.verbose)\n \n self.components_ = components[:self.rank_]\n return loadings, components, E", "def fit(self, X, y=None):\n\n # Reset internal state before fitting\n self._reset()\n return self.partial_fit(X, y)", "def fit(self, X, y=None):\n #X = check_array(X, accept_sparse='csr')\n return self", "def train(self, X, y):\n self.model.fit(X, y)", "def fit(self, data):\n return self", "def fit(self, X, y=None):\n # X is the calls dataframe\n self.X = X\n self.y = y\n return self", "def fit(self, X, y=None):\n self.fit_transform(X, y)\n return self", "def fit(self, X):\n self._initialize(X)\n\n for i in range(self.max_iter):\n self._expect(X)\n self._maximize(X)\n return self", "def fit(self,X,Y):\n return self._fit(X,Y)", "def fit(self, X, y=None):\n # assume a pandas.DataFrame compatible object\n out = self.act_on(X)\n self.learn_types(X, out)\n return self", "def fit(self, X, y):\n return self # as required by sklearn." ]
[ "0.8456153", "0.84106666", "0.8192276", "0.8192276", "0.8192276", "0.8108174", "0.8097223", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.8097219", "0.808293", "0.8073022", "0.8018419", "0.79754156", "0.7928442", "0.7916275", "0.7837137", "0.7835977", "0.7828352", "0.77976125", "0.77610207", "0.77610207", "0.7760727", "0.7694598", "0.7691961", "0.7663735", "0.76532793", "0.7652715", "0.76465887", "0.7637167", "0.7615855", "0.75933063", "0.75849766", "0.7551196", "0.75451267", "0.75451267", "0.75451267", "0.75451267", "0.75451267", "0.75451267", "0.75451267", "0.75451267", "0.75246", "0.7520147", "0.7520147", "0.7505626", "0.7505626", "0.75038064", "0.74926686", "0.7486904", "0.74754107", "0.74708277", "0.7465757", "0.74654835", "0.7454896", "0.7449779", "0.744782", "0.74427754", "0.7441496", "0.7441496", "0.73722076", "0.7361839", "0.7356338", "0.73546976", "0.7339037", "0.7339037", "0.7339037", "0.73156697", "0.73156697", "0.7300954", "0.7298215", "0.7292899", "0.72907907", "0.72907907", "0.7287198", "0.7286529", "0.72813404", "0.72811085", "0.7274516", "0.7261294", "0.7248744", "0.7228035", "0.7224922", "0.72221214", "0.72202015", "0.72077847", "0.7189995", "0.7188748", "0.71848774", "0.71839166", "0.7181946", "0.717856", "0.71750593", "0.7174252" ]
0.0
-1
Fit the model from data in X. PCA is fit to estimate the rotation and UniformSampler is fit to transformed data.
def fit(self, X, y=None): self.pca_ = self._make_pca() transformed = self.pca_.fit_transform(X) self.sampler_ = UniformSampler(n_rows=self.n_rows).fit(transformed) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_transform(self, X, y=None):\n X = np.asfortranarray(X, dtype=np.float64)\n Q = np.empty(\n (self.n_components, X.shape[1]), dtype=np.float64, order='F')\n U = np.empty(\n (X.shape[0], self.n_components), dtype=np.float64, order='F')\n w = np.empty(self.n_components, dtype=np.float64)\n explained_variance = np.empty(self.n_components, dtype=np.float64)\n explained_variance_ratio = np.empty(self.n_components, dtype=np.float64)\n mean = np.empty(X.shape[1], dtype=np.float64)\n param = parameters()\n param.X_m = X.shape[0]\n param.X_n = X.shape[1]\n param.k = self.n_components\n param.whiten = self.whiten\n\n lib = self._load_lib()\n lib.pca(\n _as_fptr(X), _as_fptr(Q), _as_fptr(w), _as_fptr(U),\n _as_fptr(explained_variance), _as_fptr(explained_variance_ratio),\n _as_fptr(mean), param)\n\n self._w = w\n self._U, self._Q = svd_flip(U, Q) # TODO Port to cuda?\n self._X = X\n n = X.shape[0]\n # To match sci-kit #TODO Port to cuda?\n self.explained_variance = self.singular_values_**2 / (n - 1)\n self.explained_variance_ratio = explained_variance_ratio\n self.mean_ = mean\n\n # TODO noise_variance_ calculation\n # can be done inside lib.pca if a bottleneck\n n_samples, n_features = X.shape\n total_var = np.var(X, ddof=1, axis=0)\n if self.n_components_ < min(n_features, n_samples):\n self.noise_variance_ = \\\n (total_var.sum() - self.explained_variance_.sum())\n self.noise_variance_ /= \\\n min(n_features, n_samples) - self.n_components\n else:\n self.noise_variance_ = 0.\n\n X_transformed = U * w\n return X_transformed", "def _fit(self, X):\n # Raise an error for sparse input.\n # This is more informative than the generic one raised by check_array.\n if issparse(X):\n raise TypeError('PCA does not support sparse input. See '\n 'TruncatedSVD for a possible alternative.')\n\n X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True,\n copy=self.copy)\n\n # Handle n_components==None\n if self.n_components is None:\n if self.svd_solver != 'arpack':\n n_components = min(X.shape)\n else:\n n_components = min(X.shape) - 1\n else:\n n_components = self.n_components\n\n # Handle svd_solver\n self._fit_svd_solver = self.svd_solver\n if self._fit_svd_solver == 'auto':\n # Small problem or n_components == 'mle', just call full PCA\n if (max(X.shape) <= 500\n or n_components == 'mle'\n or n_components == 'latent_root'):\n self._fit_svd_solver = 'full'\n elif n_components >= 1 and n_components < .8 * min(X.shape):\n self._fit_svd_solver = 'randomized'\n # This is also the case of n_components in (0,1)\n else:\n self._fit_svd_solver = 'full'\n\n # Call different fits for either full or truncated SVD\n if self._fit_svd_solver == 'full':\n U, S , V = self._fit_full(X, n_components)\n elif self._fit_svd_solver in ['arpack', 'randomized']:\n U, S, V = self._fit_truncated(X, n_components, self._fit_svd_solver)\n else:\n raise ValueError(\"Unrecognized svd_solver='{0}'\"\n \"\".format(self._fit_svd_solver))\n\n # implmentation of varimax rotation\n if self.rotation == 'varimax':\n if self.n_samples_ > self.n_components_:\n\n factor_matrix = (\n self.components_.T\n * (self.explained_variance_.reshape(1, -1) ** (1/2))\n )\n\n rot_factor_matrix = self._varimax(pd.DataFrame(factor_matrix))\n\n self.explained_variance_ = (rot_factor_matrix ** 2).sum(axis=0)\n\n self.components_ = (\n rot_factor_matrix\n / (self.explained_variance_.reshape(1, -1) ** (1/2))\n ).T\n\n # sort components by explained variance in descanding order\n self.components_ = self.components_[\n 
np.argsort(self.explained_variance_)[::-1], :\n ]\n\n self.explained_variance_ = np.sort(\n self.explained_variance_\n )[::-1]\n\n total_var = self.n_features_\n self.explained_variance_ratio_ = (\n self.explained_variance_ / total_var\n )\n\n self.singular_values_ = None\n\n if self._fit_svd_solver == 'full':\n if self.n_components_ < min(self.n_features_, self.n_samples_):\n self.noise_variance_ = (\n (total_var - self.explained_variance_.sum())\n / (self.n_features_ - self.n_components_)\n )\n else:\n self.noise_variance_ = 0.\n\n elif self._fit_svd_solver in ['arpack', 'randomized']:\n if self.n_components_ < min(self.n_features_, self.n_samples_):\n\n total_var = np.var(X, ddof=1, axis=0)\n\n self.noise_variance_ = (\n total_var.sum() - self.explained_variance_.sum()\n )\n\n self.noise_variance_ /= (\n min(self.n_features_, self.n_samples_)\n - self.n_components_\n )\n else:\n self.noise_variance_ = 0.\n\n else:\n raise ValueError(\"Unrecognized svd_solver='{0}'\"\n \"\".format(self._fit_svd_solver))\n else:\n raise ValueError(\n \"Varimax rotation requires n_samples > n_components\")\n\n U, S, V = None, None, None\n\n # implmentation of communalties\n self.communalities_ = (self.components_ ** 2).sum(axis=0)\n\n return U, S, V", "def fit_transform(self, X):\n self.fit(X)\n return self.transform(X)", "def fit_transform(self, X):\n self.fit(X)\n return self.transform(X)", "def fit_transform(self, X):\n self.fit(X)\n return self.transform(X)", "def fit_transform ( self, X ):\n self.fit ( X )\n return self.transform ( X )\n # End fit_transform()", "def fit_transform(self, x):\n self.fit(x)\n\n if self.method == \"svd\":\n return self._u * self._s\n else:\n return self._transform_eig(x)", "def fit_transform(self, X, y=...):\n ...", "def fit(self, X):\n self.fit_transform(X)\n return self", "def fit(self, X):\n self.fit_transform(X)\n return self", "def _fit(self, X, _transform=False):\n\n n_cols = X.shape[1]\n\n data = DistributedDataHandler.create(data=X, client=self.client)\n self.datatype = data.datatype\n\n if \"svd_solver\" in self.kwargs and self.kwargs[\"svd_solver\"] == \"tsqr\":\n comms = Comms(comms_p2p=True)\n else:\n comms = Comms(comms_p2p=False)\n\n comms.init(workers=data.workers)\n\n data.calculate_parts_to_sizes(comms)\n\n worker_info = comms.worker_info(comms.worker_addresses)\n parts_to_sizes, _ = parts_to_ranks(\n self.client, worker_info, data.gpu_futures\n )\n\n total_rows = data.total_rows\n\n models = dict(\n [\n (\n data.worker_info[wf[0]][\"rank\"],\n self.client.submit(\n self._create_model,\n comms.sessionId,\n self._model_func,\n self.datatype,\n **self.kwargs,\n pure=False,\n workers=[wf[0]],\n ),\n )\n for idx, wf in enumerate(data.worker_to_parts.items())\n ]\n )\n\n pca_fit = dict(\n [\n (\n wf[0],\n self.client.submit(\n DecompositionSyncFitMixin._func_fit,\n models[data.worker_info[wf[0]][\"rank\"]],\n wf[1],\n total_rows,\n n_cols,\n parts_to_sizes,\n data.worker_info[wf[0]][\"rank\"],\n _transform,\n pure=False,\n workers=[wf[0]],\n ),\n )\n for idx, wf in enumerate(data.worker_to_parts.items())\n ]\n )\n\n wait(list(pca_fit.values()))\n raise_exception_from_futures(list(pca_fit.values()))\n\n comms.destroy()\n\n self._set_internal_model(list(models.values())[0])\n\n if _transform:\n out_futures = flatten_grouped_results(\n self.client, data.gpu_futures, pca_fit\n )\n return to_output(out_futures, self.datatype)\n\n return self", "def fit_transform(self, X):\n \n loadings, components, error = self._fit(X)\n loadings = loadings[:, :self.rank_]\n \n 
return loadings", "def fit_transform(self, X, y=None):\n self.fit(X)\n return self.transform(X)", "def fit_transform(self, X, y=None):\n return self._fit_transform(X, compute_sources=True)", "def fit_transform(self, X):\n self._method_calling = 2\n self.fit(X)\n\n # Transform - calculate kernel matrix\n # Output is always normalized\n return self._calculate_kernel_matrix()", "def fit(self, X, **params):\n self.fit_transform(X, **params)\n return self", "def fit_transform(self, X, y=None):\n\n U, S, V = self._fit(X)\n X_transformed = self.transform(X)\n\n return X_transformed", "def fit_transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n \n self.fit(X)\n return self.transform(X)", "def fit(self, X):\n self._fit_X = X", "def fit_transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n self.fit(X)\n return self.transform(X)", "def fit(self, X, y):\n self.model = Pipeline([\n ('scalar', RobustScaler()),\n ('classifier', SVC(probability=True, gamma='scale')),\n ])\n self.model.fit(X, y)", "def fit_transform(self, X):\n self.fit(X)\n return self.doc_topic_distr, self.xai", "def fit_transform(\n self,\n X: FEATURES | None = None,\n y: TARGET | None = None,\n **fit_params,\n ) -> PANDAS | tuple[DATAFRAME, PANDAS]:\n return self.fit(X, y, **fit_params).transform(X, y)", "def fit_transform(self, X, y=None):\n return self.fit(X).transform(X)", "def fit_transform(self, X, y=None):\n return self.fit(X).transform(X)", "def fit_transform(self, X, y=None, **kwargs):\n self.fit(X, y=y, **kwargs)\n return self.transform(X)", "def transform(self, X):\r\n check_is_fitted(self)\r\n\r\n X = self._check_test_data(X)\r\n return self._transform(X)", "def fit(self, X, seed=None):\n X = np.copy(X)\n\n if self.featurewise_center:\n self.mean = np.mean(X, axis=0)\n X -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(X, axis=0)\n X /= self.std\n\n if self.zca_whitening:\n flatX = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]))\n fudge = 10e-6\n sigma = np.dot(flatX.T, flatX) / flatX.shape[1]\n U, S, V = linalg.svd(sigma)\n self.principal_components = np.dot(np.dot(U, np.diag(1. 
/ np.sqrt(S + fudge))), U.T)", "def fit_transform(self, X, y=None):\n\n\t\t# check on state of X and cols\n\t\tX, self.cols = validate_is_pd(X, self.cols, assert_all_finite=True)\n\t\t_validate_cols(self.cols)\n\n\t\t## Generate correlation matrix\n\t\tc = X[self.cols or X.columns].corr(method=self.method).apply(lambda x: np.abs(x))\n\n\t\t## get drops list\n\t\td, mac, crz = filter_collinearity(c, self.threshold)\n\t\tself.drop = d if d else None\n\t\tself.mean_abs_correlations_ = mac if mac else None\n\t\tself.correlations_ = crz if crz else None\n\n\t\t# if drop is None, we need to just return X\n\t\tif not self.drop:\n\t\t\treturn X if self.as_df else X.as_matrix()\n\n\t\tdropped = X.drop(self.drop, axis=1)\n\t\treturn dropped if self.as_df else dropped.as_matrix()", "def transform(self, X):\n attrs = [v for v in vars(self)\n if (v.endswith(\"_\") or v.startswith(\"_\"))\n and not v.startswith(\"__\")]\n check_is_fitted(self, attributes=attrs,\n all_or_any=all)\n\n X = check_array(X)\n if self.mean_ is not None:\n X = X - self.mean_\n\n if self.feature_selection == 'all':\n X_transformed = np.dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n\n else:\n X_transformed = X[:, self._get_support_mask()]\n\n return X_transformed", "def fit_transform(self, X, y=None):\n\n X = self._prepare(X)\n\n self.fit(X, y)\n return self.transform(X, y)", "def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)", "def transform(self, X):\n\n t0 = time.perf_counter()\n check_is_fitted(self)\n self.check_external_components_modified()#[WARN] in d3m, primitives can \"restore\" private class variables...\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n t1 = time.perf_counter()\n\n if X.shape[1] != self.components_af_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. 
\"\n \"(%s != %s)\" % (X.shape[1], self.components_af_.shape[1])\n )\n\n #X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n #import pdb; pdb.set_trace()\n X_af = af.interop.from_ndarray(X).as_type(self.components_af_.dtype())\n X_new = af.matmulNT(X_af, self.components_af_)\n X_new = X_new.to_ndarray()\n t2 = time.perf_counter()\n return X_new", "def fit_transform(self, x):\n self.fit(x)\n return self.transform(x)", "def _fit(self, X):\n X = self._check_array(X)\n n_samples, n_features = X.shape\n loadings = np.zeros((n_samples, n_features), dtype=np.double, order='F')\n components = np.copy(loadings)\n E = np.copy(loadings)\n s1, s2 = self.sampling\n self.rank_ = self._pyrosl(X, loadings, components, E, n_samples, n_features, self.rank, self.reg, self.tol, self.iters, self._mode, s1, s2, self.verbose)\n \n self.components_ = components[:self.rank_]\n return loadings, components, E", "def fit(self, X):\n raise NotImplementedError", "def fit(self, X, y=None):\n # self.V_groups = self._find_Vgroups(X) # if you don't trust hardcoded V group\n train = X[self.V_features].copy()\n train[np.isnan(train)] = -1\n\n for group in self.V_groups:\n self.reducer_groups.append(self._pca(train[group]))\n\n return self", "def fit(self, X):\n mean_vec = X.mean(axis=0)\n cov = (X - mean_vec).T.dot((X - mean_vec)) / (X.shape[0]-1)\n eigenvalues, eigenvectors = np.linalg.eig(cov)\n idx = np.abs(eigenvalues)\n self.best_eigenvalues = idx[:self.n_components]\n self.best_eigenvalues = eigenvectors[:, :self.n_components]\n eigenvectors = np.atleast_1d(eigenvectors)[:, :self.n_components]", "def fit(self, X):\n self.train_shape = X.shape\n\n sample_idx = {}\n for i in range(2):\n dim_size = min(X.shape[i], self.size)\n sample_idx[i] = permutation(X.shape[i])[:dim_size]\n\n sample = X[ix_(sample_idx[0], sample_idx[1])]\n\n self.sample_idx_ = sample_idx\n self.sample_ = sample\n\n return self", "def fit(self, X):\n if isinstance(X, np.ndarray) and X.ndim == 2:\n X = [X]\n self.mean_ = np.concatenate(X).mean(axis=0, keepdims=True)\n X_stan = [Xi - self.mean_ for Xi in X]\n uX, sX, vhX = np.linalg.svd(np.concatenate(X_stan), full_matrices=False)\n whiten = vhX.T @ np.diag(1. 
/ sX)\n Xw = [X_stani @ whiten for X_stani in X_stan]\n Xp = [np.diff(Xwi, axis=0) for Xwi in Xw]\n up, sp, vhp = np.linalg.svd(np.concatenate(Xp), full_matrices=False)\n proj = vhp.T\n self.all_coef_ = whiten @ proj[:, ::-1]\n self.all_coef_ /= np.linalg.norm(self.all_coef_, axis=0, keepdims=True)\n self.coef_ = self.all_coef_[:, :self.n_components]\n return self", "def fit_transform(self, X, y=None):\n return self.fit(X, y=y).transform(X)", "def transform(self, X):\n check_is_fitted(self, 'pca_')\n # check on state of X and cols\n X, _ = validate_is_pd(X, self.cols)\n cols = _cols_if_none(X, self.cols)\n\n other_nms = [nm for nm in X.columns if nm not in cols]\n transform = self.pca_.transform(X[cols].as_matrix())\n\n # do weighting if necessary\n if self.weight:\n # get the weight vals\n weights = self.pca_.explained_variance_ratio_\n weights -= np.median(weights)\n weights += 1\n\n # now add to the transformed features\n transform *= weights\n\n left = pd.DataFrame.from_records(data=transform,\n columns=[('PC%i' % (i + 1)) for i in range(transform.shape[1])])\n\n # concat if needed\n x = pd.concat([left, X[other_nms]], axis=1) if other_nms else left\n return x if self.as_df else x.as_matrix()", "def fit(self, X, y=None):\n self._fit_transform(X, compute_sources=False)\n return self", "def fit_transform(self, X, y):\n\n self.fit(X, y)\n return self.transform()", "def fit_transform(self, X, return_df=False):\n\n self._fit(X)\n return self._transform(X, return_df)", "def fit(self, X):\n self.X = X\n self.n_majority_samples, self.n_features = self.X.shape\n\n return self", "def fit_transform(self, data):\n self.fit(data)\n return self.transform(data)", "def fit(self, X):\n\n return self._fit(X)", "def fit_transform(self, X, y=None):\n out = self.transform(X)\n self.learn_types(X, out)\n return self.transform(X)", "def fit_transform(self, X, y=None):\n if isinstance(X, pd.DataFrame):\n self.input_feature_names = list(X.columns.values)\n else:\n self.input_feature_names = range(X.shape[1])\n\n try:\n X_t = self._component_obj.fit_transform(X, y)\n except AttributeError:\n raise RuntimeError(\"Transformer requires a fit_transform method or a component_obj that implements fit_transform\")\n if not isinstance(X_t, pd.DataFrame) and isinstance(X, pd.DataFrame):\n X_dtypes = X.dtypes.to_dict()\n selected_col_names = self.get_names()\n col_types = {key: X_dtypes[key] for key in selected_col_names}\n return pd.DataFrame(X_t, columns=selected_col_names, index=X.index).astype(col_types)\n else:\n return pd.DataFrame(X_t)", "def fit_transform(self, X):\n self._fit(X)\n return self.embedding", "def fit(self, X, y=None):\n self.fit_transform(X)\n return self", "def fit(self, X, y=None):\n self.fit_transform(X)\n return self", "def fit(self, X):\n \n self._fit(X)\n return self", "def fit(self, X, Y):\n X = X.clone().detach()\n Y = Y.clone().detach()\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n n = X.size()[0]\n p = X.size()[1]\n q = Y.size()[1]\n\n # if self.n_components < 1 or self.n_components > p:\n # raise ValueError('Invalid number of components: %d' %\n # self.n_components)\n # if self.algorithm not in (\"svd\", \"nipals\"):\n # raise ValueError(\"Got algorithm %s when only 'svd' \"\n # \"and 'nipals' are known\" % self.algorithm)\n # if self.algorithm == \"svd\" and self.mode == \"B\":\n # raise ValueError('Incompatible configuration: mode B is not '\n # 'implemented with svd algorithm')\n # if self.deflation_mode not in [\"canonical\", \"regression\"]:\n # raise ValueError('The deflation mode is 
unknown')\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # Residuals (deflated) matrices\n Xk = X\n Yk = Y\n\n # Results matrices\n self.x_scores_ = torch.zeros((n, self.n_components))\n self.y_scores_ = torch.zeros((n, self.n_components))\n self.x_weights_ = torch.zeros((p, self.n_components))\n self.y_weights_ = torch.zeros((q, self.n_components))\n self.x_loadings_ = torch.zeros((p, self.n_components))\n self.y_loadings_ = torch.zeros((q, self.n_components))\n self.n_iter_ = []\n\n # NIPALS algo: outer loop, over components\n Y_eps = torch.finfo(Yk.dtype).eps\n\n for k in range(self.n_components):\n\n if torch.all(torch.mm(Yk.T, Yk) < torch.finfo(torch.float).eps):\n # Yk constant\n warnings.warn('Y residual constant at iteration %s' % k)\n break\n # 1) weights estimation (inner loop)\n # -----------------------------------\n if self.algorithm == \"nipals\":\n # Replace columns that are all close to zero with zeros\n Yk_mask = torch.all(torch.abs(Yk) < 10 * Y_eps, axis=0)\n Yk[:, Yk_mask] = 0.0\n\n x_weights, y_weights, n_iter_ = \\\n _nipals_twoblocks_inner_loop(\n X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,\n tol=self.tol, norm_y_weights=self.norm_y_weights)\n self.n_iter_.append(n_iter_)\n\n elif self.algorithm == \"svd\":\n x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)\n # Forces sign stability of x_weights and y_weights\n # Sign undeterminacy issue from svd if algorithm == \"svd\"\n # and from platform dependent computation if algorithm == 'nipals'\n\n x_weights, y_weights = svd_flip(x_weights, y_weights.T)\n y_weights = y_weights.T\n # columns of u, rows of v\n\n # compute scores\n x_scores = torch.mv(Xk, x_weights)\n\n if self.norm_y_weights:\n y_ss = 1\n else:\n y_ss = torch.dot(y_weights.T, y_weights)\n\n y_scores = torch.mv(Yk, y_weights) / y_ss\n\n # test for null variance\n if torch.dot(x_scores.T, x_scores) < torch.finfo(torch.double).eps:\n warnings.warn('X scores are null at iteration %s' % k)\n break\n # 2) Deflation (in place)\n # ----------------------\n #\n # - regress Xk's on x_score\n\n x_loadings = torch.mv(Xk.T, x_scores) / torch.dot(x_scores.T, x_scores)\n\n # - subtract rank-one approximations to obtain remainder matrix\n\n Xk -= x_scores[:, None] * x_loadings.T\n\n if self.deflation_mode == \"canonical\":\n # - regress Yk's on y_score, then subtract rank-one approx.\n y_loadings = (torch.mv(Yk.T, y_scores)\n / torch.dot(y_scores.T, y_scores))\n Yk -= y_scores[:, None] * y_loadings.T\n if self.deflation_mode == \"regression\":\n # - regress Yk's on x_score, then subtract rank-one approx.\n y_loadings = (torch.mv(Yk.T, x_scores)\n / torch.dot(x_scores.T, x_scores))\n Yk -= x_scores[:, None] * y_loadings.T\n # 3) Store weights, scores and loadings # Notation:\n\n self.x_scores_[:, k] = x_scores.view(-1) # T\n self.y_scores_[:, k] = y_scores.view(-1) # U\n self.x_weights_[:, k] = x_weights.view(-1) # W\n self.y_weights_[:, k] = y_weights.view(-1) # C\n self.x_loadings_[:, k] = x_loadings.view(-1) # P\n self.y_loadings_[:, k] = y_loadings.view(-1) # Q\n\n # Such that: X = TP' + Err and Y = UQ' + Err\n\n # 4) rotations from input space to transformed space (scores)\n # T = X W(P'W)^-1 = XW* (W* : p x k matrix)\n # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)\n self.x_rotations_ = torch.mm(\n self.x_weights_,\n torch.pinverse(torch.mm(self.x_loadings_.T, self.x_weights_)))\n if Y.size()[1] > 1:\n self.y_rotations_ = torch.mm(\n self.y_weights_,\n 
torch.pinverse(torch.mm(self.y_loadings_.T, self.y_weights_)))\n else:\n self.y_rotations_ = torch.ones(1)\n\n if True or self.deflation_mode == \"regression\":\n # Estimate regression coefficient\n # Regress Y on T\n # Y = TQ' + Err,\n # Then express in function of X\n # Y = X W(P'W)^-1Q' + Err = XB + Err\n # => B = W*Q' (p x q)\n\n self.coef_ = torch.mm(self.x_rotations_, self.y_loadings_.T)\n self.coef_ = self.coef_\n self.y_std_ = self.y_std_\n # self.coef_ = torch.mv(self.coef_, self.y_std_)\n self.coef_ = self.coef_[:, None] * self.y_std_\n self.coef_ = self.coef_[:,0,:]\n\n return self", "def fit_transform(\n self, X: Sequence, y: Optional[Sequence] = None\n ) -> np.ndarray:\n self.fit(X)\n # assert(self.fitted_ is True)\n self.X_ = check_array(X, dtype=np.float64)\n return self.transform(X, y)", "def fit_transform(self, X, a, y):\n self.fit(X, a, y)\n return self.transform(X, a, y)", "def fit_transform(self, X: TransformType, y=None, **fit_params) -> TransformType:\n X = self._validate_datafold_data(\n X,\n array_kwargs=dict(ensure_min_samples=max(2, self.n_kernel_eigenvectors)),\n tsc_kwargs=dict(ensure_min_samples=max(2, self.n_kernel_eigenvectors)),\n )\n self.fit(X=X, y=y, **fit_params)\n\n return self._jointly_smooth_functions_", "def fit(self, X):\r\n\t\tself.data = check_array(X)\r\n\t\tn_var = self.data.shape[1]\r\n\r\n\t\tU = np.arange(n_var)\r\n\t\tK = []\r\n\t\tX_ = np.copy(X)\r\n\t\tfor _ in range(0, n_var):\r\n\t\t\tcu_i = self._search_exogenous_x(X_, U)\r\n\t\t\tfor i in U:\r\n\t\t\t\tif i != cu_i:\r\n\t\t\t\t\tX_[:, i] = self._residual( X_[:, i], X_[:, cu_i] )\r\n\t\t\tK.append(cu_i)\r\n\t\t\tU = U[U != cu_i]\r\n\r\n\t\tself._causal_order = K\r\n\t\tself._estimate_adjacency_matrix(X)", "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)", "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)", "def fit(self, x):\n x = np.asarray(x)\n _ = self.fit_transform(x)", "def fit(self, X):", "def fit(self, X, random_state=None):\n # TODO: Use np.linalg.svd or scipy.sparse.linalg.svds when possible!\n\n if isinstance(X, list):\n X = X[0]\n\n P = []\n T = []\n for k in range(self.K):\n\n p, t = self._compute_component(X)\n\n P.append(p)\n T.append(t)\n\n if k < self.K:\n X = self._deflate(X, p, t)\n\n self.P = np.hstack(P)\n self.T = np.hstack(T)\n\n return self", "def fit_transform(self, X, n_components=None):\n if n_components is None:\n n_components = self.n_components\n self.fit(X)\n return self.transform(X, n_components=n_components)", "def fit(self,X,y):\n\n d = X.shape[1]\n # 1. 
sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True", "def fit(self, X):\n\t\tself._snapshots, self._snapshots_shape = self._col_major_2darray(X)\n\n\t\tcompressed_snapshots = self._compress_snapshots()\n\n\t\tn_samples = compressed_snapshots.shape[1]\n\t\tX = compressed_snapshots[:, :-1]\n\t\tY = compressed_snapshots[:, 1:]\n\n\t\tX, Y = self._compute_tlsq(X, Y, self.tlsq_rank)\n\n\t\tU, s, V = self._compute_svd(X, self.svd_rank)\n\n\t\tself._Atilde = self._build_lowrank_op(U, s, V, Y)\n\n\t\t# No projected modes for cdmd\n\t\tself._eigs, self._modes = self._eig_from_lowrank_op(\n\t\t\tself._Atilde, self._snapshots[:, 1:], U, s, V, True\n\t\t)\n\n\t\tself._b = self._compute_amplitudes(\n\t\t\tself._modes, self._snapshots, self._eigs, self.opt\n\t\t)\n\n\t\t# Default timesteps\n\t\tself.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n\t\tself.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n\n\t\treturn self", "def fit_transform(self, data, target):\n\n self.fit(data, target) # Fit data\n return self.transform(data) # Perform feature selection", "def fit_transform(self, X, y=None):\n\n return self.fit(X,y).transform(X,y)", "def fit_transform(self, data):\n p = self._get_pipeline(self._params)\n return p.fit_transform(data)", "def fit_transform(self, X, y=None):\r\n return self.fit(X, y).transform(X, y)", "def fit_transform(self, X, y=None):\r\n return self.fit(X, y).transform(X, y)", "def transform(self, X):\n self._check_is_fitted('transform')\n return self.best_estimator_.transform(X)", "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X, y)", "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X, y)", "def fit(self, X, y):\n if self.scaler is not None:\n x_scaled = self.scaler.fit_transform(X)\n else:\n x_scaled = X\n x_reduced = self.prcomp.fit_transform(x_scaled)\n self.regression.fit(x_reduced, y)\n return self", "def fit_transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:\n self.fit(X, y)\n return self.transform(X)", "def fit(self, X):\n X_ = list(X)\n n = len(X_)\n\n W = [np.zeros((X_[i].shape[1], self.K)) for i in range(n)]\n T = [np.zeros((X_[i].shape[0], self.K)) for i in range(n)]\n P = [np.zeros((X_[i].shape[1], self.K)) for i in range(n)]\n\n # Find model\n func_vals = []\n for k in range(self.K):\n w, t, p, func_val = self._fit(X_)\n\n # Deflate for next component, but do not deflate for last component\n for i in range(n):\n W[i][:, k] = w[i].ravel()\n T[i][:, k] = t[i].ravel()\n P[i][:, k] = p[i].ravel()\n\n X_[i] = X_[i] - np.dot(t[i], p[i].T) # Deflate\n\n func_vals.append(func_val)\n\n self.func_val = func_vals\n self.W = W\n self.T = T\n self.P = P\n\n return self", "def fit_transform(self, X):\n X_sparse = X.copy().astype(np.float64)\n self.X_sparse = X_sparse\n self._fit()\n return self.sample_weights, self.s, self.feature_weights", "def fit_transform(self, data):\n return self.transform(data)", "def fit_transform(self, X):\n\n if self.n_cpu == 1:\n self.pool = None\n else:\n self.pool = Pool(self.n_cpu)\n Xm = ma.masked_equal(X, self.missing_values)\n best_tc = -np.inf\n for n_rep in range(self.n_repeat):\n\n self.initialize_parameters(X)\n\n for nloop in range(self.max_iter):\n\n self.log_p_y = self.calculate_p_y(self.p_y_given_x)\n self.theta = self.calculate_theta(Xm, self.p_y_given_x)\n\n if 
self.n_hidden > 1: # Structure learning step\n self.update_alpha(self.p_y_given_x, self.theta, Xm, self.tcs)\n\n self.p_y_given_x, self.log_z = self.calculate_latent(self.theta, Xm)\n\n self.update_tc(self.log_z) # Calculate TC and record history to check convergence\n\n self.print_verbose()\n if self.convergence():\n break\n\n if self.verbose:\n print('Overall tc:', self.tc)\n if self.tc > best_tc:\n best_tc = self.tc\n best_dict = self.__dict__.copy() # TODO: what happens if n_cpu > 1 and n_repeat > 1? Does pool get copied? Probably not...just a pointer to the same object... Seems fine.\n self.__dict__ = best_dict\n if self.verbose:\n print('Best tc:', self.tc)\n\n self.sort_and_output(Xm)\n if self.pool is not None:\n self.pool.close()\n self.pool = None\n return self.labels", "def fit_transform(self, X):\n self.fit(X)\n return self.embedding_", "def fit(self, X, y=None, **fitparams):\n \n self.fitted_transformers_ = []\n for transformer in self.list_of_transformers:\n fitted_trans = clone(transformer).fit(X, y=None, **fitparams)\n self.fitted_transformers_.append(fitted_trans)\n return self", "def fit_transform(self, X, y=None, sample_weight=None):\r\n # Currently, this just skips a copy of the data if it is not in\r\n # np.array or CSR format already.\r\n # XXX This skips _check_test_data, which may change the dtype;\r\n # we should refactor the input validation.\r\n return self.fit(X, sample_weight=sample_weight)._transform(X)", "def fit(self, X, y):\n self.X_data = X\n self.y = y", "def fit_transform(self, x):\n return self.fit(x).transform(x)", "def fit(self, X):\n if self.n_components_ > X.shape[2]:\n raise ValueError(\"n_components is greater than number of features in X.\")\n\n if len(X.shape) != 3:\n raise ValueError(\"Data must be in 3 dimensions (conditions, time, features).\")\n\n if self.mean_subtract_:\n self.cross_condition_mean_ = np.mean(X, axis=0, keepdims=True)\n X = X - self.cross_condition_mean_\n\n X_flat = np.concatenate(X, axis=0)\n self.pca_ = PCA(n_components=self.n_components_)\n self.pca_.fit(X_flat)\n\n X_red = [self.pca_.transform(Xi) for Xi in X]\n dX = np.concatenate([np.diff(Xi, axis=0) for Xi in X_red], axis=0)\n X_prestate = np.concatenate([Xi[:-1] for Xi in X_red], axis=0)\n M_skew = self._fit_skew(X_prestate, dX)\n\n self.eigen_vals_, self.eigen_vecs_ = self._get_jpcs(M_skew)\n\n self.proj_vectors_ = []\n for i in range(len(self.eigen_vecs_) // 2):\n v1 = self.eigen_vecs_[2 * i]\n v2 = self.eigen_vecs_[2 * i + 1]\n real_v1 = np.real(v1 + v2)\n real_v1 /= np.linalg.norm(real_v1)\n real_v2 = np.imag(v1 - v2)\n real_v2 /= np.linalg.norm(real_v2)\n self.proj_vectors_.append(real_v1)\n self.proj_vectors_.append(real_v2)\n self.proj_vectors_ = np.array(self.proj_vectors_)\n return self", "def fit_transform(self, X, y=None, **inputs):\n return self.fit(X, y=y, **inputs).transform(X, y)", "def fit(self, X, y):\n # Code to fit the model.\n\n train_stuff = self._vectorizer.fit_transform(X, y)\n\n self._classifier.fit(train_stuff, y = y)\n\n\n return self", "def fit_transform(self, X, y=None, sample_weight=None):\n\n X = check_array(X, accept_sparse=\"csr\")\n\n if not issparse(X):\n X = csr_matrix(X)\n\n if sample_weight is not None:\n NotImplementedError(\"Sample weights not supported in distributed\")\n # sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float32)\n\n if np.any(X.data < 0):\n raise ValueError(\n \"PLSA is only valid for matrices with non-negative \" \"entries\"\n )\n\n row_sums = np.array(X.sum(axis=1).T)[0]\n good_rows = 
row_sums != 0\n\n if not np.all(good_rows):\n zero_rows_found = True\n data_for_fitting = X[good_rows]\n else:\n zero_rows_found = False\n data_for_fitting = X\n\n U, V = plsa_fit(\n data_for_fitting,\n self.n_components,\n n_row_blocks=self.n_row_blocks,\n n_col_blocks=self.n_col_blocks,\n init=self.init,\n n_iter=self.n_iter,\n n_iter_per_test=self.n_iter_per_test,\n tolerance=self.tolerance,\n e_step_thresh=self.e_step_thresh,\n random_state=self.random_state,\n )\n\n if zero_rows_found:\n self.embedding_ = np.zeros((X.shape[0], self.n_components))\n self.embedding_[good_rows] = U\n else:\n self.embedding_ = U\n\n self.components_ = V\n self.training_data_ = X\n\n return self.embedding_", "def fit(self, X, y=None):\n if self.column_type == \"numeric\":\n self.columns_to_transform_ = get_numerical_columns(\n data_frame=X,\n ignore_columns=self.ignore_columns,\n uniqueness_thresshold=self.uniqueness_thresshold,\n )\n else:\n self.columns_to_transform_ = get_categorical_columns(\n data_frame=X, uniqueness_thresshold=self.uniqueness_thresshold\n )\n\n if isinstance(self.transformation, BaseEstimator):\n self.transformation.fit(X[self.columns_to_transform_])\n\n return self", "def fit_transform(self, X, y):\n return self.fit(X, y).transform(X)", "def fit_transform(self, X, y):\n return self.fit(X, y).transform(X)", "def fit(self, X, Y):\n ...", "def fit(self, X):\n n_components = X.shape[1] if self.n_components is None else self.n_components\n diff = np.vstack([i[1:] - i[:-1] for i in X])\n cov = scipy.cov(diff, rowvar=0)\n u, _, _ = scipy.linalg.svd(cov, full_matrices=False)\n u = u[:, -n_components:][:, ::-1]\n\n self.weights = u", "def fit ( self, X: np.ndarray, y: np.ndarray ):\n \n self.X = X\n self.y = y", "def fit_transform(self, x, y=None):\n return self.fit(x).transform(x)", "def fit_transform(self, X, y):\n return self.fit(X, y).transform(X, y)", "def fit(self, X):\n self.rel_to_idx, self.ent_to_idx = create_mappings(X)\n self.is_fitted = True", "def fit(self, dataset: NumpyOrPandas):\n for check_func in self._fit_checks:\n check_func(dataset)\n\n if self.cache_dir is not None:\n if not os.path.exists(self.cache_dir):\n os.makedirs(self.cache_dir)\n # set transformer features\n\n # convert to accepted dtype and get attributes\n dataset = dataset.to_pandas()\n df = dataset.data\n\n # fit\n if self.subs is not None and df.shape[0] >= self.subs:\n subs = df.sample(n=self.subs, random_state=self.random_state)\n else:\n subs = df\n\n names = []\n for n, i in enumerate(subs.columns):\n feats = [self._fname_prefix + \"_\" + self._emb_name + \"_\" + str(x) + \"__\" + i for x in range(self.emb_size)]\n self.dicts[i] = {\n \"transformer\": deepcopy(self.transformer.fit(subs[i])),\n \"feats\": feats,\n }\n names.extend(feats)\n\n self._features = names\n return self" ]
[ "0.74559265", "0.7146048", "0.7100563", "0.7100563", "0.7100563", "0.7031072", "0.6953819", "0.68829376", "0.6859341", "0.6859341", "0.6859318", "0.6813972", "0.67715836", "0.6764781", "0.67534685", "0.6715386", "0.66989195", "0.6691517", "0.6683969", "0.6655785", "0.6646973", "0.65752107", "0.65549403", "0.65459955", "0.65459955", "0.65405804", "0.652953", "0.65152395", "0.6509386", "0.65003324", "0.64865637", "0.6474996", "0.6471654", "0.64641917", "0.64577967", "0.64502704", "0.64127415", "0.6408743", "0.63825715", "0.63759494", "0.6365381", "0.6365377", "0.6350715", "0.6347849", "0.6342948", "0.6339658", "0.63339996", "0.6327852", "0.63262403", "0.63241744", "0.63235176", "0.63226664", "0.63226664", "0.63160324", "0.63153726", "0.63117343", "0.6309922", "0.630934", "0.63086593", "0.6289966", "0.6289966", "0.62875354", "0.62849915", "0.627957", "0.6278395", "0.6268159", "0.626726", "0.62661386", "0.6234892", "0.6233605", "0.6233392", "0.6233392", "0.62331885", "0.62287056", "0.62287056", "0.622127", "0.6215436", "0.6214233", "0.6196698", "0.61819106", "0.6175357", "0.6170462", "0.61459035", "0.6141328", "0.6138176", "0.61369395", "0.61366934", "0.6134729", "0.6129416", "0.61257297", "0.6125486", "0.6125294", "0.6125294", "0.61212", "0.6102801", "0.60866815", "0.60857004", "0.6084683", "0.6082639", "0.607601" ]
0.7536608
0
Return specific sample. Sample is generated from transformed distribution and transformed back to the original space.
def get_sample(self, seed): transformed = self.sampler_.get_sample(seed) return self.pca_.inverse_transform(transformed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_sample(self):\n mu = self._get_mean()\n sample = self.random.normal(mu)\n return sample", "def _get_sample(self):\n mu = self._get_mean().reshape((1, self.out_dim))\n sigma = self.variables[\"s\"]\n sample = self.random.normal(mu, sigma)\n sample = sample.reshape(self.out_dim)\n return sample", "def sample_one(self):\n # x = self.mean + self.sigma * np.random.normal()\n x = self.dist.sample(1)\n return x", "def sample(self):\n sampleIndices = self.random_state.choice(len(self.X), int(len(self.X)*self.sample_ratio), replace=False)\n\n return self.X[sampleIndices]\n pass", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)", "def __getitem__(self, idx):\n \n sample = get_samples(s_sample = self.s_sample,\n nsamples = 1,\n test_coords = self.t_coords,\n f = self.f)\n \n if self.rotate_cubes == True:\n # 0.5 probability of rotation\n if random.choice([True, False]) == True:\n t = random.randint(1,4)\n sample = np.rot90(sample, t)\n\n #transforms happens here\n # scale_01 / scale_neg11 / root / root_scale_01 / root_scale_neg11\n sample = transform_func(cube = sample,\n inverse_type = self.transform,\n self = self)\n\n sample = np.array(sample).reshape((1,self.s_sample,self.s_sample,self.s_sample))\n\n return sample", "def sample(self, n_samples: int) -> torch.Tensor:\n return self.dist.sample((n_samples,))", "def _get_sample(self):\n p = self._get_mean()\n u = self.random.random_sample(p.shape)\n sample = u < p\n return sample", "def sample_from(self):\n return numpy.random.normal(self.mu, math.sqrt(self.sigma))", "def sample(self):\n return gc.rand_state.choice(self.domain)", "def sample(self):\n return gc.rand_state.uniform(low=self.bounds[0], high=self.bounds[1])", "def sample(self, nsamples):\n return self.dist.sample(nsamples)", "def sample(self):\n\n # pick sample type according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()", "def sample(self):\n # return [v.sample() for v in self.variables]\n return self.domain[gc.rand_state.choice(len(self.domain))]", "def _transform(self, sample):\n sample = self.transformation(sample)\n inputs, targets = self.split_sample(sample)\n inputs = self._cast(inputs)\n targets = self._cast(targets, set_dtype=False)\n\n return inputs, targets", "def sample(self):\n return self._sample_func", "def sample(self):\n return self(np.random.randn(self.num_xi)).reshape(self.X.shape)", "def random_transform(self, x):\n pass", "def sample(self):\n # CITATION corrected by : https://github.com/l5shi/Multi-DDPG-with-parameter-noise/blob/master/Multi_DDPG_with_parameter_noise.ipynb\n x = self.x_prev\n dx = self.theta * (self.mu - x) * self.dt + self.sigma * np.sqrt(\n self.dt\n ) * np.random.normal(size=self.mu.shape)\n self.x_prev = x + dx\n return self.x_prev", "def sample_from_prior(self):\n raise NotImplementedError", "def sample(self):\n M = np.random.normal(self._mu.reshape(-1), self._sig).reshape(self.shape)\n return M", "def sample(self, sample_shape=torch.Size()):\n with torch.no_grad():\n return self.rsample(sample_shape=sample_shape)", "def __call__(self, sample_shape=torch.Size()):\n return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)", "def 
sample_from(self):\n return numpy.random.lognormal(mean=self.mu, sigma=self.sigma)", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def sample(self):\n\n\t\t# get newest sample\n\t\ts = self.eyetribe.sample()\n\t\t\n\t\t# invalid data\n\t\tif s == (None,None):\n\t\t\treturn (-1,-1)\n\t\t\n\t\t# check if the new sample is the same as the previous\n\t\tif s != self.prevsample:\n\t\t\t# update the current sample\n\t\t\tself.prevsample = copy.copy(s)\n\t\t\n\t\treturn self.prevsample", "def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:\n return sample", "def _graph_fn_sample_stochastic(distribution):\n return distribution.sample()", "def sample(self):\n high = self.high.type(torch.float64) if self.dtype.is_floating_point else self.high.type(torch.int64) + 1\n sample = torch.empty(self.shape, dtype=torch.float64)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n # Vectorized sampling by interval type\n sample[unbounded] = torch.randn(unbounded[unbounded].shape, dtype=torch.float64)\n\n sample[low_bounded] = (-torch.rand(low_bounded[low_bounded].shape, dtype=torch.float64)).exponential_() + \\\n self.low[low_bounded]\n\n sample[upp_bounded] = self.high[upp_bounded] - (\n -torch.rand(upp_bounded[upp_bounded].shape, dtype=torch.float64)).exponential_()\n\n sample[bounded] = (self.low[bounded] - high[bounded]) * torch.rand(bounded[bounded].shape,\n dtype=torch.float64) + high[bounded]\n\n if not self.dtype.is_floating_point: # integer\n sample = torch.floor(sample)\n\n return sample.type(self.dtype)", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def sample_from_prior(self, n_samples):\n\n p0 = self.rng.normal(loc=self.mean, scale=self.sigma, size=n_samples)\n return p0[:, np.newaxis]", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)", "def post_process_sample(self, sample: DataSample) -> DataSample:\n return sample", "def _generate_sample(self, sample):\n sample = super()._generate_sample(sample)\n sample.target_shape = self.out_shape\n return sample", "def _get_sample(self, view, sample):\n return view[sample]", "def getSample(self, sample_name, tax_level):\n self.sample_name = sample_name\n columns = self.tax_levels + [sample_name, 'masked']\n self.sample = self.abundance_df[columns]\n self.sample = self.sample[self.sample[sample_name] > 0]\n self.sample = self.sample.rename(columns = {sample_name: 'abundance'})\n self.sample[self.sample['masked']==False]\n self.sample.index.name = None\n self.tax_level = tax_level", "def sample (self, n):\n y = self.bins\n x = np.r_[0, self.values.cumsum ()] / self.sum\n # interpolate inverse CDF\n out = np.interp (np.random.random (n), x, y)\n if n == 1:\n return out[0]\n else:\n return out.reshape ((n,))", "def get_sample(x, y):\n return noise[x][y]", "def sample_transform(sample):\n sample[sample >= 0.5] = 1\n sample[sample < 0.5] = 0\n return sample", "def fit(sample):\r\n if not hasattr(sample, \"stddev\"):\r\n sample = Sample(sample)\r\n m = sample.mean\r\n d = sqrt(12*sample.variance)/2\r\n return Uniform(m-d, m+d)", 
"def get_sample(self, idx):\n sample = self.buffer[idx]\n if self.gamma == 0 or sample[4] is True:\n return sample\n\n if idx + 1 < len(self.buffer):\n sample[2] = sample[2] + self.gamma * self.get_sample(idx + 1)[2]\n\n return sample", "def sample(self):\r\n raise NotImplementedError", "def sample(self, num_samples = 1):\n\n # shortcut\n shape = self.shape\n loc = self.loc\n scale = self.scale\n\n # some sampling\n U = self.UG.sample(num_samples)\n X = 1 / scale * (-np.log(U)) ** (1 / shape)\n return scale * X + loc", "def __call__(self, sample, random_state=None):\n raise NotImplementedError", "def sample(self, Z=None):\n Z = Z if Z is not None else self.sum() # normalize if desired / by default\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n pSoFar = 0.0\n pDraw = Z * np.random.random_sample()\n it = np.nditer(self.t, op_flags=['readonly'], flags=['multi_index']) # for tuple return\n #it = np.nditer(self.t, op_flags=['readonly'], flags=[orderMethod+'_index']) # for index return\n while not it.finished:\n pSoFar += it[0]\n if ( pSoFar > pDraw ):\n return it.multi_index # multi_index for tuple return\n #return it.index # index for index return\n it.iternext()\n return self.v.ind2sub(self.numel()-1) # if numerical issue: return final state", "def sample_from(space):\n distrs = {\n 'choice': choice,\n 'randint': randint,\n 'uniform': uniform,\n 'normal': normal,\n }\n s = space[0]\n\n np.random.seed(int(time.time() + np.random.randint(0, 300)))\n\n log = s.startswith('log_')\n s = s[len('log_'):] if log else s\n\n quantized = s.startswith('q')\n s = s[1:] if quantized else s\n\n distr = distrs[s]\n if s == 'choice':\n return distr(space[1])\n samp = distr(space[1], space[2])\n if log:\n samp = np.exp(samp)\n if quantized:\n samp = round((samp / space[3]) * space[3])\n return samp", "def sample(self):\n x = self.state\n# dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def _reparameterized_sample(self, mean, std):\n eps = torch.zeros(std.size(), device=self.device, dtype=torch.float32).normal_()\n eps = eps.requires_grad_()\n return eps.mul(std).add_(mean)", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.rand(*x.shape) \n self.state = x + dx\n return self.state", "def transform(self, X, y, sample_weight, copy=None):\n \n for classlabel in self.scale_:\n sample_weight[y==classlabel]*=self.scale_[classlabel]\n return X", "def transform(self, X, y, sample_weight, copy=None):\n \n for classlabel in self.scale_:\n sample_weight[y==classlabel]*=self.scale_[classlabel]\n return X", "def get_item(self, index, rng=None):\n if index is None:\n rng = kwarray.ensure_rng(rng)\n index = rng.randint(0, self.n_samples)\n\n if index < self.n_positives:\n sample = self.get_positive(index, rng=rng)\n else:\n index = index - self.n_positives\n sample = self.get_negative(index, rng=rng)\n return sample", "def sample(self):\n return np.random.randint(self._dim)", "def sample_prior(size):\n return torch.randn(size)", "def _sample_posterior(self):\n\n latent_dim = self.network_architecture['latent_dim']\n\n # Sample eps from standard Normal\n eps = tf.random_normal([self.batch_size, latent_dim], 0, 1,\n dtype=tf.float32)\n\n # Transform using Z = mean + root_cov*eps\n samp = self.rec_mean + tf.mul(tf.sqrt(tf.exp(self.rec_log_sigma_sq)),\n 
eps)\n return samp", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n # dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def _get_sample(self):\n prev = self.prev_img\n curr = self.curr_img\n prevbb = self._last_bbox\n prev_sample, opts_prev = crop_sample({'image': prev, 'bb': prevbb})\n curr_sample, opts_curr = crop_sample({'image': curr, 'bb': prevbb})\n prev_img = bgr2rgb(self.scale(prev_sample, opts_prev)['image'])\n curr_img = bgr2rgb(self.scale(curr_sample, opts_curr)['image'])\n sample = {'previmg': prev_img, 'currimg': curr_img}\n self.curr_img = curr\n self.opts = opts_curr\n return sample", "def sample_from_prior(self, n_samples):\n\n p0 = self.min + self.rng.rand(n_samples) * (self.max - self.min)\n return p0[:, np.newaxis]", "def _construct_sample_from_prior(self):\n z_sym = T.matrix()\n x_sym = T.matrix()\n irs = self.ir_steps\n oputs = [self.obs_transform(self.s0)]\n oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])\n _, hi_zmuv = self._construct_zmuv_samples(x_sym, 1)\n sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \\\n givens={ self.z: z_sym, \\\n self.x_in: T.zeros_like(x_sym), \\\n self.x_out: T.zeros_like(x_sym), \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.scan_updates)\n def prior_sampler(samp_count):\n x_samps = to_fX( np.zeros((samp_count, self.obs_dim)) )\n old_switch = self.train_switch.get_value(borrow=False)\n # set model to generation mode\n self.set_train_switch(switch_val=0.0)\n z_samps = to_fX( npr.randn(samp_count, self.z_dim) )\n model_samps = sample_func(z_samps, x_samps)\n # set model back to either training or generation mode\n self.set_train_switch(switch_val=old_switch)\n return model_samps\n return prior_sampler", "def __getitem__(self, index):\n return self.data_source.get_sample(index)", "def get_transformation(name, generator, device):\n transformation = None\n if name is None:\n transformation = None\n elif name.startswith(\"rand_proj_gauss_sign\"):\n temp = re.findall(r\"\\d+\", name)\n [N_in, N_out] = list(map(int, temp))\n # create the random transformation\n transformation = transformations.RandomProjection(device, N_in, N_out)\n elif name == \"scattering2D\":\n N_in = generator.N_out\n N_out = 0 # is set by the transformation itself\n transformation = transformations.Scattering2D(device, N_in, N_out)\n else:\n raise ValueError(\"Did not recognise the transformation, will exit now.\")\n\n return transformation", "def sample_from_prior(self, n_samples):\n\n p0 = self.rng.lognormal(mean=self.mean, sigma=self.sigma, size=n_samples)\n return p0[:, np.newaxis]", "def sample(self):\r\n x = self.state\r\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\r\n self.state = x + dx\r\n return self.state", "def apply_transform(self, sample):\n for image_dict in sample.values():\n if not is_image_dict(image_dict):\n continue\n if image_dict['type'] != INTENSITY:\n continue\n array = image_dict['data'].numpy()\n pa, pb = self.percentiles\n cutoff = np.percentile(array, (pa, pb))\n np.clip(array, *cutoff, out=array)\n array -= array.min() # [0, max]\n array /= array.max() # [0, 1]\n out_range = self.out_max - self.out_min\n array *= out_range # [0, out_range]\n array -= self.out_min # [out_min, out_max]\n image_dict['data'] = torch.from_numpy(array)\n return sample", "def sample_from_prior(self, 
n_samples):\n pass", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.uniform(-1.0, 1.0, len(self.mu))\n\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))\n self.state = x + dx\n return self.state", "def get_p_sample(self):\n control = self.p_control * self.n_control\n treatment = self.p_treatment * self.n_treatment\n sample = self.n_control + self.n_treatment\n\n p_sample = (control + treatment) / sample\n\n self.p_sample = p_sample\n\n return p_sample", "def fit(sample):\r\n if not hasattr(sample, \"stddev\"):\r\n sample = Sample(sample)\r\n return Normal(sample.mean, sample.stddev)", "def sample(self):\n return random.sample(self.memory, k=self.batch_size)", "def sample_from_concept(self):\n return random.choice(self.active_concept.extension)", "def normal_sample(mu, sigma):\n return mu + sigma * torch.randn_like(sigma)", "def _sample_synthetic(self, X):\n n_samples = X.shape[0]\n self.y = np.concatenate((np.ones(n_samples), np.zeros(n_samples)))\n \n random_state = _forest.check_random_state(self.random_state) \n\n X_synth = np.asarray([np.apply_along_axis(random_state.choice, 0, X) for _ in range(n_samples)])\n self.X = np.concatenate((X, X_synth))\n\n return self.X, self.y", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array(\n [random.random() for i in range(len(x))]\n )\n self.state = x + dx\n return self.state", "def posterior_sample(self):\n pass", "def generate_sample(self):\n\n sample_one = np.zeros(self.n_dimensions)\n\n \"\"\"C: Huge gains here from using numpy functions at once instead of a Python loop\"\"\"\n \"\"\"S: How..?\"\"\"\n\n for i in range(self.n_dimensions):\n self.Y[i] ^= self.V[\n self.index_of_least_significant_zero_bit(self.current - 1), i\n ]\n sample_one[i] = float(self.Y[i] / math.pow(2, self.scale))\n self.current += 1\n return sample_one", "def _sample(self, X, y):\n self._validate_estimator()\n X_res, y_res = self.tomek_.fit_sample(X, y)\n\n return self.smote_.fit_sample(X_res, y_res)", "def get_example(self, idx=-1):\n if idx < 0:\n idx = np.random.randint(len(self))\n return self[idx]", "def random_sample(self) -> Union[DeclarativeMeta, AliasedClass]:\n if self._profile_sample_query:\n return self._fetch_sample_data_with_query_object()\n\n if not self.profile_sample:\n if self._partition_details:\n return self._random_sample_for_partitioned_tables()\n\n return self.table\n\n # Add new RandomNumFn column\n rnd = self.get_sample_query()\n session_query = self.session.query(rnd)\n\n 
# Prepare sampled CTE\n sampled = session_query.where(rnd.c.random <= self.profile_sample).cte(\n f\"{self.table.__tablename__}_sample\"\n )\n # Assign as an alias\n return aliased(self.table, sampled)", "def getSampler(self, *args):\n return _osgAnimation.Channel_getSampler(self, *args)", "def post(self, s):\n return np.random.choice(self.sample_list)", "def sample(self, state):\n state = torch.FloatTensor(state)\n\n action_prob = self.network(state)\n action_distribution = Categorical(action_prob)\n action = action_distribution.sample()\n\n return action.cpu().item()", "def _call(self, *args, samples=1, virtual=None, sample_chunksize=None, **kwargs): \n\n return virtually_sample_distribution(self._distr, samples, *args,\n sample_chunksize=sample_chunksize,\n **kwargs)", "def sample_from_prior(self, n_samples):\n\n lamda = np.abs(self.rng.standard_cauchy(size=n_samples))\n\n p0 = np.log(np.abs(self.rng.randn() * lamda * self.scale))\n return p0[:, np.newaxis]", "def sample(self, bn, observed, resample=False, step=1):\n return self.forward(bn, observed, resample, step)", "def sample(self, n_samples):\n\n z = sample_prior((n_samples,) + self.flow.z_shape)\n ldj = torch.zeros(z.size(0))\n\n z, ldj = self.flow (z, ldj, reverse=True)\n z, ldj = self.logit_normalize(z, ldj, reverse=True)\n\n return z", "def sample_from_prior(self, *args, **kwargs):\n pass", "def sample(self, s):\n rng = np.random.default_rng()\n return rng.choice(np.arange(self.n_actions), p=self.eval(s))", "def sample(self):\n sample = np.zeros(self.n, dtype=self.dtype)\n sample[self.np_random.choice(self.n, self.np_random.random_integers(low=self.low_limit, high=self.high_limit),\n replace=False)] = 1\n return sample", "def AddSample(self, sample):\n if not isinstance(sample, Sample):\n raise TypeError(\"Sample must be instance of Sample\")\n\n self.samples.append(sample.val)\n\n self.samples.reverse()\n self.samples.pop()\n self.samples.reverse()\n\n filteredValue = sum([a*b for a, b in zip(self.coef, self.samples)])\n\n return Sample(sample.t, filteredValue)", "def __getitem__(self, idx):\n return self.samples[idx]", "def sample_latent(self, x):\n latent_dist = self.encoder(x)\n latent_sample = self.reparameterize(*latent_dist)\n return latent_sample", "def sample(self, n_samples, sample_seed):\n self.seed_samples(sample_seed)\n samples = torch.zeros([self.batch_size, n_samples, self.Y_dim], device=self.device)\n # Determine first vs. second Gaussian\n unif2 = torch.rand(self.batch_size, n_samples)\n second_gaussian = (self.w2 > unif2)\n # Sample from second Gaussian\n samples2 = self.sample_full_rank(n_samples, self.mu2, self.tril_elements2, as_numpy=False)\n samples[second_gaussian, :] = samples2[second_gaussian, :]\n # Sample from first Gaussian\n samples1 = self.sample_full_rank(n_samples, self.mu, self.tril_elements, as_numpy=False)\n samples[~second_gaussian, :] = samples1[~second_gaussian, :]\n samples = samples.data.cpu().numpy()\n return samples" ]
[ "0.69831187", "0.6970329", "0.6696096", "0.66158164", "0.64837795", "0.642031", "0.6373376", "0.63586676", "0.6337623", "0.62676066", "0.62550586", "0.62331295", "0.62140554", "0.6144703", "0.614365", "0.6135289", "0.61312723", "0.6130087", "0.61157453", "0.6106311", "0.60812193", "0.6064497", "0.6051039", "0.60408616", "0.6000375", "0.5989007", "0.5975468", "0.5962804", "0.59527296", "0.59404", "0.59404", "0.59228647", "0.5895333", "0.58922786", "0.58879215", "0.58794904", "0.5878402", "0.5875986", "0.5866988", "0.58442384", "0.5841475", "0.5833832", "0.5810497", "0.58008486", "0.57998127", "0.5748091", "0.5744232", "0.57367766", "0.57331455", "0.57280976", "0.5722513", "0.5719219", "0.5719219", "0.57166624", "0.5709252", "0.57090455", "0.5691043", "0.5690707", "0.56827027", "0.56655437", "0.5663888", "0.56617457", "0.56471133", "0.5645733", "0.56425357", "0.5635868", "0.5632027", "0.5628023", "0.5628023", "0.5628023", "0.5628023", "0.5628023", "0.561482", "0.56136197", "0.5611237", "0.5608448", "0.56082225", "0.5598955", "0.55977094", "0.5574815", "0.5571827", "0.5560508", "0.55529934", "0.5547116", "0.554105", "0.5539682", "0.55339104", "0.5529277", "0.5526217", "0.5523108", "0.5519115", "0.55164945", "0.5510801", "0.5510037", "0.55071384", "0.5502495", "0.5497762", "0.54971486", "0.54952186", "0.5491496" ]
0.6645877
3
Optimise the marginal likelihood. Work with the log of beta; fmin works better that way.
def optimise_GP_kernel(self,iters=1000): new_params=SCG(self.ll_hyper,self.ll_hyper_grad,np.hstack((self.DGPLVM_tar.GP.kernel.get_params(), np.log(self.DGPLVM_tar.GP.beta))),maxiters=iters,display=True,func_flg=0) #gtol=1e-10,epsilon=1e-10, # new_params = fmin_cg(self.ll,np.hstack((self.kernel.get_params(), np.log(self.beta))),fprime=self.ll_grad,maxiter=iters,gtol=1e-10,disp=False) self.DGPLVM_src.GP.set_params(new_params) self.DGPLVM_tar.GP.set_params(new_params) self.DGPLVM_all.GP.set_params(new_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def log_marginal_likelihood(self, eval_gradient=False):\n A, B, mu_tilda, gamma_tilda, r_grid, n_grid = self.calc_tau(return_all=True)\n\n gamma = self.GAMMA\n gamma_y = self.GAMMA_Y\n\n A1 = np.copy(self.baseTau0)\n A1[np.diag_indices_from(A1)] = 2 * self.ndim\n\n # A1 = (A - B) / gamma_y\n A2 = np.diag(n_grid)\n\n B_inv = np.linalg.inv(B)\n A_inv = np.linalg.inv(A)\n\n M_lambda = B - B.dot(np.linalg.inv(A).dot(B))\n\n # log_likelihood = mu_tilda.dot(M_lambda).dot(\n # mu_tilda[:, np.newaxis]) - np.nan_to_num(np.log(np.linalg.det(M_lambda)))\n log_likelihood = mu_tilda.dot(M_lambda).dot(mu_tilda[:, np.newaxis])\n\n # log_likelihood = -log_likelihood\n print('log_likelihood: %f' % log_likelihood)\n\n Lambda = B - B.dot(A_inv.dot(B))\n Lambda_inv = np.linalg.inv(Lambda)\n\n M_gamma = A2 - A2.dot(2 * gamma * A_inv - (gamma ** 2) * A_inv.dot(A2).dot(A_inv)).dot(A2)\n\n mean_r_grid = np.nan_to_num(np.array([r.sum() for r in r_grid]) / n_grid)\n\n log_likelihood_grad_gamma = mean_r_grid.dot(M_gamma.dot(mean_r_grid[:, np.newaxis]))[0]\n\n tmpMat = A2.dot(A_inv).dot(A1).dot(A_inv).dot(A2)\n\n log_likelihood_grad_gamma_y = (gamma ** 2) * (\n np.trace(Lambda_inv.dot(tmpMat) + mean_r_grid.dot(tmpMat.dot(mean_r_grid[:, np.newaxis]))[0]))\n\n log_likelihood_gradient = np.array([log_likelihood_grad_gamma, log_likelihood_grad_gamma_y])\n print('log_likelihood_grad: %s' % log_likelihood_gradient)\n\n if eval_gradient:\n return log_likelihood, log_likelihood_gradient\n else:\n return log_likelihood", "def compute_log_marginal_likelihood(\n K_i: torch.Tensor,\n logDetK: torch.Tensor,\n y: torch.Tensor,\n normalize: bool = True,\n log_prior_dist=None,\n):\n lml = (\n -0.5 * y.t() @ K_i @ y\n + 0.5 * logDetK\n - y.shape[0]\n / 2.0\n * torch.log(\n 2\n * torch.tensor(\n np.pi,\n )\n )\n )\n if log_prior_dist is not None:\n lml -= log_prior_dist\n return lml / y.shape[0] if normalize else lml", "def log_marginal_likelihood(self, X, W):\n phi_X = self.phi(X, W)\n S_n = phi_X.T @ phi_X + np.eye(self.M)\n mu_n = np.linalg.inv(S_n) @ phi_X.T @ self.Y\n a_n = self.gamma_a0 + self.N / 2\n A = np.diag(self.Y.T @ self.Y)\n C = np.diag(mu_n.T @ S_n @ mu_n)\n b_n = self.gamma_b0 + 0.5 * (A - C)\n\n # Compute Lambda term.\n sign, logdet = np.linalg.slogdet(S_n)\n lambda_term = -0.5 * sign * logdet\n\n # Compute b_n term.\n b_term = self.gamma_a0 * np.log(self.gamma_b0) - a_n * np.log(b_n)\n\n # Compute a_n term.\n gamma_term = gammaln(a_n) - gammaln(self.gamma_a0)\n\n # Compute sum over all y_n.\n return np.sum(gamma_term + b_term + lambda_term)", "def log_marginal_likelihood_normal_cdf(self):\n #we define the loop for the batchsize\n num_batches = int(np.ceil(self.W.shape[0] / self.batchsize_dim))\n slices=np.array_split(np.arange(0,self.W.shape[0]),num_batches)\n def batch_indices(iter):\n idx = iter \n return slice(slices[idx][0],slices[idx][-1]+1)\n \n batch_slices=[batch_indices(iter) for iter in range(num_batches)]\n #print(batch_slices,num_batches,self.batchsize_dim)\n def innerloop(slices):\n if type(slices)!=list:\n slices=[slices]\n #print(slices)\n ml=[]\n for idx in slices:\n if self.type_y=='affine':\n γp, Γp, _, _ = self.compute_gammas_affine(self.params,self.X,self.W[idx,:],self.Z[idx,:])\n elif self.type_y=='mixed':\n γp, Γp, _, _, _ = self.compute_gammas_mixed(self.params,self.X,self.Y,self.C,self.W[idx,:],self.Z[idx,:])\n #print(y1.shape)\n res = gaussianCDF(Γp,-np.ones((γp.shape[0],1))*np.inf,γp)\n ml.append(res)\n return ml\n \n 
if self.type_y=='affine':\n \n results = Parallel(n_jobs=self.num_cores )(delayed(innerloop)(b) for b in batch_slices)\n #print(results)\n res1=np.sum(results)\n \n _, _, γ, Γ = self.compute_gammas_affine(self.params,self.X,self.W[[0],:],self.Z[[0],:])#we only need γ, Γ\n #print()\n if self.latent_dim>0:\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n logres2 = np.log(res2+1e-200)\n else:\n logres2 = 0.0\n #print( np.log(res1+1e-300),logres2)\n res= np.log(res1+1e-300)-logres2 \n elif self.type_y=='regression':\n if self.latent_dim>0:\n γp, Γp, γ, Γ = self.compute_gammas_regression(self.params,self.X,self.Y,self.C)\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n #from scipy.stats import multivariate_normal\n try:\n res1 = gaussianCDF(Γp,-np.ones((γp.shape[0],1))*np.inf,γp)\n res= np.log(res1+1e-300)-np.log(res2+1e-300)\n except:\n #print(self.params, Γp)\n res=-10.0**300\n else:\n return 0.0\n elif self.type_y=='mixed':\n results = Parallel(n_jobs=self.num_cores )(delayed(innerloop)(b) for b in batch_slices)\n res1=np.sum(results)\n _, _, γ, Γ = self.compute_gammas_affine(self.params,self.X,self.W[[0],:],self.Z[[0],:])#we only need γ, Γ\n if self.latent_dim>0:\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n logres2 = np.log(res2+1e-200)\n else:\n logres2 = 0.0\n res= np.log(res1+1e-300)-logres2\n if np.isnan(res):\n return -10.0**300 \n else:\n return res", "def BMA_Marginal_Likelihood(df_train, df_test, iterations, burnin, p_1):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n\n # g-prior parameter\n c = float(K**2)\n\n # separate real value and individual forecasts\n y = df_train.iloc[:, 0]\n F = df_train.iloc[:, 1:]\n\n # initializations\n models = np.full((iterations, K), fill_value=False, dtype=bool)\n theta_hats = []\n marginal_lik = np.full(iterations, fill_value=np.nan, dtype=float)\n\n # RJ-MCMC\n # initial state = null model\n # marginal likelihood for the initial state\n y_bar = np.mean(y)\n S_right = sum((y - y_bar)**2)/(c+1)\n\n Z = np.insert(F.iloc[:, models[0, :]].values, 0, 1, axis=1)\n theta_hat = np.dot(\n np.linalg.inv(np.dot(np.transpose(Z), Z)),\n np.dot(np.transpose(Z), y)\n )\n SSR = sum((y - np.dot(Z, theta_hat))**2)\n S_left = (c/(c+1))*SSR\n S = S_left + S_right\n\n # scale to first decimal place for numerical reasons\n # find scaling order\n S_rescaled = S\n scaling_factor = 1\n while (S_rescaled < 0.1):\n S_rescaled *= 10\n scaling_factor *= 10\n # rescale S (prevent overflow)\n S = S*scaling_factor\n\n k = sum(models[0, :])\n marginal_lik[0] = ((c+1)**(-k))*(S**(-((T-1)/2)))\n theta_hats += [theta_hat]\n\n # initialization of the proposal model\n m_star = np.full(K, fill_value=True, dtype=bool)\n\n # main iteration loop\n for i in range(0, iterations-1):\n\n # decide which move to perform\n epsilon_move = np.random.random()\n\n # move 1\n if epsilon_move <= p_1 or sum(models[0]) == 0:\n\n # drawn one of the forecasts from the model\n drawn_f_index = np.random.randint(0, K)\n\n # if it is in the model, drop it, if not, add it\n np.copyto(m_star, models[i, :])\n m_star[drawn_f_index] = ~models[i, drawn_f_index]\n\n # if there are too many variables (d.f. 
= 0), the change is revert\n if sum(m_star) > (T-2):\n m_star[drawn_f_index] = models[i, drawn_f_index]\n\n # move 2\n else:\n\n # drawn one of the forecasts from the model and one outside\n index_in = np.arange(K)[models[i, :]]\n index_out = np.arange(K)[~models[i, :]]\n drawn_f_index_in = np.random.choice(index_in)\n drawn_f_index_out = np.random.choice(index_out)\n\n # swap these two variables\n np.copyto(m_star, models[i, :])\n m_star[drawn_f_index_in] = ~m_star[drawn_f_index_in]\n m_star[drawn_f_index_out] = ~m_star[drawn_f_index_out]\n\n # calculate the probability of acceptance\n Z = np.insert(F.iloc[:, m_star].values, 0, 1, axis=1)\n theta_hat = np.dot(\n np.linalg.inv(np.dot(np.transpose(Z), Z)),\n np.dot(np.transpose(Z), y)\n )\n SSR = sum((y - np.dot(Z, theta_hat))**2)\n S_left = (c/(c+1))*SSR\n S = S_left + S_right\n\n # rescale S (prevent overflow)\n S = S*scaling_factor\n\n k = sum(m_star)\n marginal_lik_m_star = ((c+1)**(-k))*(S**(-((T-1)/2)))\n\n alpha = min(1, marginal_lik_m_star/marginal_lik[i])\n\n # if accepted, move to m_star, else retain the same model\n epsilon_accept = np.random.random()\n\n if epsilon_accept <= alpha:\n np.copyto(models[i+1, :], m_star)\n marginal_lik[i+1] = marginal_lik_m_star\n theta_hats += [theta_hat]\n else:\n np.copyto(models[i+1, :], models[i, :])\n marginal_lik[i+1] = marginal_lik[i]\n theta_hats += [theta_hats[-1]]\n\n # discard the burnin draws\n models = models[burnin:, :]\n marginal_lik = marginal_lik[burnin:]\n theta_hats = theta_hats[burnin:]\n\n # unique models visited by the chain\n models_uni, index_uni = np.unique(models, axis=0, return_index=True)\n marginal_lik_uni = marginal_lik[index_uni]\n # theta_hats_uni = [theta_hats[i] for i in index_uni]\n theta_hats_uni = list(np.array(theta_hats)[index_uni])\n\n # model posterior probabilities\n marginal_lik_sum = sum(marginal_lik_uni)\n posterior_prob = marginal_lik_uni / marginal_lik_sum\n\n # generate prediction for testing set\n model_fcts = np.full(\n (df_test.shape[0], len(theta_hats_uni)),\n np.nan,\n dtype=float\n )\n\n # forecast from different models\n for j in range(model_fcts.shape[1]):\n\n model_fcts[:, j] = np.dot(\n np.insert(df_test.iloc[:, models_uni[j]].values, 0, 1, axis=1),\n theta_hats_uni[j]\n )\n\n pred = np.dot(model_fcts, posterior_prob)\n\n df_pred = pd.DataFrame(\n {\"BMA (Marginal Likelihood)\": pred},\n index=df_test.index)\n\n return df_pred", "def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - [email protected]\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*[email protected]\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik", "def log_marginal_likelihood(self, theta=None, gradient=True, opt_flag=False):\n if theta != None:\n self.kernel.hyper = np.exp(theta) if opt_flag else theta\n # self.kernel.hyper\n if gradient:\n K, grad = self.kernel.estimate(self.X_train, gradient=True)\n else:\n K = self.kernel.estimate(self.X_train)\n \n K += self.id_mat * self.sigma_n\n\n try:\n L = np.linalg.cholesky(K)\n except np.linalg.LinAlgError:\n return (np.inf, np.array([0])) if gradient else np.inf\n \n alpha = cho_solve((L, True), self.Y_train)\n \n logl = float(self.Y_train.T.dot(alpha)) / 2\n logl += np.log(np.diag(L)).sum()\n logl += self.ntrain * np.log(2 * np.pi) / 2\n\n \n if gradient:\n logl_grad = alpha.dot(alpha.T) 
# einsum is slower\n logl_grad -= cho_solve((L, True), self.id_mat)\n logl_grad = 0.5 * np.einsum('ij,ji -> ', logl_grad, grad) #dot prod and trace combined\n return logl, -np.array([logl_grad])\n return logl", "def maximize_marginal_likelihood(kernel, model, optimizer, output_directory,\n testing_data, feature_names, plot_ARD, plot_params):\n\n # run the optimiser with a callback function if user wants to track the parameters\n if plot_params:\n parameter_log = []\n opt_logs = optimizer.minimize(closure=model.training_loss, variables=model.trainable_variables,\n step_callback=(lambda x,y,z: parameter_log.append([x, z])),\n options=dict(maxiter=ci_niter(250)))\n\n else:\n # run the optimiser without a callback function otherwise\n opt_logs = optimizer.minimize(closure=model.training_loss, variables=model.trainable_variables,\n options=dict(maxiter=ci_niter(250)))\n\n # set data against which to validate the model\n features_test = testing_data['features']\n affinity_test = testing_data['affinity']\n\n # calculate the predictions and Pearson's R, Spearman's R as well as RMSE\n mean, var = model.predict_f(features_test)\n pearsonsr, pvalue = pearsonr(mean.numpy().flatten(), affinity_test.values)\n spearmansr, spvalue = spearmanr(a=mean.numpy().flatten(), b=affinity_test.values)\n rmse = np.sqrt(mean_squared_error(affinity_test.values, mean.numpy().flatten()))\n\n # write the results to a file\n filename = f'{model.name}_{kernel.name}'+'.csv'\n\n with open(output_directory+'/'+filename, 'w') as out_file:\n out_file.write(f'%Gaussian process regression with a Gaussian likelihood\\n')\n out_file.write(f'%model: {model.name}, kernel: {kernel.name}\\n')\n out_file.write(f'Optimization success: {opt_logs.get(\"success\")} in {opt_logs.get(\"nit\")} iterations, {opt_logs.get(\"message\")}\\n')\n for key, value in gp.utilities.read_values(model).items():\n out_file.write(f'%{key}: {value}\\n')\n out_file.write(f'%loglikelihood: {model.log_marginal_likelihood()}\\n')\n out_file.write(f'%RMSE:{rmse:.3f}\\n')\n out_file.write(f'%Pearson_correlation_coefficient:{pearsonsr:.3f},P-value:{pvalue:.3f}\\n')\n out_file.write(f'%Spearman_correlation_coefficient:{spearmansr:.3f},P-value:{spvalue:.3f}\\n')\n out_file.write('%%%%%%PREDICTIONS%%%%%\\n')\n out_file.write(f'name,f_pred_mean,f_pred_var,y_true,{\",\".join(feature_names)}\\n')\n for i in range(0, len(mean)):\n out_file.write(f'{affinity_test.index.values[i]},{mean.numpy()[i][0]:.4f},{var.numpy()[i][0]:.4f},{affinity_test.values[i]:.4f},{\"\".join(str(i)+\",\" for i in features_test[i].round(4).tolist())[:-1]}\\n')\n out_file.close()\n\n # create the plots that were specified in the arguments to the specified output directory\n if plot_ARD:\n plot_feature_rankings(lengthscales=model.kernel.kernels[0].lengthscales.numpy(),\n feature_names=feature_names, figpath=output_directory+'/feature_relevance.png')\n if plot_params:\n plot_parameter_change(parameter_log=parameter_log, figpath=output_directory+'/parameter_change.png')", "def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact", "def _object_func_marginals(params, data_vec, model_func, pts, \n lower_bound=None, upper_bound=None, \n verbose=0, multinom=True, flush_delay=0,\n 
func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n output_stream=sys.stdout, store_thetas=False):\n \n nmarginals= len(data_vec)\n #print \"nmarginals in marginal_optimization\"\n #print nmarginals\n \n global _counter\n _counter += 1\n\n #if nmarginals <2:\n #\tprint \"error: number of marginals less than two, but optimization function for multiple marginals is used!\"\n # \treturn dadi.Inference._out_of_bounds_val\n\n # Deal with fixed parameters\n params_up = dadi.Inference._project_params_up(params, fixed_params)\n\n # Check our parameter bounds\n if lower_bound is not None:\n for pval,bound in zip(params_up, lower_bound):\n if bound is not None and pval < bound:\n #print \"failure in bounds!, pval<lower_bound\"\n return -dadi.Inference._out_of_bounds_val/ll_scale\n if upper_bound is not None:\n for pval,bound in zip(params_up, upper_bound):\n if bound is not None and pval > bound:\n return -dadi.Inference._out_of_bounds_val/ll_scale\n \n \n all_ns = [data_vec[marg_num].sample_sizes for marg_num in range(nmarginals)]\n #print \"in marginal_optimization, all_ns is\"\n #print all_ns\n \n \n all_args = [params_up, all_ns] + list(func_args)\n # Pass the pts argument via keyword, but don't alter the passed-in \n # func_kwargs\n func_kwargs = func_kwargs.copy()\n func_kwargs['pts'] = pts\n \n #print all_args\n #print func_kwargs\n all_sfs = model_func(*all_args, **func_kwargs)\n #this supposes that the two thetas are equal. This should be verified in the end! \n if multinom:\n\tresult=numpy.sum([ll_multinom(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n else:\n result = numpy.sum([ll(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\n if store_thetas:\n global _theta_store\n dadi.Inference._theta_store[tuple(params)] = numpy.mean([optimal_sfs_scaling(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n \n # Bad result\n if numpy.isnan(result):\n result = dadi.Inference._out_of_bounds_val\n \n if (verbose > 0) and (_counter % verbose == 0):\n param_str = 'array([%s])' % (', '.join(['%- 12g'%v for v in params_up]))\n output_stream.write('%-8i, %-12g, %s%s' % (_counter, result, param_str,\n os.linesep))\n Misc.delayed_flush(delay=flush_delay)\n\n return -result/ll_scale", "def marginal_log_likelihood(self, theta):\n\n # Theta is on a log scale\n alpha = np.exp(theta[0])\n beta = 1 / np.exp(theta[1])\n\n D = self.X_transformed.shape[1]\n N = self.X_transformed.shape[0]\n\n A = beta * np.dot(self.X_transformed.T, self.X_transformed)\n A += np.eye(self.X_transformed.shape[1]) * alpha\n try:\n A_inv = np.linalg.inv(A)\n except np.linalg.linalg.LinAlgError:\n A_inv = np.linalg.inv(A + np.random.rand(A.shape[0], A.shape[1]) * 1e-8)\n \n\n m = beta * np.dot(A_inv, self.X_transformed.T)\n m = np.dot(m, self.y)\n\n mll = D / 2 * np.log(alpha)\n mll += N / 2 * np.log(beta)\n mll -= N / 2 * np.log(2 * np.pi)\n mll -= beta / 2. * np.linalg.norm(self.y - np.dot(self.X_transformed, m), 2)\n mll -= alpha / 2. 
* np.dot(m.T, m)\n mll -= 0.5 * np.log(np.linalg.det(A))\n\n if self.prior is not None:\n mll += self.prior.lnprob(theta)\n\n return mll", "def _posterior_marginal_scipy(self, parameter):\n letter = parameter.strip().split('_')[1]\n A = sum(self._prior_hyperparameters.values())\n ai = self.prior_hyperparameters['a_{}'.format(letter)]\n N = sum(self._data.values())\n ni = self._data[letter]\n\n return _scipy_beta(ai+ni, A-ai+N-ni)", "def optimize_log_fmin(p0, data, model_func, pts, \n lower_bound=None, upper_bound=None,\n verbose=0, flush_delay=0.5, \n multinom=True, maxiter=None, \n full_output=False, func_args=[], \n func_kwargs={},\n fixed_params=None, output_file=None,nmarginals=1):\n #print p0\t\n if output_file:\n output_stream = file(output_file, 'w')\n else:\n output_stream = sys.stdout\n\t\n args = (data, model_func, pts, lower_bound, upper_bound, verbose,\n multinom, flush_delay, func_args, func_kwargs, fixed_params, 1.0,\n output_stream)\n #if nmarginals==1:\n #\tobject_fun=dadi.Inference._object_func_log\n #else:\n object_fun=_object_func_marginals_log\n \n p0 = dadi.Inference._project_params_down(p0, fixed_params)\n #print \"optimizing!\"\n \n #print object_fun\n #print numpy.log(p0)\n #print object_fun(p0,data,model_func,pts, lower_bound=lower_bound,upper_bound=upper_bound,verbose=0,multinom=multinom,flush_delay=flush_delay,func_args=func_args,func_kwargs=func_kwargs,fixed_params=fixed_params, ll_scale=1,output_stream=sys.stdout)\n \n outputs = scipy.optimize.fmin(object_fun, numpy.log(p0), args = args,\n disp=False, maxiter=maxiter, full_output=True)\n xopt, fopt, iter, funcalls, warnflag = outputs\n xopt = dadi.Inference._project_params_up(numpy.exp(xopt), fixed_params)\n\n if output_file:\n output_stream.close()\n\n if not full_output:\n return xopt\n else:\n return xopt, fopt, iter, funcalls, warnflag", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + 
'{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def log_prior(cosmo_param):\n\n Omega_m, beta = cosmo_param\n\n # Get bounds\n bounds = get_bounds()\n\n if bounds[0][0] < Omega_m < bounds[0][1] and bounds[1][0] < beta < bounds[1][1]:\n return 0.0 \n return -np.inf", "def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))", "def test_marginal_likelihood(self):\n data = np.repeat([1, 0], [50, 50])\n marginals = []\n a_prior_0, b_prior_0 = 1.0, 1.0\n a_prior_1, b_prior_1 = 20.0, 20.0\n\n for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):\n with pm.Model() as model:\n a = pm.Beta(\"a\", alpha, beta)\n y = pm.Bernoulli(\"y\", a, observed=data)\n trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)\n # log_marginal_likelihood is found in the last value of each chain\n lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])\n marginals.append(lml)\n\n # compare to the analytical result\n assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1", "def _object_func_marginals_c(params, data_vec, model_func, pts, \n\t lower_bound=None, upper_bound=None, \n\t verbose=0, multinom=True, flush_delay=0,\n\t func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n\t output_stream=sys.stdout, store_thetas=False,nmarginals=2):\n\t #print \"data vec is\"\n\t #print data_vec.shape\n\t \n\t \n\t global _counter\n\t _counter += 1\n\t\n\t if nmarginals <2 and not warningiIssued:\n\t \tprint \"Warning: number of marginals less than two, but optimization function for multiple marginals is used!\"\n\t \twarningIssued=True\n\t \t#return dadi.Inference._out_of_bounds_val\n\t\n\t # Deal with fixed parameters\n\t params_up = dadi.Inference._project_params_up(params, fixed_params)\n\t\n\t # Check our parameter bounds\n\t if lower_bound is not None:\n\t for pval,bound in zip(params_up, lower_bound):\n\t if bound is not None and pval < bound:\n\t return -dadi.Inference._out_of_bounds_val/ll_scale\n\t if upper_bound is not None:\n\t for pval,bound in zip(params_up, upper_bound):\n\t if bound is not None and pval > bound:\n\t return -dadi.Inference._out_of_bounds_val/ll_scale\n\t \n\t \n\t all_ns = [data_vec[marg_num].sample_sizes for marg_num in range(nmarginals)]\n\t #print \"in marginal_optimization, all_ns is\"\n\t #print all_ns\n\t \n\t \n\t all_args = [params_up, all_ns] + list(func_args)\n\t # Pass the pts argument via keyword, but don't alter the passed-in \n\t # func_kwargs\n\t func_kwargs = func_kwargs.copy()\n\t func_kwargs['pts'] = pts\n\t all_sfs = model_func(*all_args, **func_kwargs)\n\t allcoarse=[coarsen.flatten(coarsen.split(all_sfs[i],coarsenings[i])) for i in range(len(all_sfs))]\n\t #this supposes that the two thetas are equal. This should be verified in the end! 
\n\t if multinom:\n\t\tresult=numpy.sum([ll_multinom(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\t else:\n\t result = numpy.sum([ll(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\t\n\t if store_thetas:\n\t global _theta_store\n\t dadi.Inference._theta_store[tuple(params)] = numpy.mean([optimal_sfs_scaling(all_sfs[marg_num], data_vec[marg_num]) for marg_num in range(nmarginals)])\n\t \n\t # Bad result\n\t if numpy.isnan(result):\n\t result = dadi.Inference._out_of_bounds_val\n\t \n\t if (verbose > 0) and (_counter % verbose == 0):\n\t param_str = 'array([%s])' % (', '.join(['%- 12g'%v for v in params_up]))\n\t output_stream.write('%-8i, %-12g, %s%s' % (_counter, result, param_str,\n\t os.linesep))\n\t Misc.delayed_flush(delay=flush_delay)\n\t\n\t return -result/ll_scale", "def log_marginal_likelihood(self, theta=None, eval_gradient=False):\n if theta is None:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated for theta!=None\")\n return self.log_marginal_likelihood_value_\n\n kernel = self.kernel_.clone_with_theta(theta)\n\n if eval_gradient:\n K, K_gradient = kernel(self.X_train_, eval_gradient=True)\n else:\n K = kernel(self.X_train_)\n\n K[np.diag_indices_from(K)] += self.alpha\n try:\n L = cholesky(K, lower=True) # Line 2\n except np.linalg.LinAlgError:\n return (-np.inf, np.zeros_like(theta)) \\\n if eval_gradient else -np.inf\n\n # Support multi-dimensional output of self.y_train_\n y_train = self.y_train_\n if y_train.ndim == 1:\n y_train = y_train[:, np.newaxis]\n X_train = self.X_train_\n P = self.P_train_\n Q = np.dot(np.transpose(P), cho_solve((L, True), P))\n try:\n M = cholesky(Q, lower=True)\n except np.linalg.LinAlgError:\n return (-np.inf, np.zeros_like(theta)) \\\n if eval_gradient else -np.inf\n\n alpha = cho_solve((L, True), y_train)\n beta = cho_solve((M,True), np.dot(np.transpose(P), alpha) )\n y_shift = y_train - np.dot(P, beta)\n alpha_ = cho_solve((L, True), y_shift)\n self.y_shift = y_shift\n\n # Compute log-likelihood (compare line 7)\n log_likelihood_dims = -0.5 * np.einsum(\"ik,ik->k\", y_shift, alpha_)\n log_likelihood_dims -= np.log(np.diag(L)).sum()\n log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)\n log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions\n\n if eval_gradient: # compare Equation 5.9 from GPML\n tmp = np.einsum(\"ik,jk->ijk\", alpha_, alpha_) # k: output-dimension\n tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]\n # Compute \"0.5 * trace(tmp.dot(K_gradient))\" without\n # constructing the full matrix tmp.dot(K_gradient) since only\n # its diagonal is required\n log_likelihood_gradient_dims = \\\n 0.5 * np.einsum(\"ijl,ijk->kl\", tmp, K_gradient)\n log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)\n\n if eval_gradient:\n return log_likelihood, log_likelihood_gradient\n else:\n return log_likelihood", "def fit_mle(data, copula, marginals, opti_method='SLSQP', known_parameters=False):\n\n if copula.type == \"mixture\":\n print(\"estimation of mixture only available with CMLE try fit mle\")\n raise error\n \n if known_parameters == True:\n\n marg_cdf1 = lambda i : marginals[0][\"distribution\"].cdf(data[0][i], marginals[0][\"loc\"], marginals[0][\"scale\"]) \n marg_pdf1 = lambda i : marginals[0][\"distribution\"].pdf(data[0][i], marginals[0][\"loc\"], marginals[0][\"scale\"])\n\n marg_cdf2 = lambda i : marginals[1][\"distribution\"].cdf(data[1][i], marginals[1][\"loc\"], marginals[1][\"scale\"]) \n marg_pdf2 = 
lambda i : marginals[1][\"distribution\"].pdf(data[1][i], marginals[1][\"loc\"], marginals[1][\"scale\"]) \n\n logi = lambda i, theta: np.log(copula.get_pdf(marg_cdf1(i),marg_cdf2(i),[theta]))+np.log(marg_pdf1(i)) +np.log(marg_pdf2(i))\n log_likelihood = lambda theta: -sum([logi(i, theta) for i in range(0,len(data[0]))])\n\n results = minimize(log_likelihood, copula.parameters_start, method=opti_method, )# options={'maxiter': 300})#.x[0]\n\n else:\n marg_cdf1 = lambda i, loc, scale : marginals[0][\"distribution\"].cdf(data[0][i], loc, scale) \n marg_pdf1 = lambda i, loc, scale : marginals[0][\"distribution\"].pdf(data[0][i], loc, scale)\n\n marg_cdf2 = lambda i, loc, scale : marginals[1][\"distribution\"].cdf(data[1][i], loc, scale) \n marg_pdf2 = lambda i, loc, scale : marginals[1][\"distribution\"].pdf(data[1][i], loc, scale) \n\n logi = lambda i, theta, loc1, scale1, loc2, scale2: \\\n np.log(copula.get_pdf(marg_cdf1(i, loc1, scale1),marg_cdf2(i, loc2, scale2),[theta])) \\\n + np.log(marg_pdf1(i, loc1, scale1)) +np.log(marg_pdf2(i, loc2, scale2))\n \n def log_likelihood(params):\n theta, loc1, scale1, loc2, scale2 = params\n return -sum([logi(i, theta, loc1, scale1, loc2, scale2) for i in range(0,len(data[0]))])\n\n results = minimize(log_likelihood, (copula.parameters_start, np.array(0), np.array(1), np.array(0), np.array(1)), method=opti_method, )# options={'maxiter': 300})#.x[0]\n\n print(\"method:\", opti_method, \"- success:\", results.success, \":\", results.message)\n if results.success == True:\n return results.x\n\n print(\"Optimization failed\")\n return None", "def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n variance = self.std ** 2\n ndim = params.ndim\n mean_diff = params - self.mean\n scaled_sq_err = jnp.dot(mean_diff, mean_diff) / variance\n # log determinant of covariance matrix\n log_det_cov = 2 * ndim * jnp.log(self.std)\n norm_term = ndim * jnp.log(2 * jnp.pi)\n return -0.5 * (log_det_cov + scaled_sq_err + norm_term)", "def log_marginal_BP(emd, period=None):\n # Unwrap the parameters and call the raw function\n log_p = log_marginal_raw_BP(emd.theta_f, emd.theta_o, emd.sigma_f,\n emd.sigma_o_inv, emd.y, emd.R, emd.N, period)\n\n return log_p", "def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def log_likelihood(cosmo_param, pk_obs, inv_cov):\n pknlfid, kbins, kspace = fiducial_power(cosmo_param)\n \n x = pk_obs - pknlfid\n return -0.5* (x.T @ inv_cov @ x)", "def _log_posterior_x(self, X):\n if self.marginalize:\n LL = self.log_marginal_likelihood(X, self.W)\n else:\n LL = self.log_likelihood(X=X)\n LP = self._log_prior_x(X)\n return LL + LP", "def latent_margin_optimization(response, design_matrix, param_vector, intercept, indicators, weights, z, prior_means, prior_vars, home_points=\" Home Points\", away_points=\" Away Points\", a_cols=None, h_cols=None, joint=False, MAP=False, show=False, newton_update=False, gamma=6.0, tol=1e-01, max_iter=100):\n z_change = tol + 1\n iterations = 0\n start = datetime.datetime.now()\n gradient_std = np.zeros(len(z))\n p_z_change = 0\n # Run until no change in latent variables or a maximum amount of iterations reached\n while z_change > tol and 
iterations < max_iter:\n iterations += 1\n prev_z = np.copy(z)\n # Calculate gradient of data under margin model with latent variables\n if not joint:\n z_gradient, z_second_gradient, gradient_stds = margin_model_derivative_z(response, design_matrix, param_vector, intercept, indicators, weights, z=prev_z,\n prior_means=prior_means, prior_vars=prior_vars, MAP=MAP)\n else: # Is joint\n z_gradient, z_second_gradient, gradient_stds = joint_model_derivative_z(response, design_matrix, a_cols, h_cols, param_vector[\"Away\"], param_vector[\"Home\"], intercept[\"Away\"], intercept[\"Home\"], indicators, weights, z=prev_z,\n prior_means=prior_means, prior_vars=prior_vars, MAP=MAP, home_points=home_points, away_points=away_points)\n\n # Take a gradient step and calculate change in latent variable vector\n if not newton_update:\n z += gamma * np.array(z_gradient).reshape(-1,1)\n if newton_update:\n z -= z_gradient / z_second_gradient\n\n z_change = np.linalg.norm(z - prev_z)\n if z_change > p_z_change or (p_z_change - z_change) < 1: # Momentum adjustment calculations\n gamma = gamma / 2.\n p_z_change = z_change\n\n if not joint:\n design_matrix = replace_design_latent(design_matrix=design_matrix, indicators=indicators, z=z)\n else: # Is joint\n design_matrix = replace_design_joint_latent(joint_design_matrix=design_matrix, indicators=indicators, jz=z)\n\n if show:\n print(\"Expectation Optimization Iteration: %d Latent Change: %.8f\" % (iterations, z_change))\n\n if iterations == max_iter:\n print(\"Maximum iterations (%d) reached for termination of expectation optimization\" % max_iter)\n\n finish = datetime.datetime.now()\n time_taken = (finish - start).total_seconds() / 60.\n if show:\n print(\"Time taken (minutes): %.5f\" % time_taken)\n\n return z, gradient_stds", "def relative_likelihood(self):\n \n if self.num_hidden == 0:\n \n return T.exp(-self.compute_energy(self.x, self.batch_size))\n \n if self.num_hidden > 0:\n \n return T.exp(-self.compute_free_energy(self.x))", "def logProbFn(mcVec, logLikeFn, logPriorFn, fitFn, params, freqs, data, sigmas):\n #Pad the mcVec with the non-varying parameter values in the right locations\n paramsVec = mcVec2paramsVec(mcVec, params)\n\n #Update the log-liklihood using the fitFn and the new paramsVec\n logLike = logLikeFn(fitFn, paramsVec, freqs, data, sigmas)\n\n #Update the prior using the parameter bounds and the new paramsVec\n logPrior = logPriorFn(paramsVec, params)\n\n #Update the log-Probability\n logProb = logLike + logPrior\n return logProb", "def loglikelihood(self, x, previous=False):\r\n # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)\r\n # for i in range(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim\r\n # TODO: test this!!\r\n # c=cma.fmin...\r\n # c[3]['cma'].loglikelihood(...)\r\n\r\n if previous and hasattr(self, 'lastiter'):\r\n sigma = self.lastiter.sigma\r\n Crootinv = self.lastiter._Crootinv\r\n xmean = self.lastiter.mean\r\n D = self.lastiter.D\r\n elif previous and self.countiter > 1:\r\n raise _Error('no previous distribution parameters stored, check options importance_mixing')\r\n else:\r\n sigma = self.sigma\r\n Crootinv = self._Crootinv\r\n xmean = self.mean\r\n D = self.D\r\n\r\n dx = array(x) - xmean # array(x) - array(m)\r\n n = self.N\r\n logs2pi = n * log(2*np.pi) / 2.\r\n logdetC = 2 * sum(log(D))\r\n dx = np.dot(Crootinv, dx)\r\n res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC/2 - n*log(sigma)\r\n if 1 < 3: # 
testing\r\n s2pi = (2*np.pi)**(n/2.)\r\n detC = np.prod(D)**2\r\n res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n)\r\n assert res2 < res + 1e-8 or res2 > res - 1e-8\r\n return res", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)", "def log_marginal_likelihood_normal_pdf(self):\n noise_variance=self.params['noise_variance']['value']\n Kxx = self.C@self._Kernel(self.X, self.X, self.params)@self.C.T + (noise_variance+self.jitter) * np.eye(self.Y.shape[0])\n try:\n mu = np.linalg.solve(Kxx, self.Y)\n (sign, logdet) = np.linalg.slogdet(2 * np.pi * Kxx) \n logp1 = -0.5*np.asscalar(self.Y.T@mu)-0.5*logdet \n except:\n logp1=-10.0**300\n return logp1", "def _prior_marginal_scipy(self, parameter):\n letter = parameter.strip().split('_')[1]\n A = sum(self._prior_hyperparameters.values())\n ai = self.prior_hyperparameters['a_{}'.format(letter)]\n\n return _scipy_beta(ai, A-ai)", "def log_likelihood(self, data, reward_model, bias_params):", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % 
type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def expected_improvement(f_min, mu, sigma):\n # log-scaling might not be the best idea here, especially\n # if people use negative values to maximize output\n # v = (np.log(f_min) - mu) / sigma\n v = (f_min - mu) / sigma\n return (f_min * norm.cdf(v)\n - (np.exp(0.5 * sigma ** 2 + mu)\n * norm.cdf(v - sigma)))", "def minmarginal(self, target, out=None):\n return self.__opReduce2(self.v - target,np.min, out=out)\n\n\n # use ufunc.reduceat? reduce etc seem not good?\n # frompyfunc to make ufunc from python function?\n # use \"externalloop\" flag?\n #return t.max(axis=None,out=None) # use axis to specific dimensions to eliminate; out for IP version", "def parameter_mle(self, p: int, c: int, max_iter: int = 10, display: bool = False,\n theta_init: float = None, loss: str = \"likelihood\", alpha: float = 0.0) -> float:\n active = (np.array([p]), np.array([c]))\n# def f(theta_pc): return -self.log_likelihood_term(np.array([theta_pc]), active=active)[0]\n if loss == \"likelihood\":\n def f(theta_pc):\n return -self._total_likelihood_sum_implementation(theta_pc, p, c)\n elif loss == \"l2\":\n def f(theta_pc):\n return self._l2_loss_sum_implementation(theta_pc, p, c)\n elif loss == \"l2_logit\":\n def f(theta_pc):\n return self._l2_logit_loss_sum_implementation(theta_pc, p, c)\n else:\n raise Exception(\"Unsupported loss function {}\".format(loss))\n\n # The likelihood function may be non-concave but has piecewise smooth. Use a root finder in every interval,\n # then find the minimum of all interval minima. 
Benchmarked to be fast (see debugging_log_likelihood notebook).\n def f_interval(theta_pc, left, right): return f(theta_pc) if left < theta_pc and theta_pc < right else _LARGE\n i = np.where(self._c == c)[0][0]\n x = self._irf[i].x\n if theta_init:\n # Search only within the bin the initial guess is in, and the two neighboring bins.\n # TODO: replace by binary search.\n if theta_init <= x[0]:\n bin = 0\n else:\n bin = np.max(np.where(x < theta_init)[0])\n bins = range(max(0, bin - 1), min(len(x) - 1, bin + 2))\n else:\n # No initial guess supplied, search in all bins.\n bins = range(len(x) - 1)\n interval_min_result = \\\n (scipy.optimize.minimize_scalar(f, method=\"bounded\", bounds=(x[j], x[j + 1]), bracket=(x[j], x[j + 1]),\n options={\"maxiter\": max_iter, \"disp\": display})\n for j in bins)\n # The result struct also contains the function value, which could be useful for further MCMC steps, but\n # for now just returning the root value.\n interval_min_result = list(interval_min_result)\n # print('interval_min_result', interval_min_result)\n # for result in interval_min_result:\n # print((result.fun, result.x))\n # print(min((result.fun, result.x) for result in interval_min_result))\n return min((result.fun, result.x) for result in interval_min_result)[1]", "def _object_func_marginals_c_log(log_params, *args, **kwargs):\n\t return _object_func_marginals_c(numpy.exp(log_params), *args, **kwargs)", "def optimize(self): \n if self.model == 'ARD':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha, self.beta]),\n args=(self,),\n method='L-BFGS-B',\n bounds=((0, 50), (0, 50)),\n )\n # logger.info(estimate)\n\n # organize into a dict\n result = {\n \"alpha\": estimate.x[0],\n \"beta\": estimate.x[1],\n \"Lik\": estimate.fun,\n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n elif self.model == 'ER':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha]),\n args=(self,),\n method='L-BFGS-B',\n bounds=[(0, 50)],\n )\n\n result = {\n \"alpha\": estimate.x[0],\n \"Lik\": estimate.fun, \n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n else:\n raise Exception('model must be specified as either ARD or ER')\n\n # get scaled likelihood values\n self.log_lik = result[\"negLogLik\"]\n self.tree = self.tree.set_node_values(\n 'likelihood',\n values={\n node.idx: np.array(node.likelihood) / sum(node.likelihood)\n for node in self.tree.idx_dict.values()\n }\n )", "def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᵀΣ⁻¹G = LLᵀ.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # Hμ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - Hμ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: ½|L⁻¹(GᵀΣ⁻¹)y|²\n # (GᵀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # ½|L⁻¹(GᵀΣ⁻¹)y|² [...]\n term2 = 0.5 * tf.reduce_sum(\n 
input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: ½log |K⁻¹| - log |L| + ½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)", "def L1_log_likelihood_gradient(X, y, B, lmbda):\n pass", "def log_prior_grad(self, inputs):", "def log_marginal_raw_BP(theta_f, theta_o, sigma_f, sigma_o_inv, y, R, N,\n period=None):\n if period == None: period = (0, theta_f.shape[0])\n # Initialise\n log_p = 0\n # Iterate over each timestep and compute...\n a, b = 0, 0\n for i in range(period[0], period[1]):\n a += log_likelihood_BP(y[i,:], theta_f[i,:], R, N)\n theta_d = theta_f[i,:] - theta_o[i,:]\n b -= numpy.dot(theta_d, sigma_o_inv[i,:]*theta_d)\n logdet_sigma_f = numpy.sum(numpy.log(sigma_f[i]))\n logdet_sigma_o_inv = numpy.sum(numpy.log(sigma_o_inv[i]))\n b += logdet_sigma_f + logdet_sigma_o_inv\n log_p = a + b / 2\n\n return log_p", "def minfunc(beta, yvec, xmat ):\n return yvec - exp(dot(xmat, beta))", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n \n if self.BICscore is None:\n k = self.m.num_params\n L = self.m.log_likelihood()\n BIC = L - k/2*np.log(self.n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def _log_prior_gradients(self):\n if self.priors.size == 0:\n return 0.\n x = self.param_array\n ret = np.zeros(x.size)\n #compute derivate of prior density\n [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]\n #add in jacobian derivatives if transformed\n priored_indexes = np.hstack([i for p, i in self.priors.items()])\n for c,j in self.constraints.items():\n if not isinstance(c, Transformation):continue\n for jj in j:\n if jj in priored_indexes:\n ret[jj] += c.log_jacobian_grad(x[jj])\n return ret", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n if self.BICscore is None:\n BIC = 0\n for i, model in enumerate(self.models):\n n = model.n \n k = model.m.num_params\n L = model.m.log_likelihood()\n BIC += L - k/2*np.log(n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def BMA_Marginal_Likelihood_exh(df_train, df_test):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n T_oos = df_test.shape[0]\n # g-prior 
parameter\n c = float(K**2)\n # separate real value and individual forecasts (for both train and oos)\n y = df_train.iloc[:, 0].values\n multid_y = y[:, np.newaxis]\n F_all = np.concatenate((df_train.iloc[:, 1:].values, df_test.values),\n axis=0)\n # list of variables\n var_list = np.hsplit(F_all, K)\n # list containing vectors of all model by the number of variables\n model_list = []\n # null model\n model_list += [np.full((1, T+T_oos, 1), 1, dtype=float)]\n # single variable models\n model_list += [np.insert(np.squeeze(list(itertools.combinations(\n var_list, 1)), axis=3)[:, :, np.newaxis], 0, 1, axis=2)]\n # 2-(K-1) variable combinations models\n for i in range(2, K):\n model_list += [np.insert(np.swapaxes(np.squeeze(list(\n itertools.combinations(var_list, i)),\n axis=3), 1, 2), 0, 1, axis=2)]\n # K variable model\n model_list += [np.insert(F_all[np.newaxis, :, :], 0, 1, axis=2)]\n\n # initializations\n marginal_lik = np.full((2**K, 1), fill_value=np.nan, dtype=float)\n oos_fcts = np.full((df_test.shape[0], 2**K), np.nan, dtype=float)\n\n # compute marginal likelihoods\n # precompute the right part (same in every case)\n y_bar = np.mean(y)\n S_right = sum((y - y_bar)**2)/(c+1)\n # start indices for sets with different numbers of variables\n start_ind = 0\n # compute the marginal likelihoods for all sets of models\n for i in range(len(model_list)):\n # unpack the training design matrix\n Z = model_list[i][:, :T, :]\n Z_t = np.swapaxes(Z, 1, 2)\n # unpack out-of-sample testing matrix\n Z_oos = model_list[i][:, T:, :]\n # estimate the model in sample\n theta_hat = np.matmul(np.linalg.inv(np.matmul(Z_t, Z)),\n np.matmul(Z_t, multid_y))\n # sum of squared residuals\n SSR = np.sum((multid_y - np.matmul(Z, theta_hat))**2, axis=1)\n S_left = (c/(c+1))*SSR\n S = S_left + S_right\n\n # scale to first decimal place for numerical reasons\n # find the scaling factor\n if i == 0:\n S_rescaled = np.copy(S)\n scaling_factor = 1\n while (S_rescaled < 0.1):\n S_rescaled *= 10\n scaling_factor *= 10\n # rescale S (to prevent overflow)\n S *= scaling_factor\n\n # update set end index\n end_ind = start_ind + Z.shape[0]\n # compute and save marginal likelihood for the given set of models\n marginal_lik[start_ind:end_ind, :] = ((c+1)**(-i))*(S**(-((T-1)/2)))\n # out-of-sample forecasts\n oos_fcts[:, start_ind:end_ind] = np.transpose(\n np.squeeze(np.matmul(Z_oos, theta_hat), axis=2))\n # update set start index\n start_ind = end_ind\n\n # model posterior probabilities\n marginal_lik_sum = np.sum(marginal_lik)\n posterior_prob = marginal_lik / marginal_lik_sum\n\n pred = np.squeeze(np.dot(oos_fcts, posterior_prob), axis=1)\n\n df_pred = pd.DataFrame(\n {\"BMA (Marginal Likelihood)\": pred},\n index=df_test.index)\n\n return df_pred", "def maximize_loglik_fcmnl_MPEC(model_params: Union[CupidParams, CupidParamsCSHeteroxy, CupidParamsFcmnl],\n x_init: np.ndarray,\n lower: Optional[np.ndarray] = None,\n upper: Optional[np.ndarray] = None,\n checkgrad: Optional[bool] = False,\n verbose: Optional[bool] = False,\n fixed_vars: Optional[List[int]] = None,\n fixed_vals: Optional[List[float]] = None,\n options: Optional[Dict] = {'iprint': 1}) -> Tuple[float, np.ndarray, int]:\n n_paramsU = x_init.size\n bases_surplus = model_params.bases_surplus\n ncat_men, ncat_women, n_bases = bases_surplus.shape\n n_pars_b_men, n_pars_b_women = model_params.n_pars_b_men, model_params.n_pars_b_women\n n_pars_b = n_pars_b_men + n_pars_b_women\n n_thetas = n_pars_b + n_bases\n\n try:\n kc = KN_new()\n except:\n bs_error_abort(\"Failed to 
find a valid Knitro license.\")\n\n KN_add_vars(kc, n_paramsU)\n\n # bounds, if any\n if lower is None:\n # not necessary since infinite\n KN_set_var_lobnds(kc, xLoBnds=np.full(n_paramsU, -KN_INFINITY))\n else:\n KN_set_var_lobnds(kc, xLoBnds=lower)\n if upper is None:\n KN_set_var_upbnds(kc, xUpBnds=np.full(n_paramsU, KN_INFINITY))\n else:\n KN_set_var_upbnds(kc, xUpBnds=upper)\n\n n_prod_categories = ncat_men * ncat_women\n # Add the constraints and set the rhs and coefficients\n n_cons = n_prod_categories\n KN_add_cons(kc, n_cons)\n KN_set_con_eqbnds(kc, cEqBnds=[0.0] * n_cons)\n\n # Define an initial point. If not set, Knitro will generate one.\n KN_set_var_primal_init_values(kc, xInitVals=x_init)\n\n if fixed_vars is not None:\n assert fixed_vals is not None\n KN_set_var_fxbnds(kc, fixed_vars, fixed_vals)\n\n cb = KN_add_eval_callback(kc, evalObj=True,\n indexCons=np.arange(n_prod_categories),\n funcCallback=log_likelihood_fcmnl_MPEC)\n\n KN_set_cb_user_params(kc, cb, model_params)\n\n # c(x,y) has derivatives in thetas, U(x,t), U(z,y)\n # dc(x,y)/dU(x,y) may only appear once\n n_args_jac = n_thetas + ncat_men + ncat_women - 1\n n_jac = n_cons * n_args_jac\n cbjacIndexCons = np.repeat(np.arange(n_prod_categories), n_args_jac)\n cbjacIndexVars = np.zeros(n_prod_categories * n_args_jac, int)\n i = 0\n for iman in range(ncat_men):\n for iwoman in range(ncat_women):\n # derivatives in thetas\n cbjacIndexVars[i:(i+n_thetas)] = np.arange(n_thetas)\n # derivatives in [iman, jwoman]\n cbjacIndexVars[(i + n_thetas):(i+n_thetas+ncat_women)] = \\\n n_thetas + iman*ncat_women + np.arange(ncat_women)\n # derivatives in [jman, iwoman] except [iman, iwoman]\n list_men = list(range(ncat_men))\n del list_men[iman]\n cbjacIndexVars[(i + n_thetas + ncat_women):(i+n_args_jac)] = \\\n n_thetas + iwoman + ncat_women*np.array(list_men, int)\n i += n_args_jac\n\n print(cbjacIndexCons.shape)\n print(cbjacIndexVars.shape)\n\n KN_set_cb_grad(kc, cb, objGradIndexVars=KN_DENSE,\n jacIndexCons=cbjacIndexCons, jacIndexVars=cbjacIndexVars,\n gradCallback=grad_log_likelihood_fcmnl_MPEC)\n\n KN_set_int_param(kc, KN_PARAM_OUTLEV, KN_OUTLEV_ALL)\n\n if checkgrad:\n # Perform a derivative check.\n KN_set_int_param(kc, KN_PARAM_DERIVCHECK, KN_DERIVCHECK_ALL)\n\n # Solve the problem.\n nStatus = KN_solve(kc)\n\n loglik_val, estimates = print_optimization_results_MPEC(kc)\n\n print_stars()\n print(f\" Value of log-likelihood: {loglik_val: > 8.3f}\\n\")\n print()\n\n return loglik_val, np.array(estimates), nStatus", "def get_likelihood(\n self,\n qb,\n inv_fish,\n map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = 
getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n self.clear_precalc()\n use_precalc = all([x is None for x in obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r 
fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / 
(prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. 
Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, 
num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def cost(self, A, b, w):\n f = 0\n if self.glm == 'Gaussian':\n tt = np.dot(A, w) - b\n # nao é loglik mesmo, é só mse\n loglik = 0.5 * np.linalg.norm(tt) ** 2.0\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 100), -100)#avoid overflow\n loglik = -(b * xb - np.exp(xb)).sum()\n elif self.glm == 
'Gamma':\n loglik = 0\n for i in np.arange(0, A.shape[0]):\n loglik += scipy.stats.gamma.logpdf(b[i], 1.0 / np.dot(A[i, :], w))\n elif self.glm == 'Binomial':\n ov_lim = 50\n Xbeta = np.maximum(np.minimum(np.dot(A, w), ov_lim), -ov_lim)#avoid overflow\n loglik = -1 * np.sum(((b * Xbeta) - np.log(1 + np.exp(Xbeta))))\n if self.mean:\n loglik /= float(A.shape[0])\n if not np.isnan(loglik):\n f += loglik\n else:\n print(\"****** WARNING: loglik is nan.\")\n return f", "def optimize_log(p0, data, model_func, pts, lower_bound=None, upper_bound=None,\n verbose=0, flush_delay=0.5, epsilon=1e-3, \n gtol=1e-5, multinom=True, maxiter=None, full_output=False,\n func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,\n output_file=None,nmarginals=1):\n if output_file:\n output_stream = file(output_file, 'w')\n else:\n output_stream = sys.stdout\n #print \"in opt,\"\n #print data.shape\n args = (data, model_func, pts, lower_bound, upper_bound, verbose,\n multinom, flush_delay, func_args, func_kwargs, fixed_params, \n ll_scale, output_stream)\n if nmarginals==1:\n \tobject_fun=dadi.Inference._object_func_log\n else:\n \tobject_fun=_object_func_marginals_log\n\n\n p0 = dadi.Inference._project_params_down(p0, fixed_params)\n outputs = scipy.optimize.fmin_bfgs(object_fun, \n numpy.log(p0), epsilon=epsilon,\n args = args, gtol=gtol, \n full_output=True,\n disp=False,\n maxiter=maxiter)\n xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs\n xopt = dadi.Inference._project_params_up(numpy.exp(xopt), fixed_params)\n\n if output_file:\n output_stream.close()\n\n if not full_output:\n return xopt\n else:\n return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag", "def log_likelihood_grad_bias(self, data, reward_model, bias_params):", "def grad_margin(x0, preds, y, model, n_class, weights, L, loss_function):\n theta = x0\n loss_fd = weights[y.ravel().astype(int)]\n\n Alpha = theta - preds # (n_class - 1, n_samples)\n S = np.sign(np.arange(n_class - 1) - y + 0.5)\n\n if(loss_function == 'logistic'):\n lf = sigmoid\n elif(loss_function == 'hinge'):\n lf = hinge_derive\n \n Sigma = np.dot(S, loss_fd.T)\n Sigma = np.dot(Sigma, lf(np.multiply(-S, Alpha)))\n\n grad_theta = -Sigma.sum(0)\n return grad_theta", "def maximize_one(self, gamma, xisum, c, x_digits):\n log_likelihood = np.log(c).sum()\n self._i = gamma[0] / gamma[0].sum()\n self._t = (xisum.T / xisum.sum(1)).T\n self._e = np.dot(x_digits, gamma) / gamma.sum(0)\n return log_likelihood", "def fmin(func, x0, sigma0=None, args=()\r\n # the follow string arguments are evaluated, besides the verb_filenameprefix\r\n , CMA_active='False # exponential negative update, conducted after the original update'\r\n , CMA_activefac='1 # learning rate multiplier for active update'\r\n , CMA_cmean='1 # learning rate for the mean value'\r\n , CMA_const_trace='False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero'\r\n , CMA_diagonal='0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always' # TODO 4/ccov_separable?\r\n , CMA_eigenmethod='np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)'\r\n , CMA_elitist='False # elitism likely impairs global search performance'\r\n , CMA_mirrors='popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used'\r\n , CMA_mu='None # parents selection parameter, default is popsize // 2'\r\n , CMA_on='True # False or 0 for no adaptation of the covariance matrix'\r\n , 
CMA_rankmu='True # False or 0 for omitting rank-mu update of covariance matrix'\r\n , CMA_rankmualpha='0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0'\r\n , CMA_dampfac='1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere'\r\n , CMA_dampsvec_fac='np.Inf # tentative and subject to changes, 0.5 would be a \"default\" damping for sigma vector update'\r\n , CMA_dampsvec_fade='0.1 # tentative fading out parameter for sigma vector update'\r\n , CMA_teststds='None # factors for non-isotropic initial distr. mainly for test purpose, see scaling_...'\r\n , CMA_AII='False # not yet tested'\r\n , bounds='[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector'\r\n , eval_parallel='False # when True, func might be called with more than one solution as first argument'\r\n , eval_initial_x='False # '\r\n , fixed_variables='None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized'\r\n , ftarget='-inf #v target function value, minimization'\r\n , incpopsize='2 # in fmin(): multiplier for increasing popsize before each restart'\r\n , maxfevals='inf #v maximum number of function evaluations'\r\n , maxiter='100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations'\r\n , mindx='0 #v minimal std in any direction, cave interference with tol*'\r\n , minstd='0 #v minimal std in any coordinate direction, cave interference with tol*'\r\n , noise_handling='False # maximal number of evaluations for noise treatment, only fmin'\r\n , noise_reevals=' 1.5 + popsize/20 # number of solution to be reevaluated for noise measurement, only fmin'\r\n , noise_eps='1e-7 # perturbation factor for noise handling reevaluations, only fmin'\r\n , noise_change_sigma='True # exponent to default sigma increment'\r\n , popsize='4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration'\r\n , randn='np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)'\r\n , restarts='0 # in fmin(): number of restarts'\r\n , restart_from_best='False'\r\n , scaling_of_variables='None # scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is ones(N)'\r\n , seed='None # random number seed'\r\n , termination_callback='None #v a function returning True for termination, called after each iteration step and could be abused for side effects'\r\n , tolfacupx='1e3 #v termination when step-size increases by tolfacupx (diverges). 
That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0'\r\n , tolupsigma='1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates \"creeping behavior\" with usually minor improvements'\r\n , tolfun='1e-11 #v termination criterion: tolerance in function value, quite useful'\r\n , tolfunhist='1e-12 #v termination criterion: tolerance in function value history'\r\n , tolstagnation='int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations'\r\n , tolx='1e-11 #v termination criterion: tolerance in x-changes'\r\n , transformation='None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno'\r\n , typical_x='None # used with scaling_of_variables'\r\n , updatecovwait='None #v number of iterations without distribution update, name is subject to future changes' # TODO: rename: iterwaitupdatedistribution?\r\n , verb_append='0 # initial evaluation counter, if append, do not overwrite output files'\r\n , verb_disp='100 #v verbosity: display console output every verb_disp iteration'\r\n , verb_filenameprefix='outcmaes # output filenames prefix'\r\n , verb_log='1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions'\r\n , verb_plot='0 #v in fmin(): plot() is called every verb_plot iteration'\r\n , verb_time='True #v output timings on console'\r\n , vv='0 #? versatile variable for hacking purposes, value found in self.opts[\\'vv\\']'\r\n ): # style guides say there should be the above empty line\r\n try: # pass on KeyboardInterrupt\r\n opts = locals() # collect all local variables (i.e. 
arguments) in a dictionary\r\n del opts['func'] # remove those without a default value\r\n del opts['args']\r\n del opts['x0'] # is not optional, no default available\r\n del opts['sigma0'] # is not optional for the constructor CMAEvolutionStrategy\r\n if not func: # return available options in a dictionary\r\n return Options(opts, True) # these opts are by definition valid\r\n\r\n # TODO: this is very ugly:\r\n incpopsize = Options({'incpopsize':incpopsize}).eval('incpopsize')\r\n restarts = Options({'restarts':restarts}).eval('restarts')\r\n del opts['restarts']\r\n noise_handling = Options({'noise_handling': noise_handling}).eval('noise_handling')\r\n del opts['noise_handling']# otherwise CMA throws an error\r\n\r\n irun = 0\r\n best = BestSolution()\r\n while 1:\r\n # recover from a CMA object\r\n if irun == 0 and isinstance(x0, CMAEvolutionStrategy):\r\n es = x0\r\n x0 = es.inputargs['x0'] # for the next restarts\r\n if sigma0 is None or not np.isscalar(array(sigma0)):\r\n sigma0 = es.inputargs['sigma0'] # for the next restarts\r\n # ignore further input args and keep original options\r\n else: # default case\r\n if irun and opts['restart_from_best']:\r\n print('CAVE: restart_from_best is typically not useful')\r\n es = CMAEvolutionStrategy(best.x, sigma0, opts)\r\n else:\r\n es = CMAEvolutionStrategy(x0, sigma0, opts)\r\n if opts['eval_initial_x']:\r\n x = es.gp.pheno(es.mean, bounds=es.gp.bounds)\r\n es.best.update([x], None, [func(x, *args)], 1)\r\n es.countevals += 1\r\n\r\n opts = es.opts # processed options, unambiguous\r\n\r\n append = opts['verb_append'] or es.countiter > 0 or irun > 0\r\n logger = CMADataLogger(opts['verb_filenameprefix'], opts['verb_log'])\r\n logger.register(es, append).add() # initial values, not fitness values\r\n\r\n # if es.countiter == 0 and es.opts['verb_log'] > 0 and not es.opts['verb_append']:\r\n # logger = CMADataLogger(es.opts['verb_filenameprefix']).register(es)\r\n # logger.add()\r\n # es.writeOutput() # initial values for sigma etc\r\n\r\n noisehandler = NoiseHandler(es.N, noise_handling, np.median, opts['noise_reevals'], opts['noise_eps'], opts['eval_parallel'])\r\n while not es.stop():\r\n X, fit = es.ask_and_eval(func, args, evaluations=noisehandler.evaluations,\r\n aggregation=np.median) # treats NaN with resampling\r\n # TODO: check args and in case use args=(noisehandler.evaluations, )\r\n\r\n if 11 < 3 and opts['vv']: # inject a solution\r\n # use option check_point = [0]\r\n if 0 * np.random.randn() >= 0:\r\n X[0] = 0 + opts['vv'] * es.sigma**0 * np.random.randn(es.N)\r\n fit[0] = func(X[0], *args)\r\n # print fit[0]\r\n es.tell(X, fit) # prepare for next iteration\r\n if noise_handling:\r\n es.sigma *= noisehandler(X, fit, func, es.ask, args)**opts['noise_change_sigma']\r\n es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though\r\n\r\n es.disp()\r\n logger.add(more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],\r\n modulo=1 if es.stop() and logger.modulo else None)\r\n if opts['verb_log'] and opts['verb_plot'] and \\\r\n (es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop()):\r\n logger.plot(324, fontsize=10)\r\n\r\n # end while not es.stop\r\n mean_pheno = es.gp.pheno(es.mean, bounds=es.gp.bounds)\r\n fmean = func(mean_pheno, *args)\r\n es.countevals += 1\r\n\r\n es.best.update([mean_pheno], None, [fmean], es.countevals)\r\n best.update(es.best) # in restarted case\r\n\r\n # final message\r\n if opts['verb_disp']:\r\n srestarts = (' 
after %i restart' + ('s' if irun > 1 else '')) % irun if irun else ''\r\n for k, v in list(es.stop().items()):\r\n print('termination on %s=%s%s (%s)' % (k, str(v), srestarts, time.asctime()))\r\n\r\n print('final/bestever f-value = %e %e' % (es.best.last.f, best.f))\r\n if es.N < 9:\r\n print('mean solution: ' + str(es.gp.pheno(es.mean)))\r\n print('std deviation: ' + str(es.sigma * sqrt(es.dC) * es.gp.scales))\r\n else:\r\n print('mean solution: %s ...]' % (str(es.gp.pheno(es.mean)[:8])[:-1]))\r\n print('std deviations: %s ...]' % (str((es.sigma * sqrt(es.dC) * es.gp.scales)[:8])[:-1]))\r\n\r\n irun += 1\r\n if irun > restarts or 'ftarget' in es.stopdict or 'maxfunevals' in es.stopdict:\r\n break\r\n opts['verb_append'] = es.countevals\r\n opts['popsize'] = incpopsize * es.sp.popsize # TODO: use rather options?\r\n opts['seed'] += 1\r\n\r\n # while irun\r\n\r\n es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell\r\n if 1 < 3:\r\n return es.result() + (es.stop(), es, logger)\r\n\r\n else: # previously: to be removed\r\n return (best.x.copy(), best.f, es.countevals,\r\n dict((('stopdict', CMAStopDict(es.stopdict))\r\n ,('mean', es.gp.pheno(es.mean))\r\n ,('std', es.sigma * sqrt(es.dC) * es.gp.scales)\r\n ,('out', es.out)\r\n ,('opts', es.opts) # last state of options\r\n ,('cma', es)\r\n ,('inputargs', es.inputargs)\r\n ))\r\n )\r\n # TODO refine output, can #args be flexible?\r\n # is this well usable as it is now?\r\n except KeyboardInterrupt: # Exception, e:\r\n if opts['verb_disp'] > 0:\r\n print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')\r\n raise # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit\r", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' :\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' 
)\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def _object_func_marginals_coarse_log(log_params, *args, **kwargs):\n return _object_func_marginals_coarse(numpy.exp(log_params), *args, **kwargs)", "def posterior_loss(X, mu, sigma, log_pi):\r\n log_PDF = log_GaussPDF(X, mu, sigma)\r\n log_post = log_posterior(log_PDF, log_pi)\r\n\r\n loss = torch.logsumexp(log_post, dim=1)\r\n # loss = torch.exp(log_post)\r\n # loss = torch.sum(loss, dim=1)\r\n # loss = torch.log(loss)\r\n loss = torch.sum(loss)\r\n loss = -loss\r\n return loss", "def _object_func_marginals_log(log_params, *args, **kwargs):\n return _object_func_marginals(numpy.exp(log_params), *args, **kwargs)", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def grad_margin(x0, X, y, alpha, n_class, weights, L, kernel_type, loss_function, sample_weight):\n\n w = x0[:X.shape[1]]\n c = x0[X.shape[1]:]\n theta = L.dot(c)\n loss_fd = weights[y]\n\n Xw = X.dot(w)\n Alpha = theta[:, None] - Xw # (n_class - 1, n_samples)\n S = np.sign(np.arange(n_class - 1)[:, None] - y + 0.5)\n\n if(loss_function == 'logistic'):\n Sigma = S * loss_fd.T * sigmoid(-S * Alpha)\n elif(loss_function == 'hinge'):\n Sigma = S * loss_fd.T * hinge_derive(-S * Alpha)\n if sample_weight is not None:\n Sigma *= sample_weight\n\n if(kernel_type == 'linear'):\n grad_w = X.T.dot(Sigma.sum(0)) + alpha * w\n else:\n ## Adjusted for batch SGD\n grad_w = X.T.dot(Sigma.sum(0)) + alpha * w\n #grad_w = X.T.dot(Sigma.sum(0)) + 0.5 * alpha * (np.matmul(X.T, w) + np.matmul(X, w))\n\n grad_theta = -Sigma.sum(1)\n grad_c = L.T.dot(grad_theta)\n return np.concatenate((grad_w, grad_c), axis=0)", "def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z", "def cost(self, A, b, w):\n f = 0\n if self.glm == 'Gaussian':\n tt = np.dot(A, w) - b\n # nao é loglik mesmo, é só mse\n loglik = 0.5 * np.linalg.norm(tt) ** 2.0\n\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 100), -100)#avoid overflow\n loglik = -(b * xb - np.exp(xb)).sum()\n\n elif self.glm 
== 'Gamma':\n loglik = 0\n for i in range(0, A.shape[0]):\n loglik += scipy.stats.gamma.logpdf(b[i], 1.0 / np.dot(A[i, :], w))\n\n elif self.glm == 'Binomial':\n Xbeta = np.dot(A, w)\n loglik = -1 * np.sum(((b * Xbeta) - np.log(1 + np.exp(Xbeta))))\n\n if self.mean:\n loglik /= float(A.shape[0])\n\n if not np.isnan(loglik):\n f += loglik\n else:\n print(\"****** WARNING: loglik is nan.\")\n return f", "def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval", "def reparameterize(self, mu, logvar):\n\t\tlogvar = torch.exp(logvar/2)\n\t\tif self.cuda_flag:\n\t\t\tepsilon = torch.randn((mu.size())).float().cuda()\n\t\telse:\n\t\t\tepsilon = torch.randn((mu.size())).float()\n\t\tlatent_vector = torch.mul(epsilon, logvar) + mu \n\t\treturn latent_vector", "def objective(beta, lambdat, X, y):\n return 1/len(y) * (np.sum(\n (np.maximum(0, 1-((y[:, np.newaxis]*X).dot(beta)))**2)))\\\n + lambdat * np.linalg.norm(beta)**2", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = 
-np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def compute_posterior(prior, likelihood, y):\n\n # -------------------------------------------------------------------------\n # ERROR CHECKS -- DO NOT MODIFY\n #\n\n # check that prior probabilities sum to 1\n if np.abs(1 - np.sum(prior)) > 1e-06:\n exit('In compute_posterior: The prior probabilities need to sum to 1')\n\n # check that likelihood is specified as a 2D array\n if len(likelihood.shape) != 2:\n exit('In compute_posterior: The likelihood needs to be specified as ' +\n 'a 2D array')\n\n K, M = likelihood.shape\n\n # make sure likelihood and prior agree on number of hidden states\n if len(prior) != M:\n exit('In compute_posterior: Mismatch in number of hidden states ' +\n 'according to the prior and the likelihood.')\n\n # make sure the conditional distribution given each hidden state value sums\n # to 1\n for m in range(M):\n if np.abs(1 - np.sum(likelihood[:, m])) > 1e-06:\n exit('In compute_posterior: P(Y | X = %d) does not sum to 1' % m)\n\n #\n # END OF ERROR CHECKS\n # -------------------------------------------------------------------------\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (b)\n #\n # Place your code to compute the log of the posterior here: store it in a\n # NumPy array called `log_answer`. If you exponentiate really small\n # numbers, the result is likely to underflow (i.e., it will be so small\n # that the computer will just make it 0 rather than storing the right\n # value). You need to go to log-domain. 
Hint: this next line is a good\n # first step.\n log_prior = np.log(prior)\n# print(log_prior)\n# print(likelihood)\n# print(y)\n unnormal = log_prior + np.log(likelihood[y,:]).sum(axis=0)\n# print(unnormal)\n log_answer = unnormal - scipy.misc.logsumexp(unnormal)\n# print(log_answer)\n\n #\n # END OF YOUR CODE FOR PART (b)\n # -------------------------------------------------------------------------\n\n # do not exponentiate before this step\n posterior = np.exp(log_answer)\n return posterior", "def cost(self, A, b, w):\n f = 0\n if self.glm == 'Gaussian':\n tt = np.dot(A, w) - b\n # nao é loglik mesmo, é só mse\n loglik = 0.5 * np.linalg.norm(tt) ** 2.0\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 100), -100)#avoid overflow\n loglik = -(b * xb - np.exp(xb)).sum()\n elif self.glm == 'Gamma':\n loglik = 0\n for i in range(0, A.shape[0]):\n loglik += scipy.stats.gamma.logpdf(b[i], 1.0 / np.dot(A[i, :], w))\n elif self.glm == 'Binomial':\n xb = np.maximum(np.minimum(np.dot(A, w), 100), -100)#avoid overflow\n loglik = -1 * np.sum(((b * xb) - np.log(1 + np.exp(xb))))\n if self.mean:\n loglik /= float(A.shape[0])\n if not np.isnan(loglik):\n f += loglik\n else:\n print(\"****** WARNING: loglik is nan.\")\n return f", "def diag_gaussian_log_likelihood(z, mean=0.0, logvar=0.0, varmin=1e-16):\n logvar_wm = np.log(np.exp(logvar) + varmin)\n return (-0.5 * (logvar + np.log(2*np.pi) +\n np.square((z-mean)/( np.exp(0.5*(logvar_wm))))))", "def _log_posterior_x(self, X):\r\n LL = self.log_likelihood(X=X)\r\n LP = self._log_prior_x(X)\r\n return LL + LP", "def log_marg(self):\n log_prob_X_given_z = 0.\n for k in range(self.K):\n log_prob_X_given_z += self.log_marg_k(k)\n return log_prob_X_given_z", "def marginal_ln_likelihood(samples, prior, data):\n n_samples = len(samples)\n n_linear = len(prior._linear_equiv_units)\n mu = np.zeros(n_linear)\n\n marg_ll = np.zeros(n_samples)\n for n, M, Lambda, ivar, *_ in get_M_Lambda_ivar(samples, prior, data):\n try:\n marg_ll[n], *_ = likelihood_worker(data.rv.value, ivar, M,\n mu, np.diag(Lambda),\n make_aA=False)\n except np.linalg.LinAlgError as e:\n raise e\n\n return marg_ll", "def scipy_minus_objective(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n if perceptron:\n perceptron._obj_iter += 1\n obj = 0.0\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n all_scores = vector_graphs * w\n sum_log_Z = 0.0\n for batch in batches:\n batch_scores = all_scores[batch]\n sum_log_Z += logsumexp(batch_scores) #np.log(np.exp(batch_scores).sum())\n obj += all_scores[correct_rows].sum() - sum_log_Z\n index += 1\n if index % 100 == 0:\n print('Objective '+str(index)+' processed')\n obj = obj / len(all_vector_graphs)\n if sigma != None:\n obj += - 0.5 * sigma * (w * w).sum()\n print('Objective:'+str(obj))\n return -1.0 * obj", "def get_marginals(word, model):\n # forward and backward message at once\n char_count, _ = word.shape\n alpha = np.zeros((char_count, model.dimY)) # alphas\n lbeta = np.zeros((char_count, model.dimY)) # log version of betas\n\n first_term = np.dot(word, model.getW(model.labels))\n second_term_a = model._T\n second_term_b = model._T.T\n for i in range(1, char_count):\n sum_term_a = (first_term[i-1] + alpha[i-1]) + second_term_a\n sum_term_b = (first_term[char_count-i] +lbeta[char_count-i]) + second_term_b\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term_a) \n lbeta[char_count-i-1] = 
np.apply_along_axis(logsumexp_trick, 1, sum_term_b)\n\n marginal_Y = np.zeros((char_count, model.dimY))\n marginal_Y_Y1 = np.zeros((char_count-1, model.dimY, model.dimY)) \n \n for i in range(char_count):\n sum_term = first_term[i] + alpha[i] + lbeta[i]\n log_marginal_y = sum_term - logsumexp_trick(sum_term)\n marginal_Y[i] = np.exp(log_marginal_y)\n # calculate other marginal dist as well\n if i < char_count-1:\n transition = model._T.transpose() # T_{yi, yi+1}\n outer_sum_w = np.add.outer(first_term[i], first_term[i+1]).reshape(model.dimY,model.dimY)\n outer_sum_m = np.add.outer(alpha[i], lbeta[i+1])\n sum_term_all = outer_sum_w + transition + outer_sum_m\n log_marginal_y_y1 = sum_term_all - logsumexp_trick(sum_term_all)\n marginal_Y_Y1[i] = np.exp(log_marginal_y_y1)\n # Got Denominator same as Zx , which is correct\n return marginal_Y, marginal_Y_Y1", "def __minBayesianEvidence(self, logLam):\n\t\tlam = 10**logLam\n\n\t\t# Calculate wMP for given regularization parameter.\n\t\tA = self.B + lam*self.C\n\t\twMP = self.__MP(A,lam)\n\t\t\n\t\t# Calculate Bayesian evidence. If any of the weights is\n\t\t# below zero, return very high number. This is the \n\t\t# implementation of the prior on lambda that prevents\n\t\t# negative weights.\n\t\tif (wMP >= 0).all():\n\t\t\t# Calculate different terms Bayesian evidence.\n\t\t\tchiSq = self.__getChiSq(wMP)\n\t\t\tpenaltyTerm = 0.5 * lam * np.dot(np.dot(np.transpose(wMP - self.w0), self.C), (wMP - self.w0))\n\t\t\tlogDetA = np.linalg.slogdet(A)\n\t\t\tp1 = -chiSq - penaltyTerm - 0.5 * logDetA[1]\n\t\t\tp2 = self.BE - 0.5*self.detCov\n\t\t\tp3 = (self.nTemplates/2.0) * (np.log(lam)) + 0.5*np.linalg.slogdet(self.C)[1]\n\t\t\n\t\t\t# Calculate BE with flat prior on lambda in logspace\n\t\t\tBE = p1 + p2 + p3 - np.log(lam)\n\t\t\t\n\t\t\t# Return minus the evidence.\n\t\t\treturn -BE\n\t\telse:\n\t\t\treturn 1e120", "def _log_likelihood_gradient(self, z, K, H, B, Kinv):\n\n nparams = 4\n grad = np.zeros((nparams,))\n\n #t0 = time.time()\n tmp = np.dot(self.invc, self.HKinv)\n #t1 = time.time()\n K_HBH_inv = Kinv - np.dot(tmp.T, tmp)\n #t2 = time.time()\n alpha_z = np.dot(K_HBH_inv, z)\n #t3 = time.time()\n\n #print \"gradient: %f %f %f\" % (t1-t0, t2-t1, t3-t2)\n\n for i in range(nparams):\n tA = time.time()\n if (i == 0):\n dKdi = np.eye(self.n)\n else:\n dKdi = self.predict_tree.kernel_deriv_wrt_i(self.X, self.X, \"se\", self.wfn_params, i-1)\n\n dlldi = .5 * np.dot(alpha_z.T, np.dot(dKdi, alpha_z))\n tB = time.time()\n # here we use the fact:\n # trace(AB) = sum_{ij} A_ij * B_ij\n dlldi -= .5 * np.sum(np.sum(K_HBH_inv.T * dKdi))\n\n grad[i] = dlldi\n tC = time.time()\n print \" %d: %f %f\" % (i, tB-tA, tC-tB)\n\n return grad", "def log_likelihood_grad_rew(self, data, reward_model, bias_params):", "def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop", "def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)", 
"def _call_marginalizevperp(self,o,**kwargs):\n #Get l, vlos\n l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD\n vlos= o.vlos(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])\n R= o.R(use_physical=False)\n phi= o.phi(use_physical=False)\n #Get local circular velocity, projected onto the los\n vcirc= R**self._beta\n vcirclos= vcirc*math.sin(phi+l)\n #Marginalize\n alphalos= phi+l\n if not 'nsigma' in kwargs or ('nsigma' in kwargs and \\\n kwargs['nsigma'] is None):\n nsigma= _NSIGMA\n else:\n nsigma= kwargs['nsigma']\n kwargs.pop('nsigma',None)\n sigmaR2= self.targetSigma2(R,use_physical=False)\n sigmaR1= sc.sqrt(sigmaR2)\n #Use the asymmetric drift equation to estimate va\n va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1.\n -R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True)\n -R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True))\n if math.fabs(va) > sigmaR1: va = 0. #To avoid craziness near the center\n if math.fabs(math.sin(alphalos)) < math.sqrt(1./2.):\n cosalphalos= math.cos(alphalos)\n tanalphalos= math.tan(alphalos) \n return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,\n -self._gamma*va/sigmaR1-nsigma,\n -self._gamma*va/sigmaR1+nsigma,\n args=(self,R,cosalphalos,tanalphalos,\n vlos-vcirclos,vcirc,\n sigmaR1/self._gamma),\n **kwargs)[0]/math.fabs(cosalphalos)\\\n *sigmaR1/self._gamma\n else:\n sinalphalos= math.sin(alphalos)\n cotalphalos= 1./math.tan(alphalos)\n return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,\n -nsigma,nsigma,\n args=(self,R,sinalphalos,cotalphalos,\n vlos-vcirclos,vcirc,sigmaR1),\n **kwargs)[0]/math.fabs(sinalphalos)*sigmaR1", "def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def marginal_error(geom: geometry.Geometry, a: jnp.ndarray, b: jnp.ndarray,\n tau_a: float, tau_b: float, f_u: jnp.ndarray,\n g_v: jnp.ndarray, norm_error: int, lse_mode) -> jnp.ndarray:\n if tau_b == 1.0:\n err = geom.error(f_u, g_v, b, 0, norm_error, lse_mode)\n elif tau_a == 1.0:\n err = geom.error(f_u, g_v, a, 1, norm_error, lse_mode)\n else:\n # In the unbalanced case, we compute the norm of the gradient.\n # the gradient is equal to the marginal of the current plan minus\n # the gradient of < z, rho_z(exp^(-h/rho_z) -1> where z is either a or b\n # and h is either f or g. 
Note this is equal to z if rho_z → inf, which\n # is the case when tau_z → 1.0\n if lse_mode:\n target = grad_of_marginal_fit(a, b, f_u, g_v, tau_a, tau_b, geom)\n else:\n target = grad_of_marginal_fit(a, b,\n geom.potential_from_scaling(f_u),\n geom.potential_from_scaling(g_v),\n tau_a, tau_b, geom)\n err = geom.error(f_u, g_v, target[0], 1, norm_error, lse_mode)\n err += geom.error(f_u, g_v, target[1], 0, norm_error, lse_mode)\n return err", "def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)", "def _culpepper1a_log_marginal_overcomplicated(x, phi, sigma_n):\n raise NotImplementedError('This is an overcomplicated implementation that does not work')\n M, L = phi.shape\n sigma_n2 = sigma_n**2\n #\n # Precision of posterior for a\n SigmaInv = np.diag(np.ones(L)) + phi.T @ phi / sigma_n2\n #\n # Cholesky\n C = la.cholesky(SigmaInv)\n halflogSigmaDet = - np.add.reduce(np.log(np.diag(C)))\n #\n # Solve for term we need\n xPhiCinv = la.solve_triangular(C, phi.T @ x.T, lower=True).T\n #\n # Normalising constants\n lZa = L / 2. * LOG_2_PI\n lZxa = M / 2. * LOG_2_PI + M * np.log(sigma_n)\n lZax = L / 2. * LOG_2_PI + halflogSigmaDet\n #\n # Log marginal\n lpx = - lZa - lZxa + lZax + (np.square(xPhiCinv).sum(axis=1) / sigma_n2 - np.square(x).sum(axis=1)) / (2. 
* sigma_n2)\n #\n return lpx", "def _call_marginalizevlos(self,o,**kwargs):\n #Get d, l, vperp\n l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD\n vperp= o.vll(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])\n R= o.R(use_physical=False)\n phi= o.phi(use_physical=False)\n #Get local circular velocity, projected onto the perpendicular \n #direction\n vcirc= R**self._beta\n vcircperp= vcirc*math.cos(phi+l)\n #Marginalize\n alphaperp= math.pi/2.+phi+l\n if not 'nsigma' in kwargs or ('nsigma' in kwargs and \\\n kwargs['nsigma'] is None):\n nsigma= _NSIGMA\n else:\n nsigma= kwargs['nsigma']\n kwargs.pop('nsigma',None)\n sigmaR2= self.targetSigma2(R,use_physical=False)\n sigmaR1= sc.sqrt(sigmaR2)\n #Use the asymmetric drift equation to estimate va\n va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1.\n -R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True)\n -R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True))\n if math.fabs(va) > sigmaR1: va = 0. #To avoid craziness near the center\n if math.fabs(math.sin(alphaperp)) < math.sqrt(1./2.):\n cosalphaperp= math.cos(alphaperp)\n tanalphaperp= math.tan(alphaperp)\n #we can reuse the VperpIntegrand, since it is just another angle\n return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,\n -self._gamma*va/sigmaR1-nsigma,\n -self._gamma*va/sigmaR1+nsigma,\n args=(self,R,cosalphaperp,tanalphaperp,\n vperp-vcircperp,vcirc,\n sigmaR1/self._gamma),\n **kwargs)[0]/math.fabs(cosalphaperp)\\\n *sigmaR1/self._gamma\n else:\n sinalphaperp= math.sin(alphaperp)\n cotalphaperp= 1./math.tan(alphaperp)\n #we can reuse the VperpIntegrand, since it is just another angle\n return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,\n -nsigma,nsigma,\n args=(self,R,sinalphaperp,cotalphaperp,\n vperp-vcircperp,vcirc,sigmaR1),\n **kwargs)[0]/math.fabs(sinalphaperp)*sigmaR1", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def optimize(self, num_restarts=1, max_iters=100, max_f_eval=300.0, method='Anneal'):\n dic = DictVectorizer()\n # flatten the parameters\n init_params,bounds=dic.fit_transform(self.params)\n #we minimise minus the marginal likelihood\n def objective(params_flatten):\n self.params=dic.inverse_transform(params_flatten,bounds)\n val = -self.log_marginal_likelihood()\n return val# we want to maximize it\n \n \n #run ptimisation with multiple restarts\n optml=np.inf\n for i in range(num_restarts):\n #minimise function\n if method=='Anneal':\n res=dual_annealing(objective,bounds, maxiter=max_iters, maxfun=max_f_eval, x0=init_params)\n else:\n \n res = minimize(objective, init_params, \n bounds=bounds, method=method,options={'maxiter': max_iters, 'disp': False})\n #print(\"Iteration \"+str(i)+\" \",-res.fun)\n if res.fun<optml:\n params_best=res.x #init_params \n optml=res.fun\n init_params=bounds[:,0]+(bounds[:,1]-bounds[:,0])*np.random.rand(len(bounds[:,0]))\n print(\"Iteration \"+str(i)+\" \",-res.fun)\n #params_best=res.x\n #optml=res.fun\n self.params=dic.inverse_transform(params_best,bounds)\n return -optml", "def marginal(self):\n m = np.zeros(len(self.domain))\n for fnode in self.neighbors:\n m += self.received[fnode]\n return np.exp(normalize(m))", "def logPriorFlat(paramsVec, params):\n logPrior = 0 #ln(1) = 0\n if params is None:\n #Maximally flat prior: p=1 always\n pass\n else:\n paramsDict = params.valuesdict()\n\n #Loop through parameter bounds and update the prior\n for kindex, key in enumerate(paramsDict.keys()):\n if (params[key].min < paramsVec[kindex] < params[key].max):\n pass\n else:\n logPrior = -np.inf 
#ln(0) = -inf\n return logPrior", "def linearFitWithOutliers(x, y, e, outtriangle='linear.png'):\n # theta will be an array of length 2 + N, where N is the number of points\n # theta[0] is the intercept, theta[1] is the slope,\n # and theta[2 + i] is the weight g_i\n def log_prior(theta):\n #g_i needs to be between 0 and 1\n if (all(x > 0. for x in theta[2:]) and all(x < 1. for x in theta[2:])) and \\\n 0. < theta[0] < 10. and 0. < theta[1] < 0.1:\n return 0\n else:\n return -np.inf # recall log(0) = -inf\n\n def log_likelihood(theta, x, y, e, sigma_B):\n dy = y - theta[0] - theta[1] * x\n g = np.clip(theta[2:], 0, 1) # g<0 or g>1 leads to NaNs in logarithm\n logL1 = np.log(g) - 0.5 * np.log(2 * np.pi * e ** 2) - 0.5 * (dy / e) ** 2\n logL2 = np.log(1 - g) - 0.5 * np.log(2 * np.pi * sigma_B ** 2) - 0.5 * (dy / sigma_B) ** 2\n return np.sum(np.logaddexp(logL1, logL2))\n\n def log_posterior(theta, x, y, e, sigma_B):\n return log_prior(theta) + log_likelihood(theta, x, y, e, sigma_B)\n\n\n #find starting point\n def squared_loss(theta, x=x, y=y, e=e):\n dy = y - theta[0] - theta[1] * x\n return np.sum(0.5 * (dy / e) ** 2)\n theta1 = optimize.fmin(squared_loss, [0, 0], disp=False)\n\n ndim = 2 + len(x) # number of parameters in the model\n nwalkers = 200 # number of MCMC walkers\n nburn = 5000 # \"burn-in\" period to let chains stabilize\n nsteps = 50000 # number of MCMC steps to take\n\n # set theta near the maximum likelihood, with\n starting_guesses = np.zeros((nwalkers, ndim))\n starting_guesses[:, :2] = np.random.normal(theta1, 1, (nwalkers, 2))\n starting_guesses[:, 2:] = np.random.normal(0.5, 0.1, (nwalkers, ndim - 2))\n\n #initiate sampler\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x, y, e, 20])\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(starting_guesses, nburn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n sampler.reset()\n\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, nburn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n print \"Running MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, nsteps, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #sample shape = (nwalkers, nsteps, ndim)\n sample = sampler.chain.reshape(-1, ndim)\n\n params = np.mean(sample[:, :2], 0)\n g = np.mean(sample[:, 2:], 0)\n outliers = (g < 0.5)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index][:2]\n errors = [sampler.flatchain[:, i].std() for i in xrange(ndim)][:2]\n\n fig = triangle.corner(sample, labels=['intercept' , 'slope'] + len(x)*['Gi',])\n fig.savefig(outtriangle)\n plt.close()\n\n\n return params, params_fit, errors, outliers", "def lnprior(self, params):\n self.debug.start_function('lnprior')\n lower_bounds = self.mcmc_version.prior_bounds[:, 0]\n upper_bounds = self.mcmc_version.prior_bounds[:, 1]\n inside_bounds = np.logical_and(params > lower_bounds,\n params < upper_bounds)\n\n if False in inside_bounds:\n self.debug.end_function()\n return self.zero_lhood\n\n if self.has_logz:\n z_input = params[self.param_idxs['logz']]\n else:\n z = 
params[self.param_idxs['z']]\n z_input = np.log10(z / z_sun)\n\n prior_lhood = np.log(self.z_prior(z_input))\n\n # ===== anisotropy/inclination priors =====\n if self.has_two_f:\n xi_ratio = params[self.param_idxs['f_p']] / params[self.param_idxs['f_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n elif self.has_xi_ratio:\n xi_ratio = params[self.param_idxs['xi_ratio']]\n d_b = params[self.param_idxs['d_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n prior_lhood += np.log(self.d_b_prior(d_b))\n\n self.debug.variable('prior_lhood', prior_lhood, formatter='f')\n self.debug.end_function()\n return prior_lhood", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood" ]
[ "0.66340697", "0.6619952", "0.6452456", "0.6451917", "0.6415813", "0.64041406", "0.6392549", "0.63788503", "0.6315891", "0.6298545", "0.62985104", "0.6243928", "0.61450654", "0.6131926", "0.6124716", "0.6111718", "0.6108412", "0.60599285", "0.60583085", "0.60303396", "0.6005673", "0.5988207", "0.5973938", "0.5913159", "0.5894918", "0.58789676", "0.58784556", "0.58652174", "0.58211684", "0.58187777", "0.5808868", "0.58062524", "0.5794278", "0.5790856", "0.57799196", "0.57770765", "0.5766098", "0.57207817", "0.57150984", "0.57143396", "0.56904715", "0.56895554", "0.5672133", "0.5668741", "0.56471795", "0.5643719", "0.5642241", "0.564132", "0.5639667", "0.56339574", "0.5628274", "0.5620894", "0.5617364", "0.5605826", "0.560428", "0.55938554", "0.5580371", "0.5566204", "0.5563714", "0.55626583", "0.5557141", "0.55404705", "0.55389684", "0.55363977", "0.55298555", "0.5517588", "0.5516807", "0.5513682", "0.55104613", "0.5509177", "0.5506537", "0.54987997", "0.5495958", "0.5493487", "0.5489806", "0.54867643", "0.54822534", "0.5481427", "0.5474229", "0.54728824", "0.54709625", "0.5470371", "0.54703146", "0.5461021", "0.5459722", "0.54540616", "0.54524374", "0.54518926", "0.545114", "0.54455817", "0.54415137", "0.54382527", "0.5436472", "0.5436472", "0.5435637", "0.54320574", "0.542994", "0.542533", "0.5420451", "0.541726", "0.5416729" ]
0.0
-1
This function deletes duplicate values from a singly linked list
def remove_dups(ll: SinglyLinkedList):
    seen = set()
    current = ll.head
    prev = None
    while current is not None:
        if current.data in seen:
            prev.next = current.next
            temp = current
            current = current.next
            temp.next = None
        else:
            seen.add(current.data)
            prev = current
            current = current.next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_dup2(linkedlist):", "def remove_duplicates_slow(linked_list):\n current = linked_list.head\n while current:\n runner = current\n while runner:\n if runner.next_node and runner.next_node.value == current.value:\n # delete this duplicate\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n current = current.next_node", "def remove_duplicates(self):\n cur = self.head\n prev = None\n\n dup_values = dict()\n\n while cur:\n if cur.data in dup_values:\n # Remove node:\n prev.next = cur.next\n else:\n # Have not encountered element before.\n dup_values[cur.data] = 1\n prev = cur\n cur = prev.next", "def dedup1(linked_list):\n\tif linked_list is None:\n\t\traise ValueError('Invalid input')\n\n\t# Mark and sweep\n\tcurrent = linked_list\n\tseen = set([current.value])\n\twhile current.next is not None:\n\t\tif current.next.value in seen:\n\t\t\tcurrent.next = current.next.next\n\t\telse:\n\t\t\tcurrent = current.next # Move forward\n\n\treturn linked_list", "def remove_duplicates_from_unsorted_list(linked_list: SinglyLinkedList):\n # Set to store seen values\n unique_list_nodes = set()\n current_node = linked_list.head\n\n if current_node:\n unique_list_nodes.add(current_node.data)\n previous_node = current_node\n current_node = current_node.next\n\n while current_node:\n # If current value is seen before\n if current_node.data in unique_list_nodes:\n previous_node.next = current_node.next\n else:\n previous_node = current_node\n unique_list_nodes.add(current_node.data)\n current_node = current_node.next", "def remove_duplicates(linked_list):\n elems = set()\n prev = None\n for node in linked_list.iternodes():\n if node.value in elems:\n prev.next_node = node.next_node\n else:\n elems.add(node.value)\n prev = node", "def removeDuplicates(self): \r\n aux = self.head \r\n if aux is None: \r\n return\r\n while aux.next is not None: \r\n #Compare head node with next node\r\n if aux.data == aux.next.data: \r\n new = aux.next.next\r\n aux.next = new \r\n else: \r\n aux = aux.next\r\n return self.head", "def remove_dup(linkedlist):\n hashtable=[]\n if not linkedlist or linkedlist.nextnode is None:\n return linkedlist\n else:\n prerider= linkedlist\n rider = prerider.nextnode\n hashtable.append(prerider.data)\n while rider.nextnode is not None:\n if rider.data in hashtable:\n prerider.nextnode=rider.nextnode\n rider=prerider.nextnode\n else:\n hashtable.append(rider.data)\n rider=rider.nextnode\n prerider=prerider.nextnode\n\n if rider.data in hashtable:\n prerider.nextnode=None\n return linkedlist", "def removeDuplicates(self,head):\n if head != None:\n currentNode = head\n if(currentNode.next): \n counterNode = currentNode.next\n while(currentNode):\n if(counterNode):\n if(currentNode.data == counterNode.data): \n currentNode.next = None #If there are duplicate data, we cut connection between them.\n else:\n currentNodenext = counterNode # If there is no duplite, we connect again two nodes.\n currentNode = currentNode.next\n counterNode = counterNode.next\n else:\n break\n return head", "def remove_sorted_duplicates(self):\n cur = self.head\n while cur is not None and cur.next is not None:\n if cur.next.data == cur.data:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return self.head", "def removeDuplicatesFromLinkedList(linkedlist):\n if not linkedlist:\n return None\n\n head = node = linkedlist\n while node:\n while node.next and node.value == node.next.value:\n node.next = node.next.next\n\n node = node.next\n\n return head", "def basic_dedup(llist):\n\n 
dedup_dict = {}\n\n if llist.headval is not None:\n dedup_dict[llist.headval.dataval] = 1\n\n for item in llist:\n # we haven't hit the end yet\n if item.nextval is not None:\n #check next node\n next_item = item.nextval\n if next_item.dataval not in dedup_dict.keys():\n dedup_dict[next_item.dataval] = 1\n \n # key is a duplicate!\n elif next_item.dataval in dedup_dict.keys():\n # there are links to keep\n if next_item.nextval is not None:\n item.nextval = next_item.nextval\n #just drop this node\n elif next_item.nextval is None:\n item.nextval = None\n return basic_dedup(llist)\n \n elif item.nextval is None:\n return llist", "def remove_duplicates(list):\n x = 0\n while x < len(list):\n y = x + 1\n while y < len(list):\n if list[x] == list[y]:\n del list[y]\n else:\n y += 1\n x += 1\n return list", "def delete_list(self): \n temp_node = self.head\n while temp_node is not None:\n prev_node = temp_node\n temp_node = temp_node.next\n # prev_node.val += \": deleted\" # for sanity check\n # reset data\n prev_node.val = None\n prev_node.next = None", "def unique(self) -> None:\n def unique_list(node: Node) -> Node: #recursive function to remove common elements\n \"\"\"unique helper\"\"\"\n if node is self.node:\n return node\n if node.next.val == node.val:\n temp = node.prev\n temp.next = node.next\n node.next.prev = temp\n unique_list(node.next)\n unique_list(self.node.next)", "def dedup2(linked_list):\n\tif linked_list is None:\n\t\traise ValueError('Invalid input')\n\n\tdef _seen(node):\n\t\t\"\"\"Return True if `node` has already been seen *earlier*\n\t\tin the linked list.\n\n\t\t\"\"\"\n\t\tcheck = linked_list\n\t\twhile check != node:\n\t\t\tif check.value == node.value:\n\t\t\t\treturn True\n\t\t\tcheck = check.next\n\t\treturn False\n\n\t# Iterate through the list\n\tcurrent = linked_list\n\twhile current.next is not None:\n\t\tif _seen(current.next):\n\t\t\tcurrent.next = current.next.next\n\t\telse:\n\t\t\tcurrent = current.next # Move forward\n\n\treturn linked_list", "def delete(self, value):\n current = self.head\n previous = None\n while current.value != value and current.next:\n previous = current\n current = current.next\n if current.value == value:\n if previous:\n previous.next = current.next\n else:\n self.head = current.next\n pass", "def delete(self, value):\n current = self.head\n if current.value == value:\n self.head = current.next\n else:\n while current:\n if current.value == value:\n break\n prev = current\n current = current.next\n if current == None:\n return\n prev.next = current.next\n current = None", "def delete(self, value):\n current = self.head\n prev = None\n\n while current:\n if current.value == value:\n if prev == None:\n self.head = current.next\n else:\n prev.next = current.next\n break\n prev = current\n current = current.next", "def remove(self, d):\n\n if self.head is not None:\n if self.head.data == d:\n self.head = self.head.next\n else:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n break\n else:\n temp = temp.next", "def remove_adjacent(some_list):\n # This function will reduce element that have the same value next to it to single element.\n bucket = []\n for i in range(len(some_list)):\n try:\n #print(\"{0:>3}-{1:<3}\".format(f\"{some_list[i]}\",f\"{some_list[i+1]}\"),end=\"\")\n if some_list[i] == some_list[i+1]:\n bucket.append(some_list[i])\n #print(\"same!!\",end=\"\")\n except:\n pass\n #print(\"\")\n for j in bucket:\n some_list.remove(j)\n return some_list", "def 
__delitem__(self, value) -> bool: # True -> if element was deleted else False\n if not self.head:\n return False\n if self.head.value == value:\n if self.head.next_value:\n self.head = self.head.next_value\n else:\n self.head = None\n return True\n link = self.head.next_value\n prev = self.head\n while link:\n if link.value == value:\n prev.next_value = link.next_value\n return True\n prev = link\n link = link.next_value\n return False", "def deduplicate_list(lst):\n return list(set(lst))", "def removeDuplicates(seq):\n\n pass", "def remove_duplicates(list1):\n #iterative, not recursive\n if len(list1) == 0:\n return list1\n new_list = []\n new_list.append(list1[0])\n for item in list1[1:]:\n if item != new_list[-1]:\n new_list.append(item)\n return new_list", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n newDataList.append(i)\n data.clear()\n data += newDataList", "def dedup(lst):\n new_lst = []\n seen = set()\n for elem in lst:\n if elem not in seen:\n new_lst.append(elem)\n seen.add(elem)\n\n return new_lst", "def delete_by_value(self, key):\n cur_node = self.head\n\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n prev = None\n while cur_node and cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n if cur_node is None:\n return\n prev.next = cur_node.next\n cur_node = None", "def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli", "def no_dupli(L):\r\n N = [L.pop(0)]\r\n while L != []:\r\n k = 0\r\n flag = True\r\n while k < len(N) and L != [] and flag:\r\n if (N[k] == L[0]).all():\r\n L.pop(0)\r\n flag = False\r\n else:\r\n k = k + 1\r\n if k == len(N):\r\n N.append(L.pop(0))\r\n flag = False\r\n \r\n return N", "def removeDuplicates(list):\n\treturn set((item for item in list))", "def delete(self, value):\n current = self.head\n index = 1\n ''' delete first element '''\n if index == 1 and current.value == value:\n print (\"deleting first element\")\n current.next = current.next.next\n return\n \n ''' delete last element '''\n while not current.next.next and current.next.value == value:\n print (\"deleting last element\")\n current.next = None\n return\n \n ''' anywhere in between '''\n while current.next.next and current.next.value != value:\n current = current.next\n \n ''' delete the element '''\n print (\"deleting anywhere between element\")\n current.next = current.next.next\n return", "def remove_duplicates(l):\n unique = set() # we use a set because ``elem in set`` is much faster than ``elem in list``\n i = 0\n while i < len(l):\n elem = l[i]\n if elem in unique:\n del l[i]\n else:\n unique.add(elem)\n i += 1\n return l", "def removeNodesByValue(self, value): # Class O(nlog2n)\r\n # I'm assuming this classification because this function\r\n # calls removeNode()\r\n h = self.head\r\n count = 1\r\n while count <= self.length():\r\n try:\r\n if h.value == value:\r\n self.removeNode(count)\r\n if h.next != h:\r\n h = h.next\r\n next\r\n else:\r\n count += 1\r\n h = h.next\r\n except:\r\n break", "def remove(self, value):\r\n if self.head is None:\r\n return\r\n\r\n if 
self.head.value == value:\r\n self.head = self.head.next\r\n return\r\n\r\n node = self.head\r\n while node.next:\r\n if node.next.value == value:\r\n node.next = node.next.next\r\n return\r\n node = node.next", "def remove(self, item):\n \"\"\"\n :type item: Node()\n :rtype None\n \"\"\"\n if self.head.getData() == item:\n self.head = self.head.getNext()\n return\n\n prev = curr = self.head\n while curr: \n if curr.getData() == item:\n prev.setNext(curr.getNext())\n break\n prev = curr\n curr = curr.getNext()", "def delete(self, ele):\n prev = current = self.head\n element_in_head = False\n if self.head:\n while True:\n\tif current.data == ele:\n\t if current == self.head:\n\t element_in_head = True\n\t else:\n\t prev.next = current.next\n\t break\n\tprev = current\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n if element_in_head:\n\tif self.head.next == self.head:\n\t self.head = None\n\telse:\n\t prev.next = self.head.next\n\t self.head = self.head.next", "def rmdup(sll):\n seen = set()\n prev = None\n current = sll.head\n while current:\n if current.payload in seen:\n prev.next_ = current.next_\n current = current.next_\n else:\n seen.add(current.payload)\n prev = current\n current = current.next_\n return sll # for chaining", "def removeDuplicates(list):\n\treturn Set((item for item in list))", "def remove(self , element):\n current = self.head \n previous = None\n\n while current and current.data != element:\n previous = current\n current = current.next\n\n if previous == None :\n self.head = current.next\n elif current :\n previous.next = current.next\n current.next = None", "def delete_by_data(self, data):\n if self.is_empty():\n return\n else:\n cur = self.head\n if cur.data == data:\n # If the element of the first node is the element to be deleted\n if cur.next == None:\n self.head = None\n else:\n cur.next.prev = None\n self.head = cur.next\n return\n while cur != None:\n if cur.data == data:\n # Point the next node of cur to the next node of cur\n cur.prev.next = cur.next\n # Point the prev of the next node of cur to the previous node of cur\n cur.next.prev = cur.prev\n break\n cur = cur.next", "def remove_duplicates(lst):\n lst.sort()\n lst_without_duplicates = [x for (x, _) in groupby(lst)]\n num_removed = len(lst) - len(lst_without_duplicates)\n print(\"Removed %d duplicates!\" % num_removed)\n return lst_without_duplicates", "def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']", "def list_remove_duplicates(l):\n seen = set()\n seen_add = seen.add\n return [x for x in l if not (x in seen or seen_add(x))]", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if self.head == None:\n return None\n\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def delete(self, data):\r\n current_node = self.head\r\n current_index = 0\r\n index = self.get_index(data)\r\n while current_node.next != None:\r\n last_node = current_node\r\n current_node = current_node.next\r\n if current_index == index:\r\n last_node.next = current_node.next\r\n return\r\n current_index += 1", "def dedup_list(l):\n return list(set(l))", "def remove(self, key):\n if self.head is None:\n print('Cannot remove from empty list!')\n return\n if self.head.data == key:\n self.head = 
self.head.next\n return\n\n itr = self.head\n prev = ListNode()\n while itr:\n curr = itr\n if itr.data == key:\n prev.next = curr.next\n return\n prev = curr\n itr = itr.next", "def delete(self, data):\n\n current = self.head\n previous = None\n found = False\n while current and found is False:\n if current.data == data:\n found = True\n else:\n previous = current\n current = current.next\n if current is None:\n raise ValueError(\"Data not in list\")\n if previous is None:\n self.head = current.next\n else:\n previous.next = current.next\n self.size -= 1", "def remove_value(self, value):\n # check the head's key\n temp_node = self.head\n if temp_node.val==value:\n self.head = temp_node.next\n temp_node = None\n self.n -= 1\n return\n\n # search for the key value\n while temp_node.val != value: # check the next node's key\n prev_node = temp_node # store prev node to change prev.next\n temp_node = temp_node.next\n # if the key is not found\n if temp_node == None:\n print(\"Error; key value is not found\")\n return\n else:\n # reconfigure; unlink the current node\n prev_node.next = temp_node.next\n temp_node = None\n self.n -= 1", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n return node \n while node.next_node is not None:\n current = node.next_node \n if current.value == value:\n node.next_node = current.next_node \n return current \n node = current\n raise ValueError('Deleting non-existing value.')", "def remove_identical(list):\n seen = set()\n seen_add = seen.add\n return [x for x in list if not (x in seen or seen_add(x))]", "def remove_by_value(self, data):\n pre_node = None\n for n in self:\n if n.data == data:\n if pre_node is None:\n self.pop()\n else:\n pre_node.next = n.next\n break\n pre_node = n\n else:\n raise ValueError(f'value [{data}] not found in linked list')", "def remove(self, val):\n current_node = self.head\n previous_node = None\n\n while current_node:\n if current_node.val == val:\n if previous_node:\n previous_node.next = current_node.next\n else:\n self.head = current_node.next\n\n previous_node = current_node\n current_node = current_node.next", "def removeDuplicates(seq):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in seq if not (x in seen or seen_add(x))]", "def deleteActual(self, x_data):\n\n if self.is_empty():\n return print(\"List is empty. Nothing to show.\")\n\n headval = self.head\n\n while headval is not None:\n if headval.data == x_data:\n break\n\n headval = headval.next\n\n if headval is None:\n return print(\"can't find item. 
sorry.\")\n\n if headval.next is not None:\n headval.next.prev = headval.prev\n headval.prev.next = headval.next\n else:\n headval.prev.next = headval.next # will point to default null", "def remove_duplicate(h_list, sdir):\n for head in h_list:\n cmd = 'ls ./' + sdir + '/*' + head + '* > ' + zspace\n os.system(cmd)\n out = mcf.read_data_file(zspace, remove=1)\n if len(out) > 1:\n for ent in out[:-1]:\n mcf.rm_files(ent)", "def remove(self, key: int) -> None:\n \n \n hashvalue=key% 1000\n if self.hashset[hashvalue]==None:\n return\n head = self.hashset[hashvalue]\n dummy_head = Node(0)\n curr = dummy_head\n while head:\n k,v = head.data\n if k==key:\n head=head.next\n curr.next=head\n curr= curr.next\n if head != None:\n \n head = head.next\n \n self.hashset[hashvalue]=dummy_head.next", "def remove(self,value):\n if self.is_empty():\n return\n current = self._head\n if current.value == value:\n self._head = self._head.next\n elif current.next is None:\n # Contains one element only, but it is not the one we are looking for.\n return\n else:\n while current.next.value != value:\n current = current.next\n if current.next is None: # Remove value not found.\n return\n\n # Find removed value, remove it.\n current.next = current.next.next\n if current.next is None:\n self._tail = current\n self._size -= 1", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n if self.head is None: \n self.tail = None\n else:\n self.head.prev_node = None \n return node \n while node.next_node is not None:\n node = node.next_node \n if node.value == value:\n node.prev_node.next_node = node.next_node \n if node.next_node is None: \n self.tail = node.prev_node \n else:\n node.next_node.prev_node = node.prev_node\n return node\n raise ValueError('Deleting non-existing value.')", "def remove_all(self, d):\n\n # Removes leading <d>'s by moving self.head\n while self.head is not None and self.head.data == d:\n self.head = self.head.next\n\n # Removes following <d>'s by traversing the LinkedList\n if self.head is not None:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n else:\n temp = temp.next", "def remove(node):\n for liste in d_list:\n for item2 in liste:\n if item2[0] == node.value:\n liste.remove(item2)", "def remove_duplicates(lst):\n\tnew_lst = list()\n\tfor item in lst:\n\t\tif item not in new_lst:\n\t\t\tnew_lst.append(item)\n\treturn new_lst", "def delete_node(self, key):\n if not self.head:\n print('List is empty. No item to delete')\n return\n if self.head.data == key:\n self.head = self.head.next\n return\n temp = self.head\n while temp.next:\n if temp.next.data == key:\n break\n temp = temp.next\n temp.next = temp.next.next", "def remove_duplicates(list1):\n tample = [float('inf')]\n for elem in list1:\n if elem in tample:\n continue\n tample.append(elem)\n return tample[1:]", "def delete_ll_node(node):\n node.val = node.next.val\n node.next = node.next.next", "def removeDups(lst):\n\n return list(dict.fromkeys(lst) )", "def remove_duplicates(self,list_):\r\n ret =[]\r\n\r\n for item in list_:\r\n if item not in ret:\r\n ret.append(item)\r\n removed = len(list_)-len(ret)\r\n logger.info('%d duplicate%s removed.' 
%(removed,plural_or_not(removed)))\r\n return ret", "def removeDoublon(liste):\n tmp=[]\n for i,elt in enumerate(liste):\n if elt not in tmp:\n tmp.append(elt)\n return tmp", "def remove_duplicates(somelist):\n return set(somelist)", "def delete(self, key):\n # Your code here\n index = self.hash_index(key) \n print(index)\n cur = self.data[index].head\n\n if cur.key==key:\n \n self.data[index].head = self.data[index].head.next\n # cur.next = self.data[index].head\n self.count -=1\n print(\"Warning:headnode deleted\") \n else:\n \n while cur.next: \n prev = cur\n cur =cur.next\n if cur.key == key:\n #to remove the current node, change the pointers\n prev.next=cur.next \n self.count -=1 \n \n\n # return None", "def delete_node_at_end(self):\n if not self.head:\n print('List already empty')\n return\n temp = self.head\n while temp.next:\n if not temp.next.next:\n break\n temp = temp.next\n temp.next = None", "def remove_value(self, value):\n if self.empty():\n return \"Linked List is empty\"\n h = self.head\n previous = self.head\n idx = 0\n while h is not None:\n if h.data is value:\n if previous is h:\n self.head = h.next\n return idx\n else:\n previous.next = h.next\n h = None\n return idx\n idx += 1\n previous = h\n h = h.next\n\n pass", "def remove(self, value):\n node = self.first()\n # case 1 : in case of empty list, do nothing and return None\n if node is None:\n return None\n # case 2 : list has at least one element and node to be removed is the first element\n if node.value() == value:\n self.__head = node.next()\n self.__length -= 1\n node.set_next(None)\n return node\n # case 3 : list has at least one element and node to be removed is not the first element\n previous = node\n node = node.next()\n while node is not None:\n if node.value() == value:\n previous.set_next(node.next())\n self.__length -= 1\n node.set_next(None)\n return node\n else:\n node = node.next()\n return None\n\n ##############", "def remove_adjacent(list):\n a = []\n for item in list:\n if len(a):\n if a[-1] != item:\n a.append(item)\n else: a.append(item) \n return a", "def dedup_list(my_list):\r\n new_list = []\r\n for elem in my_list:\r\n if elem not in new_list:\r\n new_list.append(elem)\r\n return new_list", "def remove(self, key: int) -> None:\n index = key % self.size\n if self.table[index].value is None:\n return \n \n p = self.table[index]\n \n if p.key == key:\n if p.next is None:\n self.table[index] = ListNode()\n else:\n self.table[index] = p.next\n return\n \n prev = p\n while p:\n if p.key == key:\n prev.next = p.next\n return\n prev = p\n p = p.next\n #p = p.next\n #prev = p\n #prev, p = p, p.next", "def removeDup(item, seq):\n return [x for x in seq if x != item]", "def delete_node(self, key):\n cur_node = self.head\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n return\n\n prev = None\n while cur_node and cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n\n if cur_node is None:\n return\n\n prev.next = cur_node.next\n cur_node = None", "def de_dup_and_sort(input):\r\n if input== None:\r\n return None\r\n input = list(input)\r\n input = remove_duplicates(input)\r\n input.sort()\r\n return input", "def test_delete_sll_next_node(self):\n sll = SinglyLinkedList()\n a = Node('a')\n b = Node('b')\n c = Node('c')\n sll.insert_beg(a)\n sll.insert_beg(b)\n sll.insert_beg(c)\n sll.delete(a,start_node=b)\n actual = [i.data for i in sll]\n expected = 'a'\n nt.assert_not_in(expected,actual)", "def remove_node(self, value):\n node = self.head\n\n 
while node:\n if self.head.value == value:\n self.head = self.head.next\n return\n if node.next.value == value:\n node.next = node.next.next\n return\n node = node.next", "def delete(self, value):\n # Iterating to node that has value\n node = self.head\n last_node = None\n while node is not None and node.value != value:\n last_node = node\n node = node.next_\n\n # Check if the node has been found\n if node is None:\n return\n\n # Checking whether head matched\n if last_node is None:\n self.head = node.next_\n return\n\n # Deleting node\n last_node.next_ = node.next_", "def remove(self, element):\n if self.head.element == element:\n self.head = self.head.next\n self.head.prev = None\n return None\n cursor = self.head\n while cursor.next is not None:\n if cursor.next.element == element:\n cursor.next = cursor.next.next\n if cursor.next is not None:\n cursor.next.prev = cursor\n break\n else:\n cursor = cursor.next", "def deleteduplicates(iterable):\n seen = []\n for x in iterable:\n if x not in seen:\n yield x\n seen.append(x)", "def remove_all(self, number):\n if self.head.data.number() == number:\n self.head = self.head.next\n self._size -= 1\n\n if self.head is not None:\n cur_node = self.head\n while cur_node.next is not None:\n if cur_node.next.data.number() == number:\n cur_node.next = cur_node.next.next\n self._size -= 1\n else:\n cur_node = cur_node.next", "def remove_duplicate_urls(seq, id_fun=None):\n\n if id_fun is None:\n def id_fun(x):\n return x\n seen = {}\n result = []\n for item in seq:\n marker = id_fun(item)\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n\n return result", "def remove_duplicates(list1):\r\n if len(list1) == 1 or len(list1) == 0:\r\n return [item for item in list1]\r\n else:\r\n if list1[-1] == list1[-2]:\r\n return remove_duplicates(list1[:-1])\r\n else:\r\n new_list = remove_duplicates(list1[:-1])\r\n new_list.append(list1[-1])\r\n return new_list", "def remove_duplicates(list1):\n if len(list1) == 0:\n return []\n result_list = [list1[0]]\n last_index = 0\n for dummy_index in range(1,len(list1)):\n if list1[dummy_index] != list1[last_index]:\n result_list.append(list1[dummy_index])\n last_index = dummy_index\n return result_list", "def test__remove_duplicates(self):\n\n result = deduped_list\n expected = [\n 'Fred',\n 'Dave',\n 'Sarah',\n 'John',\n 'Matthew',\n 'Joanna',\n 'Marjorie',\n 'Anna',\n 'Tony',\n 'Sam',\n 'Eric',\n 'Susan',\n 'Arthur',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def unique(li):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in li if not (x in seen or seen_add(x))]", "def delete(self, index):\n if index == 0 and self.head is not None:\n self.head = self.head.next\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current:\n if current_index == index:\n previous.next = current.next\n\n previous = current\n current = current.next\n current_index += 1", "def delDoublon(values):\n\treturn list(set(values))", "def __remove_first(self):\n if self.__head is not None:\n self.__length -= 1\n self.__head = self.__head.next()\n if self.__length == 0: # when there are no more elements in the list,\n self.__last = None # remove the pointer to the last element", "def naive(head: ListNode) -> ListNode:\n if head is None or head.next is None: # Not possible to have a cycle\n return None\n seen = {} # A hash-set would work better\n curr = head\n while curr is not None:\n if curr in seen:\n return curr\n else:\n seen[curr] = True\n curr = curr.next\n 
return None", "def remove(self, item: Any) -> None:\n curr = self._first\n\n if not curr:\n raise ValueError\n\n elif curr.item == item:\n self._first = self._first.next\n self._length -= 1\n\n else:\n while curr is not None:\n if curr.next and curr.next.item == item:\n curr.next = curr.next.next\n self._length -= 1\n return\n curr = curr.next\n raise ValueError", "def _purge_duplicates(f):\n @functools.wraps(f)\n def wrapper(*args, **kwds):\n ret_val = f(*args, **kwds)\n new_list = []\n for item in ret_val:\n if item in new_list:\n continue\n new_list.append(item)\n return new_list\n return wrapper", "def remove_second(list):\n if list is None: return\n first = list\n second = list.next\n # Make the first node refer to the third\n first.next = second.next\n # Separate the second node from the rest of the list\n second.next = None\n return second" ]
[ "0.84461933", "0.8152717", "0.8094504", "0.8059363", "0.78755367", "0.7790879", "0.7774575", "0.7728998", "0.76568514", "0.7549787", "0.74926317", "0.7353447", "0.7180826", "0.7150074", "0.71448135", "0.71269375", "0.7072273", "0.70498395", "0.695619", "0.6727125", "0.6698436", "0.66599035", "0.6615044", "0.6611298", "0.6584534", "0.6578979", "0.65728205", "0.6565807", "0.6558886", "0.65564954", "0.655584", "0.65398604", "0.65354335", "0.6504221", "0.6493737", "0.64804536", "0.6471233", "0.64699596", "0.6464067", "0.6449829", "0.64322746", "0.64172626", "0.63952017", "0.6378488", "0.63660794", "0.6363362", "0.63565844", "0.6350902", "0.6345191", "0.6328363", "0.6326416", "0.6324919", "0.63233536", "0.63233227", "0.6310391", "0.62962776", "0.6293933", "0.6286232", "0.6284031", "0.62799865", "0.6275526", "0.6247628", "0.62440896", "0.6230052", "0.622823", "0.6225214", "0.6202219", "0.6186538", "0.6174964", "0.61686546", "0.61336863", "0.61224276", "0.6108745", "0.60996324", "0.6099288", "0.60981375", "0.6084193", "0.6080597", "0.6074304", "0.6072464", "0.6066434", "0.60659367", "0.6055747", "0.60518336", "0.6039613", "0.60338044", "0.60320836", "0.60269064", "0.60255", "0.6023279", "0.60199565", "0.6010599", "0.6008046", "0.6004888", "0.60045755", "0.5995115", "0.5995111", "0.59892523", "0.5986973", "0.59786123" ]
0.8174585
1
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
Returns the string representation of the model
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, UpdateUserOption): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8089139", "0.8089139", "0.8054507", "0.79827213", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.79669285", "0.796109", "0.796109", "0.79430795", "0.7930464", "0.7927239", "0.78980625", "0.7883048", "0.7883048", "0.7880565", "0.7872065", "0.78687155", "0.7867311", "0.7826154", "0.7819807", "0.7816069", "0.7807642", "0.78075194", "0.77969724", "0.77954197", "0.7783734", "0.7778531", "0.7770001", "0.7753262", "0.77461725", "0.773869", "0.7728116", "0.77257377", "0.7719577", "0.77033156", "0.7686754", "0.76762426", "0.7674371", "0.7665038", "0.76561457", "0.7655871", "0.76284474", "0.7626224", "0.7625127", "0.76241153", "0.76241153", "0.76241153", "0.76174015", "0.7600977", "0.75997543", "0.7596471", "0.7595083", "0.75944036", "0.7589736" ]
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Create a board with distinct regions. Each region is contiguous and separated from all other regions by at least two cells. The regions are generated using a Dirichlet process in which new cells are added to existing regions with a probability proportional to their boundary.
def make_partioned_regions(shape, alpha=1.0, max_regions=5, min_regions=2): ring = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.int16) adjacent = np.array([ # Diagonals don't count as adjacent [-1,0,0,1], [0,-1,1,0]], dtype=np.int16).T nearby = np.meshgrid([-2,-1,0,1,2], [-2,-1,0,1,2]) board = np.zeros(shape, dtype=np.int16) perimeters = [{ (i, j) for i, j in zip(*np.nonzero(board == 0)) }] exclusions = [set()] while sum(len(p) for p in perimeters) > 0: weights = np.array([len(p) for p in perimeters], dtype=float) weights[0] = min(alpha, weights[0]) if len(weights) <= max_regions else 1e-10 if len(weights) <= min_regions: weights[1:] = 1e-10 weights /= np.sum(weights) k = get_rng().choice(len(perimeters), p=weights) plist = list(perimeters[k]) i, j = plist[get_rng().choice(len(plist))] perimeters[0].discard((i, j)) perimeters[k].discard((i, j)) if (i, j) in exclusions[k]: continue exclusions[0].add((i,j)) exclusions[k].add((i,j)) b = board[(i+nearby[0]) % shape[0], (j+nearby[1]) % shape[1]] b[2,2] = k or -1 num_neighbors = signal.convolve2d(b != 0, ring, mode='valid') num_foreign = signal.convolve2d((b > 0) & (b != k), ring, mode='valid') if ((num_foreign > 0) & (num_neighbors > 2)).any() or num_foreign[1,1] > 0: continue # Add to the board if k == 0: k = len(perimeters) perimeters.append(set()) exclusions.append(set()) board[i, j] = k for i2, j2 in (adjacent + (i, j)) % shape: if board[i2, j2] == 0: perimeters[k].add((i2, j2)) return board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_grains(self, cells):\n\t\tfor cell_num in range(cells):\n\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\tsample_cell = sample_cell[0]\n\t\t\twhile sample_cell.state != 0:\n\t\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\t\tsample_cell = sample_cell[0]\n\t\t\tsample_cell.change_state(self.init_time ,cell_num)", "def generate_regions(nb_elements, nb_regions):\n assert type(nb_elements) is IntType and nb_elements > 0, \\\n \"number of elements is not an integer or <=0: %r\" % nb_elements\n assert type(nb_regions) is IntType and nb_regions > 0, \\\n \"number of regions is not an integer or <=0: %r\" % nb_regions\n\n dreg = {}\n ids = np.arange(1, nb_elements+1)\n mat = ids.reshape(nb_regions, nb_elements/nb_regions)\n rows, cols = mat.shape\n\n for r in range(rows):\n for c in range(cols):\n current = mat[r][c]\n if c == (cols-1):\n mv = mat[:,0]\n else:\n mv = mat[:,(c+1)]\n mv = list(mv)\n mv[r] = current\n dreg[current] = mv\n return dreg", "def __areas_to_pool(self, region_width, region_height, region_width_step, region_height_step):\n \n areas = [[(width_ind * region_width_step, height_ind * region_height_step,\n (width_ind + 1) * region_width_step if (width_ind + 1) < self.width else region_width,\n (height_ind + 1) * region_height_step if (height_ind + 1) < self.height else region_height) for width_ind in range(self.width)] for height_ind in range(self.height)]\n\n return areas", "def create_grid(self):\n for y_iter in range(self.NUM_GRIDS):\n for x_iter in range(self.NUM_GRIDS):\n x, y = x_iter * self.SQUARE_SIZE, y_iter * self.SQUARE_SIZE\n x_stop, y_stop = x + self.SQUARE_SIZE, y + self.SQUARE_SIZE\n cords = x, y, x_stop, y_stop\n self.canvas.create_rectangle(cords, outline=self.color,\n fill=self.default_color)", "def populate_region(mask, layer_params):\n\n from .speedups import (\n NEW_CELL_MASK, CAN_OSCILLATE_MASK, INCLUDE_VIOLATIONS_MASK)\n\n border = ndimage.maximum_filter(mask, size=3, mode='wrap') ^ mask\n interior = ndimage.minimum_filter(mask, size=3, mode='wrap')\n gen_mask = mask * (\n NEW_CELL_MASK |\n CAN_OSCILLATE_MASK |\n INCLUDE_VIOLATIONS_MASK\n ) + border * (\n INCLUDE_VIOLATIONS_MASK\n )\n board = np.zeros(mask.shape, dtype=np.uint16)\n foreground = np.zeros(mask.shape, dtype=bool)\n background = np.zeros(mask.shape, dtype=bool)\n background_color = np.zeros(mask.shape, dtype=bool)\n seeds = None\n max_period = 1\n\n for layer in layer_params:\n if not isinstance(layer, dict):\n raise ValueError(\n \"'layer_params' should be a list of parameter dictionaries.\")\n layer = _fix_random_values(layer)\n old_board = board.copy()\n gen_mask0 = gen_mask.copy()\n interior = ndimage.minimum_filter(\n gen_mask & NEW_CELL_MASK > 0, size=3, mode='wrap')\n color = COLORS.get(layer.get('color'), 0)\n\n fence_frac = layer.get('fences', 0.0)\n if fence_frac > 0:\n fences = build_fence(gen_mask & speedups.NEW_CELL_MASK)\n fences *= coinflip(fence_frac, fences.shape)\n gen_mask &= ~(fences * (NEW_CELL_MASK | CAN_OSCILLATE_MASK))\n board += fences.astype(np.uint16) * CellTypes.wall\n\n spawners = layer.get('spawners', 0)\n if spawners > 0:\n _mask = (gen_mask0 & NEW_CELL_MASK > 0) & interior\n new_cells = _mask & coinflip(spawners, board.shape)\n if not new_cells.any() and _mask.any():\n i, j = np.nonzero(_mask)\n k = get_rng().choice(len(i)) # ensure at least one spawner\n new_cells[i[k], j[k]] = 
True\n gen_mask[new_cells] ^= NEW_CELL_MASK\n board[new_cells] = CellTypes.spawner + color\n\n tree_lattice = layer.get('tree_lattice')\n # Create a lattice of trees that are spread throughout the region\n # such that every empty cell touches one (and only one) tree\n # (modulo edge effects).\n # Such a lattice tends to make the resulting board very chaotic.\n # Note that this will disrupt any pre-existing patterns.\n if tree_lattice is not None:\n if not isinstance(tree_lattice, dict):\n tree_lattice = {}\n h, w = board.shape\n stagger = tree_lattice.get('stagger', True)\n spacing = float(tree_lattice.get('spacing', 5))\n if not stagger:\n new_cells = _make_lattice(h, w, spacing, spacing, 0)\n elif spacing <= 3:\n new_cells = _make_lattice(h, w, 3, 3, 1)\n elif spacing == 4:\n new_cells = _make_lattice(h, w, 10, 1, 3)\n elif spacing == 5:\n new_cells = _make_lattice(h, w, 13, 1, 5)\n else:\n # The following gets pretty sparse.\n new_cells = _make_lattice(h, w, 6, 3, 3)\n\n new_cells &= gen_mask & NEW_CELL_MASK > 0\n board[new_cells] = CellTypes.tree + color\n\n period = 1\n if 'pattern' in layer:\n pattern_args = layer['pattern'].copy()\n period = pattern_args.get('period', 1)\n if period == 1:\n gen_mask2 = gen_mask & ~CAN_OSCILLATE_MASK\n pattern_args.update(period=max_period, osc_bonus=0)\n elif period == 0:\n gen_mask2 = gen_mask & ~INCLUDE_VIOLATIONS_MASK\n pattern_args.update(period=max_period, osc_bonus=0)\n elif period < max_period:\n raise ValueError(\n \"Periods for sequential layers in a region must be either 0, 1,\"\n \" or at least as large as the largest period in prior layers.\")\n else:\n gen_mask2 = gen_mask\n max_period = period\n\n board = _gen_pattern(board, gen_mask2, seeds, **pattern_args)\n\n # We need to update the mask for subsequent layers so that they\n # do not destroy the pattern in this layer.\n # First get a list of board states throughout the oscillation cycle.\n boards = [board]\n for _ in range(1, max_period):\n boards.append(speedups.advance_board(boards[-1]))\n non_empty = np.array(boards) != 0\n still_cells = non_empty.all(axis=0)\n osc_cells = still_cells ^ non_empty.any(axis=0)\n # Both still life cells and oscillating cells should disallow\n # any later changes. 
We also want to disallow changes to the cells\n # that are neighboring the oscillating cells, because any changes\n # there would propogate to the oscillating cells at later time\n # steps.\n # Note that it doesn't really matter whether the oscillating mask\n # is set for the currently oscillating cells, because we're not\n # checking for violations in them anyways, and we don't allow any\n # changes that would affect them.\n osc_neighbors = ndimage.maximum_filter(osc_cells, size=3, mode='wrap')\n gen_mask[osc_cells] &= ~(NEW_CELL_MASK | INCLUDE_VIOLATIONS_MASK)\n gen_mask[still_cells | osc_neighbors] &= ~(NEW_CELL_MASK | CAN_OSCILLATE_MASK)\n\n new_mask = board != old_board\n life_mask = ((board & CellTypes.alive) > 0) & new_mask\n board += color * new_mask * life_mask\n # The seeds are starting points for the next layer of patterns.\n # This just makes the patterns more likely to end up close together.\n seeds = ((board & CellTypes.alive) > 0) & mask\n\n new_mask = board != old_board\n\n movable_walls = layer.get('movable_walls', 0)\n if movable_walls > 0:\n new_cells = coinflip(movable_walls, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.wall\n board += new_cells * CellTypes.movable\n\n movable_trees = layer.get('movable_trees', 0)\n if movable_trees > 0:\n new_cells = coinflip(movable_trees, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.tree\n board += new_cells * CellTypes.movable\n\n hardened_life = layer.get('hardened_life', 0)\n if hardened_life > 0:\n new_cells = coinflip(hardened_life, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.life\n board -= new_cells * CellTypes.destructible\n\n buffer_size = layer.get('buffer_zone', 0) * 2 + 1\n life_cells = board & CellTypes.alive > 0\n buf = ndimage.maximum_filter(life_cells, size=buffer_size, mode='wrap')\n gen_mask[buf] &= ~NEW_CELL_MASK\n\n target = layer.get('target', 'board')\n if target == 'board':\n foreground[new_mask] = True\n if period > 0:\n background[new_mask] = True\n elif target == 'goals':\n background[new_mask] = True\n background_color[new_mask] = True\n # Make sure to add walls and such to the foreground\n foreground[new_mask & (board & CellTypes.alive == 0)] = True\n elif target == 'both':\n foreground[new_mask] = True\n if period > 0:\n background[new_mask] = True\n background_color[new_mask] = True\n else:\n raise ValueError(\"Unexpected value for 'target': %s\" % (target,))\n\n fountains = layer.get('fountains', 0)\n if fountains > 0:\n new_cells = coinflip(fountains, board.shape)\n new_cells *= gen_mask & NEW_CELL_MASK > 0\n neighbors = ndimage.maximum_filter(new_cells, size=3, mode='wrap')\n neighbors *= gen_mask & NEW_CELL_MASK > 0\n gen_mask[neighbors] = INCLUDE_VIOLATIONS_MASK\n if buffer_size > 1:\n buf = ndimage.maximum_filter(neighbors, size=buffer_size, mode='wrap')\n gen_mask[buf] &= ~NEW_CELL_MASK\n board[neighbors] = CellTypes.wall + color\n board[new_cells] = CellTypes.fountain + color\n foreground[new_cells] = True\n background[neighbors] = True\n background_color[neighbors] = True\n\n goals = board.copy()\n board *= foreground\n goals *= background\n goals &= ~CellTypes.spawning\n goals &= ~(CellTypes.rainbow_color * ~background_color)\n\n return board, goals", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in 
a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])", "def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid", "def get_start_grid(cols=4, rows=4):\n\tgrid = [[\"\"]*cols for i in range(rows)]\n\tfor i in range(2):\n\t\tempties = get_empty_cells(grid)\n\t\ty,x = random.choice(empties)\n\t\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn grid", "def generate_space_cells(cell_number, min_cell_distance=1.0, \n x_range=None, y_range=None, z_range=None,\n space_x_prob_distribution=[1.],\n x_hist=1000000\n ):\n space_ranges = [x_range,y_range,z_range]\n \n # check that the given number of cells fits within the span range\n assert check_cells_fit(cell_number, min_cell_distance, space_ranges)\n del space_ranges\n \n # create initial storage arrays \n coords_array = np.zeros([cell_number, 3])\n \n # works only for x and y axis now; for circular cells\n radius=min_cell_distance*0.5 \n x1_raw = generate_possible_coords(radius,x_range,min_cell_distance)\n x2_raw = generate_possible_coords(min_cell_distance,x_range,min_cell_distance)\n y_space_cell = min_cell_distance/2.*np.sqrt(3.) 
# from pitagoras\n y_raw=generate_possible_coords(radius,y_range,y_space_cell)\n z_raw = generate_possible_coords(radius,z_range,min_cell_distance)\n \n x1 = True\n all_coords = []\n for next_depth in z_raw:\n if x1 == True:\n x1= False\n else:\n x1 = True\n for next_raw in range(len(y_raw)):\n if x1 == True:\n x1 = False\n for next_coord in range(len(x1_raw)):\n all_coords.append([x1_raw[next_coord],y_raw[next_raw],next_depth])\n else:\n for next_coord in range(len(x2_raw)):\n all_coords.append([x2_raw[next_coord],y_raw[next_raw],next_depth])\n x1 = True\n \n # randomly choose the cell coords number which are needed\n from random import choice\n cumsum_layer_syn_prob = np.cumsum(space_x_prob_distribution)\n # normalize\n cumsum_layer_syn_prob = cumsum_layer_syn_prob/np.max(cumsum_layer_syn_prob) # this line was added, might need to be tested for inh neurons\n\n all_x_layers = np.arange(x_range[0], x_range[1]+x_hist, x_hist)-(0.5*x_hist)\n # first and last 'layer' will have half-width\n all_x_layers[0] = x_range[0]\n all_x_layers[-1] = x_range[1]\n assert len(space_x_prob_distribution) == len(all_x_layers)-1, 'there are '+ str(len(space_x_prob_distribution)) + ' values for probability within x-space, allowed: ' +str(len(all_x_layers)-1)\n for next_cell in range(cell_number):\n all_coords_in_arr = np.array(all_coords)\n\n # choose how far in x-range\n x = np.random.rand()\n layer_idx = np.searchsorted(cumsum_layer_syn_prob, x)\n layer_idx = np.where(cumsum_layer_syn_prob == cumsum_layer_syn_prob[layer_idx])[0][0]\n\n '''\n # choose which # here it was always symmetric, let's now change it so the distribution may not be symmetric\n possible = np.where((all_coords_in_arr[:,0] > (x_hist*layer_idx)) & (all_coords_in_arr[:,0] < x_hist*(layer_idx+1)))[0]\n possible_negative = np.where((all_coords_in_arr[:,0] < (-1*x_hist*layer_idx)) & (all_coords_in_arr[:,0] > x_hist*(-1)*(layer_idx+1)))[0]\n\n possible_all = np.hstack([possible_negative, possible])\n\n next_choice = choice(possible_all) # possibly there is not enough space for the parameters given to fit all the cells\n '''\n\n possible = np.where((all_coords_in_arr[:,0] > all_x_layers[layer_idx]) & (all_coords_in_arr[:,0] < all_x_layers[layer_idx+1]))[0]\n next_choice = choice(possible)\n\n #possible = np.setdiff1d(possible, np.array(next_choice))\n #possible.delete(next_choice)\n\n coords_array[next_cell] = all_coords[next_choice]\n all_coords.pop(next_choice)\n\n return coords_array", "def randomCells(width, height):\r\n\tA = createBoard(height, width)\r\n\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif row > 0 and row < height-1:\r\n\t\t\t\tif col > 0 and col < width-1:\r\n\t\t\t\t\tA[row][col] = random.choice([0,1]) \r\n\r\n\treturn A", "def _possible_grids(self, num_windows):\n if num_windows < 2:\n end = 2\n else:\n end = num_windows // 2 + 1\n for rows in range(1, end):\n cols = int(math.ceil(num_windows / rows))\n yield (rows, cols, ROWCOL)\n if rows != cols:\n # also want the reverse test\n yield (cols, rows, COLROW)", "def generate_map(nrows, ncols, nrooms, max_col_size, max_row_size):\n arr = np.zeros((nrows, ncols), dtype=np.int8)\n\n for i in range(nrooms):\n rand_row_start = np.random.randint(nrows)\n rand_col_start = np.random.randint(ncols)\n\n rand_row_size = np.random.randint(max_row_size / 2, max_row_size)\n rand_col_size = np.random.randint(max_col_size / 2, max_col_size)\n\n arr[rand_row_start:rand_row_start + rand_row_size, rand_col_start:rand_col_start + rand_col_size] = 1\n\n labels = 
measure.label(arr)\n regions = measure.regionprops(labels)\n\n centroids = list()\n for region in regions:\n centroids.append(region.centroid)\n\n num_centroids = len(centroids)\n\n # get distances between every pair of centroids\n dists = scipy.spatial.distance.cdist(centroids, centroids)\n\n # get a distance that is greater than all current distances\n max_dist = np.max(dists) + 1\n\n # make sure upper triangle is at least max_dist so that when picking closest\n # pairs, we won't choose a diagonal element or a duplicate connection\n dists = dists + np.triu(np.ones((num_centroids, num_centroids))) * max_dist\n\n for i in range(num_centroids - 1):\n min_dist_idx = np.argmin(dists)\n min_dist_idx = np.unravel_index(min_dist_idx, dists.shape)\n\n # create a hallway between regionprops\n centroid1 = np.array(centroids[min_dist_idx[0]], dtype=np.int)\n centroid2 = np.array(centroids[min_dist_idx[1]], dtype=np.int)\n\n [row_centroid_1, row_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[0])\n [col_centroid_1, col_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[1])\n\n arr[row_centroid_1[0]:row_centroid_2[0] + 1, row_centroid_1[1]] = 1\n arr[row_centroid_2[0], col_centroid_1[1]:col_centroid_2[1] + 1] = 1\n\n dists[:, min_dist_idx[1]] += max_dist\n\n return arr", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def generate_grid(height, width):\n return [[random.randint(0, 9) for _ in range(width)] for _ in range(height)]", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def create_region_w_spacing (tuple_top_L, tuple_bottom_R):\n\n spacing = int(input ('How many well spaces do you want between each spot? '))\n\n\n #get the plate column numbers from the plate class\n columns = plate1536.columns\n #get the plate rows from the plate class\n rows = plate1536.rows\n\n ###Begin creating list of columns to use###\n\n #initialize and use next\n curr_col_idx = columns.index(int(tuple_top_L[1]))\n\n #set left most column to use as the column given by user in top_left\n col_idxs_to_shoot = [curr_col_idx]\n\n #loop checks the NEXT column that will be produced by moving right\n #by (spacing + 1). If that is beyond the right-most border set by\n #the well region definitions, then it will stop, containing all\n #column choices within the left and right bounds\n while (curr_col_idx + spacing + 1) <= columns.index(int(tuple_bottom_R[1])):\n\n curr_col_idx += (spacing + 1)\n\n col_idxs_to_shoot.append(curr_col_idx)\n\n ###The list of indices in plate1536.columns to use is now set###\n\n\n ###Begin creating list of rows to use###\n\n #initialize and use next\n curr_row_idx = rows.index(tuple_top_L[0])\n\n #set top most row to use as the row given by user in top_left\n row_idxs_to_shoot = [curr_row_idx]\n\n #loop checks the NEXT row that will be produced by moving down\n #by (spacing + 1). 
If that is beyond the bottom-most border set by\n #the well region definitions, then it will stop, containing all\n #row choices within the top and bottom bounds\n while (curr_row_idx + spacing + 1) <= rows.index(tuple_bottom_R[0]):\n\n curr_row_idx += (spacing + 1)\n\n row_idxs_to_shoot.append(curr_row_idx)\n\n ###The list of indices in plate1536.rows to use is now set###\n\n\n #get all the columns you want to use as STRINGS\n col_strs = []\n for i in col_idxs_to_shoot:\n col_strs += [ str(plate1536.columns[i]) ] #have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n #get all the rows you want to use as STRINGS\n row_strs = []\n for i in row_idxs_to_shoot:\n row_strs += [ plate1536.row_dict[i] ]#have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n\n print(\"This region has {} rows (letters), {} columns (#'s) per row. That's a total of {} spots\".format(len(row_strs), len(col_strs), len(row_strs) * len(col_strs)))\n\n return row_strs, col_strs", "def randomized_prims(width=16, height=16) -> Maze:\n maze = Maze(width=width, height=height, algorithm=None)\n visited = [[False for _ in range(maze.width)] for _ in range(maze.height)]\n\n # ensure only one entrance to the center squares\n centerx = maze.width // 2 - 1\n centery = maze.height // 2 - 1\n \n visited[centery][centerx] = True\n visited[centery][centerx+1] = True\n visited[centery+1][centerx+1] = False\n visited[centery+1][centerx] = True\n\n visited[0][0] = True\n boundary = [(0,0,Compass.EAST), (0,0,Compass.SOUTH)]\n\n while boundary:\n x, y, direction = boundary.pop(random.randint(0, len(boundary)-1))\n nx, ny = maze.neighbor(x, y, direction)\n if not visited[ny][nx]:\n maze.break_wall(x, y, direction)\n boundary.extend([(nx,ny,direction) for direction in maze.neighbors(nx, ny)])\n visited[ny][nx] = True\n \n return maze", "def make_grid(X,Y): \r\n grid = []\r\n for j in range(Y):\r\n row = []\r\n for i in range(X):\r\n row.append( block((i,j)) )\r\n grid.append(row)\r\n return grid", "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def get_start_grid(cols=4, rows=4):\n grid = [[0]*cols for i in range(rows)]\n for i in range(2):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return grid", "def rect(rng, lines, columns):\n\n w = rng.randint(1, max(1, lines // 2))\n h = rng.randint(1, max(1, columns // 2))\n\n i = rng.randint(0, lines - h)\n j = rng.randint(0, columns - w)\n \n return i, j, w, h", "def _find_regions(base_pairs, scores):\n # Make sure the lower residue is on the left for each row\n sorted_base_pairs = np.sort(base_pairs, axis=1)\n\n # Sort the first column in ascending order\n original_indices = np.argsort(sorted_base_pairs[:, 0])\n sorted_base_pairs = sorted_base_pairs[original_indices]\n\n # Rank each base\n # E.g.: [[3, 5] --> [[0, 1]\n # [9, 7]] [3, 2]]\n order = np.argsort(sorted_base_pairs.flatten())\n rank = np.argsort(order).reshape(base_pairs.shape)\n\n # The base pairs belonging to the current region\n region_pairs = []\n # The individual regions\n regions = set()\n\n # Find separate regions\n for i in range(len(sorted_base_pairs)):\n # if a new region is to be started append the current base pair\n if 
len(region_pairs) == 0:\n region_pairs.append(original_indices[i])\n continue\n\n # Check if the current base pair belongs to the region that is\n # currently being defined\n previous_upstream_rank = rank[i-1, 0]\n this_upstream_rank = rank[i, 0]\n previous_downstream_rank = rank[i-1, 1]\n this_downstream_rank = rank[i, 1]\n\n # if the current base pair belongs to a new region, save the\n # current region and start a new region\n if ((previous_downstream_rank - this_downstream_rank) != 1 or\n (this_upstream_rank - previous_upstream_rank) != 1):\n regions.add(\n _Region(base_pairs, np.array(region_pairs), scores)\n )\n region_pairs = []\n\n # Append the current base pair to the region\n region_pairs.append(original_indices[i])\n\n # The last region has no endpoint defined by the beginning of a\n # new region.\n regions.add(_Region(base_pairs, np.array(region_pairs), scores))\n\n # Return the graphical representation of the conflicting regions\n return _generate_graphical_representation(regions)", "def randomCells(w, h):\n A = createBoard(w, h)\n\n for row in range(1, h-1):\n for col in range(1, w-1):\n if random.choice([0, 1]) == 1:\n A[row][col] = 1\n else:\n A[row][col] = 0\n return A", "def makeup_polygons(\n draw: ImageDraw,\n num_cells: int,\n width: int,\n height: int,\n rgb_im: Image,\n random: bool,\n):\n voronoi, points = generate_voronoi_diagram(num_cells, width, height)\n for point, index in zip(points, voronoi.point_region):\n # Getting the region of the given point\n region = voronoi.regions[index]\n # Getting the points in arrays\n polygon = list()\n for i in region:\n # If vector is out of plot do not add\n if i != -1:\n polygon.append(voronoi.vertices[i])\n # Make tuples of the points\n polygon_tuples = list()\n for l in polygon:\n polygon_tuples.append(tuple(l))\n rgb = (0, 0, 0)\n if random:\n # Get random color\n rgb = random_color()\n else:\n # Get colors of the middle point\n rgb = get_color_of_point(point, rgb_im, width, height)\n # Draw the calculated polygon with the color of the middle point\n if polygon and polygon_tuples:\n draw.polygon(polygon_tuples, rgb)", "def gen_game(\n board_shape=(25,25), min_performance=-1, partitioning={},\n starting_region=None, later_regions=None, buffer_region=None,\n named_regions={}, agents=['default'], agent_types={}, **etc):\n board_shape = _fix_random_values(board_shape)\n min_performance = _fix_random_values(min_performance)\n partitioning = _fix_random_values(partitioning)\n\n regions = make_partioned_regions(board_shape, **partitioning)\n board = np.zeros(board_shape, dtype=np.uint16)\n goals = np.zeros(board_shape, dtype=np.uint16)\n\n # Create locations for the player and the exit\n agent_locs, points_table, agent_names = add_agents_and_exit(\n board, regions, agents, agent_types)\n\n # and fill in the regions...\n for k in np.unique(regions)[2:]:\n mask = regions == k\n if starting_region is not None:\n region_name = _fix_random_values(starting_region)\n else:\n region_name = _fix_random_values(later_regions)\n if region_name not in named_regions:\n logger.error(\"No region parameters for name '%s'\", region_name)\n continue\n logger.debug(\"Making region: %s\", region_name)\n rboard, rgoals = populate_region(mask, named_regions[region_name])\n board += rboard\n goals += rgoals\n starting_region = None\n buffer_region = _fix_random_values(buffer_region)\n if buffer_region in named_regions:\n mask = regions == 0\n rboard, rgoals = populate_region(mask, named_regions[buffer_region])\n board += rboard\n goals += rgoals\n\n # 
Give the buffer (0) region a rainbow / white color\n # This is mostly a visual hint for humans\n buffer_mask = (regions <= 0) & (goals & CellTypes.rainbow_color == 0)\n goals[buffer_mask] += CellTypes.rainbow_color\n\n game = SafeLifeGame()\n game.deserialize({\n 'board': board,\n 'goals': goals,\n 'agent_locs': agent_locs,\n 'agent_names': agent_names,\n 'min_performance': min_performance,\n 'points_table': points_table,\n 'orientation': 1,\n })\n return game", "def _regions(self, voronoi_diagram, unique_id, ids, crs):\n vertices = pd.Series(voronoi_diagram.regions).take(voronoi_diagram.point_region)\n polygons = []\n for region in vertices:\n if -1 not in region:\n polygons.append(pygeos.polygons(voronoi_diagram.vertices[region]))\n else:\n polygons.append(None)\n\n regions_gdf = gpd.GeoDataFrame(\n {unique_id: ids}, geometry=polygons, crs=crs\n ).dropna()\n regions_gdf = regions_gdf.loc[\n regions_gdf[unique_id] != -1\n ] # delete hull-based cells\n\n return regions_gdf", "def createBoard(self):\n self.board = []\n for row in range(self.height):\n self.board += [self.createRow()]\n return self.board", "def draw_gameBoard(self):\n\n # 15 horizontal lines\n for i in range(9):\n start_pixel_x = (i + 1) * CELL_PIXELS\n start_pixel_y = (0 + 1) * CELL_PIXELS\n end_pixel_x = (i + 1) * CELL_PIXELS\n end_pixel_y = (9 + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(9):\n start_pixel_x = (0 + 1) * CELL_PIXELS\n start_pixel_y = (j + 1) * CELL_PIXELS\n end_pixel_x = (9 + 1) * CELL_PIXELS\n end_pixel_y = (j + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n self.draw_star(3, 3)\n self.draw_star(7, 7)", "def gen_grid(grid_width, grid_height):\n\n grid = []\n for x in range(0, grid_width):\n grid.append([])\n for y in range(0, grid_height):\n grid[x].append(False)\n return grid", "def _create_grid_with_cells(self, width, height):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = TILE_EMPTY\n return grid", "def random_grid(height, width):\n grid = create_grid(height, width)\n for r in range(1, height - 1):\n for c in range(1, width - 1):\n grid[r][c] = random.choice([0, 1])\n return grid", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def makeBoard(m,n):\n if m < 2 or n < 2:\n raise ValueError('Grid must be at least 2x2')\n grid = []\n for y in range(n):\n row = []\n for x in range(m):\n value = 1 if random.randint(0,4) % 4 == 0 else 0\n if x==0 and y==0:\n value = 0\n if x==(m-1) and y==(n-1):\n value = 0\n row.append(value)\n grid.append(row)\n return grid", "def patterned_region(cls, global_shape,channel_width,channel_separation):\n \n global_shape = np.array(global_shape)\n\n minx = np.min(global_shape[:,0])\n maxx = np.max(global_shape[:,0])\n miny = np.min(global_shape[:,1])\n maxy = np.max(global_shape[:,1])\n\n clip = Polygon([tuple(z) for z in global_shape])\n\n #create vertical grid and calculate 
its intersection with global shape\n vert_grid = [Feature.define_tube([[x,miny],[x,maxy]],[0,0],channel_width/2).coord[0] for x in np.arange(minx,maxx,channel_separation)]\n vert_grid = MultiPolygon([Polygon([tuple(z) for z in y]) for y in vert_grid])\n\n pattern = []\n\n intersection = vert_grid.intersection(clip)\n if intersection.geom_type == 'Polygon':\n pattern.append(np.array(intersection.exterior.coords))\n if intersection.geom_type == 'MultiPolygon':\n for x in intersection:\n pattern.append(np.array(x.exterior.coords))\n\n #create horizontal grid and calculate its intersection with global shape\n hor_grid = [Feature.define_tube([[minx,y],[maxx,y]],[0,0],channel_width/2).coord[0] for y in np.arange(miny,maxy,channel_separation)]\n hor_grid = MultiPolygon([Polygon([tuple(z) for z in y]) for y in hor_grid])\n\n intersection = hor_grid.intersection(clip)\n if intersection.geom_type == 'Polygon':\n pattern.append(np.array(intersection.exterior.coords))\n if intersection.geom_type == 'MultiPolygon':\n for x in intersection:\n pattern.append(np.array(x.exterior.coords))\n\n pattern_obj = cls()\n pattern_obj.coord = pattern\n return pattern_obj", "def create_board(width, height):\n a = []\n for row in range(height):\n a += [createOneRow(width)] # gebruik de bovenstaande functie zodat ... één rij is!!\n return a", "def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)", "def create_board(self):\n canvas = tk.Canvas(master=self.panel_mid, width=530, height=550)\n canvas.configure(scrollregion=(self.offset_x, self.offset_y, 20, 20))\n\n # x1 y1 x2 y2\n for i in range(8):\n y = i * self.width\n for j in range(8):\n x = j * self.width\n if ((j + 1) % 2) == 0:\n if ((i + 1) % 2) == 0:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#fff\") # biela\n else:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#999\") # cierna\n else:\n if ((i + 1) % 2) == 1:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#fff\") # biela\n else:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#999\") # cierna\n\n return canvas", "def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])", "def island_procreate(self):\n for y in self.island_map:\n for cell in y:\n cell.procreate()", "def _choose_regions(self, display_regions=False):\n dstl = Load_DSTL()\n if self.class_type == 1:\n # Select regions where there are buildings (with red roofs)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n elif self.class_type == 5:\n train_image, train_mask = 
dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n else:\n pass\n self.images = {'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))", "def generate_board(self):\n random.seed(self.seed)\n for row in self.grid:\n for column in row:\n probability = random.random()\n if self.live_probability > probability:\n column.set_alive()", "def cell_regions(\n x_len: float, y_len: float, factor: float = 2 / 3, buffer: float = 3.5\n) -> Tuple[List[List[float]], ...]:\n x_min, x_max = x_len / 2, -x_len / 2\n y_min, y_max = y_len / 2, -y_len / 2\n\n cell = [[x_min, x_max, x_max, x_min], [y_min, y_min, y_max, y_max]]\n\n liq = [\n [\n x_min * factor + buffer,\n x_max * factor - buffer,\n x_max * factor - buffer,\n x_min * factor + buffer,\n ],\n [\n y_min * factor + buffer,\n y_min * factor + buffer,\n y_max * factor - buffer,\n y_max * factor - buffer,\n ],\n ]\n\n crys = [\n [\n x_min * factor - buffer,\n x_max * factor + buffer,\n x_max * factor + buffer,\n x_min * factor - buffer,\n ],\n [\n y_min * factor - buffer,\n y_min * factor - buffer,\n y_max * factor + buffer,\n y_max * factor + buffer,\n ],\n ]\n\n return cell, liq, crys", "def _initiate_board(self):\n grid = []\n for i in range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def __init__(self, rows, columns, live_probability=0.3, seed=0):\n self.live_probability = live_probability\n self.seed = seed\n self.rows = rows\n self.columns = columns\n self.grid = [\n [Cell() for column_cells in range(self.columns)]\n for row_cells in range(self.rows)\n ]\n\n self.generate_board()", "def random_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = random.choice([0, 1])\n else:\n a[row][col] = 0\n \n return a", "def createBoard(width, height):\n A = []\n for row in range(height):\n A += [createOneRow(width)]\n return A", "def init_cells(self):\n state = list()\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if randint(1, 100) >= 100 - CELL_DENSITY:\n # Live cell.\n status = NORMAL\n state.append(1)\n else:\n # Dead cell.\n status = HIDDEN\n state.append(0)\n\n cell = self.canvas.create_rectangle((index % width) * CELL_SIZE, (index / width) * CELL_SIZE,\n ((index % width) + 1) * CELL_SIZE, ((index / width) + 1) * CELL_SIZE,\n fill=\"black\", state=status, outline=\"white\")\n 
self.cells.append(cell)\n\n return state", "def generate_grid():\n y_offset = -10\n for a in range(20):\n # Line 1\n # Adds offset to the x position of the squares\n x_offset = 10\n for b in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for c in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for d in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40\n # Line 2 (needs 2 lines because the offset of each line)\n # Adds offset to the x position of the squares\n x_offset = 30\n for e in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for f in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for g in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40", "def generate(self):\n print self._matrix\n for point, cell in self._matrix:\n walls = zip(('U', 'L', 'D', 'R'), cell)\n blocked = [x for x in walls if not x[1]]\n if len(blocked) < 3:\n # we have more than one exit, this isn't a dead end and we\n # don't need to do anything\n continue\n print \"***\"\n print \"%s: %s\" % (blocked, len(blocked))\n random.shuffle(blocked)\n while(blocked):\n try:\n self._matrix.carve(point, blocked.pop()[0])\n except IndexError:\n continue\n break", "def _createGrid(self, dimensions, density):\n import math\n\n xmin, xmax = dimensions[0], dimensions[1]\n imin, imax = dimensions[2], dimensions[3]\n\n hsteps = math.ceil((xmax - xmin)/density)\n vsteps = math.ceil((imax - imin)/density)\n\n hgrids = int(math.ceil(hsteps/self.gridsize))\n vgrids = int(math.ceil(vsteps/self.gridsize))\n\n grid_inc = density * self.gridsize\n \n #Add one inside the range() because you want to include the last one\n horizontal = [[xmin + (x * grid_inc), xmin + ((x+1) * grid_inc)] for x in range(hgrids)]\n vertical = [[imin + (im * grid_inc), imin + ((im+1) * grid_inc)] for im in range(vgrids)]\n\n #This makes the negative to positive less confusing, positive is at index = 0\n vertical.reverse()\n\n grid_map = []\n\n for im in vertical:\n temp = []\n for x in horizontal:\n my_x = list(x)\n my_x.extend(im)\n temp.append(my_x)\n grid_map.append(temp)\n\n return grid_map", "def gen_world(num_rows, num_cols):\n world = collections.deque()\n\n # Generate top perimeter.\n world.append([eg.ROCK] * num_cols)\n\n # In between top and bottom perimeters, generate a clean world.\n # (all non-perimeter cells are clear)\n for i in xrange(num_rows - 2):\n world.append([eg.ROCK] + ([eg.NONE] * (num_cols - 2)) + [eg.ROCK])\n\n # Generate bottom perimeter.\n world.append([eg.ROCK] * num_cols)\n\n # Apply red anthill in world.\n _randomly_apply_anthill(world, eg.RED)\n\n # Apply black anthill in world.\n _randomly_apply_anthill(world, eg.BLACK)\n\n # Apply food blocks in world.\n _randomly_apply_foodblob(world)\n\n # Apply rocks in world.\n _randomly_apply_rocks(world)\n\n world.appendleft([str(num_rows)])\n world.appendleft([str(num_cols)])\n\n return world", "def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and 
generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)", "def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list", "def topology_grid(self, width, height):\n\t\tfor s in self.sites:\n\t\t\ts.clear_neighbor()\n\t\tfor i in range(len(self.sites)):\n\t\t\tif (i % width) + 1 < width:\n\t\t\t\tself.sites[i].neighbors.append(self.sites[i + 1])\n\t\t\t\tself.sites[i + 1].neighbors.append(self.sites[i])\n\t\t\tif int(i / width) + 1 < height:\n\t\t\t\tself.sites[i].neighbors.append(self.sites[i + width])\n\t\t\t\tself.sites[i + width].neighbors.append(self.sites[i])", "def random_blocks():\n cells = []\n while len(cells) != 43:\n cell_to_add = (random.randint(0, 11), random.randint(0, 9))\n if cell_to_add not in cells:\n cells.append(cell_to_add)\n return cells", "def regions(self, player):\r\n myregions = []\r\n\r\n # my countries ( return a new list)\r\n # minestatic = player.countries.sort(key=lambda x: x.name, reverse=True)\r\n minestatic = sorted(player.countries)\r\n\r\n mine = sorted(player.countries)\r\n\r\n # function to find next of your connected region\r\n def enumNextRegion():\r\n # add first base to new region\r\n # then remove first from nodes\r\n newreg = [mine.pop()]\r\n # new region found, add\r\n newreg = list(set(newreg) | set(myconnected(newreg)))\r\n myregions.append(newreg)\r\n # update mine\r\n return list(set(mine) - set(newreg))\r\n\r\n # my nodes that are already checked\r\n self.checked = []\r\n\r\n # get my nodes that are connected, input radius arr\r\n def myconnected(radarr):\r\n # next radius adj nodes from radarr\r\n nextRad = []\r\n # from next radius arr, expand radius further\r\n for i, item in range(radarr):\r\n # adj to radarr element that's mine\r\n adjMine = list(list(set(self.g.nodes[item].adjList) & set(minestatic)) - set(self.checked))\r\n nextRad.append(adjMine)\r\n\r\n # the next radius from radarr\r\n nextRad = set(sum([], nextRad))\r\n\r\n # update checked\r\n self.checked = list(set(self.checked) | set(nextRad))\r\n\r\n # recurse while still has more\r\n if len(nextRad) > 0:\r\n return sum(list(set(nextRad) | myconnected(nextRad)), [])\r\n else:\r\n return []\r\n\r\n # call enum\r\n while len(mine) > 0:\r\n mine = enumNextRegion()\r\n\r\n # sort regions by size (desc)\r\n myregions = myregions.sort(key=lambda x: len(x), reverse=True)\r\n\r\n return myregions", "def __generate_rectangle_obstacles(self, world):\n obs_min_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"min_dim\"]\n obs_max_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_dim\"]\n obs_max_combined_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_combined_dim\"]\n obs_min_count = self.cfg[\"obstacle\"][\"rectangle\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"rectangle\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dim_range = obs_max_dim - obs_min_dim\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, 
obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n # generate dimensions\n width = obs_min_dim + (random() * obs_dim_range )\n height = obs_min_dim + (random() * obs_dim_range )\n while width + height > obs_max_combined_dim:\n height = obs_min_dim + (random() * obs_dim_range )\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = RectangleObstacle(width, height, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles", "def random(self, width, height, seed = None):\n self.grid = [ [''] * width for i in range(height) ]\n random.seed(seed)\n start = ( random.randint(0, len(self.grid) - 1),\n random.randint(0, len(self.grid[0]) - 1)\n )\n visited = set([start])\n self._createPath(start, visited)\n start = ( random.randint(0, len(self.grid) - 1), 0 )\n finish = ( random.randint(0, len(self.grid) - 1),\n len(self.grid[0]) - 1 )\n self.grid[start[0]][start[1]] += '^'\n self.grid[finish[0]][finish[1]] += '$'\n return self.grid, start, finish", "def region_growing(im: np.ndarray, seed_points: list, T: int) -> np.ndarray:\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n # You can also define other helper functions\n segmented = np.zeros_like(im).astype(bool)\n\n (H, W) = im.shape\n\n for seed_row, seed_col in seed_points:\n region = []\n region.append([seed_row, seed_col])\n for row, col in region:\n for rows in range((row-1),(row+2)): # Check neighbouring pixels\n for cols in range((col-1),(col+2)):\n if rows < H and rows >= 0 and cols < W and cols >= 0: # Is pixel inside image?\n if (np.abs(im[seed_row, seed_col] - im[rows, cols]) <= T) and not segmented[row, col]:\n region.append([rows, cols])\n segmented[row, col] = True\n return segmented\n ### END YOUR CODE HERE ### ", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def generate_level(self):\n for _ in range(AMOUNT_REGIONS_TO_DRAW):\n self._generate_next_blocks()", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in 
range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 
4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n 
self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType = 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height", "def nine_regions(self):\n\n 
coordinateList = []\n\n # Top left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Top center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] ) \n coordinateList.append( [x, y] )\n\n # Top right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * ( 1.0 - self.ratioTopLeft[IDX_X] ) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Center left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Bottom left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n return coordinateList", "def create_board(self, size):\n x = np.arange(0, size[0])\n y = np.arange(0, size[1])\n board = np.meshgrid(x, y)\n return board", "def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def generate_algo(width, height, initial_noise, repeat_parameters, empty_center=False):\n\n tiles = [[0 for y in range(height)] for x in range(width)]\n\n # Initial Random Noise\n for y in 
range(height):\n for x in range(width):\n if x in [0, width - 1] or y in [0, height - 1] or random.randint(0, 100) <= initial_noise:\n tiles[x][y] = 1\n\n # And do the rounding\n for (number_repeat, number_to_keep, number_to_be_born) in repeat_parameters:\n for repeat in range(number_repeat):\n for y in range(1, height - 1):\n for x in range(1, width - 1):\n count = WildernessRegion._count_border_tile(tiles, x, y, 1)\n if count >= number_to_keep:\n tiles[x][y] = 1\n elif number_to_be_born >= 0 and count <= number_to_be_born:\n tiles[x][y] = 1\n else:\n tiles[x][y] = 0\n\n if empty_center:\n WildernessRegion._eliminate_center_border(tiles, width, height) # A bit brutal\n for y in range(1, height - 1): # We smooth a bit the result\n for x in range(1, width - 1):\n count = WildernessRegion._count_border_tile(tiles, x, y, 1)\n if count >= number_to_keep:\n tiles[x][y] = 1\n else:\n tiles[x][y] = 0\n\n return tiles", "def createBoard(height, width):\r\n A = []\r\n for row in range(height):\r\n A += [createOneRow(width)] \r\n return A\r\n #return [createOneRow(width) for x in range(height)]\r", "def create_board(rows, columns):\n res = [[0 for i in range(columns)] for j in range(rows)]\n return res", "def create_grid(self):\n row = 0\n col = 0\n for row in range(self._dim):\n for col in range(self._dim):\n x1 = col*self._cell_dim # bottom left\n y1 = row * self._cell_dim # top left\n x2 = x1 + self._cell_dim # bottom right\n y2 = y1 + self._cell_dim # top right\n self.rect[row,col] = self.canvas.create_rectangle(x1,y1,x2,y2, fill=self._primary_color, outline=self._grid_lines_color, tags=\"rect\")\n self.canvas.tag_bind(self.rect[row, col], '<ButtonPress-1>', self.change_cell)\n col = 0\n row += 1\n if self._dim < 50:\n button_size = int(80*(self._dim/50))\n font_size = int(22*(self._dim/50))\n else:\n button_size = 80\n font_size = 18\n x1 = col * self._cell_dim + (((self._dim*self._cell_dim) - button_size*3)//2)\n y1 = row * self._cell_dim + 5\n x2 = x1 + button_size\n y2 = y1 + 20\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"toggle\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"toggle-text\", fill=self._secondary_color, text=\"Start\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"toggle\", '<ButtonPress-1>', self.toggle_refresh)\n self.canvas.tag_bind(\"toggle-text\", '<ButtonPress-1>', self.toggle_refresh)\n x1 = x2 + 5 # padding between buttons\n x2 = x1 + button_size\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"next\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"next-text\", fill=self._secondary_color, text=\"Next\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"next\", '<ButtonPress-1>', self.one_step)\n self.canvas.tag_bind(\"next-text\", '<ButtonPress-1>', self.one_step)\n x1 = x2 + 5 # padding between buttons\n x2 = x1 + button_size\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"clear\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"clear-text\", fill=self._secondary_color, text=\"Clear\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"clear\", '<ButtonPress-1>', self.clear_board)\n self.canvas.tag_bind(\"clear-text\", '<ButtonPress-1>', self.clear_board)\n self.model_refresh()", "def _cells_for_rect(self, r):\n cells = set()\n cy = floor(r.y1 / self.cell_size)\n while (cy * self.cell_size) <= r.y2:\n cx = floor(r.x1 / self.cell_size)\n while (cx * self.cell_size) <= r.x2:\n cells.add((int(cx), int(cy)))\n cx += 1.0\n cy += 1.0\n 
return cells", "def grid(self, northeast, southwest, density=200):\n grid = []\n\n # Determine the bounds of a standard, positive quadrant plot\n y_max, y_min = int(northeast[0]), int(southwest[0])\n x_max, x_min = int(northeast[1]), int(southwest[1])\n\n # Construct a sequence of boxes each moving clockwise from southwest corner\n master = []\n for x in range(x_min, x_max, density):\n for y in range(y_min, y_max, density):\n polygon = [\n (x, y),\n (x, y + density),\n (x + density, y + density),\n (x + density, y),\n (x, y)\n ]\n master.append(polygon)\n\n return master", "def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, B4, B5, B6, B7, B8]", "def generate_all_locations(grid, shape):", "def assign_cell(districts):\r\n hyp_dists = []\r\n created_district = False\r\n #Find the next unassigned cell:\r\n unassigned = np.where(districts==0)\r\n r = unassigned[0][0]\r\n c = unassigned[1][0]\r\n #Try setting the cell to each possible tag:\r\n # continuity_count(districts,1,r,c)\r\n for tag in range(1,6):\r\n #Make sure we havent skipped a tag:\r\n if (tag not in districts and not created_district) or (tag in districts):\r\n #Make sure we havent exceeded 5 cells with this tag:\r\n if (districts == tag).sum() < 5:\r\n if tag not in districts:\r\n #It doesnt make sense to create a new district with each tag, so make sure we only do it once per cell:\r\n created_district = True\r\n #This tag might work, so create a hypothetical case from it:\r\n hyp = np.copy(districts)\r\n hyp[r,c] = tag\r\n if continuity_test(hyp):\r\n #See if the hypothetical map is complete:\r\n if 0 in hyp:\r\n hyp_dists += assign_cell(hyp)\r\n else:\r\n hyp_dists += [hyp]\r\n if r==2 and c==2:\r\n print(len(hyp_dists))\r\n return hyp_dists", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def randomGrid(N):\n grid = np.zeros((N,N), dtype=int)\n for i in range(N): \n for j in range(N): \n if np.random.uniform() < 0.2:\n # cell alive\n grid[i,j] = int(np.random.uniform(low=1, high=(256*256*256)-1))\n return grid", "def region_region_checkerboard(self, **_):\n outputs: dict = {}\n\n if self.AGG_BY == 
\"zone\":\n agg = \"zone\"\n else:\n agg = \"region\"\n\n # List of properties needed by the plot, properties are a set of tuples and\n # contain 3 parts: required True/False, property name and scenarios required,\n # scenarios must be a list.\n properties = [(True, f\"{agg}_{agg}s_Net_Interchange\", self.Scenarios)]\n\n # Runs get_formatted_data within PlotDataStoreAndProcessor to populate PlotDataStoreAndProcessor dictionary\n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n\n ncols, nrows = set_x_y_dimension(len(self.Scenarios))\n grid_size = ncols * nrows\n excess_axs = grid_size - len(self.Scenarios)\n\n mplt = PlotLibrary(nrows, ncols, squeeze=False, ravel_axs=True)\n fig, axs = mplt.get_figure()\n plt.subplots_adjust(wspace=0.02, hspace=0.4)\n max_flow_group = []\n Data_Out = []\n n = 0\n for scenario in self.Scenarios:\n rr_int = self[f\"{agg}_{agg}s_Net_Interchange\"].get(scenario)\n if shift_leapday:\n rr_int = adjust_for_leapday(rr_int)\n\n if self.AGG_BY != \"region\" and self.AGG_BY != \"zone\":\n agg_region_mapping = (\n self.region_mapping[[\"region\", self.AGG_BY]]\n .set_index(\"region\")\n .to_dict()[self.AGG_BY]\n )\n # Checks if keys all aggregate to a single value, this plot requires multiple values to work\n if len(set(agg_region_mapping.values())) == 1:\n return UnsupportedAggregation()\n rr_int = rr_int.reset_index()\n rr_int[\"parent\"] = rr_int[\"parent\"].map(agg_region_mapping)\n rr_int[\"child\"] = rr_int[\"child\"].map(agg_region_mapping)\n rr_int_agg = rr_int.groupby([\"parent\", \"child\"], as_index=True).sum()\n rr_int_agg.rename(columns={\"values\": \"flow (MW)\"}, inplace=True)\n rr_int_agg = rr_int_agg.loc[\n rr_int_agg[\"flow (MW)\"] > 0.01\n ] # Keep only positive flows\n rr_int_agg.sort_values(ascending=False, by=\"flow (MW)\")\n rr_int_agg = rr_int_agg / 1000 # MWh -> GWh\n\n data_out = rr_int_agg.copy()\n data_out.rename(\n columns={\"flow (MW)\": \"{} flow (GWh)\".format(scenario)}, inplace=True\n )\n\n max_flow = max(rr_int_agg[\"flow (MW)\"])\n rr_int_agg = rr_int_agg.unstack(\"child\")\n rr_int_agg = rr_int_agg.droplevel(level=0, axis=1)\n\n current_cmap = plt.cm.get_cmap()\n current_cmap.set_bad(color=\"grey\")\n\n axs[n].imshow(rr_int_agg)\n axs[n].set_xticks(np.arange(rr_int_agg.shape[1]))\n axs[n].set_yticks(np.arange(rr_int_agg.shape[0]))\n axs[n].set_xticklabels(rr_int_agg.columns)\n axs[n].set_yticklabels(rr_int_agg.index)\n axs[n].set_title(scenario.replace(\"_\", \" \"), fontweight=\"bold\")\n\n # Rotate the tick labels and set their alignment.\n plt.setp(\n axs[n].get_xticklabels(),\n rotation=90,\n ha=\"right\",\n rotation_mode=\"anchor\",\n )\n\n # Delineate the boxes and make room at top and bottom\n axs[n].set_xticks(np.arange(rr_int_agg.shape[1] + 1) - 0.5, minor=True)\n axs[n].set_yticks(np.arange(rr_int_agg.shape[0] + 1) - 0.5, minor=True)\n axs[n].grid(which=\"minor\", color=\"k\", linestyle=\"-\", linewidth=1)\n axs[n].tick_params(which=\"minor\", bottom=False, left=False)\n\n max_flow_group.append(max_flow)\n Data_Out.append(data_out)\n n += 1\n\n # Remove extra axes\n mplt.remove_excess_axs(excess_axs, grid_size)\n\n cmap = cm.inferno\n norm = mcolors.Normalize(vmin=0, vmax=max(max_flow_group))\n cax = plt.axes([0.90, 0.1, 0.035, 0.8])\n fig.colorbar(\n cm.ScalarMappable(norm=norm, cmap=cmap),\n cax=cax,\n label=\"Total Net Interchange [GWh]\",\n )\n plt.xlabel(\"To Region\", 
color=\"black\", rotation=\"horizontal\", labelpad=40)\n plt.ylabel(\"From Region\", color=\"black\", rotation=\"vertical\", labelpad=40)\n\n data_table_out = pd.concat(Data_Out, axis=1)\n save_figures = self.figure_folder.joinpath(f\"{self.AGG_BY}_transmission\")\n fig.savefig(\n save_figures.joinpath(\"region_region_checkerboard.svg\"),\n dpi=600,\n bbox_inches=\"tight\",\n )\n data_table_out.to_csv(save_figures.joinpath(\"region_region_checkerboard.csv\"))\n\n outputs = DataSavedInModule()\n return outputs", "def board_generator(self, lines, width, height):\n board = Board(width, height, init_std=False)\n for i in range(len(lines)):\n for j in range(len(lines[i])):\n if lines[i][j] == '.':\n board[i][j] = CellState.empty\n elif lines[i][j] == 'b':\n board[i][j] = CellState.black\n elif lines[i][j] == 'w':\n board[i][j] = CellState.white\n return board", "def save_regions_bmp(self, robot, output_directory):\n # Make ouput directory if it doesn't already exist\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n regions_path = os.path.join(output_directory, \"regions.bmp\")\n if os.path.exists(regions_path):\n return\n image = Image.new(\"L\", OUTPUT_BMP_DIMENSIONS)\n draw = ImageDraw.Draw(image)\n pixels = image.load()\n output_width, output_height = image.size\n output_bounds = (0, output_height, output_width, 0)\n # Set default colour\n for i in range(output_width):\n for j in range(output_height):\n pixels[i, j] = OUTPUT_DEFAULT_COLOUR\n # Add regions\n for region in self.regions:\n translated_bounds = get_translated_bounds(region.bounds, self.bounds, output_bounds)\n left, top, right, bottom = list(map(int, translated_bounds))\n if robot.can_hold(region.permeability):\n colour = OUTPUT_VALID_COLOUR\n else:\n colour = OUTPUT_INVALID_COLOUR\n draw.rectangle((left, bottom, right, top), fill=colour)\n image.save(regions_path)\n LOGGER.debug(\"Saved regions!\")", "def _generate_inner_board(self, row, col):\n return [[Label(self.outer_board[row][col], width = self.LABEL_WIDTH, \n height = self.LABEL_HEIGHT, \n font = self.LABEL_FONT, text = TTTMove.BLANK) \n for _ in range(self.BOARD_DIM)]\n for _ in range(self.BOARD_DIM)]", "def _prepare_grid(self):\n draw_grid = list()\n for x in range(len(self._grid) + len(self._grid) + 1):\n if x % 2 == 0:\n draw_grid.append([self._walk_area_color if x % 2 != 0 else self._wall_color\n for x in range(len(self._grid) + len(self._grid) + 1)])\n else:\n draw_grid.append([self._walk_area_color\n for _ in range(len(self._grid) + len(self._grid) + 1)])\n\n draw_grid = self._draw_walls(draw_grid)\n draw_grid = self._draw_treasures(draw_grid)\n draw_grid = self._draw_border(draw_grid)\n return draw_grid", "def inner_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def inregionCut(self,l,r,g,Nb):\n A1 = np.random.randint(l+1-self.keepCL, r-1-(Nb-1)*(g+1)-(1-self.keepCL))\n return A1 + np.arange(Nb)*(1+g)", "def __init__(self):\n self._board_area = [[\" \" for i in range(20)] for j in range(20)]\n\n # Starting setup for board includes these coordinates black, and their mirror white\n black_start = [(1, 2), (2, 2), (2, 1), (2, 3), (3, 2), (4, 1), (4, 3), (5, 2), (6, 1), (6, 3), (7, 1),\n (7, 2), (7, 3), (8, 1), (8, 2), (8, 3), (9, 1), (9, 2), (9, 3), (10, 1), (10, 2), (10, 3),\n (11, 1), (11, 3), (12, 1), (12, 2), (12, 3), (13, 1), (13, 3), (14, 2), (15, 1), (15, 3),\n (16, 2), (17, 1), (17, 2), (17, 3), 
(18, 2), (2, 6), (5, 6), (8, 6), (11, 6),\n (14, 6), (17, 6)]\n\n # Border points set for clearing out stones that move beyond the border\n self._border = set((0, i) for i in range(20)) | set((19, i) for i in range(20))\n self._border = self._border | set((i, 0) for i in range(20)) | set((i, 19) for i in range(20))\n\n # Fill black and white stones\n for coord in black_start:\n self._board_area[coord[0]][coord[1]] = \"B\"\n self._board_area[coord[0]][-coord[1] - 1] = \"W\"\n\n # Alphabetic indexing of board for alpha-numeric movement inputs\n self._locmap = dict(zip(\"abcdefghijklmnopqrst\", range(20)))", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def gen_rhombus(width):\n for row in range(1, width +1, 2):\n yield f\"{(STAR * row).center(width)}\"\n\n for row in range(width -2, 0, -2):\n yield f\"{(STAR * row).center(width)}\"", "def generatePolygons():", "def create_neighborhood(self):\n if len(self.available_building_cells) == 0:\n return False\n # Pick cell\n shuffle(self.available_building_cells)\n\n neighborhood_origin = self.available_building_cells[0]\n if not self.creates_valid_building(neighborhood_origin):\n # If not a valid placement, remove location from list\n self.available_building_cells.remove(neighborhood_origin)\n # Retry!\n self.create_neighborhood()\n return True # Exit after neighborhood is created\n\n final_cells = [neighborhood_origin]\n self.available_building_cells.remove(neighborhood_origin)\n\n # Place building on origin\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_origin, attractiveness=random()))\n neighborhood_cells = self.environment.grid.get_neighborhood(neighborhood_origin, moore=True, include_center=True)\n\n # Create a random number of residence buildings in this neighborhood\n number_of_residences = randrange(2,6)\n for i in range(number_of_residences):\n while len(neighborhood_cells) > 0:\n shuffle(neighborhood_cells)\n # Only place building if space is empty\n if self.environment.grid.is_cell_empty(neighborhood_cells[0]):\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_cells[0], attractiveness=random()))\n final_cells.append(neighborhood_cells[0])\n try:\n # If this space was available before, remove it from list\n self.available_building_cells.remove(neighborhood_cells[0])\n except:\n pass\n\n continue\n\n # Remove cell from list\n neighborhood_cells.remove(neighborhood_cells[0])\n\n # Fill surrounding space around buildings with roads!\n for building_location in final_cells:\n for surrounding_cell in self.environment.grid.get_neighborhood(building_location, moore=True):\n if self.environment.grid.is_cell_empty(surrounding_cell):\n self.place_road(Road(surrounding_cell))\n\n return True", "def regular_board(shapes):\n board = [[0]*9 for _ in range(9)]\n for shape in shapes:\n for r, c in shape:\n board[r][c] = shape[(r, c)]\n return board", "def _regions_shmem_config(self) -> None:\n assert self.config is not None\n assert self.board is not None\n\n if not self.config.shmem:\n return\n\n for name, shmem_config in self.config.shmem.items():\n for cell_name in shmem_config.peers:\n cell = self.config.cells[cell_name]\n assert cell.pci_devices is not None\n\n # offset 2, since mem_regions always\n # start with table_region and common_output_region\n dev_id = cell.pci_devices[name].shmem_dev_id\n assert dev_id is not None\n assert cell.memory_regions is not None\n\n 
grouped_region_name = f\"{name}\"\n grouped_region = cell.memory_regions[grouped_region_name]\n\n cell_output_region = grouped_region.regions[2 + dev_id]\n\n def get_mem_region_index(cell, name):\n ret = -1\n index = 0\n\n for region_name, region in cell.memory_regions.items():\n if region_name == name:\n ret = index\n break\n\n if isinstance(region, GroupedMemoryRegion):\n index += len(region.regions)\n else:\n index += 1\n\n if ret == -1:\n raise Exception(\n f\"Invalid cells.yaml, not a memory-region: {name}\"\n )\n\n return ret\n\n shmem_regions_start = get_mem_region_index(\n cell, grouped_region_name\n )\n cell.pci_devices[name].shmem_regions_start = shmem_regions_start\n\n new_cell_output_region = copy.copy(cell_output_region)\n new_cell_output_region.flags = copy.copy(\n cell_output_region.flags\n )\n new_cell_output_region.flags.append(\"MEM_WRITE\")\n\n grouped_region.regions[2 + dev_id] = new_cell_output_region", "def create_board(self):\n # # empty 7x7 board\n # board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass", "def create_cells(self):\n raise NotImplementedError(\n \"create_cells function not reimplemented from base class\")", "def increased_obstacles_map(occupancy_grid):\n\n nb_rows = len(occupancy_grid)\n nb_cols = len(occupancy_grid[0])\n increased_occupancy_grid = np.zeros([nb_rows + 6, nb_cols + 6])\n\n for i in range(nb_rows):\n for j in range(nb_cols):\n\n if occupancy_grid[i, j] == OCCUPIED:\n increased_occupancy_grid[i:i + 7, j:j + 7] = np.ones([7, 7])\n\n final_occupancy_grid = increased_occupancy_grid[3:(LENGTH_case + 3), 3:(WIDTH_case + 3)]\n return final_occupancy_grid" ]
[ "0.61022526", "0.607753", "0.59886265", "0.5929426", "0.59226096", "0.58879864", "0.5817077", "0.5783131", "0.57591105", "0.57534856", "0.5751205", "0.575003", "0.57356465", "0.57249886", "0.57249135", "0.5723089", "0.56941694", "0.56906545", "0.5687791", "0.56527513", "0.5643714", "0.5632358", "0.56275725", "0.56214637", "0.5591842", "0.5580674", "0.557969", "0.55768937", "0.5569325", "0.5562425", "0.5551505", "0.55438834", "0.55319965", "0.5528943", "0.5526729", "0.55214703", "0.5512838", "0.5500978", "0.54884005", "0.54862374", "0.54623497", "0.5461101", "0.5455319", "0.5451441", "0.5426839", "0.54246217", "0.5417078", "0.5412932", "0.5411995", "0.54075724", "0.5403977", "0.5403891", "0.53898484", "0.53890055", "0.53885", "0.5370733", "0.5364035", "0.536328", "0.53536415", "0.5352385", "0.5351658", "0.5347523", "0.53431624", "0.5338333", "0.5334066", "0.5330014", "0.5325806", "0.53250545", "0.5324513", "0.53180903", "0.5317668", "0.5317102", "0.53149873", "0.5310264", "0.5309571", "0.53068966", "0.53029245", "0.5302831", "0.5301343", "0.5300075", "0.53000456", "0.52925885", "0.5290178", "0.5287689", "0.52858126", "0.5284702", "0.5279781", "0.5279749", "0.5272662", "0.5267499", "0.5255221", "0.52518034", "0.5250493", "0.52496904", "0.5248889", "0.524264", "0.5239989", "0.5239524", "0.5238318", "0.52282804" ]
0.71277833
0
Create a fence around unmasked regions such that nothing inside the regions can escape. Note that this is a little bit more aggressive than it strictly needs to be.
def build_fence(mask, shuffle=True):
    mask = mask.astype(np.int32)
    _i = np.array([-1,-1,-1,0,0,0,1,1,1], dtype=np.int32)
    _j = np.array([-1,0,1,-1,0,1,-1,0,1], dtype=np.int32)
    neighbors = ndimage.convolve(mask, np.ones((3,3)), mode='wrap')
    fence = np.zeros_like(mask)
    edge_i, edge_j = np.nonzero(mask * neighbors % 9)
    neighbors *= (1 - mask)
    if edge_i.size == 0:
        return fence

    # First pass. Add in fence where needed.
    if shuffle:
        k = get_rng().permutation(len(edge_i))
        edge_i = edge_i[k]
        edge_j = edge_j[k]
    for i, j in zip(edge_i, edge_j):
        n_i = (i + _i) % mask.shape[0]
        n_j = (j + _j) % mask.shape[1]
        if (neighbors[n_i, n_j] >= 3).any():
            neighbors[n_i, n_j] -= 1
            fence[i, j] += 1

    # Second pass. Remove fence where unneeded.
    fence_i, fence_j = np.nonzero(fence)
    if shuffle:
        k = get_rng().permutation(len(fence_i))
        fence_i = fence_i[k]
        fence_j = fence_j[k]
    for i, j in zip(fence_i, fence_j):
        n_i = (i + _i) % mask.shape[0]
        n_j = (j + _j) % mask.shape[1]
        if (neighbors[n_i, n_j] < 2).all():
            neighbors[n_i, n_j] += 1
            fence[i, j] -= 1

    return fence
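A minimal usage sketch, assuming the function above sits in a module (hypothetically named proc_gen here) that supplies its numpy, scipy.ndimage, and get_rng imports; with shuffle=False the RNG helper is never called:

import numpy as np
from proc_gen import build_fence  # hypothetical module name for the function above

# A 9x9 grid with a 3x3 unmasked region marked by ones in the middle.
mask = np.zeros((9, 9), dtype=np.int32)
mask[3:6, 3:6] = 1

fence = build_fence(mask, shuffle=False)

# fence has the same shape as mask; its nonzero cells are the positions
# where fence should be placed so that nothing inside the region can escape.
print(np.argwhere(fence > 0))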
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_spurious_landmarks(self):\r\n \r\n remove = np.argwhere(self.lm_counter < 0)\r\n self.lm = np.delete(self.lm, remove, axis=0)\r\n self.lm_cvar = np.delete(self.lm_cvar, remove, axis=0)\r\n self.lm_counter = np.delete(self.lm_counter, remove)\r\n \r\n return # Replace this.\r", "def mask_region(self, ypos, xpos, r):\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r ** 2 and 0 <= j<= self.shapes[0] - 1 and 0<= i <=self.shapes[1] - 1:\r\n j = int(j)\r\n i = int(i)\r\n self.masked[j, i] = 0", "def unstuck(self):\n mask = Map.current_map.mask\n \n x_max, y_max = mask.get_size()\n orig_x, orig_y = round(self.x), round(self.y)\n x, y = orig_x , orig_y\n unstuck_aggr = COLLISION_UNSTUCK_AGGRESSION\n \n # Vertical check for any open spots we could put the entity on...\n while y > 0:\n if not mask.get_at((x, y)):\n self.y = y\n self.vy = -unstuck_aggr\n return\n y -= unstuck_aggr\n y = orig_y\n while y < y_max:\n if not mask.get_at((x, y)):\n self.y = y\n self.vy = unstuck_aggr\n return\n y += unstuck_aggr\n y = orig_y\n \n # Horizontal spots?\n while x > 0:\n if not mask.get_at((x, y)):\n self.x = x\n self.vx = -unstuck_aggr\n return\n x -= unstuck_aggr\n x = orig_x\n while x < x_max:\n if not mask.get_at((x, y)):\n self.x = x\n self.vx = unstuck_aggr\n return\n x += unstuck_aggr\n x = orig_x\n \n # Diagonal spots\n while x > 0 and y > 0:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n self.vx, self.vy = -unstuck_aggr, -unstuck_aggr\n return\n x, y = x - unstuck_aggr, y - unstuck_aggr\n x, y = orig_x, orig_y\n while x < x_max and y < y_max:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n self.vx, self.vy = unstuck_aggr, unstuck_aggr\n return\n x, y = x + unstuck_aggr, y + unstuck_aggr\n x, y = orig_x, orig_y\n while x > 0 and y < y_max:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n return\n x, y = x - unstuck_aggr, y + unstuck_aggr\n x, y = orig_x, orig_y\n while x < x_max and y > 0:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n return\n x, y = x + unstuck_aggr, y - unstuck_aggr\n x, y = orig_x, orig_y\n \n # All right, I officially give up now.\n print(\"Couldn't unstuck object!\")", "def trim_floating_solid(im):\n holes = find_disconnected_voxels(~im)\n im[holes] = True\n return im", "def get_masked_regions(contig):\n masked_regions = \"\"\n\n seq = contig.seq\n contig_end = len(seq)-1\n masked = False\n for i, n in enumerate(seq):\n # mark the starting position of a softmasked region\n if not masked and n.islower():\n masked = True\n start = i\n\n # mark end position of softmasked region (can be end of contig)\n if masked and (n.isupper() or i == contig_end):\n masked = False\n end = i\n\n # store softmasked region in bed3 (chr, start, end) format\n masked_regions += f\"{contig.id}\\t{start}\\t{end}\\n\" # noqa: start exists\n\n return masked_regions", "def fix_straight_lines(self):\r\n\r\n # Creates a vertical 1x5 kernel and applies binary closing based on that kernel\r\n vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))\r\n self.thresh_invert = cv2.morphologyEx(self.thresh_invert, cv2.MORPH_CLOSE, vertical_kernel, iterations=9)\r\n\r\n # Creates a horizontal 5x1 kernel and applies binary closing based on that kernel\r\n horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))\r\n self.thresh_invert = cv2.morphologyEx(self.thresh_invert, cv2.MORPH_CLOSE, horizontal_kernel, iterations=4)", "def 
removeIslands(self):\n if isinstance(self.substrates, Polygon):\n return\n mainland = []\n for i, substrate in enumerate(self.substrates.geoms):\n ismainland = True\n for j, otherSubstrate in enumerate(self.substrates.geoms):\n if j == i:\n continue\n if Polygon(otherSubstrate.exterior.coords).contains(substrate):\n ismainland = False\n break\n if ismainland:\n mainland.append(substrate)\n self.substrates = shapely.geometry.collection.GeometryCollection(mainland)\n self.oriented = False", "def proc_unfilled_polygon(self, tokens):\n\n return self._proc_polygon(tokens, filled=False)", "def get_regions_mask(self, input):", "def fill_blind_pores(im):\n holes = find_disconnected_voxels(im)\n im[holes] = False\n return im", "def regular_neighborhood(self):\n euler_char = self.num_switches() - self.num_branches()\n return Surface(num_punctures=self.num_complementary_regions(),\n euler_char=euler_char)", "def land_unmasked(res='4x5', debug=False):\n from .GEOSChem_bpch import get_LWI_map # Kludge, use GEOS-Chem LWI\n\n # Create a np.ma mask\n if debug:\n print(('land_mask called for: ', res))\n m = np.ma.masked_not_equal(get_LWI_map(res=res), 1)\n if debug:\n print((mask, mask.shape))\n return m.mask", "def get_France_unmasked(res='4x5'):\n # France mask\n lowerlat = 42.5\n higherlat = 51\n lowerlon = -4.441\n higherlon = 7.7577\n # Get a mask for lat and lon range, then combine\n mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n higherlat=higherlat)\n mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n higherlon=higherlon)\n mask = np.ma.mask_or(mask1, mask2)\n # Only consider land grid boxes\n mask = np.ma.mask_or(mask, land_unmasked(res=res)[..., 0])\n return mask", "def cull_landmarks(landmarks):\n return np.delete(landmarks, np.r_[\n 49:54, 55:68, # Mouth\n 37:39, 40:42, # Left eye\n 43:45, 46:48, # Right eye\n ], axis=0)", "def make_midwest_mask(fname_coords):\n lon, lat, topo = STEM_parsers.parse_STEM_coordinates(fname_coords)\n lat_range = np.array((40,44)) #deg N\n lon_range = np.array((-96, -87)) #deg W\n\n lat_mask = ma.masked_inside(lat, *lat_range)\n lon_mask = ma.masked_inside(lon, *lon_range)\n mask = np.logical_and(lat_mask.mask, lon_mask.mask)\n\n return(mask)", "def make_western_mask(fname_coords, fname_top):\n top = STEM_parsers.parse_tobspred(fname_top)\n top = STEM_vis.grid_tobspred_data(top, which_data='emi_fac')\n\n lon, lat, topo = STEM_parsers.parse_STEM_coordinates(fname_coords)\n\n mask = np.logical_and(abs(top - 1.0) < 0.001, lon < -95)\n # mask out this little blip in prairie that slips through\n mask[np.logical_and(lat > 40, lon > -100)] = False\n #mask = lon < -110\n\n return(mask)", "def get_masked_scene(orig, mask, local_context_size = 80, dilation=False):\n orig_scene = orig.copy()\n mask_scene = mask.copy()\n orig_scene_no_mask = orig.copy()\n \n mask_info = np.where(mask_scene == 0) \n min_x = max(min(mask_info[0]) - local_context_size, 0)\n max_x = max(mask_info[0]) + local_context_size\n min_y = max(min(mask_info[1]) - local_context_size, 0)\n max_y = max(mask_info[1]) + local_context_size\n \n orig_scene = orig_scene[min_x:max_x,min_y:max_y]\n orig_scene_no_mask = orig_scene_no_mask[min_x:max_x,min_y:max_y]\n mask_scene = mask_scene[min_x:max_x,min_y:max_y]\n \n dialation_mask = np.zeros(mask_scene.shape) + 255\n \n if dilation:\n dialation_mask = cv2.dilate(255-mask_scene, np.ones((local_context_size,local_context_size)))\n \n #implot(dialation_mask)\n #plt.imshow(dialation_mask, 'gray')\n \n for x in range(mask_scene.shape[0]):\n for y in 
range(mask_scene.shape[1]):\n if mask_scene[x, y] == 0:\n orig_scene[x, y, :] = 0\n orig_scene_no_mask[x,y,:] = 0\n if dilation:\n if dialation_mask[x,y] == 0:\n orig_scene[x, y, :] = 0\n \n return orig_scene, mask_scene, orig_scene_no_mask, dialation_mask", "def test_offcenter(self):\n actual = cm.ring_mask((5, 5), 1, 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, False, True],\n [False, True, False, False, False],\n [False, False, True, False, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def highlight_available_v_fences(win, game):\n #Check if player has remaining fences\n player_turn = game.get_player_turn()\n if game.get_pawn(player_turn).get_remaining_fences() == 0:\n return\n\n board = game.get_board()\n\n #Set highlight color\n if player_turn == 1:\n color = LIGHTERRED\n else:\n color = LIGHTERBLUE\n \n for row in range(len(board)-2):\n for col in range(len(board)-1):\n #Highlight fence if no fence placed and does not interesect a horizontal fence. \n if not board[row][col]['v'] and not board[row+1][col]['v'] and board[row+1][col]['h'] != \"Fence Continued\" and game.fair_play_check('v',(col,row)):\n coords = board[row][col]['coord']\n v_fence_coords = (coords[0]*SQUARESIZE+FENCEWIDTH*(coords[0]-1), coords[1]*(SQUARESIZE+FENCEWIDTH))\n v_fence = pygame.Rect(v_fence_coords, (FENCEWIDTH,SQUARESIZE))\n pygame.draw.rect(win, color, v_fence)", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def nondetects(self, masked=False):\r\n grd = self.grd\r\n xnd = []\r\n ynd = []\r\n ncells = len(grd.cells['depth'])\r\n non_detects_i_tr = np.zeros(ncells, np.int32)\r\n if masked:\r\n not_flagged = np.where(self.rec_track.flagged==0)[0]\r\n rec_track = self.rec_track[not_flagged]\r\n rec_seg = self.make_segments(set_depth=True, \r\n input_rec_track=rec_track)\r\n else:\r\n rec_seg = self.rec_seg\r\n for nr, rseg in enumerate(rec_seg):\r\n seg = rec_seg[nr]\r\n dt = seg.dt\r\n if dt > dt_signal+1:\r\n t1 = seg.t1\r\n t2 = seg.t2\r\n nint = int(np.rint((t2-t1)/dt_signal)) - 1\r\n x1 = seg.x1\r\n x2 = seg.x2\r\n y1 = seg.y1\r\n y2 = seg.y2\r\n dx_nd = (x2 - x1)/float(nint+1)\r\n dy_nd = (y2 - y1)/float(nint+1)\r\n if nint < 120: # 10 minute cutoff for nondetect filling\r\n xint = [x1 + n*dx_nd for n in range(1,nint)]\r\n yint = [y1 + n*dy_nd for n in range(1,nint)]\r\n xnd = xnd + xint\r\n ynd = ynd + yint\r\n\r\n for nd in range(len(xnd)):\r\n xy = [xnd[nd], ynd[nd]]\r\n i = grd.select_cells_nearest(xy)\r\n if (i is not None) and (i >= 0):\r\n non_detects_i_tr[i] += 1\r\n\r\n return non_detects_i_tr", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def highlight_available_h_fences(win, game):\n #Check if player has remaining fences\n player_turn = game.get_player_turn()\n if game.get_pawn(player_turn).get_remaining_fences() == 0:\n return\n\n board = game.get_board()\n \n #Set highlight color\n if player_turn == 1:\n color = LIGHTERRED\n else:\n color = LIGHTERBLUE\n \n for row in range(len(board)-1):\n for col in range(len(board)-2):\n #Highlight fence if no fence placed and does not intersect a vertical fence\n if not board[row][col]['h'] and not board[row][col+1]['h'] and board[row][col+1]['v'] != \"Fence Continued\" and game.fair_play_check('h',(col,row)):\n coords = board[row][col]['coord']\n h_fence_coords = (coords[0]*(SQUARESIZE+FENCEWIDTH), 
coords[1]*SQUARESIZE+FENCEWIDTH*(coords[1]-1))\n h_fence = pygame.Rect(h_fence_coords, (SQUARESIZE,FENCEWIDTH))\n pygame.draw.rect(win, color, h_fence)", "def set_fence_mode(self, on):\r\n return self._arm.set_fense_mode(on)", "def outside(self,region):\n fs = FeatureSet()\n for f in self:\n if(f.isNotContainedWithin(region)):\n fs.append(f)\n return fs", "def filter_isolated_pixels(array):\n filtered_array = np.copy(array)\n id_regions, num_ids = ndimage.label(filtered_array,\n structure=np.ones((3, 3)))\n id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids+1)))\n area_mask = (id_sizes == 1)\n filtered_array[area_mask[id_regions]] = 0\n return filtered_array", "def _do_fenced_code_blocks(self, text):\r\n return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)", "def make_partioned_regions(shape, alpha=1.0, max_regions=5, min_regions=2):\n ring = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.int16)\n adjacent = np.array([ # Diagonals don't count as adjacent\n [-1,0,0,1],\n [0,-1,1,0]], dtype=np.int16).T\n nearby = np.meshgrid([-2,-1,0,1,2], [-2,-1,0,1,2])\n\n board = np.zeros(shape, dtype=np.int16)\n perimeters = [{\n (i, j) for i, j in zip(*np.nonzero(board == 0))\n }]\n exclusions = [set()]\n while sum(len(p) for p in perimeters) > 0:\n weights = np.array([len(p) for p in perimeters], dtype=float)\n weights[0] = min(alpha, weights[0]) if len(weights) <= max_regions else 1e-10\n if len(weights) <= min_regions:\n weights[1:] = 1e-10\n weights /= np.sum(weights)\n k = get_rng().choice(len(perimeters), p=weights)\n plist = list(perimeters[k])\n i, j = plist[get_rng().choice(len(plist))]\n perimeters[0].discard((i, j))\n perimeters[k].discard((i, j))\n if (i, j) in exclusions[k]:\n continue\n exclusions[0].add((i,j))\n exclusions[k].add((i,j))\n b = board[(i+nearby[0]) % shape[0], (j+nearby[1]) % shape[1]]\n b[2,2] = k or -1\n num_neighbors = signal.convolve2d(b != 0, ring, mode='valid')\n num_foreign = signal.convolve2d((b > 0) & (b != k), ring, mode='valid')\n if ((num_foreign > 0) & (num_neighbors > 2)).any() or num_foreign[1,1] > 0:\n continue\n # Add to the board\n if k == 0:\n k = len(perimeters)\n perimeters.append(set())\n exclusions.append(set())\n board[i, j] = k\n for i2, j2 in (adjacent + (i, j)) % shape:\n if board[i2, j2] == 0:\n perimeters[k].add((i2, j2))\n return board", "def _masked_edge(var,xac):\n\n if np.any(xac>0):\n ind_gap = (xac==np.nanmin(xac[xac>0]))\n if ind_gap.size==var.size:\n if ind_gap.shape!=var.shape:\n ind_gap = ind_gap.transpose()\n var[ind_gap] = np.nan\n elif ind_gap.size==var.shape[1]:\n var[:,ind_gap] = np.nan\n if np.any(xac<0):\n ind_gap = (xac==np.nanmax(xac[xac<0]))\n if ind_gap.size==var.size:\n if ind_gap.shape!=var.shape:\n ind_gap = ind_gap.transpose()\n var[ind_gap] = np.nan\n elif ind_gap.size==var.shape[1]:\n var[:,ind_gap] = np.nan\n\n return var", "def updraft_env_mask(tracer_2d, w_interp_2d, ql_2d, cb, ct, z_half, ql_tr = 1e-8):\n updraft_mask = np.ones_like(tracer_2d) #mask = 1 -> False, mask = 0 True\n tracer_mask = np.ones_like(tracer_2d)\n w_mask = np.ones_like(tracer_2d)\n ql_mask = np.ones_like(tracer_2d)\n nxy = np.shape(tracer_2d)[0]\n nz = np.shape(tracer_2d)[1]\n\n sigma_sum = 0.\n z_ql = 0.\n cloud_flag = False\n if np.isnan(cb) == False and np.isnan(ct) == False:\n z_ql = z_half[cb] + 0.25 * (z_half[ct] - z_half[cb])\n cloud_flag = True\n\n print \"z_ql = \", z_ql\n\n tracer_mean = np.mean(tracer_2d, axis=0)\n tracer_square_mean = np.mean(tracer_2d * tracer_2d, axis=0)\n tracer_variance = 
tracer_square_mean - tracer_mean * tracer_mean\n assert(tracer_variance.all() >= 0)\n tracer_std = np.sqrt(tracer_variance)\n\n for k in range(nz):\n sigma_sum += tracer_std[k]\n sigma_min = sigma_sum/(k+1.0) * 0.05 # threshold from the paper\n\n for i in range(nxy):\n if tracer_std[k] >= sigma_min:\n if tracer_2d[i,k] - tracer_mean[k] >= tracer_std[k]:\n updraft_mask[i,k] = 0\n tracer_mask[i,k] = 0\n # TODO - I think the paper condition should also include this\n # But it's not done in Pycles\n #else:\n # if tracer_2d[i,k] - tracer_mean[k] >= sigma_min:\n # updraft_mask[i,k] = 0\n\n if w_interp_2d[i,k] <= 0.:\n updraft_mask[i,k] = 1\n else:\n w_mask[i,k] = 0\n\n if cloud_flag:\n if z_half[k] >= z_ql and z_half[k] <= z_half[ct]:\n if ql_2d[i,k] < ql_tr:\n updraft_mask[i,k] = 1\n else:\n ql_mask[i,k] = 0\n\n env_mask = 1 - updraft_mask\n\n mask_dict = {}\n mask_dict[\"updraft\"] = updraft_mask\n mask_dict[\"env\"] = env_mask\n mask_dict[\"tracer\"] = tracer_mask\n mask_dict[\"w\"] = w_mask\n mask_dict[\"ql\"] = ql_mask\n\n return mask_dict", "def create_soft_blocks(self, count):\n while count > 0:\n x = int(random() * self.map_size[0])\n y = int(random() * self.map_size[1])\n if self.is_filled(x, y):\n continue\n\n self.create_soft_block_at(x, y)\n count -= 1", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def mask(self):\n\n mask = np.zeros(shape=(self._info.height, self._info.width), dtype=np.uint8)\n\n self.draw(image=mask, color=constants.COLOR_WHITE_MONO)\n\n mask_with_border = np.pad(mask, 1, 'constant', constant_values=255)\n\n cv2.floodFill(image=mask,\n mask=mask_with_border,\n seedPoint=(int(self.middle_point[0]), int(self.middle_point[1])),\n newVal=constants.COLOR_WHITE_MONO)\n\n return mask", "def regionstomask(in_regions, genome_len):\n out_mask = np.zeros((2,genome_len)).astype(bool)\n for region in in_regions:\n out_mask[region[0],region[1]:region[2]+1] = True\n return out_mask", "def __mask_region(self, img, vertices):\n\n mask = np.zeros_like(img) \n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n cv2.fillConvexPoly(mask, vertices, ignore_mask_color)\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def setDefensiveArea(self ,gameState): \n mazeCentreX = (gameState.data.layout.width - 2) / 2\n if not self.red:\n mazeCentreX += 1\n mazeCentreY = (gameState.data.layout.height - 2) / 2\n\n self.defenceRegion = []\n for i in range(1, gameState.data.layout.height - 1):\n if gameState.hasWall(mazeCentreX, i):\n a = 0\n \n else:\n self.defenceRegion.append((mazeCentreX, i))\n\n expectedSize = mazeCentreY\n actualSize = len(self.defenceRegion)\n\n\n for i in range(len(self .defenceRegion)):\n if expectedSize > actualSize:\n break\n else:\n self.defenceRegion.remove(self.defenceRegion[0])\n self.defenceRegion.remove(self.defenceRegion[-1])\n actualSize = len(self.defenceRegion)\n\n for i in range(len(self.defenceRegion)):\n if len(self.defenceRegion) > 2:\n self.defenceRegion.remove(self.defenceRegion[0])\n self.defenceRegion.remove(self.defenceRegion[-1])\n else:\n break", "def geodesic_erosion(mask, kernel, n=3):\n m = np.reshape(mask, [1, mask.shape[0] * mask.shape[1]])\n mean = m.sum()/(mask.shape[0] * mask.shape[1])\n ret, mask = cv2.threshold(mask, mean, 255, cv2.THRESH_BINARY)\n\n marker = util.bin_dilate(mask, kernel) # use dilated mask as marker\n\n last_marker = marker\n curr_marker = marker\n for N in range(n):\n curr_marker = util.bin_erode(last_marker, kernel)\n curr_marker = union(curr_marker, mask)\n if not np.any(last_marker != curr_marker):\n return curr_marker\n last_marker = curr_marker\n return curr_marker", "def _mask(self, map_):\n return None", "def CausalSegmentMask(segment_ids, dtype):\n\n assert dtype.is_floating\n # of shape [b, t, t].\n segment_mask = tf.cast(\n tf.not_equal(\n tf.expand_dims(segment_ids, 2), tf.expand_dims(segment_ids, 1)),\n dtype=dtype)\n slen = tf.shape(segment_ids)[1]\n causal_mask = 1 - tf.linalg.band_part(\n tf.ones([slen, slen], dtype=dtype), -1, 0)\n causal_mask = tf.expand_dims(causal_mask, 0)\n combined_mask = tf.cast(causal_mask + segment_mask > 0.5, dtype)\n min_value = GetDtypeMin(dtype)\n return tf.expand_dims(combined_mask * min_value, 1)", "def proc_filled_polygon(self, tokens):\n\n return self._proc_polygon(tokens, filled=True)", "def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')", "def remove_rain_norain_discontinuity(R):\n R = R.copy()\n zerovalue = np.nanmin(R)\n threshold = np.nanmin(R[R > zerovalue])\n R[R > zerovalue] -= threshold - zerovalue\n R -= np.nanmin(R)\n\n return R", "def clean_area(screen,origin,width,height,color):\r\n ox,oy = origin\r\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\r\n pygame.draw.polygon(screen, color, points, 0)", "def boundaries_erase(*args):\n return _ida_hexrays.boundaries_erase(*args)", "def get_front_door_mask(self) -> np.array:\n front_door_mask = self.boundary == 255\n region = measure.regionprops(front_door_mask.astype(int))[0]\n return 
np.array(region.bbox, dtype=int)", "def mask_ne(lonreg2, latreg2):\n nearth = cfeature.NaturalEarthFeature(\"physical\", \"ocean\", \"50m\")\n main_geom = [contour for contour in nearth.geometries()][0]\n\n mask = shapely.vectorized.contains(main_geom, lonreg2, latreg2)\n m2 = np.where(((lonreg2 == -180.0) & (latreg2 > 71.5)), True, mask)\n m2 = np.where(\n ((lonreg2 == -180.0) & (latreg2 < 70.95) & (latreg2 > 68.96)), True, m2\n )\n m2 = np.where(((lonreg2 == 180.0) & (latreg2 > 71.5)), True, mask)\n m2 = np.where(\n ((lonreg2 == 180.0) & (latreg2 < 70.95) & (latreg2 > 68.96)), True, m2\n )\n # m2 = np.where(\n # ((lonreg2 == 180.0) & (latreg2 > -75.0) & (latreg2 < 0)), True, m2\n # )\n m2 = np.where(((lonreg2 == -180.0) & (latreg2 < 65.33)), True, m2)\n m2 = np.where(((lonreg2 == 180.0) & (latreg2 < 65.33)), True, m2)\n\n return ~m2", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)", "def _isolate(self, frame, landmarks, points):\n region = np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in points])\n region = region.astype(np.int32)\n # Applying a mask to get only the eye\n height, width = frame.shape[:2]\n black_frame = np.zeros((height, width), np.uint8)\n mask = np.full((height, width), 255, np.uint8)\n cv2.fillPoly(mask, [region], (0, 0, 0))\n eye = cv2.bitwise_not(black_frame, frame.copy(), mask=mask)\n # Cropping on the eye\n margin = 5\n min_x = np.min(region[:, 0]) - margin\n max_x = np.max(region[:, 0]) + margin\n min_y = np.min(region[:, 1]) - margin\n max_y = np.max(region[:, 1]) + margin\n self.frame = eye[min_y:max_y, min_x:max_x]\n self.origin = (min_x, min_y)\n height, width = self.frame.shape[:2]\n self.center = (width / 2, height / 2)", "def region_of_interest(self,img):\r\n #defining a blank mask\r\n mask = np.zeros_like(img) \r\n #checking number of image channel(color/grayscale) and applying mask\r\n if len(img.shape) > 2:\r\n ignore_mask_color = (255,255,255)\r\n else:\r\n ignore_mask_color = 255\r\n #filling color to pixels inside the polygon \r\n cv2.fillPoly(mask, self.vertices_img, ignore_mask_color)\r\n #image where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n #cv2.imshow('',masked_image)\r\n return masked_image", "def ocean_unmasked(res='4x5', debug=False):\n\n from .GEOSChem_bpch import get_LWI_map\n if debug:\n print(('ocean_mask called for: ', res))\n\n # Create a mask from land/water/ice indices\n m = np.ma.masked_not_equal(get_LWI_map(res=res), 0)\n if debug:\n print((mask, mask.shape))\n return m.mask", "def _region_mask(self, cs, all_regions, xctr, yctr, hwcs):\n if not HAS_REGIONS:\n return None\n ctr_coord = ar.PixCoord(xctr, yctr)\n mask = None\n for reg_str in all_regions:\n # read ds9 string into a region class\n try:\n with set_log_level('CRITICAL'):\n frame_regions = ar.Regions.parse(reg_str, format='ds9')\n except Exception as err:\n log.debug(f'Region parser error: {err}')\n continue\n for fr in frame_regions:\n if cs == 'wcs':\n # convert to a pixel region first\n try:\n with set_log_level('CRITICAL'):\n fr = fr.to_pixel(hwcs)\n except Exception as err: # pragma: no cover\n # error could be anything, since regions package\n # is in early development state\n log.debug(f'Region WCS 
conversion error: {err}')\n continue\n\n # check if cursor is contained in a region\n # in any frame\n with set_log_level('CRITICAL'):\n contained = fr.contains(ctr_coord)\n if hasattr(contained, '__len__'):\n # PolygonPixelRegion returns an array, currently\n # (regions v0.4)\n contained = contained[0]\n\n if contained:\n # get mask from first matching region\n try:\n with set_log_level('CRITICAL'):\n mask = fr.to_mask()\n except Exception as err: # pragma: no cover\n # error could be anything, since regions package\n # is in early development state\n log.debug(f'Region mask error: {err}')\n continue\n else:\n log.info(f'Contained in {type(fr).__name__}')\n break\n if mask is not None:\n break\n\n # reset active frame\n return mask", "def clean_mask(mask, background=0):\n kernels = [\n np.array([[ 1, -1, -1], [-1, 1, -1], [-1, -1, -1]]), # top left standalone pixel\n np.array([[-1, -1, 1], [-1, 1, -1], [-1, -1, -1]]), # top right standalone pixel\n np.array([[-1, -1, -1], [-1, 1, -1], [ 1, -1, -1]]), # bottom left standalone pixel\n np.array([[-1, -1, -1], [-1, 1, -1], [-1, -1, 1]]) # bottom right standalone pixel\n ]\n\n proc_masks = [cv2.morphologyEx(mask, cv2.MORPH_HITMISS, kernel).astype(np.bool) for kernel in kernels]\n\n for proc_mask in proc_masks:\n mask[proc_mask] = background\n return mask", "def segment_region_of_interest(image):\n binary = image < 604\n cleared = clear_border(binary)\n\n label_image = label(cleared)\n\n areas = [r.area for r in regionprops(label_image)]\n areas.sort()\n if len(areas) > 2:\n for region in regionprops(label_image):\n if region.area < areas[-2]:\n for coordinates in region.coords:\n label_image[coordinates[0], coordinates[1]] = 0\n\n binary = label_image > 0\n\n selem = disk(2)\n binary = binary_erosion(binary, selem)\n\n selem = disk(10)\n binary = binary_closing(binary, selem)\n\n edges = roberts(binary)\n binary = scipy.ndimage.binary_fill_holes(edges)\n\n get_high_vals = binary == 0\n image[get_high_vals] = 0\n\n return image", "def sense_landmarks(state, field_map, max_observations):\n\n assert isinstance(state, np.ndarray)\n assert isinstance(field_map, Landscape)\n\n assert state.shape == (3,)\n\n M = field_map.num_landmarks\n #print(M, field_map.landmarks.shape)\n noise_free_observations_list = list()\n for k in range(M):\n noise_free_observations_list.append(get_observation(state, field_map, k))\n noise_free_observation_tuples = [(x[0], np.abs(x[1]), int(x[2])) for x in noise_free_observations_list]\n\n dtype = [('range', float), ('bearing', float), ('lm_id', int)]\n noise_free_observations = np.array(noise_free_observations_list)\n noise_free_observation_tuples = np.array(noise_free_observation_tuples, dtype=dtype)\n\n ii = np.argsort(noise_free_observation_tuples, order='bearing')\n noise_free_observations = noise_free_observations[ii]\n noise_free_observations[:, 2] = noise_free_observations[:, 2].astype(int)\n\n c1 = noise_free_observations[:, 1] > -np.pi / 2.\n c2 = noise_free_observations[:, 1] < np.pi / 2.\n ii = np.nonzero((c1 & c2))[0]\n\n if ii.size <= max_observations:\n return noise_free_observations[ii]\n else:\n return noise_free_observations[:max_observations]", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def remove_shadow(patch):\r\n lt = not np.any(patch[0,0])\r\n rt = not np.any(patch[0,-1])\r\n lb = not np.any(patch[-1,0])\r\n rb = not np.any(patch[-1,-1])\r\n\r\n return lt or rt or lb or rb", "def 
create_geofence(self):\n\t\tring = ogr.Geometry(ogr.wkbLinearRing)\n\t\tring.AddPoint(*self.north_coords)\n\t\tring.AddPoint(*self.northeast_coords)\n\t\tring.AddPoint(*self.east_coords)\n\t\tring.AddPoint(*self.southeast_coords)\n\t\tring.AddPoint(*self.south_coords)\n\t\tring.AddPoint(*self.southwest_coords)\n\t\tring.AddPoint(*self.west_coords)\n\t\tring.AddPoint(*self.northwest_coords)\n\t\tring.AddPoint(*self.north_coords)\n\t\tself.polygon.AddGeometry(ring)", "def __iteratively_retain(\n self,\n orf_regions: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n\n ret = []\n\n arr = np.zeros((len(self.seq), ))\n\n for start, end in orf_regions:\n ret.append((start, end))\n arr[start-1:end] = 1\n orf_coverage = np.sum(arr) / len(arr)\n if orf_coverage > self.min_orf_coverage:\n break\n\n return ret", "def contextualcull(cnts):\n temp = []\n actualout = []\n indicesused = []\n for cnt in cnts:\n if cnt.area > CULL_MINIMUMS[cnt.spatialindex]:\n temp.append(cnt)\n if cnt.spatialindex not in indicesused:\n indicesused.append(cnt.spatialindex)\n if PRIORITIZE_FASTEST:\n for sindex in indicesused: #filter down to the fastest contour.\n _allatthisindex = [x for x in temp if x == sindex]\n highest = _allatthisindex.sort(key=lambda y: y.spd, reverse=False)[0]\n actualout.append(highest)\n else:\n actualout = temp\n print 'Contours after cull: ', len(actualout)\n return actualout", "def sanitize_mask(orig_x, orig_y, mask):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Draw contours:\n cv2.drawContours(mask, contours, 0, (0, 255, 0), 2)\n # Calculate image moments of the detected contour\n num_objects = (len(contours))\n #threshold\n threshold = 3\n\n center_list = []\n # print(num_objects)\n if num_objects > 1:\n for item in range(num_objects):\n M = cv2.moments(contours[item])\n try:\n center_x = round(M['m10'] / M['m00'])\n center_y = round(M['m01'] / M['m00'])\n center_list.append([center_y , center_x ])\n except:\n pass\n\n # initialize retmask\n retmask = mask\n if num_objects > 1:\n for x, y in center_list:\n if orig_x - threshold <= x <= orig_x + threshold and orig_y - threshold <= y <= orig_y + threshold:\n pass\n else:\n def dfs_removal(px , py, mask):\n R = len(mask)\n C = len(mask[0])\n if mask[px][py ] != 255: \n return\n mask[px][py] = 0\n if 0 <= px - 1 and mask[px - 1][py ] == 255: dfs_removal(px - 1 , py , mask)\n if px + 1 < R and mask[px + 1][py ] == 255: dfs_removal(px + 1 , py , mask)\n if 0 <= py - 1 and mask[px][py - 1] == 255: dfs_removal(px, py -1 , mask)\n if py + 1 < C and mask[px][py + 1] == 255: dfs_removal(px, py + 1 , mask)\n\n dfs_removal(x,y, mask)\n\n return retmask", "def _create_observation_mask(self):\n\n\n if self.BLUE_PARTIAL:\n centers, radii = [], []\n for agent in self._team_blue:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._blue_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.blue_memory = np.logical_and(self.blue_memory, self._blue_mask)\n else:\n self._blue_mask = np.zeros_like(self._static_map, dtype=bool)\n\n if self.RED_PARTIAL:\n centers, radii = [], []\n for agent in self._team_red:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._red_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.red_memory = np.logical_and(self.red_memory, self._red_mask)\n else:\n self._red_mask = np.zeros_like(self._static_map, 
dtype=bool)", "def ice_unmasked(res='4x5', debug=False):\n # Create a np.ma mask\n m = np.logical_not((land_unmasked(res)*ocean_unmasked(res)))\n if debug:\n print((mask, mask.shape))\n return m", "def spectrum_regions(x_on,\n y_on,\n r_on,\n x_fov,\n y_fov,\n r_fov,\n exclusion,\n outfile,\n min_on_distance):\n from astropy.io import fits\n from gammapy.background import ReflectedRegionMaker\n\n if exclusion:\n log.info('Reading {0}'.format(exclusion))\n exclusion = fits.open(exclusion)[0]\n else:\n # log.info('No exclusion mask used.')\n # TODO: make this work without exclusion mask\n log.error(\"Currently an exclusion mask is required\")\n exit(-1)\n\n fov = dict(x=x_fov, y=y_fov, r=r_fov)\n rr_maker = ReflectedRegionMaker(exclusion=exclusion,\n fov=fov)\n source = dict(x_on=x_on, y_on=y_on, r_on=r_on)\n rr_maker.compute(**source)\n\n log.info('Writing {0}'.format(outfile))\n rr_maker.write_off_regions(outfile)", "def test_degrade_widemask_or(self):\n\n nside_coverage = 32\n nside_map = 256\n nside_map2 = 64\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,\n WIDE_MASK, wide_mask_maxbits=7)\n sparse_map_or = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map2,\n WIDE_MASK, wide_mask_maxbits=7)\n # Fill some pixels in the \"high-resolution\" map\n pixel = np.arange(4000, 8000)\n sparse_map.set_bits_pix(pixel, [4])\n\n # Check which pixels will be full in the \"low-resolution\" map and fill them\n pixel2 = np.unique(np.right_shift(pixel, healsparse.utils._compute_bitshift(nside_map2, nside_map)))\n sparse_map_or.set_bits_pix(pixel2, [4])\n\n # Degrade with or\n sparse_map_test = sparse_map.degrade(nside_map2, reduction='or')\n\n # Check the results\n testing.assert_almost_equal(sparse_map_or._sparse_map, sparse_map_test._sparse_map)\n\n # Repeat for maxbits > 8\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,\n WIDE_MASK, wide_mask_maxbits=16)\n sparse_map_or = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map2,\n WIDE_MASK, wide_mask_maxbits=16)\n # Fill some pixels in the \"high-resolution\" map\n pixel = np.arange(0, 1024)\n pixel = np.concatenate([pixel[:512], pixel[512::3]]).ravel()\n sparse_map.set_bits_pix(pixel, [4, 12])\n sparse_map.clear_bits_pix(pixel[:16], [4]) # set low value in the first pixel\n\n # Check which pixels will be full in the \"low-resolution\" map and fill them\n # Note that we are filling more than the ones that are going to be True\n # since we want to preserve the coverage_map\n pixel2_all = np.unique(np.right_shift(pixel,\n healsparse.utils._compute_bitshift(nside_map2, nside_map)))\n sparse_map_or.set_bits_pix(pixel2_all, [4, 12])\n\n # Get the pixel number of the bad pixels\n pixel2_bad = np.array([0])\n sparse_map_or.clear_bits_pix(pixel2_bad, [4]) # set low value in the first pixel\n\n # Degrade with or\n sparse_map_test = sparse_map.degrade(nside_map2, reduction='or')\n\n # Check the results\n testing.assert_almost_equal(sparse_map_test._sparse_map, sparse_map_or._sparse_map)\n\n # Test degrade-on-read\n self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')\n\n fname = os.path.join(self.test_dir, 'test_wide_degrade.hs')\n sparse_map.write(fname)\n\n sparse_map_test2 = healsparse.HealSparseMap.read(fname, degrade_nside=nside_map2, reduction='or')\n testing.assert_almost_equal(sparse_map_test2._sparse_map, sparse_map_or._sparse_map)", "def _offset_region(all_polygons, bboxes,\n left, bottom, right, top,\n distance = 5,\n join_first = True,\n precision = 1e-4,\n 
join = 'miter',\n tolerance = 2):\n\n # Mark out a region slightly larger than the final desired region\n d = distance*1.01\n\n polygons_to_offset = _crop_edge_polygons(all_polygons, bboxes, left-d,\n bottom-d, right+d, top+d,\n precision = precision)\n\n # Offset the resulting cropped polygons and recrop to final desired size\n polygons_offset = clipper.offset(polygons_to_offset, distance, join,\n tolerance, 1/precision, int(join_first))\n polygons_offset_cropped = _crop_region(polygons_offset, left, bottom,\n right, top, precision = precision)\n\n return polygons_offset_cropped", "def __trim(\n self,\n orf_regions: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n\n ret = []\n\n for start, end in orf_regions:\n if start == 1:\n new_start = start # Trust the model prediction\n else:\n new_start = self.__find_start_codon(start, end)\n ret.append((new_start, end))\n\n return ret", "def as_boolean_mask(self):\n bbox = self.bbox()\n zs = np.unique([c.image_z_position for c in self.contours])\n z_to_index = dict(zip(zs,range(len(zs))))\n\n # Get dimensions, initialize mask.\n nx,ny = np.diff(bbox[:2], axis=1).astype(int) + 1\n nx = int(nx); ny = int(ny)\n nz = int(zs.shape[0])\n mask = np.zeros((nx,ny,nz), dtype=np.bool)\n\n # We check if these points are enclosed within each contour \n # for a given slice. `test_points` is a list of image coordinate \n # points, offset by the bounding box.\n test_points = bbox[:2,0] + np.c_[ np.where(~mask[:,:,0]) ]\n\n # First we \"turn on\" pixels enclosed by inclusion contours.\n for contour in self.contours:\n if contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels\n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n contains_pts = path.contains_points(test_points)\n mask[:,:,zi] = contains_pts.reshape(mask.shape[:2])\n\n # Second, we \"turn off\" pixels enclosed by exclusion contours.\n for contour in self.contours:\n if not contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n path = mplpath.Path(contour_matrix, closed=True)\n not_contains_pts = ~path.contains_points(test_points)\n not_contains_pts = not_contains_pts.reshape(mask.shape[:2])\n mask[:,:,zi] = np.logical_and(mask[:,:,zi], not_contains_pts)\n\n # The first and second axes have to \n # be swapped because of the reshape.\n return mask.swapaxes(0,1), bbox[[1,0,2]]", "def build_block_cross(self):\n from ambry.geo.util import find_geo_containment, find_containment\n from geoid import civick \n\n lr = self.init_log_rate(3000)\n\n def gen_bound():\n \n boundaries = self.library.dep('blockgroups').partition\n\n # Note, ogc_fid is the primary key. The id column is created by the shapefile. 
\n for i,boundary in enumerate(boundaries.query(\n \"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups\")):\n lr('Load rtree')\n \n yield i, boundary['wkt'] , boundary['gvid'] \n \n def gen_points():\n\n for row in self.partitions.find(table = 'facilities_addresses').rows:\n if row['longitude'] and row['latitude']:\n yield (row['longitude'], row['latitude']), row['facilities_id']\n\n\n p = self.partitions.find_or_new(table='facilities_geoids')\n p.clean()\n\n with p.inserter() as ins:\n for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):\n\n blockgroup_gvid = civick.Blockgroup.parse(cntr_o)\n tract_gvid = blockgroup_gvid.convert(civick.Tract)\n county_gvid = blockgroup_gvid.convert(civick.County)\n \n ins.insert(dict(facilities_id = point_o, \n blockgroup_gvid = str(blockgroup_gvid),\n tract_gvid = str(tract_gvid),\n county_gvid = str(county_gvid)\n ))\n \n lr('Marking point containment')", "def create_region_mask(latitude_array, target_shape, lat_bounds):\n\n target_ndim = len(target_shape)\n\n southern_lat, northern_lat = lat_bounds\n mask_array = numpy.where((latitude_array >= southern_lat) & (latitude_array < northern_lat), False, True)\n\n mask = uconv.broadcast_array(mask_array, [target_ndim - 2, target_ndim - 1], target_shape)\n assert mask.shape == target_shape \n\n return mask", "def RemovePolygonHoles_management(in_fc, threshold=0.0):\n desc = arcpy.Describe(in_fc)\n if desc.dataType != \"FeatureClass\" and desc.dataType != \"ShapeFile\":\n print(\"Invalid data type. The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n else:\n if desc.shapeType != \"Polygon\":\n print(\"The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n if threshold < 0.0:\n threshold = 0.0\n with arcpy.da.UpdateCursor(in_fc, [\"SHAPE@\"]) as updateCursor:\n for updateRow in updateCursor:\n shape = updateRow[0]\n new_shape = arcpy.Array()\n for part in shape:\n new_part = arcpy.Array()\n if threshold > 0:\n # find None point in shape part\n # in arcpy module, a None point is used to seperate exterior and interior vertices\n null_point_index = []\n for i in range(len(part)):\n if part[i] is None:\n null_point_index.append(i)\n # if interior vertices exist, create polygons and compare polygon shape area to given threshold\n # if larger, keep vertices, else, dismiss them\n if len(null_point_index) > 0:\n for k in range(0, null_point_index[0]):\n new_part.add(part[k])\n for i in range(len(null_point_index)):\n pointArray = arcpy.Array()\n # determine if the None point is the last one\n if i+1 < len(null_point_index):\n for j in range(null_point_index[i] + 1, null_point_index[i+1]):\n pointArray.add(part[j])\n else:\n for j in range(null_point_index[i] + 1, len(part)):\n pointArray.add(part[j])\n # create a polygon to check shape area against the given threshold\n inner_poly = arcpy.Polygon(pointArray)\n # if larger than threshold, then add to the new part Array\n if inner_poly.area > threshold:\n if i+1 < len(null_point_index):\n for k in range(null_point_index[i], null_point_index[i+1]):\n new_part.add(part[k])\n else:\n for k in range(null_point_index[i], len(part)):\n new_part.add(part[k])\n new_shape.add(new_part)\n # if interior does not exist, add the whole part\n else:\n new_shape.add(part)\n else:\n # get the first None point index\n first_null_point_index = 0\n for i in range(len(part)):\n if part[i] is None:\n first_null_point_index = i\n break\n if first_null_point_index == 0:\n new_shape.add(part)\n else:\n for j in 
range(first_null_point_index):\n new_part.add(part[j])\n new_shape.add(new_part)\n if len(new_shape) > 0:\n new_poly = arcpy.Polygon(new_shape)\n updateRow[0] = new_poly\n updateCursor.updateRow(updateRow)", "def create_mask(frame):\n \n # detect ridges\n ridges = enhance_ridges(frame)\n\n # threshold ridge image\n thresh = filters.threshold_otsu(ridges)\n thresh_factor = 1.1\n prominent_ridges = ridges > thresh_factor*thresh\n prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=128)\n\n # the mask contains the prominent ridges\n mask = morphology.convex_hull_image(prominent_ridges)\n mask = morphology.binary_erosion(mask, disk(10))\n return mask", "def unmask(self, near_x, near_y, radius=BRIGHTEN_RECT_TEMP, is_lighter=False):\n #self._actual_unmask(self.last_brighten, self.masked_image, radius=radius)\n \n brighten_rect = pygame.draw.circle(self.image, (255, 255,0), (near_x, near_y), radius/2)\n \n if is_lighter:\n brighten_rect = pygame.draw.circle(self.image, (50, 0, 0), (near_x, near_y), 40)\n brighten_rect = pygame.draw.circle(self.image, (255, 255,0), (near_x, near_y), 38)\n \n self.game.dirty_rects.append(brighten_rect)\n \n self.last_brighten = (near_x, near_y)", "def masktoregions(in_mask):\n regions = []\n for i in [0,1]: # do the thing for the first and second strands\n current_strand = in_mask[i].copy().astype(float)\n current_strand[-1] = np.nan # set final position to np.nan to avoid overlap issues\n transitions = current_strand - np.roll(current_strand,1)\n true_start = np.where(transitions == 1)[0]\n true_end = np.where(transitions == -1)[0] - 1\n if current_strand[0] == 1: # if starts on True, add True start to front end\n true_start = np.r_[0,true_start]\n if in_mask[i][-1] == True: # if ends on True, add True end to back end\n true_end = np.r_[true_end, len(current_strand)-1]\n if in_mask[i][-2] == False: # if the one before is False, it's a single point True\n true_start = np.r_[true_start,len(current_strand)-1]\n if np.all(in_mask[i][-2:] == [True, False]):\n true_end = np.r_[true_end, len(current_strand)-2]\n regions.append(np.asarray([np.zeros(len(true_start))+i,true_start,true_end]).T)\n out_regions = np.concatenate(regions,axis=0).astype(int)\n return out_regions", "def cmask(self):\n mask = np.zeros(18)\n if 'full' in self.CONS: mask[:] = 1\n if 'f0' in self.CONS: mask[0] = 1\n if 'f1' in self.CONS: mask[1:4] = 1\n if 'f2' in self.CONS: mask[4:10] = 1\n if 'vx' in self.CONS: mask[10] = 1\n if 'vy' in self.CONS: mask[11] = 1\n if 'vz' in self.CONS: mask[12] = 1\n if 'TG' in self.CONS: mask[13:18] = 1\n return mask>0", "def GetCoastGrids(LandMask):\n \n \"\"\"\n Define a coastline map. This map will be set to 1 on all coast cells.\n \"\"\"\n \n \n CoastlineMap = np.zeros((LandMask.shape[0], LandMask.shape[1]))\n \n \"\"\"\n We will use a nested loop to loop through all cells of the Landmask cell. What this loop basically does is,\n when a cell has a value of 1 (land), it will make all surrounding cells 1, so we create kind of an extra line of \n grids around the landmask. In the end we will substract the landmask from the mask which is created by the nested loop, \n which result in only a mask with the coast grids. Notice, that when we're in the corner, upper, side, or lower row, and we\n meet a land cell, we should not make all surrounding cells 1. For example, we the lower left corner is a land grid, you should only make the inner cells 1. 
\n \"\"\"\n \n for i in range(LandMask.shape[0]-1):\n for j in range(LandMask.shape[1]-1):\n \n\n \"\"\"\n We have nine if statements, four for the corners, four for the sides and one for the middle\n of the landmask. \n \"\"\"\n\n if i == 0 and j == 0: #upper left corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j+1] = 1\n \n \n elif i == 0 and j != 0 and j != LandMask.shape[1]-1: #upper row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1, j] = 1\n CoastlineMap[i+1,j-1] = 1\n CoastlineMap[i+1,j+1] = 1\n \n \n elif i == 0 and j == LandMask.shape[1]-1: #upper right corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j-1] = 1\n \n elif i != 0 and i != LandMask.shape[0]-1 and j == LandMask.shape[1]-1: #right row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i+1,j] = 1\n CoastlineMap[i-1,j] = 1\n \n CoastlineMap[i, j-1] = 1\n CoastlineMap[i+1,j-1] = 1\n CoastlineMap[i-1,j-1] = 1\n \n elif i == LandMask.shape[0]-1 and j == LandMask.shape[1]-1: #lower right corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n \n CoastlineMap[i-1,j] = 1 \n CoastlineMap[i-1, j-1] = 1\n \n elif i == LandMask.shape[0]-1 and j != 0 and j != LandMask.shape[1]-1: #lower row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j-1] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i-1, j] = 1\n CoastlineMap[i-1,j-1] = 1\n CoastlineMap[i-1,j+1] = 1\n \n \n elif i == LandMask.shape[0]-1 and j == 0: #lower left corner\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i,j+1] = 1\n \n CoastlineMap[i+1,j] = 1 \n CoastlineMap[i+1, j+1] = 1\n \n elif i != 0 and i != LandMask.shape[0]-1 and j == 0: #left row\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1\n CoastlineMap[i+1,j] = 1\n CoastlineMap[i-1,j] = 1\n \n CoastlineMap[i, j+1] = 1\n CoastlineMap[i+1,j+1] = 1\n CoastlineMap[i-1,j+1] = 1\n \n else:\n \n if LandMask[i,j] == 1:\n \n CoastlineMap[i,j] = 1 #middle\n CoastlineMap[i+1,j] = 1#lowermiddle\n CoastlineMap[i-1,j] = 1#uppermiddle\n \n CoastlineMap[i+1, j-1] = 1\n CoastlineMap[i-1, j-1] = 1\n CoastlineMap[i, j-1] =1\n \n CoastlineMap[i+1, j+1] = 1\n CoastlineMap[i-1, j+1] = 1\n CoastlineMap[i, j+1] = 1\n \n \n \n \"\"\"\n Here we substract the landmaks from the coastline mask, resulting in only\n the coastline. 
\n \"\"\"\n \n \n Coastgrids = CoastlineMap - LandMask\n \n return Coastgrids, CoastlineMap", "def mask_img_region(self, image, coordinate_list):\n \n \n # Mask to locate cropping region (CR)\n mask = np.ones((image.shape),dtype=np.uint8)\n mask.fill(255)\n \n # Mark the coorinate region as black\n masked_image = cv2.fillPoly(mask, np.array([coordinate_list], dtype=np.int32),0)\n \n # Extract coorinate region from original image and rest is white \n cropped = cv2.bitwise_or(image, masked_image)\n \n return cropped", "def opencv_watershed(masked, mask) -> JSON_TYPE:\n # For code and detailed explanation see:\n # http://datahacker.rs/007-opencv-projects-image-segmentation-with-watershed-algorithm/\n threshold: int = 30\n gray = cv2.cvtColor(masked, cv2.COLOR_RGB2GRAY)\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n\n n_increases: int = 0\n while local_max_location.shape[0] < 30 and n_increases < 15:\n threshold += 20\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n n_increases += 1\n # Reset threshold\n threshold = 30\n\n num_clusters: int = 30\n if n_increases >= 15:\n num_clusters = local_max_location.shape[0]\n kmeans = KMeans(n_clusters=num_clusters)\n # If local_max_location size is 0, return 0 predictions\n if not local_max_location.size:\n return {\n \"count\": 0\n }\n kmeans.fit(local_max_location)\n local_max_location = kmeans.cluster_centers_.copy()\n # Kmeans is returning a float data type so we need to convert it to an int. \n local_max_location = local_max_location.astype(int)\n dist_transform_copy = dist_transform.copy()\n for i in range(local_max_location.shape[0]):\n cv2.circle(dist_transform_copy, (local_max_location[i][1], local_max_location[i][0]), 5, 255)\n # markers = np.zeros_like(dist_transform)\n ret, sure = cv2.threshold(dist_transform, 0.01*dist_transform.max(), 255, 0)\n sure = np.uint8(sure)\n ret, markers = cv2.connectedComponents(sure)\n labels = np.arange(kmeans.n_clusters)\n markers[local_max_location[:,0], local_max_location[:,1]] = labels + 1\n # Convert all local markers to an integer. This because cluster centers will be float numbers. 
\n markers = markers.astype(int)\n markers_copy = markers.copy()\n index_non_zero_markers = np.argwhere(markers != 0)\n markers_copy = markers_copy.astype(np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n for i in range(index_non_zero_markers.shape[0]):\n string_text = str(markers[index_non_zero_markers[i][0], index_non_zero_markers[i][1]])\n cv2.putText(markers_copy, string_text, (index_non_zero_markers[i][1], index_non_zero_markers[i][0]), font, 1, 255)\n markers = markers.astype(np.int32)\n segmented = cv2.watershed(masked, markers)\n count_segments(markers)\n #return {\n # \"count\": local_max_location.shape[0]\n #}\n return {\n \"count\": count_segments(markers),\n }", "def fill_vert(self, mask):\n im_floodfill = np.copy(mask)\n im_floodfill[im_floodfill!=self.vertebra_id] = 0\n im_floodfill[im_floodfill==self.vertebra_id] = 255\n im_floodfill_copy = np.copy(im_floodfill)\n # The size needs to be 2 pixels larger than the image.\n h, w = im_floodfill.shape[:2]\n mask4mask = np.zeros((h+2, w+2), np.uint8)\n # Floodfill from point (0, 0)\n cv2.floodFill(im_floodfill, mask4mask, (0,0), 255)\n # Invert floodfilled image\n im_floodfill_inv = cv2.bitwise_not(im_floodfill)\n # Combine the two images to get the foreground.\n im_floodfill_inv = im_floodfill_inv | im_floodfill_copy\n im_floodfill_inv[im_floodfill_inv==255] = self.vertebra_id\n mask_filled = mask | im_floodfill_inv\n return mask_filled", "def _occlude_image(im, cR, cC, size_patch, stride):\n im[cR:cR + stride, cC:cC + stride, :] = 127.5\n occ_map = np.ones((im_target_size, im_target_size))\n occ_map[cR:cR + stride, cC:cC + stride] = 0\n return im, occ_map", "def generate_effective_mask(self, mask_size: tuple, polygons_ignore):\n mask = np.ones(mask_size, dtype=np.uint8)\n\n for poly in polygons_ignore:\n instance = poly.astype(np.int32).reshape(1, -1, 2)\n cv2.fillPoly(mask, instance, 0)\n\n return mask", "def _boundary_constraint_fence(\n self,\n x: np.ndarray,\n ) -> np.ndarray:\n # clip dimensions to fit within the boundary\n x_constrained = np.clip(\n x,\n self.boundary_fence['min'],\n self.boundary_fence['max'],\n )\n return x_constrained", "def test_make_outer_mask_from_fp(self):\n fp_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n output_mask = boundary_mask(fp_mask, boundary_type=\"outer\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_b_mask_outer.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def get_occlusion_mask_from_rigid_flow(self, rigid_flow):\n with tf.variable_scope(\"get_occlusion_mask_from_rigid_flow\"):\n b, h, w, _ = rigid_flow.shape\n rigid_flow = tf.stop_gradient(rigid_flow)\n mask = bilinear_sampler.flow_warp(\n tf.ones([b, h, w, 1], dtype=tf.float32), rigid_flow\n )\n mask = tf.clip_by_value(mask, 0.0, 1.0)\n return mask", "def applymask(self,mask):\n self.spec[mask==0]=np.nan", "def _get_watershed_areas(self, class_contours, class_mask, kernel_size=3, area_thresh=500):\n\n kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)\n\n # Since the watershed draws contours, we need to invert the predictions to\n # get the 'inside' blob portion. 
We also slightly compress the blob portion\n # so we can get a more defining border.\n inverted_contours = 255 - class_contours\n\n inverted_contours = cv2.erode(inverted_contours, kernel, iterations=1)\n # remove areas that are not part of the class mask\n inverted_contours[class_mask==0]= 0 # here ?\n\n return inverted_contours", "def region_of_interest(self, img):\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, self.vertices, ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def toggle_culling(self):\n self.view['cull'] = not self.view['cull']\n self.update_flags()", "def set_region(img, vertices):\n mask = np.zeros_like(img)\n channel_count = img.shape[2]\n match_mask_color = (255,) * channel_count\n cv2.fillPoly(mask, vertices, match_mask_color)\n\n masked_img = cv2.bitwise_and(img, mask)\n\n new_mask = np.zeros(masked_img.shape[:2], np.uint8)\n\n bg = np.ones_like(masked_img, np.uint8) * 255\n cv2.bitwise_not(bg, bg, mask=new_mask)\n\n return masked_img", "def predominateMask(self) -> MaskState:\n return MaskState.fromCoreEmotion(self._coreEstimation.result)", "def fill_context_mask(mask, sizes, v_mask, v_unmask):\r\n mask.fill_(v_unmask)\r\n n_context = mask.size(2)\r\n for i, size in enumerate(sizes):\r\n if size < n_context:\r\n mask[i, :, size:] = v_mask\r\n return mask", "def test_bad_region():\n ref_file = pkg_resources.resource_filename('m260b.test_data', 'ref_practice_W_1_chr_1.fasta')\n read_file = pkg_resources.resource_filename('m260b.test_data', 'practice_w_1.std.bad_region1.bam')\n ref_hdr, reference = read_basic_fasta(ref_file) \n read_iter = pysam.Samfile(read_file)\n chr = ref_hdr[1:].strip()\n areg = list(active_regions(read_iter, reference, chr, start_offset=0, flank=30, dfrac=1.0))\n found = False\n for region, reads in areg:\n found |= region.start <= 5769 <= region.stop\n if not found:\n raise ValueError('Window did not open around variant')", "def warp_tensor(tensor):\n\n #tf.config.run_functions_eagerly(True)\n\n num_hole_rate = 4 / (128*128) # percent of selected pixels in downsample imagee\n\n tensor = tf.expand_dims(tensor, 0)\n if tensor.shape.rank == 5:\n # 3D blurring\n filters = tf.ones([3,3,3], dtype=tf.float32) / 27\n filters = filters[..., tf.newaxis, tf.newaxis]\n tensor = tf.nn.conv3d(tensor, filters, [1,1,1,1,1], \"SAME\")\n\n # make hole\n uniform_random = tf.random.uniform([tensor.shape[1]*tensor.shape[2]*tensor.shape[3]], 0, 1.0)\n uniform_random = tf.reshape(uniform_random, tensor.shape)\n mask_matrix = tf.where(uniform_random < num_hole_rate, tf.ones_like(tensor), tf.zeros_like(tensor)) \n \n # dilate holes \n filters = tf.ones([4,4,4], dtype=tf.float32) \n filters = filters[..., tf.newaxis, tf.newaxis]\n mask_matrix = tf.nn.conv3d(mask_matrix, filters, [1,1,1,1,1], \"SAME\")\n \n # apply mask -- make the \"holes\" the mean value of the image \n mean = tf.math.reduce_mean(tensor)\n tensor = tf.where(mask_matrix > 0, tf.ones_like(tensor)*mean, tensor) \n else:\n # 2D blurring\n filters = tf.ones([3,3], dtype=tf.float32) / 9\n filters = filters[..., tf.newaxis, 
tf.newaxis]\n tensor = tf.nn.conv2d(tensor, filters, [1,1,1,1], \"SAME\")\n\n # make hole\n uniform_random = tf.random.uniform([tensor.shape[1]*tensor.shape[2]], 0, 1.0)\n uniform_random = tf.reshape(uniform_random, tensor.shape)\n mask_matrix = tf.where(uniform_random < num_hole_rate, tf.ones_like(tensor), tf.zeros_like(tensor)) \n \n # dilate holes \n filters = tf.ones([4,4], dtype=tf.float32) \n filters = filters[..., tf.newaxis, tf.newaxis]\n mask_matrix = tf.nn.conv2d(mask_matrix, filters, [1,1,1,1], \"SAME\")\n \n # apply mask -- make the \"holes\" the mean value of the image \n mean = tf.math.reduce_mean(tensor)\n tensor = tf.where(mask_matrix > 0, tf.ones_like(tensor)*mean, tensor) \n\n\n return tf.squeeze(tensor, [0])", "def test_offcenter(self):\n actual = cm.circle_mask((5, 5), 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, True, True],\n [False, True, True, True, True],\n [False, False, True, True, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def get_cruise_track_mask(max_lon=None, min_lon=None, max_lat=None,\n min_lat=None, unmask_water=True, res='4x5',\n trop_limit=True):\n # only look at surface\n m = surface_unmasked(res=res, trop_limit=trop_limit)\n # apply ocean mask\n if unmask_water:\n m = m + ocean_unmasked(res=res)\n # Mask over given longitude range, if provided\n if not isinstance(max_lon, type(None)):\n m = m + lon2lon_2D_unmasked(lowerlon=min_lon, higherlon=max_lon,\n res=res)[:, :, None]\n # Mask over given latitude range, if provided\n if not isinstance(max_lat, type(None)):\n m = m + lat2lat_2D_unmasked(lowerlat=min_lat, higherlat=max_lat,\n res=res)[:, :, None]\n # Invert\n m = np.logical_not(m)\n return m", "def test_make_thick_outer_mask_from_fp(self):\n fp_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n output_mask = boundary_mask(fp_mask, boundary_type=\"outer\",\n boundary_width=10)\n truth_mask = skimage.io.imread(\n os.path.join(data_dir, 'sample_b_mask_outer_10.tif')\n )\n\n assert np.array_equal(output_mask, truth_mask)", "def obscure(rects):\n image = Image.open('/tmp/.i3lock.png')\n\n for rect in rects:\n area = (\n rect.x, rect.y,\n rect.x + rect.width,\n rect.y + rect.height\n )\n\n cropped = image.crop(area)\n cropped = obscure_image(cropped)\n image.paste(cropped, area)\n overlay = Image.open('/home/robin/Documents/source/scripts/src/locked.png')\n image.paste(overlay, tuple([(i-o)/2 for i,o in zip(image.size,overlay.size)]), overlay)\n image.save('/tmp/.i3lock.png')", "def mask(self):", "def remove_land(data, data_obs):\n \n ### Import modules\n import numpy as np\n from netCDF4 import Dataset\n \n ### Read in ocean mask\n directorydata = '/Users/zlabe/Data/masks/'\n filename = 'ocmask_19x25.nc'\n datafile = Dataset(directorydata + filename)\n mask = datafile.variables['nmask'][:]\n datafile.close()\n \n ### Mask out model and observations\n datamask = data * mask\n data_obsmask = data_obs * mask\n \n return datamask, data_obsmask", "def susceptibleToInfected(self):\n\n #create a mask to sieve those uninfected out\n infected = self.space == 1\n\n # add extra boundaries\n expan1 = np.hstack((infected,np.zeros((self.space.shape[0],1))))\n expan1 = np.vstack((expan1,np.zeros((1,expan1.shape[1]))))\n expan1 = np.hstack((np.zeros((expan1.shape[0],1)),expan1))\n expan1 = np.vstack((np.zeros((1,expan1.shape[1])),expan1))\n\n # make the addition for how many infected are around each position\n expan2 = 
(expan1[:-2,:-2] + \n expan1[:-2,1:-1] + \n expan1[:-2,2:] + \n expan1[1:-1,2:] + \n expan1[2:,2:] + \n expan1[2:,1:-1] + \n expan1[2:,0:-2] + \n expan1[1:-1,0:-2])\n\n exposedToRisk = np.logical_and(expan2 > 0, self.space == 0)\n # initialize a random matrix where around infection_probability % of the values are True\n infect_prob_arr = np.random.rand(self.space.shape[0], self.space.shape[1]) < self.infection_probability\n # find the overlap between healthy and \n self.space[np.logical_and(exposedToRisk, infect_prob_arr)] = 1" ]
[ "0.5550262", "0.5375474", "0.52712905", "0.52280205", "0.522766", "0.5217894", "0.5148621", "0.51361966", "0.513452", "0.51226854", "0.5100473", "0.5098816", "0.5098428", "0.5061148", "0.5000526", "0.4992203", "0.49582353", "0.49527928", "0.49484676", "0.49470642", "0.49467957", "0.49197185", "0.4894862", "0.4880932", "0.48607126", "0.48242447", "0.48066574", "0.4788666", "0.47864896", "0.47649777", "0.47564128", "0.4737929", "0.4731734", "0.47239953", "0.4723145", "0.47058016", "0.46961164", "0.46949393", "0.46906397", "0.46849874", "0.46796918", "0.46758732", "0.46669695", "0.46543688", "0.46347088", "0.4628797", "0.46246427", "0.4618372", "0.46021822", "0.45992267", "0.45922455", "0.45885223", "0.45784035", "0.457064", "0.45509425", "0.45508534", "0.45484504", "0.45469826", "0.4531129", "0.45305684", "0.45300308", "0.45274368", "0.4524763", "0.45242482", "0.45152742", "0.45152208", "0.45098245", "0.45052037", "0.45047554", "0.44951826", "0.44831187", "0.44775304", "0.44771454", "0.44716963", "0.4467333", "0.44634217", "0.4458573", "0.44562316", "0.44485396", "0.44478142", "0.4446446", "0.4445586", "0.44311628", "0.44309497", "0.4427003", "0.4419042", "0.44139278", "0.4412814", "0.44054973", "0.4397937", "0.43907067", "0.4390599", "0.43856168", "0.43827066", "0.43780905", "0.43775472", "0.43769", "0.4374468", "0.43737403", "0.43735856" ]
0.56984526
0
Populates an isolated region of the board. For examples of different region types, see ``safelife/levels/random/defaults.yaml``.
def populate_region(mask, layer_params): from .speedups import ( NEW_CELL_MASK, CAN_OSCILLATE_MASK, INCLUDE_VIOLATIONS_MASK) border = ndimage.maximum_filter(mask, size=3, mode='wrap') ^ mask interior = ndimage.minimum_filter(mask, size=3, mode='wrap') gen_mask = mask * ( NEW_CELL_MASK | CAN_OSCILLATE_MASK | INCLUDE_VIOLATIONS_MASK ) + border * ( INCLUDE_VIOLATIONS_MASK ) board = np.zeros(mask.shape, dtype=np.uint16) foreground = np.zeros(mask.shape, dtype=bool) background = np.zeros(mask.shape, dtype=bool) background_color = np.zeros(mask.shape, dtype=bool) seeds = None max_period = 1 for layer in layer_params: if not isinstance(layer, dict): raise ValueError( "'layer_params' should be a list of parameter dictionaries.") layer = _fix_random_values(layer) old_board = board.copy() gen_mask0 = gen_mask.copy() interior = ndimage.minimum_filter( gen_mask & NEW_CELL_MASK > 0, size=3, mode='wrap') color = COLORS.get(layer.get('color'), 0) fence_frac = layer.get('fences', 0.0) if fence_frac > 0: fences = build_fence(gen_mask & speedups.NEW_CELL_MASK) fences *= coinflip(fence_frac, fences.shape) gen_mask &= ~(fences * (NEW_CELL_MASK | CAN_OSCILLATE_MASK)) board += fences.astype(np.uint16) * CellTypes.wall spawners = layer.get('spawners', 0) if spawners > 0: _mask = (gen_mask0 & NEW_CELL_MASK > 0) & interior new_cells = _mask & coinflip(spawners, board.shape) if not new_cells.any() and _mask.any(): i, j = np.nonzero(_mask) k = get_rng().choice(len(i)) # ensure at least one spawner new_cells[i[k], j[k]] = True gen_mask[new_cells] ^= NEW_CELL_MASK board[new_cells] = CellTypes.spawner + color tree_lattice = layer.get('tree_lattice') # Create a lattice of trees that are spread throughout the region # such that every empty cell touches one (and only one) tree # (modulo edge effects). # Such a lattice tends to make the resulting board very chaotic. # Note that this will disrupt any pre-existing patterns. if tree_lattice is not None: if not isinstance(tree_lattice, dict): tree_lattice = {} h, w = board.shape stagger = tree_lattice.get('stagger', True) spacing = float(tree_lattice.get('spacing', 5)) if not stagger: new_cells = _make_lattice(h, w, spacing, spacing, 0) elif spacing <= 3: new_cells = _make_lattice(h, w, 3, 3, 1) elif spacing == 4: new_cells = _make_lattice(h, w, 10, 1, 3) elif spacing == 5: new_cells = _make_lattice(h, w, 13, 1, 5) else: # The following gets pretty sparse. new_cells = _make_lattice(h, w, 6, 3, 3) new_cells &= gen_mask & NEW_CELL_MASK > 0 board[new_cells] = CellTypes.tree + color period = 1 if 'pattern' in layer: pattern_args = layer['pattern'].copy() period = pattern_args.get('period', 1) if period == 1: gen_mask2 = gen_mask & ~CAN_OSCILLATE_MASK pattern_args.update(period=max_period, osc_bonus=0) elif period == 0: gen_mask2 = gen_mask & ~INCLUDE_VIOLATIONS_MASK pattern_args.update(period=max_period, osc_bonus=0) elif period < max_period: raise ValueError( "Periods for sequential layers in a region must be either 0, 1," " or at least as large as the largest period in prior layers.") else: gen_mask2 = gen_mask max_period = period board = _gen_pattern(board, gen_mask2, seeds, **pattern_args) # We need to update the mask for subsequent layers so that they # do not destroy the pattern in this layer. # First get a list of board states throughout the oscillation cycle. 
boards = [board] for _ in range(1, max_period): boards.append(speedups.advance_board(boards[-1])) non_empty = np.array(boards) != 0 still_cells = non_empty.all(axis=0) osc_cells = still_cells ^ non_empty.any(axis=0) # Both still life cells and oscillating cells should disallow # any later changes. We also want to disallow changes to the cells # that are neighboring the oscillating cells, because any changes # there would propogate to the oscillating cells at later time # steps. # Note that it doesn't really matter whether the oscillating mask # is set for the currently oscillating cells, because we're not # checking for violations in them anyways, and we don't allow any # changes that would affect them. osc_neighbors = ndimage.maximum_filter(osc_cells, size=3, mode='wrap') gen_mask[osc_cells] &= ~(NEW_CELL_MASK | INCLUDE_VIOLATIONS_MASK) gen_mask[still_cells | osc_neighbors] &= ~(NEW_CELL_MASK | CAN_OSCILLATE_MASK) new_mask = board != old_board life_mask = ((board & CellTypes.alive) > 0) & new_mask board += color * new_mask * life_mask # The seeds are starting points for the next layer of patterns. # This just makes the patterns more likely to end up close together. seeds = ((board & CellTypes.alive) > 0) & mask new_mask = board != old_board movable_walls = layer.get('movable_walls', 0) if movable_walls > 0: new_cells = coinflip(movable_walls, board.shape) * new_mask new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.wall board += new_cells * CellTypes.movable movable_trees = layer.get('movable_trees', 0) if movable_trees > 0: new_cells = coinflip(movable_trees, board.shape) * new_mask new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.tree board += new_cells * CellTypes.movable hardened_life = layer.get('hardened_life', 0) if hardened_life > 0: new_cells = coinflip(hardened_life, board.shape) * new_mask new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.life board -= new_cells * CellTypes.destructible buffer_size = layer.get('buffer_zone', 0) * 2 + 1 life_cells = board & CellTypes.alive > 0 buf = ndimage.maximum_filter(life_cells, size=buffer_size, mode='wrap') gen_mask[buf] &= ~NEW_CELL_MASK target = layer.get('target', 'board') if target == 'board': foreground[new_mask] = True if period > 0: background[new_mask] = True elif target == 'goals': background[new_mask] = True background_color[new_mask] = True # Make sure to add walls and such to the foreground foreground[new_mask & (board & CellTypes.alive == 0)] = True elif target == 'both': foreground[new_mask] = True if period > 0: background[new_mask] = True background_color[new_mask] = True else: raise ValueError("Unexpected value for 'target': %s" % (target,)) fountains = layer.get('fountains', 0) if fountains > 0: new_cells = coinflip(fountains, board.shape) new_cells *= gen_mask & NEW_CELL_MASK > 0 neighbors = ndimage.maximum_filter(new_cells, size=3, mode='wrap') neighbors *= gen_mask & NEW_CELL_MASK > 0 gen_mask[neighbors] = INCLUDE_VIOLATIONS_MASK if buffer_size > 1: buf = ndimage.maximum_filter(neighbors, size=buffer_size, mode='wrap') gen_mask[buf] &= ~NEW_CELL_MASK board[neighbors] = CellTypes.wall + color board[new_cells] = CellTypes.fountain + color foreground[new_cells] = True background[neighbors] = True background_color[neighbors] = True goals = board.copy() board *= foreground goals *= background goals &= ~CellTypes.spawning goals &= ~(CellTypes.rainbow_color * ~background_color) return board, goals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_region(self):\n self.new_region_name = \"\"\n self.map.regions.create_new_region()", "def set_region(sender, instance, *args, **kwargs):\n if instance.geocity and not instance.georegion:\n instance.georegion = instance.geocity.region", "def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)", "def region(self):\n return self.random_element(self._regions)", "def __init__(self, region):\r\n self.region = region", "def putregion(self, *args, **kwargs):\n return _image.image_putregion(self, *args, **kwargs)", "def add_region(self, position):\n region = self.region_selector(position)\n self.regions[id(region)] = region", "def set_locations():\n STATUS['locations']['monster'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['monster'][1] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][1] = generate_random_coord(STATUS['grid_size'])", "def __init__(self, world, location, elevation):\n LandCell.__init__(self, world, location, elevation)\n self.plant = 0\n self.reset_food_level()", "def to_region(self):\n\n coords = self.convert_coords()\n log.debug(coords)\n viz_keywords = ['color', 'dash', 'dashlist', 'width', 'font', 'symsize',\n 'symbol', 'symsize', 'fontsize', 'fontstyle', 'usetex',\n 'labelpos', 'labeloff', 'linewidth', 'linestyle',\n 'point', 'textangle', 'fontweight']\n\n if isinstance(coords[0], SkyCoord):\n reg = self.shape_to_sky_region[self.region_type](*coords)\n elif isinstance(coords[0], PixCoord):\n reg = self.shape_to_pixel_region[self.region_type](*coords)\n else:\n self._raise_error(\"No central coordinate\")\n\n reg.visual = RegionVisual()\n reg.meta = RegionMeta()\n\n # both 'text' and 'label' should be set to the same value, where we\n # default to the 'text' value since that is the one used by ds9 regions\n label = self.meta.get('text',\n self.meta.get('label', \"\"))\n if label != '':\n reg.meta['label'] = label\n for key in self.meta:\n if key in viz_keywords:\n reg.visual[key] = self.meta[key]\n else:\n reg.meta[key] = self.meta[key]\n reg.meta['include'] = self.include\n return reg", "def region(self, region):\n \n self._region = region", "def create_panel_custom_regions():\n panel_id = request.json[\"panel_id\"]\n chrom = request.json[\"chrom\"]\n start = request.json[\"start\"]\n end = request.json[\"end\"]\n name = request.json[\"name\"]\n regions = select_region_by_location(s, chrom, start, end) # if region already exists, return current entry\n if regions:\n for i in regions:\n add_region_to_panel(s, i.id, panel_id)\n s.commit()\n continue\n else:\n create_custom_region(s, panel_id, chrom, start, end, name)\n\n return jsonify(\"complete\")", "def region_setup(self, slices, ipa_regions):\n self.ipa_regions = ipa_regions\n self.slices = slices", "def __init__(self):\n self.regions = []", "def assign(self):\n\n for s in self.spots:\n if self.cells[s[:2]] == 0:\n label = 
find_nearest_region(self.cells, *s[:2])\n else:\n label = self.cells[s[:2]]\n\n s.region = label", "def create_region(self, region_ref):\n raise exception.NotImplemented() # pragma: no cover", "def regions(self, regions):\n self._regions = regions", "def region(self, region: str) -> Region:\n return Region(self, region)", "def region(self, region):\n\n self._region = region", "def region(self, region):\n\n self._region = region", "def region(self, region):\n\n self._region = region", "def create(self):\n assert self.name != \"Settings\", \"Cannot create a new mesh region with this Name\"\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n self.meshmodule.AssignMeshRegion(args)\n return True", "def update_region_config(cls, body: CloudAccountRegionConfigurationViewModel) -> Dict:\n\t\tpass", "def add_cloud_region(self, position):\n region = self.cloud_region_selector(position)\n self.regions[id(region)] = region", "def assign_mesh_region(self, objectlist=[], level=5, is_submodel=False, name=None):\n if not name:\n name = generate_unique_name(\"MeshRegion\")\n meshregion = self.MeshRegion(self.omeshmodule, self.boundingdimension, self.modeler.model_units)\n meshregion.UserSpecifiedSettings = False\n meshregion.Level = level\n meshregion.name = name\n if not objectlist:\n objectlist = [i for i in self.modeler.primitives.object_names]\n if is_submodel:\n meshregion.SubModels = objectlist\n else:\n meshregion.Objects = objectlist\n all_objs = [i for i in self.modeler.primitives.object_names]\n meshregion.create()\n objectlist2 = self.modeler.primitives.object_names\n added_obj = [i for i in objectlist2 if i not in all_objs]\n meshregion.Objects = added_obj\n meshregion.SubModels = None\n self.meshregions.append(meshregion)\n return meshregion", "def __init__(self, world, location, elevation):\n LandCell.__init__(self, world, location, elevation)", "def region(self, region_name):\n return Region(region_name, self)", "def build_region(self, \n dataset_metadata_dict,\n min_lod_pixels=100, \n max_lod_pixels=-1, \n min_fade_extent=200, \n max_fade_extent=800\n ):\n\n region = simplekml.Region(latlonaltbox=\"<north>\" + str(dataset_metadata_dict['latitude_max']) + \"</north>\" +\n \"<south>\" + str(dataset_metadata_dict['latitude_min']) + \"</south>\" +\n \"<east>\" + str(dataset_metadata_dict['longitude_max']) + \"</east>\" +\n \"<west>\" + str(dataset_metadata_dict['longitude_min']) + \"</west>\",\n lod=\"<minLodPixels>\" + str(min_lod_pixels) + \"</minLodPixels>\" +\n \"<maxLodPixels>\" + str(max_lod_pixels) + \"</maxLodPixels>\" +\n \"<minFadeExtent>\" + str(min_fade_extent) + \"</minFadeExtent>\" +\n \"<maxFadeExtent>\" + str(max_fade_extent) + \"</maxFadeExtent>\")\n return region", "def initialize_areas(self):\n self._areas[1] = copy.copy(self._areas[0])", "def random_offset_bounds(self) -> utils.BoxRegion:\n extra_size = self.random_canvas_extra_ratio * self.canvas_bounds().size / 2\n return utils.BoxRegion(\n minimum=-extra_size,\n maximum=extra_size\n )", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'", "def __init__(self, *args, **kwargs):\n _gdi_.Region_swiginit(self,_gdi_.new_Region(*args, **kwargs))", "def new_tile(self):\n rowm, colm = self.get_ava_index()\n value = 2 if random() <= 0.90 else 4\n self.set_tile(rowm, colm, value)\n print rowm,colm,value", "def SetRegion(self,stateAbbrev):\n if not stateAbbrev in 
self.VectorData:\n print \"Error - No Data for %s available\" % stateAbbrev\n print \"Valid state abbreviations are:\", self.StateAbbrevList\n else:\n self.SelectedRegion = stateAbbrev", "def setUpClass(cls):\n cls.use_temp_region()\n cls.runModule(\"g.region\", raster=\"elev_state_500m\")", "def clear(self):\n tmpRegion = self._createBlankCopy()\n self._assign(tmpRegion)\n if self._ancestorModelSourceCreated:\n self._reload()\n else:\n self._informRegionChange(True)", "def _choose_regions(self, display_regions=False):\n dstl = Load_DSTL()\n if self.class_type == 1:\n # Select regions where there are buildings (with red roofs)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n elif self.class_type == 5:\n train_image, train_mask = dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n else:\n pass\n self.images = {'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))", "def sampleRegionPowerModel():\n\tregionVector = []\n\t# Loop through regional rounds R64, R32, and S16\n\tseeds = [1, 16, 8, 9, 5, 12, 4, 13, 6, 11, 3, 14, 7, 10, 2, 15]\n\tfor roundNum in range(1, 5):\n\t\tnumGames = int(len(seeds) / 2)\n\t\tnewSeeds = []\n\t\tfor gameNum in range(numGames):\n\t\t\ts1 = seeds[2 * gameNum]\n\t\t\ts2 = seeds[2 * gameNum + 1]\n\t\t\tp = getWinProbability({'seed': s1}, {'seed': s2}, r=roundNum)\n\n\t\t\trnd = random.random()\n\t\t\tregionVector.append(1 if rnd < p else 0)\n\t\t\tnewSeeds.append(s1 if rnd < p else s2)\n\t\tseeds = newSeeds\n\n\treturn [regionVector, seeds[0]]", "def set_x_y(region, x_offset, y_offset):\n for x in range(region.shape[0]):\n for y in range(region.shape[1]):\n region[x, y, 0] = x_offset + 5*x\n region[x, y, 1] = y_offset + 5*y\n \n return region", "def add_region_feature(data):\n\n data.loc[:, 'region'] = data.loc[:, 'district'].apply(\n lambda x: mapping.SOFIA_NEIGHBOURHOOD_TO_REGION_MAPPING[x]\n )\n\n return data", "def update_humidity_region(self):\n self.linear_region.setRegion(\n self.humidity_plot_graph.getViewBox().viewRange()[0])", "def set_sample_region(self):\n\n theta_delta = self.wrap(self.start[0:3], self.goal[0:3])\n\n delta_signs = np.sign(self.goal[0:3] - self.start[0:3])\n\n self.smart_region_min = self.start\n\n self.smart_region_max = self.start + delta_signs * theta_delta\n\n velocity_min = np.zeros(3,1)\n\n \n for i in range(3):", "def set_sample_region(self):\n\n theta_delta = self.wrap(self.start[0:3], self.goal[0:3])\n\n delta_signs = np.sign(self.goal[0:3] - self.start[0:3])\n\n self.smart_region_min = self.start\n\n self.smart_region_max = self.start + delta_signs * theta_delta\n\n velocity_min = np.zeros(3,1)\n\n \n for i in range(3):", "def __init__(\n self,\n location: FeatureLocation,\n reference_sequence: Seq,\n name: str = None\n ):\n 
super().__init__('repeat_region', location=location, reference_sequence=reference_sequence, name=name)", "def set_region_of_interest(self, roi: UserRoi):\n value = roi.to_struct()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_SetUserROI(self.dev, byref(value)))", "def ZoneBuilder():\n\n # Part 1: Zone Dimensions\n matrix, xaxis, yaxis, zaxis = dimensions()\n\n # Part 2: Assigning Room Existance.\n matrix = existance(matrix, xaxis, yaxis, zaxis)\n \n # Part 3: Creating room walls.\n \n # First, generate walls adjacent to void spaces.\n matrix = enclose_rooms(matrix, xaxis, yaxis, zaxis)\n \n matrix = select_walls(matrix, xaxis, yaxis, zaxis)", "def add_new_region(self, image_name: str, region_text: str, region_position: RegionPosition, region_type: str):\n pass", "def region(self, box):\n is_indexbox(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n return self", "def set_roi(self, y_min, height):\n self.camera_microscope.stop_free_run()\n self.camera_microscope.stop_continuous_reads()\n self.background = None # This is to prevent shape mismatch between before and after\n current_roi = self.camera_microscope.ROI\n new_roi = (current_roi[0], (y_min, height))\n self.camera_microscope.ROI = new_roi\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()", "def region(self, box: list):\n is_box(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n self._AccessPoint_data = {'box': box} # Register the requested access point data\n\n if self._mode == \"standard\" and self._dataset_id != \"ref\":\n def postprocessing(xds):\n xds = self.fetcher.filter_data_mode(xds)\n xds = self.fetcher.filter_qc(xds)\n xds = self.fetcher.filter_variables(xds, self._mode)\n return xds\n self.postproccessor = postprocessing\n\n return self", "def create_meshregion_component(\n self, scale_factor=1.0, name=\"Component_Region\", restore_padding_values=[50, 50, 50, 50, 50, 50]\n ):\n self.modeler.edit_region_dimensions([0, 0, 0, 0, 0, 0])\n\n verticesID = self.modeler.oeditor.GetVertexIDsFromObject(\"Region\")\n\n x_values = []\n y_values = []\n z_values = []\n\n for id in verticesID:\n tmp = self.modeler.oeditor.GetVertexPosition(id)\n x_values.append(tmp[0])\n y_values.append(tmp[1])\n z_values.append(tmp[2])\n\n scale_factor = scale_factor - 1\n delta_x = (float(max(x_values)) - float(min(x_values))) * scale_factor\n x_max = float(max(x_values)) + delta_x / 2.0\n x_min = float(min(x_values)) - delta_x / 2.0\n\n delta_y = (float(max(y_values)) - float(min(y_values))) * scale_factor\n y_max = float(max(y_values)) + delta_y / 2.0\n y_min = float(min(y_values)) - delta_y / 2.0\n\n delta_z = (float(max(z_values)) - float(min(z_values))) * scale_factor\n z_max = float(max(z_values)) + delta_z / 2.0\n z_min = float(min(z_values)) - delta_z / 2.0\n\n dis_x = str(float(x_max) - float(x_min))\n dis_y = str(float(y_max) - float(y_min))\n dis_z = str(float(z_max) - float(z_min))\n\n min_position = self.modeler.Position(str(x_min) + \"mm\", str(y_min) + \"mm\", str(z_min) + \"mm\")\n mesh_box = self.modeler.primitives.create_box(min_position, [dis_x + \"mm\", dis_y + \"mm\", dis_z + \"mm\"], name)\n\n self.modeler.primitives[name].model = False\n\n self.modeler.edit_region_dimensions(restore_padding_values)\n 
return dis_x, dis_y, dis_z", "def __init__(self, type, high_elevation, low_elevation):\n\t\tself.type = type\n\t\tself.high_elevation = high_elevation\n\t\tself.low_elevation = low_elevation\n\t\tself.occupant = 0 # use 0 to initilize tile which \n\t\t\t\t\t\t\t# means it has not been explored.\n\t\tif type == \"plains\": # initialize the shade\n\t\t\tself.terrain = \" \"\n\t\telse:\n\t\t\tself.terrain = \"#\"", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints, realm, master_client):\n if not regions:\n log.debug(\n 'In rgw.configure_multisite_regions_and_zones() and regions is None. '\n 'Bailing')\n yield\n return\n\n if not realm:\n log.debug(\n 'In rgw.configure_multisite_regions_and_zones() and realm is None. '\n 'Bailing')\n yield\n return\n\n log.info('Configuring multisite regions and zones...')\n\n log.debug('config is %r', config)\n log.debug('regions are %r', regions)\n log.debug('role_endpoints = %r', role_endpoints)\n log.debug('realm is %r', realm)\n # extract the zone info\n role_zones = dict([(client, extract_zone_info(ctx, client, c_config))\n for client, c_config in config.items()])\n log.debug('role_zones = %r', role_zones)\n\n # extract the user info and append it to the payload tuple for the given\n # client\n for client, c_config in config.items():\n if not c_config:\n user_info = None\n else:\n user_info = extract_user_info(c_config)\n\n (region, zone) = role_zones[client]\n role_zones[client] = (region, zone, user_info)\n\n region_info = dict([\n (region_name, extract_region_info(region_name, r_config))\n for region_name, r_config in regions.items()])\n\n fill_in_endpoints(region_info, role_zones, role_endpoints)\n\n\n host, port = role_endpoints[master_client]\n endpoint = 'http://{host}:{port}/'.format(host=host, port=port)\n log.debug(\"endpoint: %s\", endpoint)\n\n # clear out the old defaults\n cluster_name, daemon_type, client_id = teuthology.split_role(master_client)\n first_mon = teuthology.get_first_mon(ctx, config, cluster_name)\n (mon,) = iter(ctx.cluster.only(first_mon).remotes.keys())\n\n # read master zonegroup and master_zone\n for zonegroup, zg_info in region_info.items():\n if zg_info['is_master']:\n master_zonegroup = zonegroup\n master_zone = zg_info['master_zone']\n break\n\n log.debug('master zonegroup =%r', master_zonegroup)\n log.debug('master zone = %r', master_zone)\n log.debug('master client = %r', master_client)\n\n rgwadmin(ctx, master_client,\n cmd=['realm', 'create', '--rgw-realm', realm, '--default'],\n check_status=True)\n\n rgwadmin(ctx, master_client,\n cmd=['zonegroup', 'create', '--rgw-zonegroup', master_zonegroup, '--master', '--endpoints', endpoint,\n '--default'])\n\n rgwadmin(ctx, master_client,\n cmd=['zone', 'create', '--rgw-zonegroup', master_zonegroup,\n '--rgw-zone', master_zone, '--endpoints', endpoint, '--access-key',\n user_info['system_key']['access_key'], '--secret',\n user_info['system_key']['secret_key'], '--master', '--default'],\n check_status=True)\n\n rgwadmin(ctx, master_client,\n cmd=['period', 'update', '--commit'],\n 
check_status=True)\n\n yield", "def create_region_w_spacing (tuple_top_L, tuple_bottom_R):\n\n spacing = int(input ('How many well spaces do you want between each spot? '))\n\n\n #get the plate column numbers from the plate class\n columns = plate1536.columns\n #get the plate rows from the plate class\n rows = plate1536.rows\n\n ###Begin creating list of columns to use###\n\n #initialize and use next\n curr_col_idx = columns.index(int(tuple_top_L[1]))\n\n #set left most column to use as the column given by user in top_left\n col_idxs_to_shoot = [curr_col_idx]\n\n #loop checks the NEXT column that will be produced by moving right\n #by (spacing + 1). If that is beyond the right-most border set by\n #the well region definitions, then it will stop, containing all\n #column choices within the left and right bounds\n while (curr_col_idx + spacing + 1) <= columns.index(int(tuple_bottom_R[1])):\n\n curr_col_idx += (spacing + 1)\n\n col_idxs_to_shoot.append(curr_col_idx)\n\n ###The list of indices in plate1536.columns to use is now set###\n\n\n ###Begin creating list of rows to use###\n\n #initialize and use next\n curr_row_idx = rows.index(tuple_top_L[0])\n\n #set top most row to use as the row given by user in top_left\n row_idxs_to_shoot = [curr_row_idx]\n\n #loop checks the NEXT row that will be produced by moving down\n #by (spacing + 1). If that is beyond the bottom-most border set by\n #the well region definitions, then it will stop, containing all\n #row choices within the top and bottom bounds\n while (curr_row_idx + spacing + 1) <= rows.index(tuple_bottom_R[0]):\n\n curr_row_idx += (spacing + 1)\n\n row_idxs_to_shoot.append(curr_row_idx)\n\n ###The list of indices in plate1536.rows to use is now set###\n\n\n #get all the columns you want to use as STRINGS\n col_strs = []\n for i in col_idxs_to_shoot:\n col_strs += [ str(plate1536.columns[i]) ] #have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n #get all the rows you want to use as STRINGS\n row_strs = []\n for i in row_idxs_to_shoot:\n row_strs += [ plate1536.row_dict[i] ]#have to have extra list brackets to avoid python interpreting a string 'FFF' as\n #a list ['F', 'F', 'F'] and adding 3 items instead of 'FFF'\n\n\n print(\"This region has {} rows (letters), {} columns (#'s) per row. 
That's a total of {} spots\".format(len(row_strs), len(col_strs), len(row_strs) * len(col_strs)))\n\n return row_strs, col_strs", "def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)", "def assignToRegion(self, region):\n self._sim.assignReactionRegion(self, region)\n return self", "def region(location):\n x, y = location\n if y < 89:\n return \"left-door\" if x < 56 else \"middle-platform\" if x < 102 else \"right-door\"\n elif y < 124:\n return \"left-platform\" if x < 39 else \"belt\" if x < 79 else \"middle-ladder\" if x < 81 else \"belt\" if x < 111 else \"rope\" if x < 113 else \"right-platform\"\n elif y < 127:\n return \"left-platform\" if x < 39 else \"belt\" if x < 111 else \"rope\" if x < 113 else \"right-platform\"\n elif y < 150:\n return \"left-ladder\" if x < 32 else \"floor\" if x < 111 else \"rope\" if x < 113 else \"floor\" if x < 128 else \"right-ladder\"\n elif y < 168:\n return \"left-ladder\" if x < 32 else \"floor\" if x < 128 else \"right-ladder\"\n else:\n return \"floor\"", "def spawn_orb(self):\n x_pos = random.randint(0, self.config.arena_size[0] - 1)\n y_pos = random.randint(0, self.config.arena_size[1] - 1)\n self.arena[x_pos][y_pos] = Tile.ORB", "def region(self):\n if self._region is None:\n cache_key = self.expand_name(\"region\")\n cached = unitdata.kv().get(cache_key)\n if cached:\n self._region = cached\n else:\n req = self._imdv2_request(self._az_url)\n with urlopen(req) as fd:\n az = fd.read(READ_BLOCK_SIZE).decode(\"utf8\")\n self._region = az.rstrip(string.ascii_lowercase)\n unitdata.kv().set(cache_key, self._region)\n return self._region", "def update_resistance_region(self):\n self.linear_region.setRegion(\n self.resistance_graph.getViewBox().viewRange()[0])", "def prepare_region(path: Path, region: mundi.Region):\n\n # Age distribution \n df = region.age_distribution\n distrib = df.values.copy()[:18:2]\n distrib += df.values[1:18:2]\n distrib[-1] += df.values[18:].sum()\n \n # Estimate cases from deaths\n curve = covid19.epidemic_curve(region, path=CASES)\n deaths = cast(pd.Series,\n curve[\"deaths\"]\n .rolling(WINDOW_SIZE, center=True, win_type=\"triang\")\n .mean()\n .fillna(method=\"bfill\")\n .dropna()\n )\n params = covid19.params(region=region)\n cases = (deaths / params.IFR).astype(\"int\")\n epicurve = cases.diff().fillna(0).astype(\"int\").values\n attack = 100 * cases.iloc[-1] / region.population\n print(\"Attack rate: {:n}%\".format(attack))\n \n # Clean epicurve\n i, j = 0, len(epicurve) - 1\n while epicurve[i] == 0:\n i += 1\n \n while epicurve[j] == 0:\n j -= 1\n \n if (n := len(epicurve) - j -1):\n m = n + WINDOW_SIZE // 2\n epicurve = list(epicurve)[:j - WINDOW_SIZE // 2]\n print(f'WARNING: {region.id} tail with {n} null items. 
trucanting epicurve to a {m} delay')\n n += WINDOW_SIZE // 2\n epicurve = epicurve[i:j]\n \n # Create config\n conf = TOML_TEMPLATE.format(\n num_iter=60,\n pop_counts=list(distrib),\n epicurve_data=list(epicurve),\n smoothness=0.75,\n delay=n,\n attack=attack,\n ) \n \n with open(path / 'conf.toml', 'w') as fd:\n fd.write(conf)", "def _create_room(new_map, room):\n for x in range(room.x1 + 1, room.x2):\n for y in range(room.y1 + 1, room.y2):\n new_map.terrain[x][y] = 1", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def nine_regions(self):\n\n coordinateList = []\n\n # Top left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Top center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] ) \n coordinateList.append( [x, y] )\n\n # Top right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * ( 1.0 - self.ratioTopLeft[IDX_X] ) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Center left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Bottom left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n return coordinateList", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def fill(self, xrange=range(0,16), yrange=range(0,16), zrange=range(0,16), **blockstate):\n blk = self.block_state_index(**blockstate)\n seq = array(self._blocks.typecode, (blk for i in xrange))\n\n def fct(section, blocks, row, *args):\n blocks[row] = seq\n\n self.row_apply(fct, xrange, yrange, zrange)", "def region(self, args):\n m = 
MessageClass()\n print('123124')\n data = {'list': []}\n data['list'].append({\"Region_Name\": \"us-east-1\"})\n data['list'].append({\"Region_Name\": \"us-east-2\"})\n data['list'].append({\"Region_Name\": \"us-west-1\"})\n data['list'].append({\"Region_Name\": \"us-west-2\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-northeast-2\"})\n data['list'].append({\"Region_Name\": \"ap-south-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ap-southeast-1\"})\n data['list'].append({\"Region_Name\": \"ca-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-central-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-1\"})\n data['list'].append({\"Region_Name\": \"eu-west-2\"})\n data['list'].append({\"Region_Name\": \"eu-west-3\"})\n data['list'].append({\"Region_Name\": \"sa-east-1\"})\n m.data = data\n return m.to_json()", "def init_board(rows, columns, method=\"random\"):\n if method == \"random\":\n board = np.random.random_integers(2, size=(rows, columns)) - 1\n return board", "def place_obj(self):\r\n for pos in BOARD_POSITIONS:\r\n self.board[pos[0]][pos[1]] = Stone(color=self.state[pos[0]][pos[1]], pos=(pos[0], pos[1]))\r\n self.board[pos[0]][pos[1]].liberty = self.board[pos[0]][pos[1]].compute_liberty(self.state)", "def establecer_region(self, region, guess, delta_ppm=(1,1)): \r\n # obtengo los indices del centro del pico.\r\n xc, yc = self.encontrar_picos(guess, delta_ppm)\r\n # obtengo las coordenadas que determinan el rectangulo donde voy a\r\n # integrar. \r\n x_lims, y_lims = self.establecer_limites(xc, yc)\r\n \r\n xi,xf = x_lims\r\n yi,yf = y_lims\r\n spec = self.spec[yi:yf, xi:xf]\r\n ppmGridDir = self.ppmGridDir[yi:yf, xi:xf]\r\n ppmGridInd = self.ppmGridInd[yi:yf, xi:xf]\r\n \r\n \r\n n, m = region\r\n self.regiones[n][m] = Region(ppmGridDir, ppmGridInd, spec)", "def _pad_region(region_data: bytes, *,\n tile_size: int,\n offset: int,\n pad_value: int = 0) -> bytes:\n region_size = len(region_data)\n assert len(region_data) % tile_size == 0\n assert tile_size >= offset\n assert pad_value < 2 ** 8\n tile_count = region_size // tile_size\n original_linear = np.frombuffer(region_data, dtype=np.ubyte)\n original_2d = original_linear.reshape(tile_count,\n tile_size)\n padded = np.insert(original_2d, offset, pad_value, axis=1)\n return padded.tobytes()", "def fillingrid(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.fillinpercent(n)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setValue(self.currentnsigs[n])\n else:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n self.nsspins[n].setValue(self.currentnsigs[n])\n self.fillinpercent(n)", "def create_region_from_entity(\n entity: Entity, created_by: stix2.Identity\n) -> stix2.Location:\n name = entity.value\n if name is None:\n raise TypeError(\"Entity value is None\")\n\n return create_region(name, created_by=created_by)", "def init_locations():\n player, door, monster = sample(CELLS, k=3)\n\n return player, door, monster", "def draw_world(grid, r, c, image):\n under = grid[r, c]\n grid[r, c] = AGENT\n image.set_data(grid)\n grid[r, c] = under", "def test_server_override_region(self):\n # Sanity check our override values do not overlap\n self.assertNotEqual(CONFIG_DATA[\"Region\"],\n 
CONFIG_DATA[\"OverrideRegion\"])\n config_data = imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(\n self._server_valid_override))\n # Test Region was overridden\n self.assertEqual(\n CONFIG_DATA[\"OverrideRegion\"],\n [server_data.region\n for server_data in config_data.server_data\n if server_data.name ==\n CONFIG_DATA[\"OverrideRegionFQDN\"]]\n [0])", "def __init__(self, x_min, x_max, y_min, y_max, bs_number, ue_number,\n layer=1, power=1.0, bs_distribution=\"square_grid\",\n ue_distribution=\"gaussian\", ue_sigma=0,\n if_fix_bs=True,\n bs_radius_1=50,\n grid_l_1=10,\n grid_l_2=10):\n BaseRegion.__init__(self, x_min, x_max, y_min, y_max)\n BaseBS.__init__(self, bs_number, layer, power, bs_distribution, if_fix_bs)\n BaseUE.__init__(self, ue_number, ue_distribution, ue_sigma)\n self.bs_radius_1_ = bs_radius_1\n self.grid_l_1_ = grid_l_1\n self.grid_l_2_ = grid_l_2\n if not if_fix_bs:\n self.set_bs_to_region()\n self.set_ue_to_region()\n self.bs_ue_dict_ = {}\n # a dict that show which ue belong to which bs\n # key: 0, 1, ..., num_bs\n # value: 0, 1, ..., num_ue belong to the key\n self.select_ue()", "def add_region_offset(self, offset):\n if not self.__offset_added:\n self.__region_ids = dict(\n (var, region + offset)\n for var, region in self.__region_ids.items())\n\n self.__offset_added = True", "def __init__(self, NW, SE):\n self.type = \"HOLD AREA\"\n self.NW = NW\n self.SE = SE\n self.ctr = [(self.NW[0] + self.SE[0]) / 2, (self.NW[1] + self.SE[1]) / 2]", "def from_region(self, name, with_guards=None):\n return _from_region(self.data, name, with_guards)", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def init_right_zone(self):\n self.right_zone_rect = pg.rect.Rect(500, 0, 300, 80)\n self.right_zone_image = pg.Surface(self.right_zone_rect.size).convert()\n self.right_zone_image.fill(pg.Color('#82A6CB'))\n self.right_zone_bottom_rect = pg.rect.Rect(500, 70, 800, 10)\n self.right_zone_bottom_image = pg.Surface(\n self.right_zone_bottom_rect.size).convert()\n self.right_zone_bottom_image.fill(pg.Color('#3667A6'))\n self.right_zone_side_rect = pg.rect.Rect(500, 50, 10, 30)\n self.right_zone_side_image = pg.Surface(\n self.right_zone_side_rect.size).convert()\n self.right_zone_side_image.fill(pg.Color('#3667A6'))\n\n self.selected_tower = None\n self.selected_monster = None\n self.tower_attack_image = prepare.GFX['icons']['tower_damage']\n self.tower_cooldown_image = prepare.GFX['icons']['tower_cooldown']\n self.monster_health_image = prepare.GFX['icons']['monster_health']\n self.monster_speed_image = prepare.GFX['icons']['monster_speed']\n self.selected_image_pos = (510, 10)\n self.selected_name_pos = (570, 7)\n self.selected_info_pos = (570, 30)\n self.selected_description_pos = (570, 50)", "def load(self):\n\n if self.loaded:\n return\n\n self.region_back = None\n self.objects = []\n self.plants = []\n self.tiles = []\n\n # Some convenience vars\n materials = self.data.materials\n matmods = self.data.matmods\n objects = self.data.objects\n plants = self.data.plants\n world = self.world\n self.loaded = True\n\n # Get tiles\n try:\n data_tiles = world.get_tiles(self.rx, self.ry)\n except KeyError:\n 
print('WARNING: Region ({}, {}) was not found in world'.format(self.rx, self.ry))\n return\n\n # \"real\" coordinates\n base_x = self.rx*32\n gui_x = base_x*8\n base_y = self.ry*32\n gui_y = (world.height*8)-(base_y*8)\n\n # Background for our drawn area (black)\n self.region_back = self.scene.addRect(gui_x, gui_y-255, 255, 255,\n QtGui.QPen(QtGui.QColor(0, 0, 0)),\n QtGui.QBrush(QtGui.QColor(0, 0, 0)),\n )\n self.region_back.setZValue(Constants.z_black)\n\n # Tiles!\n cur_row = 0\n cur_col = 0\n for data_tile in data_tiles:\n self.tiles.append(GUITile(self.scene, data_tile,\n base_x+cur_col, base_y+cur_row,\n self,\n gui_x+cur_col*8, gui_y-(cur_row+1)*8,\n self.layer_toggles))\n self.scene.addItem(self.tiles[-1])\n cur_col += 1\n if cur_col == 32:\n cur_col = 0\n cur_row += 1\n\n # Entities!\n entities = []\n try:\n entities = world.get_entities(self.rx, self.ry)\n except KeyError:\n pass\n\n for e in entities:\n if e.name == 'ObjectEntity':\n obj_name = e.data['name']\n obj_orientation = e.data['orientationIndex']\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n if obj_name in objects:\n obj = objects[obj_name]\n (image, offset_x, offset_y) = obj.get_image(obj_orientation)\n qpmi = QtWidgets.QGraphicsPixmapItem(image)\n qpmi.setPos(\n (obj_x*8) + offset_x,\n (world.height*8)-(obj_y*8) - offset_y - image.height(),\n )\n qpmi.setZValue(Constants.z_objects)\n if not self.layer_toggles.objects_toggle.isChecked():\n qpmi.setVisible(False)\n self.scene.addItem(qpmi)\n self.objects.append(qpmi)\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_object(obj, obj_name, obj_orientation, qpmi, e.data)\n elif e.name == 'PlantEntity':\n desc = e.data['descriptions']['description']\n images = []\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n for piece in e.data['pieces']:\n piece_img = piece['image'].split('?')[0]\n if piece_img in plants:\n img = plants[piece_img].image\n qpmi = QtWidgets.QGraphicsPixmapItem(img)\n qpmi.setPos(\n (obj_x*8) + (piece['offset'][0]*8),\n (world.height*8)-(obj_y*8) - (piece['offset'][1]*8) - img.height(),\n )\n qpmi.setZValue(Constants.z_plants)\n if not self.layer_toggles.plants_toggle.isChecked():\n qpmi.setVisible(False)\n images.append((plants[piece_img], qpmi))\n self.scene.addItem(qpmi)\n self.plants.append(qpmi)\n else:\n print('not found: {}'.format(piece_img))\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_plant(desc, images)\n elif (e.name == 'MonsterEntity'\n or e.name == 'NpcEntity'\n or e.name == 'StagehandEntity'\n or e.name == 'ItemDropEntity'\n or e.name == 'VehicleEntity'\n ):\n # TODO: Ignoring for now\n pass\n else:\n print('Unknown entity type: {}'.format(e.name))", "def sync_region(self, region_id):\n self.init_structures()\n con = SimConnection()\n con.connect(self.gridinfo._url)\n scenedata = con._con.ogrescene_list({\"RegionID\":region_id})[\"res\"]\n objects = editor.getSelected()\n if not objects:\n objects = bpy.data.objects\n for obj in objects:\n obj_uuid = str(self.get_uuid(obj))\n if obj_uuid:\n if obj_uuid in scenedata:\n self.import_group(obj_uuid, scenedata[obj_uuid], 10)", "def _create_mid_region_frame(self):\n self.region_option = tk.StringVar()\n self.region_option.set(\"euw\")\n self.frames.append(tk.LabelFrame(self.master))\n self.option_menu = tk.OptionMenu(self.frames[5], self.region_option,\n \"euw\", \"na\", \"eune\")\n self.option_menu.grid(sticky=\"ew\")\n self.frames[5].grid(column=1, row=0, sticky=\"ns\", 
pady=10)\n self.frames[5].columnconfigure(0, weight=1)", "def get_regions(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self.R", "def create_manual_barrage_initial_state(\n spy_locations_list,\n scout_locations_list,\n miner_locations_list,\n sergeant_locations_list,\n lieutenant_locations_list,\n captain_locations_list,\n major_locations_list,\n colonel_locations_list,\n general_locations_list,\n marshall_locations_list,\n flag_locations_list,\n bomb_locations_list,\n specify_pieces_for_player=OUTSIDE_AGENT_PLAYER_ID):\n game_version = STRATEGO_ENV_BARRAGE_INTERFACE_CONFIG['version']\n game_version_config = VERSION_CONFIGS[game_version]\n board_shape = (game_version_config['rows'], game_version_config['columns'])\n procedural_env = StrategoProceduralEnv(*board_shape)\n\n # Verify inputs and fill a 2d ndarray with specified player piece values.\n if not (specify_pieces_for_player == 1 or specify_pieces_for_player == -1):\n raise ValueError(\"specify_pieces_for_player must be 1 or -1\")\n\n allowed_piece_rows_for_player = [0, 1, 2, 3] if specify_pieces_for_player == 1 else [6, 7, 8, 9]\n specified_player_initial_piece_map = np.zeros(shape=board_shape, dtype=INT_DTYPE_NP)\n\n manual_piece_locations = {\n SP.SPY: spy_locations_list,\n SP.SCOUT: scout_locations_list,\n SP.MINER: miner_locations_list,\n SP.SERGEANT: sergeant_locations_list,\n SP.LIEUTENANT: lieutenant_locations_list,\n SP.CAPTAIN: captain_locations_list,\n SP.MAJOR: major_locations_list,\n SP.COLONEL: colonel_locations_list,\n SP.GENERAL: general_locations_list,\n SP.MARSHALL: marshall_locations_list,\n SP.FLAG: flag_locations_list,\n SP.BOMB: bomb_locations_list\n }\n\n for piece_type, locations_list in manual_piece_locations.items():\n if len(locations_list) > 0 and \\\n (len(np.shape(locations_list)) != 2 or\n (len(np.shape(locations_list)) == 2 and np.shape(locations_list)[1] != 2)):\n raise ValueError(f\"Each locations list must be a list of 2d coordinates. Examples: [] or [[1,2], [2,5]].\\n\"\n f\"For {piece_type.name}, {locations_list} was passed.\")\n\n if len(locations_list) != game_version_config['piece_amounts'][piece_type]:\n allowed_piece_amounts = {pc_type.name: amt for pc_type, amt in game_version_config['piece_amounts'].items()}\n raise ValueError(f\"{len(locations_list)} {piece_type.name} piece locations were provided when \"\n f\"{game_version.name} requires the following piece amounts: \\n{allowed_piece_amounts}\")\n\n for location in locations_list:\n row, column = location\n if (not 0 <= column < board_shape[1]) or (row not in allowed_piece_rows_for_player):\n raise ValueError(f\"The out-of-range location {location} for {piece_type.name} was provided. \"\n f\"Locations are in the format, (row, column). \"\n f\"Rows take values in {allowed_piece_rows_for_player} for player {specify_pieces_for_player}. 
\"\n f\"Columns must be in the range [0, {board_shape[1]}].\")\n if specified_player_initial_piece_map[row, column] != 0:\n raise ValueError(f\"The location {location} was specified for more than one piece.\")\n\n # Set piece value for location\n specified_player_initial_piece_map[row, column] = piece_type.value\n\n # Grab a random human initialization for the non-specified player.\n # Human inits have been downloaded from the Gravon Archive (https://www.gravon.de/gravon/stratego/strados2.jsp)\n random_human_init_spec_str = np.random.choice(HUMAN_INITS)\n player_1_random_human_piece_map, player_2_random_human_piece_map = create_initial_positions_from_human_data(\n player1_string=random_human_init_spec_str, player2_string=random_human_init_spec_str,\n game_version_config=game_version_config)\n\n # Set obstacle locations\n obstacle_map = np.zeros(shape=board_shape, dtype=INT_DTYPE_NP)\n for obstacle_location in VERSION_CONFIGS[game_version]['obstacle_locations']:\n obstacle_map[obstacle_location] = 1.0\n\n # Create the initial state\n initial_state = procedural_env.create_initial_state(\n obstacle_map=obstacle_map,\n player_1_initial_piece_map=specified_player_initial_piece_map if specify_pieces_for_player == 1 else player_1_random_human_piece_map,\n player_2_initial_piece_map=specified_player_initial_piece_map if specify_pieces_for_player == -1 else player_2_random_human_piece_map,\n max_turns=game_version_config['max_turns'])\n\n return initial_state", "def river_region(rr_id):\n r = RiverRegionRenderer(request, rr_id, None)\n return r.render()", "def get_region(ref_table, assoc_name=None,\n pos_margin=10., vel_margin=2.,\n scale_margin=None, mg_colname=None):\n\n ## Commenting this out for now. Usage by me (Tim) should be the same\n ## as everyone else. i.e. 
no hardcoded filenames for convenience.\n # if gagne_reference_data is None:\n # gagne_reference_data =\\\n # '/home/tcrun/chronostar/data/gagne_bonafide_full_kinematics_with_lit_and_best_radial_velocity' \\\n # '_comb_binars_with_banyan_radec.fits'\n\n if mg_colname is None:\n mg_colname = 'Moving group'\n\n # If reference table is provided as str, convert to table\n if type(ref_table) is str:\n ref_table = Table.read(ref_table)\n\n # Extract all stars\n if assoc_name is None:\n subtable = ref_table\n else:\n if assoc_name not in set(ref_table[mg_colname]):\n raise UserWarning(\n 'Association name must be one of:\\n{}\\nReceived: \"{}\"'.format(\n list(set(ref_table[mg_colname])), assoc_name\n ))\n subtable = ref_table[np.where(ref_table[mg_colname] == assoc_name)]\n logging.info('Initial membership list has {} members'.format(len(subtable)))\n\n star_means = tabletool.build_data_dict_from_table(subtable, only_means=True,\n cartesian=True)\n\n data_upper_bound = np.nanmax(star_means, axis=0)\n data_lower_bound = np.nanmin(star_means, axis=0)\n logging.info('Stars span from {} to {}'.format(\n np.round(data_lower_bound),\n np.round(data_upper_bound)\n ))\n\n # First try and scale box margins by provided scale margin.\n # scale_margin of 1 would double total span (1 + 1)\n if scale_margin is not None:\n data_span = data_upper_bound - data_lower_bound\n box_margin = 0.5 * scale_margin * data_span\n\n # Set up boundaries of box that span double the association\n box_lower_bound = data_lower_bound - box_margin\n box_upper_bound = data_upper_bound + box_margin\n\n # Set margin based on provided (or default) constant amounts\n else:\n data_margin = np.array(3*[pos_margin] + 3*[vel_margin])\n box_lower_bound = data_lower_bound - data_margin\n box_upper_bound = data_upper_bound + data_margin\n\n logging.info('Range extended.\\nLower: {}\\nUpper: {}'.format(\n np.round(box_lower_bound),\n np.round(box_upper_bound)\n ))\n\n return box_lower_bound, box_upper_bound", "def init(self):\n\n self.pos = np.random.rand(self.N, 7)\n for i in range(3):\n self.pos[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n self.pos[:, i] += self.bounds[2*i]\n\n # Star colors http://www.isthe.com/chongo/tech/astro/HR-temp-mass-table-byhrclass.html http://www.vendian.org/mncharity/dir3/starcolor/\n O3 = np.array([144., 166., 255.])\n O3 /= 255.\n self.pos[:, 3:-1] = O3[None, :]\n M4Ia = np.array([255., 185., 104.])\n M4Ia /= 255.\n self.pos[np.random.rand(self.N)>.5, 3:-1] = M4Ia[None, :]\n\n self.pos[:, -1] = .8 + .2*self.pos[:, -1]", "def set_board(board):", "def __init__(self, rows=8, cols=8):\n self.rows = rows\n self.cols = cols\n self.matrix = [[Disk.NONE for x in range(rows)] for y in range(cols)]\n init_array = (('d','4',Disk.LIGHT), ('e','5',Disk.LIGHT), ('d','5',Disk.DARK), ('e','4',Disk.DARK))\n # init_array = (('a', '2', Disk.LIGHT), ('b', '5', Disk.LIGHT), ('d', '5', Disk.DARK), ('e', '4', Disk.DARK))\n for item in init_array:\n self.place_disk(*self.coordinates_to_matrix(item[0], item[1]), item[2])", "def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], 
tile_value)", "def generate_level(self):\n for _ in range(AMOUNT_REGIONS_TO_DRAW):\n self._generate_next_blocks()", "def _loadRegions(self, fh):\n holeNumbers = self._mainBasecallsGroup[\"ZMW/HoleNumber\"].value\n self._holeNumberToIndex = dict(zip(holeNumbers, range(len(holeNumbers))))\n\n #\n # Region table\n #\n self.regionTable = toRecArray(REGION_TABLE_DTYPE,\n fh[\"/PulseData/Regions\"].value)\n\n self._regionTableIndex = _makeRegionTableIndex(self.regionTable.holeNumber)\n isHqRegion = self.regionTable.regionType == HQ_REGION\n hqRegions = self.regionTable[isHqRegion]\n\n if len(hqRegions) != len(holeNumbers):\n # Bug 23585: pre-2.1 primary had a bug where a bas file\n # could get a broken region table, lacking an HQ region\n # entry for a ZMW. This happened fairly rarely, mostly on\n # very long traces. Workaround here is to rebuild HQ\n # regions table with empty HQ region entries for those\n # ZMWs.\n hqRegions_ = toRecArray(REGION_TABLE_DTYPE,\n np.zeros(shape=len(holeNumbers),\n dtype=REGION_TABLE_DTYPE))\n hqRegions_.holeNumber = holeNumbers\n for record in hqRegions:\n hn = record.holeNumber\n hqRegions_[self._holeNumberToIndex[hn]] = record\n hqRegions = hqRegions_\n\n hqRegionLength = hqRegions.regionEnd - hqRegions.regionStart\n holeStatus = self._mainBasecallsGroup[\"ZMW/HoleStatus\"].value\n\n #\n # Sequencing ZMWs - Note: this differs from Primary's\n # definition. To obtain those values, one would use the\n # `allSequencingZmws` property.\n #\n self._sequencingZmws = \\\n holeNumbers[(holeStatus == SEQUENCING_ZMW) &\n (self._mainBasecallsGroup[\"ZMW/NumEvent\"].value > 0) &\n (hqRegionLength > 0)]\n\n self._allSequencingZmws = holeNumbers[holeStatus == SEQUENCING_ZMW]", "def update_temperature_region(self):\n self.linear_region.setRegion(\n self.temperature_plot_graph.getViewBox().viewRange()[0])", "def _regions_shmem_config(self) -> None:\n assert self.config is not None\n assert self.board is not None\n\n if not self.config.shmem:\n return\n\n for name, shmem_config in self.config.shmem.items():\n for cell_name in shmem_config.peers:\n cell = self.config.cells[cell_name]\n assert cell.pci_devices is not None\n\n # offset 2, since mem_regions always\n # start with table_region and common_output_region\n dev_id = cell.pci_devices[name].shmem_dev_id\n assert dev_id is not None\n assert cell.memory_regions is not None\n\n grouped_region_name = f\"{name}\"\n grouped_region = cell.memory_regions[grouped_region_name]\n\n cell_output_region = grouped_region.regions[2 + dev_id]\n\n def get_mem_region_index(cell, name):\n ret = -1\n index = 0\n\n for region_name, region in cell.memory_regions.items():\n if region_name == name:\n ret = index\n break\n\n if isinstance(region, GroupedMemoryRegion):\n index += len(region.regions)\n else:\n index += 1\n\n if ret == -1:\n raise Exception(\n f\"Invalid cells.yaml, not a memory-region: {name}\"\n )\n\n return ret\n\n shmem_regions_start = get_mem_region_index(\n cell, grouped_region_name\n )\n cell.pci_devices[name].shmem_regions_start = shmem_regions_start\n\n new_cell_output_region = copy.copy(cell_output_region)\n new_cell_output_region.flags = copy.copy(\n cell_output_region.flags\n )\n new_cell_output_region.flags.append(\"MEM_WRITE\")\n\n grouped_region.regions[2 + dev_id] = new_cell_output_region", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value" ]
[ "0.6742911", "0.5997335", "0.5913795", "0.573142", "0.56936634", "0.56373113", "0.56364506", "0.5610511", "0.5602637", "0.55689424", "0.5567876", "0.553663", "0.5532952", "0.55236477", "0.5481019", "0.5457451", "0.5454251", "0.544278", "0.5432112", "0.5432112", "0.5432112", "0.54132175", "0.5345846", "0.5345425", "0.53006715", "0.5268588", "0.5261683", "0.5255762", "0.52529144", "0.5224182", "0.51988786", "0.5193345", "0.519081", "0.5180113", "0.5168156", "0.5166387", "0.51640004", "0.51552254", "0.51426095", "0.5139516", "0.51372105", "0.5127384", "0.5127384", "0.5113221", "0.5101546", "0.5073147", "0.50712013", "0.50674015", "0.50664294", "0.50536704", "0.50477946", "0.50441754", "0.5031139", "0.50263506", "0.50262123", "0.501471", "0.50046724", "0.5002816", "0.49999812", "0.49950248", "0.4993164", "0.4969553", "0.49669695", "0.49557677", "0.4945821", "0.4945211", "0.49412164", "0.49386188", "0.49265516", "0.49213815", "0.4920508", "0.4919846", "0.4917624", "0.49143732", "0.49022818", "0.4895478", "0.48950115", "0.48841983", "0.4875334", "0.48706442", "0.48693988", "0.48628083", "0.48611423", "0.48595762", "0.48525757", "0.48524988", "0.4850724", "0.48465583", "0.48436847", "0.4840618", "0.4834213", "0.483338", "0.48318627", "0.48306897", "0.48291117", "0.48222187", "0.48204514", "0.4816622", "0.48150495", "0.48073468" ]
0.5901017
3
Add agents and exits to the board. This modifies both the board and regions in place.
def add_agents_and_exit(board, regions, agents, agent_types): agent_vals = [] point_tables = [] agent_names = [] agent_types = {'default': DEFAULT_AGENT, **agent_types} for agent_type in _fix_random_values(agents): agent_type = _fix_random_values(agent_type) if agent_type not in agent_types: continue agent = {**DEFAULT_AGENT, **agent_types[agent_type]} agent_val = CellTypes.agent | CellTypes.frozen if agent['color'] in COLORS: agent_val |= COLORS[agent['color']] else: logger.error("Invalid agent color: '%s'", agent['color']) for flag in agent['flags']: if flag in AGENT_PROPERTIES: agent_val |= AGENT_PROPERTIES[flag] else: logger.error("Invalid agent property '%s'", flag) agent_vals.append(agent_val) point_tables.append(agent['points_table']) agent_names.append(agent_type) if not agent_vals: return np.zeros((0,2), dtype=int), np.zeros((0,8,9), dtype=int) # Add agents to the board zero_reg = (regions == 0) zero_idx = np.array(np.nonzero(zero_reg)).T # ensure that there are not more agents than places to put them: agent_vals = agent_vals[:len(zero_idx)] agent_locs = zero_idx[ get_rng().choice(len(zero_idx), len(agent_vals), replace=False)] board[tuple(agent_locs.T)] = agent_vals # Find the location that's as far away from agents as possible while still # in the buffer region. row_dist = np.abs(np.arange(board.shape[0])[:, np.newaxis] - agent_locs[:,0]) col_dist = np.abs(np.arange(board.shape[1])[:, np.newaxis] - agent_locs[:,1]) row_dist = np.sum(np.minimum(row_dist, board.shape[0] - row_dist), axis=-1) col_dist = np.sum(np.minimum(col_dist, board.shape[1] - col_dist), axis=-1) dist = (row_dist[:, np.newaxis] + col_dist[np.newaxis, :]) * zero_reg k = np.argmax(dist) exit_loc = k // board.shape[1], k % board.shape[1] board[exit_loc] = CellTypes.level_exit | CellTypes.color_r # Ensure that the player and exit aren't touching any other region all_locs = np.append(agent_locs, [exit_loc], axis=0) n = np.array([[-1,0,1,-1,0,1,-1,0,1],[-1,-1,-1,0,0,0,1,1,1]]).T new_locs = (all_locs[:,np.newaxis] + n).reshape(-1, 2) % board.shape regions[tuple(new_locs.T)] = -1 return agent_locs, point_tables, agent_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initialization\n grid_queue = [gq for gq in self.grid_queues.values() if gq.accepts(agent)][agent.sex]\n agent.grid_queue = grid_queue.index\n self.add_to_grid_queue(agent)", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def update_available_cells(self, agent):\n try:\n self.available_road_cells.remove(agent.pos)\n except:\n pass\n try:\n self.available_building_cells.remove(agent.pos)\n except:\n pass\n\n adj_cells = self.environment.grid.get_neighborhood(agent.pos, moore=False)\n surrounding_cells = self.environment.grid.get_neighborhood(agent.pos, moore=True)\n\n # Update available cells if agent is a road\n if type(agent) == Road:\n for cell in surrounding_cells:\n # Roads\n if self.creates_valid_road(cell) and cell not in self.available_road_cells:\n self.available_road_cells.append(cell)\n\n # Buildings\n if self.creates_valid_building(cell) and cell not in self.available_building_cells:\n self.available_building_cells.append(cell)\n\n if type(agent) == Building:\n for cell in surrounding_cells:\n # Roads\n if self.creates_valid_road(cell) and cell not in self.available_road_cells:\n self.available_road_cells.append(cell)\n\n # Buildings\n if self.creates_valid_building(cell) and cell not in self.available_building_cells:\n self.available_building_cells(cell)", "def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)", "def push_regions(self, regions: [MouseRegion]):\n raise NotImplementedError", "def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)", "def insert_host_states(hosts):\n IMPL.insert_host_states(hosts)", "def enter(self, env):\n env = self._find_env(env, new=True)\n env.add_agents(self)", "def on_enter(self):\n # Add self to list of obstacles\n self.parent._obstacles.add(self)\n super().on_enter()", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def apply_to_world(self, world):\n # add the current obstacles\n for obstacle in self.current_obstacles:\n world.add_obstacle(obstacle)\n\n # program the robot supervisors\n for robot in world.robots:\n robot.supervisor.goal = self.current_goal[:]", "def place_dungeon_items(self):\r\n self.place_entrance()\r\n self.place_exit()\r\n self.place_pillar_a()\r\n self.place_pillar_e()\r\n self.place_pillar_i()\r\n self.place_pillar_p()\r\n self.place_pits()\r\n self.place_vision()\r\n 
self.place_healing()\r\n self.original_map = self.__repr__()", "def add_items(self):\n # -- item list\n item = ['S','H','R','M', 'B', 'C']\n # -- loops through item list and adds them to the maze\n for i in item:\n x_coordinate, y_coordinate = self.find_random_spot()\n self.grid[x_coordinate][y_coordinate] = i", "def place_entrance(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__current_room = x, y # places adventurer in dungeon at start of game\r\n self.__entrance_room = x, y\r\n self.__maze[x][y].set_entrance(True)", "def resize_board(self, dx, dy):\n height, width = self.board.shape\n if width <= 0 or height <= 0:\n raise ValueError(\"Cannot resize to zero.\")\n new_board = np.zeros((height+dy, width+dx), dtype=self.board.dtype)\n height += min(0, dy)\n width += min(0, dx)\n new_board[:height, :width] = self.board[:height, :width]\n self.board = new_board\n out_of_bounds = np.any(self.agent_locs >= new_board.shape, axis=1)\n self.agent_locs = self.agent_locs[~out_of_bounds]\n self.edit_loc = tuple(np.array(self.edit_loc) % new_board.shape)\n self.update_exit_locs()", "def add_region(self, position):\n region = self.region_selector(position)\n self.regions[id(region)] = region", "def place_building(self, building):\n if self.environment.grid.is_cell_empty(building.pos):\n self.environment.grid.place_agent(building, building.pos)\n self.environment.agents['residences'].append(building)\n else:\n try:\n self.available_cells.remove(building.pos)\n except:\n pass", "def regions(self, regions):\n self._regions = regions", "def _update_battle_position(self, new_cells=[], previous_cells=[]):\n if previous_cells:\n for previous_cell in previous_cells:\n self._battle_area.set_cell(previous_cell.get_name(), False)\n if new_cells:\n for new_cell in new_cells:\n self._battle_area.set_cell(new_cell.get_name(), self)", "def create_block(self, location_list, POI_locations):\n\n \n for i in range(len(location_list)):\n this_cell = self.grid.get_cell_list_contents(location_list[i])\n\n for agent in this_cell:\n if type(agent) is nodeAgent:\n agent.block = True\n\n for i in POI_locations:\n agent.locations[i] = 10000", "def test_arena_move(agents_and_engines):\n agents, engine = agents_and_engines\n assert len(agents) == 3\n\n game_state = engine.get_state()\n\n positions = [g['pos'] for g in game_state['gladiators']]\n\n rw = random_walker.ArenaAgent()\n\n move = rw.move(game_state)\n for i in range(3):\n engine.move(move)\n new_state = engine.get_state()\n\n new_positions = [g['pos'] for g in new_state['gladiators']]\n\n assert new_positions != positions", "def advance_board(self):\n board = self.board\n rules = self.energy_rules\n h, w = board.shape\n beta = 1.0 / max(1e-20, self.temperature)\n if len(rules[0]) - 1 == 4:\n neighborhood = np.array([[0,1,0],[1,0,1],[0,1,0]])\n elif len(rules[0]) - 1 == 6:\n neighborhood = np.array([[0,1,1],[1,0,1],[1,1,0]])\n elif len(rules[0]) - 1 == 8:\n neighborhood = np.array([[1,1,1],[1,0,1],[1,1,1]])\n else:\n raise RuntimeError(\"async rules must have length 5, 7, or 9\")\n rng = get_rng()\n for _ in range(int(board.size * self.cells_per_update)):\n x = rng.choice(w)\n y = rng.choice(h)\n if board[y, x] & CellTypes.frozen:\n continue\n neighbors = board.view(wrapping_array)[y-1:y+2, x-1:x+2] * neighborhood\n alive_neighbors = np.sum(neighbors & CellTypes.alive > 0)\n spawn_neighbors = np.sum(neighbors & CellTypes.spawning > 0)\n frozen = np.sum(neighbors & CellTypes.freezing) > 0\n if frozen:\n continue\n if 
board[y, x] & CellTypes.alive:\n H = rules[0][alive_neighbors]\n else:\n H = rules[1][alive_neighbors]\n\n P = 0.5 + 0.5*np.tanh(H * beta)\n P = 1 - (1-P)*(1-self.spawn_prob)**spawn_neighbors\n board[y, x] = CellTypes.life if coinflip(P) else CellTypes.empty", "def populate_tiles(self):\n\n # grid format :\n # grid(x,y,z)[0]: A valid WorldTile type (i.e. WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))", "def add_agent(self, agent):\n\t\tif not (agent in self.agents_in_site):\n\t\t\tif (agent.site != None):\n\t\t\t\tagent.site.agents_in_site.remove(agent) \n\t\t\tself.agents_in_site.append(agent)\n\t\t\tagent.site = self", "def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])", "def shift_board(self, dx, dy):\n self.board = np.roll(self.board, dy, axis=0)\n self.board = np.roll(self.board, dx, axis=1)\n self.agent_locs += [dy, dx]\n self.agent_locs %= self.board.shape\n self.update_exit_locs()", "def begin_encounter(self):\r\n\r\n #introduce NPCs - run all introduce methods, unless the NPCs have the same name\r\n for i in range(len(self.npc_names)):\r\n for _npc in self.npc_list:\r\n if _npc.name == self.npc_names[i]:\r\n _npc.introduce(self.npc_quantities[i], self.location)\r\n break\r\n\r\n #list visible enemies\r\n self.display_npcs()\r\n\r\n #check close proximity - if hostile enemy within 10 ft, don't go to interact menu\r\n hostile_close_proximity = False\r\n \r\n for m in range(len(self.npc_distances)):\r\n if self.npc_distances[m] < 10:\r\n if self.npc_list[m].hostility == utils.HostilityLevel.HOSTILE:\r\n hostile_close_proximity = True\r\n multiple = self.npc_quantities[self.npc_names.index(self.npc_list[m].name)] > 1\r\n self.npc_list[m].alert_close_proximity(multiple)\r\n break\r\n\r\n interaction_result = NextState\r\n \r\n if hostile_close_proximity:\r\n #start combat\r\n interaction_result = NextState.COMBAT\r\n else:\r\n #run interaction choice menu - interactions may return flags that spawn social/combat encounters\r\n print(\"Select NPC to interact with:\")\r\n for l in range(len(self.npc_list)):\r\n print(str(l + 1) + \". 
\" + self.npc_list[l].name + \" (Distance: \" + str(self.npc_distances[l]) + \"ft.)\")\r\n \r\n choice = 0\r\n while choice < 1 or choice > len(self.npc_names) + 1:\r\n try:\r\n choice = int(input(\"Make selection: \"))\r\n except:\r\n print(\"Enter an integer between 1 and \" + str(len(self.npc_names)))\r\n \r\n interaction_result = self.npc_list[choice - 1].interact(self, choice - 1, self.main_player)\r\n\r\n #spawn social/combat encounter\r\n #if combat, pass npc list to generate turn order\r\n if interaction_result.name == \"COMBAT\":\r\n #spawn combat encounter\r\n print(\"Starting combat\")\r\n new_combat = combat.CombatEncounter(self.main_player, self.npc_list, self.npc_distances, self.npc_quantities)\r\n elif interaction_result.name == \"SOCIAL\":\r\n #spawn social encounter\r\n print(\"Starting social encounter\")\r\n elif interaction_result.name == \"FINISHED\":\r\n #present next choices, award loot from area\r\n #allow player to interact with any remaining/new NPCs\r\n print(\"Encounter finished\")\r\n elif interaction_result.name == \"DEATH\":\r\n #kill the player and end the game\r\n print(\"Player dead\")", "def place(self, board):\r\n self.board = board", "def append(self, agent):\n self.agents.append(agent)", "def add_objects_to_space(self):\n self.anti_spacecraft.add_to_space(self.space) # Anti-spacecraft Parts (represent the whole vehicle)\n self.space.add(self.spacecraft.body, self.spacecraft.shape) # Spacecraft body and shape\n self.space.add(self.pm_landing_pad) # Landing pad", "def draw_world(grid, r, c, image):\n under = grid[r, c]\n grid[r, c] = AGENT\n image.set_data(grid)\n grid[r, c] = under", "def make_hh_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-97 from excel_import\r\n self.hhpos = self.determine_hhpos(hh_row, 'house_latitude', 'house_longitude')\r\n self.hh_id = return_values(hh_row, 'hh_id')\r\n self.admin_village = 1\r\n\r\n # 2016\r\n mig_remittances = return_values(hh_row, 'mig_remittances') # remittances of initial migrant\r\n if mig_remittances is None:\r\n mig_remittances = 0\r\n household_income_list[hh_row - 1] = int(mig_remittances)\r\n household_remittances_list[hh_row - 1] = int(mig_remittances)\r\n\r\n if return_values(hh_row, 'initial_migrants') is not None:\r\n out_mig_list[hh_row - 1] = 1\r\n household_migrants_list.append(self.hh_id)\r\n cumulative_mig_list[hh_row - 1] = 1\r\n\r\n num_labor_list[hh_row - 1] = initialize_labor(hh_row)\r\n hh_size_list[hh_row - 1] = len(return_values(hh_row, 'age'))\r\n\r\n a = HouseholdAgent(hh_row, self, self.hh_id, self.admin_village)\r\n self.space.place_agent(a, self.hhpos) # admin_village placeholder\r\n self.schedule.add(a)", "def addTiles(self, rows, cols, minecount):\n for row in range(rows):\n self.tiles.append([])\n for col in range(cols):\n tile = Tile(self, row, col)\n tile.grid(row=row+1, column=col)\n self.tiles[row].append(tile)\n #left click listeners\n tile.bind('<ButtonPress-1>', self.pressTile)\n tile.bind('<ButtonRelease-1>', self.showTile)\n #middle click listeners\n tile.bind('<ButtonPress-2>', self.pressAdjTiles)\n tile.bind('<ButtonRelease-2>', self.showAdjTiles)\n #right click listeners\n tile.bind('<ButtonPress-3>', self.pressTile)\n tile.bind('<ButtonRelease-3>', self.toggleFlag)", "def sea_execution(board, position, role):\n quitt = False\n if position == 'comp':\n #print(1)\n #temporary for dumb AI\n #create and print a list of coastal, friendly regions where norse is not the ONLY one\n \n possible_region_list = []\n \n #loops through list of 
friendly, coastal, not just Norse regions to append to a possible_region_list\n for region in board.get_controlled_regions(role):\n #print(2)\n coastal = False\n just_norse = False\n if region.coast:\n coastal = True\n if len(region.blocks_present) == 1 and region.blocks_present[0].name.upper() == 'NORSE':\n just_norse = True\n \n if coastal and not just_norse:\n possible_region_list.append(region)\n \n \n #loops through list of friendly, coastal regions to append to a possible_final_region_list\n possible_final_region_list = []\n for region in board.get_controlled_regions(role): \n #print(3) \n if region.coast:\n possible_final_region_list.append(region)\n \n \n \n if len(possible_final_region_list) >= 2:\n #if you want to add in last-min strategy, do it here\n #random region from possible list\n england = board.regions[22]\n if england in possible_region_list:\n original_region = england\n else:\n original_region = possible_region_list[random.randint(0, len(possible_region_list) - 1)]\n #remove the original region from the possible end regions\n possible_final_region_list.remove(original_region)\n \n #possible_block_list\n #list of possible blocks to move (present in region) and not norse\n possible_block_list = []\n for block in original_region.blocks_present:\n if block.name != 'NORSE':\n possible_block_list.append(block)\n \n move_block_list = []\n blocks_moved = 0\n #print(4)\n\n while blocks_moved < 2:\n #print(5)\n block = possible_block_list[random.randint(0, len(possible_block_list)-1)]\n #if it's not already on the list,append to move_block_list\n if block not in move_block_list:\n move_block_list.append(block)\n blocks_moved+=1\n elif block in move_block_list and len(possible_block_list) == 1:\n blocks_moved+=1\n else:\n print('neither condition was met so this is an infinite loop')\n \n \n #print(6) \n new_region = possible_final_region_list[random.randint(0, len(possible_final_region_list) - 1)]\n \n for block in move_block_list:\n \n board.add_to_location(block, new_region)\n print(block.name + ' moved from ' + original_region.name + ' to ' + new_region.name)\n \n else:\n print('There are not enough friendly regions with which to play this card.')\n \n \n #add in if it's not possible\n elif position == 'opp':\n \n \n possible_region_list = []\n \n #loops through list of friendly, coastal, not just Norse regions to append to a possible_region_list\n for region in board.get_controlled_regions(role):\n coastal = False\n just_norse = False\n if region.coast:\n coastal = True\n if len(region.blocks_present) == 1 and region.blocks_present[0].name.upper() == 'NORSE':\n just_norse = True\n \n if coastal and not just_norse:\n possible_region_list.append(region)\n \n \n #loops through list of friendly, coastal regions to append to a possible_final_region_list\n possible_final_region_list = []\n for region in board.get_controlled_regions(role): \n if region.coast:\n possible_final_region_list.append(region)\n \n \n \n if len(possible_final_region_list) >= 2:\n \n print('Possible origin regions:')\n for region in possible_region_list:\n print(region.name)\n \n #user input region, check if in possible list\n valid_region = False\n while not valid_region:\n \n original_region_name = input('What region would you like to move block(s) from? 
Enter a name or \\'none\\'.\\n>').upper()\n \n if original_region_name != 'NONE':\n \n original_region = search.region_name_to_object(board, original_region_name)\n \n if original_region and original_region in possible_region_list:\n valid_region = True\n else:\n print('Invalid region.')\n else:\n quitt = True\n \n if not quitt:\n #remove the original region from the possible end regions\n possible_final_region_list.remove(original_region)\n \n #possible_block_list\n #list of possible blocks to move (present in region) and not norse\n possible_block_list = []\n for block in original_region.blocks_present:\n if block.name != 'NORSE':\n possible_block_list.append(block)\n \n print('Possible blocks:')\n for block in possible_block_list:\n print(block.name)\n \n \n move_block_list = []\n blocks_moved = 0\n quittt = False\n block_name = ''\n while blocks_moved < 2 and not quittt:\n if block_name != 'NONE':\n valid_block = False\n while not valid_block:\n \n \n block_name = input('Which block would you like to move? Enter a name or \\'none\\'.\\n>').upper()\n \n if block_name != 'NONE':\n \n block_to_move = search.block_name_to_object(possible_block_list, block_name)\n \n if block_to_move and block_to_move not in move_block_list:\n valid_block = True\n move_block_list.append(block_to_move)\n blocks_moved+=1\n \n elif block in move_block_list and len(possible_block_list) == 1:\n blocks_moved=1\n \n else:\n print('Invalid block.')\n continue\n else:\n valid_block = True\n if len(move_block_list) == 1:\n quittt = True\n quitt = False\n if len(move_block_list) > 0: \n print('Possible final regions:')\n for region in possible_final_region_list:\n print(region.name)\n \n #user input region, check if in possible list\n valid_region = False\n while not valid_region:\n \n new_region_name = input('What region would you like to move block(s) to? 
Enter a name or \\'none\\'.\\n>').upper()\n \n if new_region_name != 'NONE':\n \n new_region = search.region_name_to_object(board, new_region_name)\n \n if new_region and new_region in possible_final_region_list:\n valid_region = True\n else:\n print('Invalid region.')\n continue\n else:\n valid_region = True\n quitt = True\n \n if not quitt:\n \n for block in move_block_list:\n \n board.add_to_location(block, new_region)\n print(block.name + ' moved from ' + original_region.name + ' to ' + new_region.name)\n \n else:\n print('There are not enough friendly coastal regions with which to play this card.')", "def step(self):\n try:\n self.agents.sort(key=lambda x: x.dist)\n except Exception as e:\n print(e)\n\n for agent in self.agents:\n try:\n agent.step()\n except Exception as e:\n print(e)\n\n\n # Removes agents if they reach exit\n for exit in self.model.exits:\n x, y = exit.pos[0] * 6 + 1, exit.pos[1] * 6 + 1\n if agent.node == (x, y):\n try:\n agent.saved()\n except Exception as e:\n print(e)", "def _update_board(self):\n\n self.game_board.update_board(self.tetrino_set)", "def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_sugar_in_field -= available_sugar\n # Set sugar in current cell to zero\n self.get_sugar(self.pos).eat_sugar() \n \n \n \n if self.sugar == 0:\n self.model.remove_agent(self)\n \n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n \n \n if self.reproduction_and_death:\n if self.age > self.max_age: # Agent dies\n # Tax inheritance\n self.model.inheritance_tax_agent(self)\n \n if self.model.spawn_at_random:\n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n self.model.remove_agent(self) #agent dies\n \n \n else:\n #spawn new agent\n self.gen += 1\n if self.sugar != 0:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.sugar)\n else:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.model.starting_sugar)\n \n self.model.remove_agent(self) #agent dies", "def load_agents(self, agents):\n self.agents = agents", "def _place_board(self, board):\n for i, row in enumerate(board):\n for j, widget in enumerate(row):\n widget.grid(row = i, column = j)", "def setup_lists(self):\n \n # Decompose spores do not affect terrain like ground or water\n for thing in self.room.wall_list:\n self.unaffected.add(thing)\n\n for thing in self.room.sludge:\n self.unaffected.add(thing)\n\n # It does, however, destroy enemies and things that are alive. 
Later implement logs.\n for thing in self.room.enemy_list:\n self.affected.add(thing)\n\n for thing in self.room.can_climb:\n self.affected.add(thing)", "def add_parties(self, *parties) -> None:\n\n for party in parties:\n self._route_table['route_table'][party.get_id()] = party.to_entry_point(\n )", "def add_to_grid_queue(self, agent):\n self.pipes[agent.grid_queue].send(\"add\")\n self.pipes[agent.grid_queue].send(agent)", "def add_all_regions():\n gene_id = request.json['gene_id']\n panel_id = request.json['panel_id']\n tx_id = request.json['tx_id']\n gene_name = request.json['gene_name']\n project_id = get_project_id_by_panel_id(s, panel_id)\n\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n add_genes_to_panel_with_ext(s, panel_id, gene_id)\n return jsonify({\"genes\": [gene_id, ]})", "def insert_parts(self, parts):\r\n self.board.insert_parts(parts)\r\n self.set_changed(parts)", "def updated_occupied_locations(self):\n if len(self.occupiedLocations) > self.currentTurn:\n self.occupiedLocations[self.currentTurn] += [self.character.path[-1]]\n else:\n self.occupiedLocations += [[self.character.path[-1]]]", "def __activate(self, x: int, y: int, tree: int) -> None:\n self.__maze[x, y] = tree", "def spill(self, agent):\n self.spill_list.append(agent)", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. 
If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def add_artifacts(cells_info):\n # Add fire extinguishers\n cells_info[(1, 0)].add_artifact(\"[email protected] 2.1 0.004 0 0 0\")\n cells_info[(1, 14)].add_artifact(\"[email protected] -2.1 0.004 -90 0 90\")\n cells_info[(7, 6)].add_artifact(\"[email protected] -3 0.004 0 0 0\")\n cells_info[(7, 0)].add_artifact(\"Extinguisher@-5 0 5.004 -90 0 300\")\n\n # Add phones\n cells_info[(8, 3)].add_artifact(\"[email protected] 3 0.004 -90 0 0\")\n cells_info[(15, 0)].add_artifact(\"Phone@-3 2.1 0.004 -90 0 -90\")\n cells_info[(13, 7)].add_artifact(\"Phone@-3 0 0.004 90 0 -30\")\n cells_info[(4, 2)].add_artifact(\"Phone@-1 -4 0.004 90 0 0\")\n\n # Add backpacks\n cells_info[(10, 6)].add_artifact(\"Backpack@-6 -1.3 0.004 0 0 0\")\n cells_info[(1, 5)].add_artifact(\"[email protected] 6 0.004 -90 0 0\")\n cells_info[(0, 8)].add_artifact(\"Backpack@1 6 0.004 -90 0 0\")\n cells_info[(15, 4)].add_artifact(\"Backpack@2 2 0.004 90 0 0\")\n\n # Add Rescue Randy\n cells_info[(15, 15)].add_artifact(\"Rescue Randy@1 -7 0.004 0 0 180\")\n cells_info[(15, 7)].add_artifact(\"Rescue Randy@-1 6 0.004 0 0 0\")\n cells_info[(5, 12)].add_artifact(\"Rescue [email protected] 6.5 0.004 0 0 -90\")\n cells_info[(2, 11)].add_artifact(\"Rescue Randy@0 -7 0.004 0 0 180\")\n\n # Add Drills\n cells_info[(8, 8)].add_artifact(\"Drill@-6 0 0.004 0 -90 0\")\n cells_info[(10, 15)].add_artifact(\"Drill@-6 -1.2 0.004 0 90 -20\")\n cells_info[(3, 7)].add_artifact(\"[email protected] 7 0.004 0 0 0\")\n cells_info[(13, 6)].add_artifact(\"Drill@0 -7 0.004 0 90 -80\")", "def reset_agent_locations(self):\n self.transitions_left = self.T-1\n self.x_agent = np.repeat(self.xT.reshape(1, self.dimensions), self.n_agents, axis=0)", "def add_spawns_outside_boss_doors(self: WWRandomizer):\n \n rooms_to_add_new_spawns_to = [\n (\"M_NewD2\", 10, TGDR, None, 11),\n #(\"kindan\", 16, TGDR, None, 13), # Already has a spawn, ID 1.\n (\"Siren\", 18, TGDR, None, 13),\n (\"sea\", 1, ACTR, 1, 56),\n (\"M_Dai\", 15, TGDR, None, 17),\n (\"kaze\", 12, TGDR, None, 13),\n ]\n \n for stage_name, room_number, chunk, layer, boss_door_index in rooms_to_add_new_spawns_to:\n new_spawn_id = 27\n \n dzs = self.get_arc(\"files/res/Stage/%s/Stage.arc\" % stage_name).get_file(\"stage.dzs\", DZx)\n dzr = self.get_arc(\"files/res/Stage/%s/Room%d.arc\" % (stage_name, room_number)).get_file(\"room.dzr\", DZx)\n \n if chunk == TGDR:\n dzx_for_door = dzs\n else:\n dzx_for_door = dzr\n \n door = dzx_for_door.entries_by_type_and_layer(chunk, layer=layer)[boss_door_index]\n spawn_dist_from_door = 200\n y_rot = door.y_rot\n if door.from_room_num != room_number and door.from_room_num != 63:\n y_rot = (y_rot + 0x8000) % 0x10000\n y_rot_degrees = y_rot * (90.0 / 0x4000)\n x_offset = math.sin(math.radians(y_rot_degrees)) * spawn_dist_from_door\n z_offset = math.cos(math.radians(y_rot_degrees)) * spawn_dist_from_door\n x_pos = door.x_pos + x_offset\n y_pos = door.y_pos\n z_pos = door.z_pos + z_offset\n \n if stage_name in [\"M_Dai\", \"kaze\"]:\n # Earth and Wind temple spawns must be in the stage instead of the room or the game will crash.\n 
dzx_for_spawn = dzs\n else:\n dzx_for_spawn = dzr\n \n spawns = dzx_for_spawn.entries_by_type(PLYR)\n assert len([spawn for spawn in spawns if spawn.spawn_id == new_spawn_id]) == 0\n \n new_spawn = dzx_for_spawn.add_entity(PLYR)\n new_spawn.spawn_type = 0\n new_spawn.room_num = room_number\n new_spawn.x_pos = x_pos\n new_spawn.y_pos = y_pos\n new_spawn.z_pos = z_pos\n new_spawn.y_rot = y_rot\n new_spawn.spawn_id = new_spawn_id\n \n dzx_for_spawn.save_changes()", "def addAgent(self, new_agent, parent_agent, secondary_caregivers):\n # set unique id\n new_agent.ID = self.idCounter\n self.idCounter += 1\n \n self.pop.append(new_agent)\n newPopStructure = np.zeros((self.popStructure.shape[0]+1,self.popStructure.shape[1]+1))\n newPopStructure[:-1,:-1] = self.popStructure\n\n \n # inherit social structure from parent\n parent_id = self.findAgentIndexById(parent_agent)\n secondary_caregivers_ids = [self.findAgentIndexById(a) for a in secondary_caregivers]\n newPopStructure[-1,] = newPopStructure[parent_id,]\n newPopStructure[:,-1] = newPopStructure[:,parent_id] \n\n\t\t# strong bond between parents\n newPopStructure[-1,parent_id] = self.parameters[\"maxWeight\"]\n newPopStructure[parent_id,-1] = self.parameters[\"maxWeight\"]\n for s in secondary_caregivers_ids:\n \tnewPopStructure[-1,s] =self.parameters[\"maxWeight\"]\n \tnewPopStructure[s,-1] =self.parameters[\"maxWeight\"]\n\n # can't communicate with self\n np.fill_diagonal(newPopStructure, 0.0)\n \n self.popStructure = newPopStructure\n \n # start unmarried\n newMarriageStructure = np.zeros((self.marriageStructure.shape[0]+1,self.marriageStructure.shape[1]+1))\n newMarriageStructure[:-1,:-1] = self.marriageStructure\n newMarriageStructure[-1,] = 0\n newMarriageStructure[:,-1] = 0\n self.marriageStructure = newMarriageStructure\n \n\t\t# inherit clan from parent\n parent_clan = self.clans[parent_id]\n parent_compound = self.compounds[parent_id]\n \n self.clans.append(parent_clan)\n self.compounds.append(parent_compound)\n\n self.nAgents = len(self.pop)", "def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)", "def execute_actions(self, actions):\n execute_actions(self.board, self.agent_locs, actions)", "def gen_game(\n board_shape=(25,25), min_performance=-1, partitioning={},\n starting_region=None, later_regions=None, buffer_region=None,\n named_regions={}, agents=['default'], agent_types={}, **etc):\n board_shape = _fix_random_values(board_shape)\n min_performance = _fix_random_values(min_performance)\n partitioning = _fix_random_values(partitioning)\n\n regions = make_partioned_regions(board_shape, **partitioning)\n board = np.zeros(board_shape, dtype=np.uint16)\n goals = np.zeros(board_shape, dtype=np.uint16)\n\n # Create locations for the player and the exit\n agent_locs, points_table, agent_names = add_agents_and_exit(\n board, regions, agents, agent_types)\n\n # and fill in the regions...\n for k in np.unique(regions)[2:]:\n mask = regions == k\n if starting_region is not None:\n region_name = _fix_random_values(starting_region)\n else:\n region_name = _fix_random_values(later_regions)\n if region_name not in named_regions:\n logger.error(\"No region parameters for name '%s'\", region_name)\n continue\n logger.debug(\"Making region: %s\", region_name)\n rboard, rgoals = populate_region(mask, named_regions[region_name])\n board += rboard\n goals += rgoals\n starting_region = None\n buffer_region = _fix_random_values(buffer_region)\n if buffer_region in named_regions:\n mask = 
regions == 0\n rboard, rgoals = populate_region(mask, named_regions[buffer_region])\n board += rboard\n goals += rgoals\n\n # Give the buffer (0) region a rainbow / white color\n # This is mostly a visual hint for humans\n buffer_mask = (regions <= 0) & (goals & CellTypes.rainbow_color == 0)\n goals[buffer_mask] += CellTypes.rainbow_color\n\n game = SafeLifeGame()\n game.deserialize({\n 'board': board,\n 'goals': goals,\n 'agent_locs': agent_locs,\n 'agent_names': agent_names,\n 'min_performance': min_performance,\n 'points_table': points_table,\n 'orientation': 1,\n })\n return game", "def add_agents(*_coconut_match_args, **_coconut_match_kwargs):\n _coconut_match_check_2 = False\n _coconut_match_set_name_self = _coconut_sentinel\n _coconut_match_set_name_agents = _coconut_sentinel\n _coconut_match_set_name__set_defaults = _coconut_sentinel\n _coconut_match_set_name_named_agents = _coconut_sentinel\n _coconut_FunctionMatchError = _coconut_get_function_match_error()\n if _coconut.sum((_coconut.len(_coconut_match_args) > 0, \"self\" in _coconut_match_kwargs)) == 1:\n _coconut_match_set_name_agents = _coconut_match_args[1:]\n _coconut_match_temp_5 = _coconut_match_kwargs.pop(\"_set_defaults\") if \"_set_defaults\" in _coconut_match_kwargs else True\n _coconut_match_temp_4 = _coconut_match_args[0] if _coconut.len(_coconut_match_args) > 0 else _coconut_match_kwargs.pop(\"self\")\n _coconut_match_set_name__set_defaults = _coconut_match_temp_5\n _coconut_match_set_name_self = _coconut_match_temp_4\n _coconut_match_set_name_named_agents = _coconut_match_kwargs\n _coconut_match_check_2 = True\n if _coconut_match_check_2:\n if _coconut_match_set_name_self is not _coconut_sentinel:\n self = _coconut_match_set_name_self\n if _coconut_match_set_name_agents is not _coconut_sentinel:\n agents = _coconut_match_set_name_agents\n if _coconut_match_set_name__set_defaults is not _coconut_sentinel:\n _set_defaults = _coconut_match_set_name__set_defaults\n if _coconut_match_set_name_named_agents is not _coconut_sentinel:\n named_agents = _coconut_match_set_name_named_agents\n if not _coconut_match_check_2:\n raise _coconut_FunctionMatchError('match def add_agents(self, *agents, _set_defaults=True, **named_agents):', _coconut_match_args)\n\n new_agents = []\n for a in _coconut.itertools.chain.from_iterable(_coconut_reiterable(_coconut_func() for _coconut_func in (lambda: agents, lambda: named_agents.items()))):\n _coconut_match_to_0 = a\n _coconut_match_check_1 = False\n _coconut_match_set_name_name = _coconut_sentinel\n _coconut_match_set_name_actor = _coconut_sentinel\n if (_coconut.isinstance(_coconut_match_to_0, _coconut.abc.Sequence)) and (_coconut.len(_coconut_match_to_0) == 2):\n _coconut_match_set_name_name = _coconut_match_to_0[0]\n _coconut_match_set_name_actor = _coconut_match_to_0[1]\n _coconut_match_check_1 = True\n if _coconut_match_check_1:\n if _coconut_match_set_name_name is not _coconut_sentinel:\n name = _coconut_match_set_name_name\n if _coconut_match_set_name_actor is not _coconut_sentinel:\n actor = _coconut_match_set_name_actor\n if _coconut_match_check_1:\n if not callable(actor):\n a = init_agent(name, actor)\n elif isinstance(actor, Agent):\n a = actor.clone(name=name)\n else:\n a = Agent(name, actor)\n assert isinstance(a, Agent), \"not isinstance({_coconut_format_0}, Agent)\".format(_coconut_format_0=(a))\n new_agents.append(a)\n self.agents += new_agents\n if _set_defaults:\n self.set_defaults(new_agents)\n return self", "def move_to(self, entity, location):\n y, x = location\n if 
not y in range(self.size) or not x in range(self.size):\n return\n y, x = entity.location\n self.grid[y][x].contents.remove(entity)\n entity.location = location\n y, x = location\n self.grid[y][x].contents.append(entity)\n for ent in self.grid[y][x].contents:\n try:\n if not ent.player_enter_callback is None:\n ent.player_enter_callback(ent)\n except AttributeError:\n pass", "def push_up(self, event):\n self.transpose()\n self.stack()\n self.merge()\n self.transpose()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()", "def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n self.start = gameState.getAgentPosition(self.index)\n CaptureAgent.registerInitialState(self, gameState)\n\n \"G A M E K E Y L O C A T I O N S D E T E R M I N A T I O N\"\n if self.red:\n leftEdge = gameState.data.layout.width / 2\n rightEdge = gameState.data.layout.width - 2 #don't need the last wall\n self.safeColumn = leftEdge - 2 # -1 doesn't always seem to work\n else:\n leftEdge = 1\n rightEdge = gameState.data.layout.width / 2\n self.safeColumn = rightEdge + 2\n\n self.safeSpaces = []\n for h in xrange(1,gameState.data.layout.height-1):\n if not gameState.data.layout.isWall((self.safeColumn, h)):\n self.safeSpaces += [(self.safeColumn, h)]\n\n\n \"S T A T E A S S I G N M E N T\"\n pos = gameState.getAgentState(self.index).getPosition()\n self.friend = min(2 + int(not self.red), 2 - self.index + 2 * int(not self.red))\n friendPos = gameState.getAgentState(self.friend).getPosition()\n opps = [gameState.getAgentState(el).getPosition() for el in [1 - int(not self.red), 3 - int(not self.red)] ]\n\n print \"I am agent\", self.index, \"at position \", pos\n #print \"agent 0:\", gameState.getAgentState(0).getPosition()\n print \"My friend agent\", self.friend, \"is at position \", friendPos\n print \"My first enemy agent is at position \", opps[0]\n print \"My second enemy agent is at position \", opps[1]\n\n self.top = False\n self.undecided = False\n\n if pos[1] > friendPos[1]:\n print \"My friend is lower on the map, and I will take top Quad\"\n self.top = True\n elif pos[1] < friendPos[1]:\n print \"My friend is higher on the map, and I will take bottom Quad\"\n else:\n self.undecided = True\n\n \"F O O D A S S I G N M E N T\"\n self.initFood = self.getFood(gameState).asList()\n self.myFood = self.initFood[:] #this is will be updated during our A* Search for theoretical consumption\n print self.myFood\n\n \"I N I T I A L F O O D A S S I G N M E N T S \"\n\n start = time.time()\n print 'eval time for moves: %.4f' % (time.time() - start)\n\n\n \"D E B U G G I N G\"\n print \"Coloring my safe column white\"\n self.debugDraw([(self.safeColumn, el) for el in xrange(0, gameState.data.layout.height)], [1,1,1], clear=False)\n\n print \"Coloring my safe spaces\", self.safeSpaces, \"blue\"\n self.debugDraw(self.safeSpaces, [0,0,1], clear=False)\n\n self.counter = 0\n self.moves = []\n self.intendedCoords =[]\n self.best = None\n\n #new\n print \"Using my sweet time to find next moves during init as agent\", self.index\n self.best = self.ActionLoop(gameState, 140)\n self.moves = self.best.getDir()[1]\n self.counter = len(self.moves)\n self.cacheSize = len(self.moves)\n #new", "def __add_players_spawns(self):\n # Werewolves\n 
self.__grid[self.__werewolves_start[0]][self.__werewolves_start[1]][\"werewolves\"] \\\n = self.__number_of_beasts\n # Vampires\n self.__grid[self.__vampires_start[0]][self.__vampires_start[1]][\"vampires\"] \\\n = self.__number_of_beasts", "def draw_board(self):\n self.current_board = self.gameboard.copy()\n \n # Draw our rewards\n for r, row in enumerate(self.world_rewards):\n for c, reward in enumerate(row):\n if reward is not None:\n asset_key = reward.asset\n x = 64*(c+1)\n y = 64*(r+1)\n self.current_board.paste(\\\n self.assets[asset_key], (x,y), self.assets[asset_key])\n \n # Draw our creature\n cr_x, cr_y = self.creature.current_location\n x = 64*(cr_x + 1) # Should be the center of the tile\n y = 64*(cr_y + 1)\n creature_image = self.assets['beaver']\n if self.creature.facing == 'S':\n creature_image = creature_image.rotate(-180)\n elif self.creature.facing == 'E':\n creature_image = creature_image.rotate(-90)\n elif self.creature.facing == 'W':\n creature_image = creature_image.rotate(-270)\n self.current_board.paste(creature_image, (x,y), creature_image)", "def on_enter(self):\n # Obtain pointer to Parent Grid + Obstacles\n self._grid = self.parent._grid\n self._obstacles = self.parent._obstacles\n super().on_enter()", "def _add_agent_to_graph(self, agent: mantrap.agents.base.DTAgent):\n from data import Node\n is_robot = agent.is_robot\n\n # In Trajectron each node has a certain type, which is either robot or pedestrian, an id and\n # state data. Enforce the Trajectron id to the internal ids format, to be able to query the\n # results later on.\n agent_history = agent.history\n acc_history = agent.compute_acceleration(agent_history, dt=self.dt)\n\n node_data = self._create_node_data(state_history=agent_history, accelerations=acc_history)\n node_tye = self._gt_env.NodeType.PEDESTRIAN if not is_robot else self._gt_env.NodeType.ROBOT\n node = Node(node_type=node_tye, node_id=agent.id, data=node_data, is_robot=is_robot)\n if is_robot:\n self._gt_scene.robot = node\n self._gt_scene.nodes.append(node)\n\n # Re-Create online environment with recently appended node.\n self._online_env = self.create_online_env(env=self._gt_env, scene=self._gt_scene)", "def add_zombie(self, row, col):\r\n self._zombie_list.append((row, col))", "def add_rect(self, r, obj):\n cells = self._cells_for_rect(r)\n for c in cells:\n self._add(c, obj)", "def cells(self, cells):\n\n self.container['cells'] = cells", "def add_zombie(self, row, col):\n self._zombie_list.append((row,col))", "def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 
'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)", "def initialize_areas(self):\n self._areas[1] = copy.copy(self._areas[0])", "def place_obj(self):\r\n for pos in BOARD_POSITIONS:\r\n self.board[pos[0]][pos[1]] = Stone(color=self.state[pos[0]][pos[1]], pos=(pos[0], pos[1]))\r\n self.board[pos[0]][pos[1]].liberty = 
self.board[pos[0]][pos[1]].compute_liberty(self.state)", "def add_zombie(self, row, col):\n self._zombie_list.append((row, col))", "def AddRegions(self, **kwargs):\n # Addregions use pixel coordinates. listRegions and SaveRegions use RA and Dec.\n n_objs = 0\n objs = []\n # default shape is circle\n if not 'shape' in kwargs:\n kwargs['shape'] = ['circle']\n for k in kwargs.keys():\n n_objs = max(n_objs, len(kwargs[k]))\n for j in range(n_objs):\n temp = {}\n for k in kwargs.keys():\n try:\n temp[k] = kwargs[k][j]\n except IndexError:\n if k == 'shape': \n temp[k] = 'circle'\n objs.append(temp)\n self.all_objs = json.dumps(objs)\n command = \"JS9.AddRegions({objs}, {{display:'{wid}{suffix}'}})\".format(objs=self.all_objs, wid=self.wid, suffix=self.suffix)\n get_ipython().run_cell_magic('javascript', '', command)", "def mapAdd(block, posMap):\n for (x, y) in block.coords:\n theFallener(x + block.x, y + block.y, block.color, posMap)", "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def add_new_region(self, image_name: str, region_text: str, region_position: RegionPosition, region_type: str):\n pass", "def do_merge(self, cr, uid, ids, context=None): \n invent_obj = self.pool.get('stock.inventory')\n invent_line_obj = self.pool.get('stock.inventory.line')\n invent_lines = {}\n if context is None:\n context = {}\n for inventory in invent_obj.browse(cr, uid, context['active_ids'], context=context):\n if inventory.state == \"done\":\n raise osv.except_osv(_('Warning!'),\n _('Merging is only allowed on draft inventories.'))\n\n for line in inventory.inventory_line_id:\n key = (line.location_id.id, line.product_id.id, line.product_uom.id)\n if key in invent_lines:\n invent_lines[key] += line.product_qty\n else:\n invent_lines[key] = line.product_qty\n\n\n new_invent = invent_obj.create(cr, uid, {\n 'name': 'Merged inventory'\n }, context=context)\n\n for key, quantity in invent_lines.items():\n invent_line_obj.create(cr, uid, {\n 'inventory_id': new_invent,\n 'location_id': key[0],\n 'product_id': key[1],\n 'product_uom': key[2],\n 'product_qty': quantity,\n })\n\n return {'type': 'ir.actions.act_window_close'}", "def load(self):\n\n if self.loaded:\n return\n\n self.region_back = None\n self.objects = []\n self.plants = []\n self.tiles = []\n\n # Some convenience vars\n materials = self.data.materials\n matmods = self.data.matmods\n objects = self.data.objects\n plants = self.data.plants\n world = self.world\n self.loaded = True\n\n # Get tiles\n try:\n data_tiles = world.get_tiles(self.rx, self.ry)\n except KeyError:\n print('WARNING: Region ({}, {}) was not found in world'.format(self.rx, self.ry))\n return\n\n # \"real\" coordinates\n base_x = self.rx*32\n gui_x = base_x*8\n base_y = self.ry*32\n gui_y = (world.height*8)-(base_y*8)\n\n # Background for our drawn area (black)\n self.region_back = self.scene.addRect(gui_x, gui_y-255, 255, 255,\n QtGui.QPen(QtGui.QColor(0, 0, 0)),\n QtGui.QBrush(QtGui.QColor(0, 0, 0)),\n )\n 
self.region_back.setZValue(Constants.z_black)\n\n # Tiles!\n cur_row = 0\n cur_col = 0\n for data_tile in data_tiles:\n self.tiles.append(GUITile(self.scene, data_tile,\n base_x+cur_col, base_y+cur_row,\n self,\n gui_x+cur_col*8, gui_y-(cur_row+1)*8,\n self.layer_toggles))\n self.scene.addItem(self.tiles[-1])\n cur_col += 1\n if cur_col == 32:\n cur_col = 0\n cur_row += 1\n\n # Entities!\n entities = []\n try:\n entities = world.get_entities(self.rx, self.ry)\n except KeyError:\n pass\n\n for e in entities:\n if e.name == 'ObjectEntity':\n obj_name = e.data['name']\n obj_orientation = e.data['orientationIndex']\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n if obj_name in objects:\n obj = objects[obj_name]\n (image, offset_x, offset_y) = obj.get_image(obj_orientation)\n qpmi = QtWidgets.QGraphicsPixmapItem(image)\n qpmi.setPos(\n (obj_x*8) + offset_x,\n (world.height*8)-(obj_y*8) - offset_y - image.height(),\n )\n qpmi.setZValue(Constants.z_objects)\n if not self.layer_toggles.objects_toggle.isChecked():\n qpmi.setVisible(False)\n self.scene.addItem(qpmi)\n self.objects.append(qpmi)\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_object(obj, obj_name, obj_orientation, qpmi, e.data)\n elif e.name == 'PlantEntity':\n desc = e.data['descriptions']['description']\n images = []\n (obj_x, obj_y) = tuple(e.data['tilePosition'])\n for piece in e.data['pieces']:\n piece_img = piece['image'].split('?')[0]\n if piece_img in plants:\n img = plants[piece_img].image\n qpmi = QtWidgets.QGraphicsPixmapItem(img)\n qpmi.setPos(\n (obj_x*8) + (piece['offset'][0]*8),\n (world.height*8)-(obj_y*8) - (piece['offset'][1]*8) - img.height(),\n )\n qpmi.setZValue(Constants.z_plants)\n if not self.layer_toggles.plants_toggle.isChecked():\n qpmi.setVisible(False)\n images.append((plants[piece_img], qpmi))\n self.scene.addItem(qpmi)\n self.plants.append(qpmi)\n else:\n print('not found: {}'.format(piece_img))\n rel_x = obj_x - base_x\n rel_y = obj_y - base_y\n tile_idx = rel_y*32 + rel_x\n self.tiles[tile_idx].add_plant(desc, images)\n elif (e.name == 'MonsterEntity'\n or e.name == 'NpcEntity'\n or e.name == 'StagehandEntity'\n or e.name == 'ItemDropEntity'\n or e.name == 'VehicleEntity'\n ):\n # TODO: Ignoring for now\n pass\n else:\n print('Unknown entity type: {}'.format(e.name))", "def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])", "def _populate_level_with_enemies(self,\n map_layer_configuration,\n base_enemy_chance_cave: float = 0.006,\n base_enemy_chance_dungeon: float = 0.006,\n base_boss_chance: float = 0.003) -> None:\n enemy_chance_cave = self.generate_enemy_chance(base_enemy_chance_cave)\n enemy_chance_dungeon = self.generate_enemy_chance(base_enemy_chance_dungeon)\n boss_chance = self.generate_enemy_chance(base_boss_chance)\n for row in map_layer_configuration:\n for block in row:\n if block[0] == ' ':\n if np.random.rand() > (1 - enemy_chance_cave):\n if self.sprites.drill.center_x != block[1] or self.sprites.drill.center_y != block[2]:\n enemy_to_add = random.choice(potential_enemies)\n 
enemy_to_append = enemy_to_add(block[1], block[2], vision=200)\n self.sprites.entity_list.append(enemy_to_append)\n self.sprites.enemy_list.append(enemy_to_append)\n elif block[0] == 'F':\n if np.random.rand() > (1 - enemy_chance_dungeon):\n if self.sprites.drill.center_x != block[1] or self.sprites.drill.center_y != block[2]:\n enemy_to_add = random.choice(potential_enemies)\n enemy_to_append = enemy_to_add(block[1], block[2], vision=200)\n self.sprites.entity_list.append(enemy_to_append)\n self.sprites.enemy_list.append(enemy_to_append)\n elif np.random.rand() > (1 - boss_chance):\n if self.sprites.drill.center_x != block[1] or self.sprites.drill.center_y != block[2]:\n enemy_to_add = random.choice(potential_bosses)\n enemy_to_append = enemy_to_add(block[1], block[2], vision=200, speed=0.7)\n self.sprites.entity_list.append(enemy_to_append)\n self.sprites.enemy_list.append(enemy_to_append)\n self.sprites.drill_list.append(self.sprites.drill)\n\n for entity in self.sprites.entity_list:\n entity.setup_collision_engine([self.sprites.indestructible_blocks_list])", "def move2(self):\n\n options = self.location.exits.keys()\n for key in options:\n if self.location.exits[key] == p.location:\n self.location.objects.remove(a)\n self.location = p.location\n self.location.objects.append(a)\n print('fred entered the room')\n self.attack(['attack', str(p.name)])\n break\n else:\n self.move1()", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def add_conflicting_agents(self, agent_i, agent_j):\n\n self.conflicting_agents = (agent_i, agent_j)", "def grow(self, start_period=1, cascade=True):\n end_period = start_period + 1 if not cascade else self.parent.horizon\n for p in range(start_period, end_period):\n self.reset_areas(p+1) #, self._areas[p], self._areas[p+1] # WTF?\n #for age, area in list(self._areas[p].items()): self._areas[p+1][age+1] = area\n for age, area in list(self._areas[p].items()): self._areas[p+1][age+self.parent.period_length] = area", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def draw_region(self, constraint, agent):\n if (constraint == self.obstacles) and (self.obstacles[agent] is not None):\n for area in self.obstacles[agent]:\n x_min, x_max = area[0][0], area[0][1]\n y_min, y_max = area[1][0], area[1][1]\n rectangle = plt.Rectangle((x_min, y_min), x_max - x_min, y_max - y_min, fc='k', ec=\"k\")\n plt.gca().add_patch(rectangle)\n elif (constraint == self.observation_areas) and (self.observation_areas[agent] is not None):\n for observation_area in self.observation_areas[agent]:\n x_min, x_max = observation_area.region[0][0], observation_area.region[0][1]\n y_min, y_max = observation_area.region[1][0], observation_area.region[1][1]\n rectangle = plt.Rectangle((x_min, y_min), x_max - x_min, y_max - y_min, fc='c', ec=\"c\", alpha=0.5)\n plt.gca().add_patch(rectangle)\n\n plt.xlim(self.Xi[0])\n 
plt.ylim(self.Xi[1])", "def add(self, states, actions, rewards, next_states, dones):\n assert len(states) == self.num_agents, 'ERROR> group states size mismatch'\n assert len(actions) == self.num_agents, 'ERROR> group actions size mismatch'\n assert len(rewards) == self.num_agents, 'ERROR> group rewards size mismatch'\n assert len(next_states) == self.num_agents, 'ERROR> group next states size mismatch'\n assert len(dones) == self.num_agents, 'ERROR> group dones size mismatch'\n\n experience = (states, actions, rewards, next_states, dones)\n self.memory.append(experience)", "def editor_multi_agent_example():\n agent_definitions = [\n AgentDefinition(\"uav0\", agents.UavAgent, [sensors.RGBCamera, sensors.LocationSensor]),\n AgentDefinition(\"uav1\", agents.UavAgent, [sensors.LocationSensor, sensors.VelocitySensor])\n ]\n env = HolodeckEnvironment(agent_definitions, start_world=False)\n\n cmd0 = np.array([0, 0, -2, 10])\n cmd1 = np.array([0, 0, 5, 10])\n\n for i in range(10):\n env.reset()\n env.act(\"uav0\", cmd0)\n env.act(\"uav1\", cmd1)\n for _ in range(1000):\n states = env.tick()", "def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)", "def populate_board(self):\n for row in range(10):\n for col in range(10):\n coord = Coordinate(row, col)\n coord_attack = Coordinate(row, col)\n self.player_table.setItem(row, col, coord)\n self.attack_table.setItem(row, col, coord_attack)", "def place_road(self, road):\n\n # Check if space is empty\n if not self.environment.grid.is_cell_empty(road.pos):\n return False\n\n # Place Road\n self.environment.grid.place_agent(agent=road, pos=road.pos)\n\n # Add road to environment's road list\n self.environment.agents['roads'].append(road)\n\n # Update the list of cells where other things can be built\n self.update_available_cells(road)", "def __add_homes(self):\n for home in self.__positions_of_homes:\n self.__grid[home[0]][home[1]][\"humans\"] = math.floor(\n self.__number_of_humans / self.__number_of_homes\n )", "def _add_rooms(self):\r\n rooms = self.model.get_all_rooms()\r\n\r\n for room in rooms:\r\n self._add_room(room)", "def add_region(self, address, data):\n region = HexFileRegion(address, data)\n self.regions.append(region)\n self.check()", "def _undo_overlap(self, agent1, agent2, dist, combined_sizes, **kwargs):\n overlap = (combined_sizes - dist) / combined_sizes\n self.position_state.modify_position(agent1, -agent1.velocity * overlap)\n self.position_state.modify_position(agent2, -agent2.velocity * overlap)", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def create_enemy():\n if randint(0, 20) == 5:\n try:\n check.check_life(common.COLS-1, common.MIDS_R, \"Enemy\")\n eitem = person.Enemy(common.COLS-1, common.MIDS_R)\n config.E_LIST.append(eitem)\n except (config.EnemyHere, config.GapHere):\n pass\n\n for i in config.E_LIST:\n try:\n i.move(i.x_pos-2, i.y_pos)\n except config.WallHere:\n pass\n except config.EnemyHere:\n config.E_LIST.remove(i)", "def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = 
Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])", "def specific_reset(self) -> None:\n\n # first, set agent xy and adjust its height\n self.agent.specific_reset()\n agent_pos = np.zeros(3)\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # second, reset obstacle positions\n if len(self.obstacles) > 0:\n obs_init_pos = env_utils.generate_obstacles_init_pos(\n num_obstacles=len(self.obstacles),\n agent_pos=self.agent.get_position(),\n goal_pos=np.array([]), # no goal in gather task\n world=self.world,\n min_allowed_distance=self.obstacle_obstacle_distance,\n agent_obstacle_distance=self.agent_obstacle_distance\n )\n for i, ob in enumerate(self.obstacles):\n ob.set_position(obs_init_pos[i])\n\n # finally, make all collected objects visible again\n [ob.update_visuals(make_visible=True) for ob in self.obstacles]", "def create_board(self):\n for field in self.fields_start_locs:\n loc = self.loc_to_view(field[0], field[1])\n new_field = Field(loc, self.piece_size)\n self.sprite_group.add(new_field)\n self.click_sprites.add(new_field)\n self.fields.append(new_field)" ]
[ "0.5694394", "0.5502142", "0.5498129", "0.5477356", "0.5419938", "0.53479344", "0.5290117", "0.52449757", "0.52416706", "0.51957417", "0.5178578", "0.5174868", "0.51587874", "0.5111144", "0.5063345", "0.5056426", "0.50496", "0.50238144", "0.50177455", "0.49965593", "0.49882892", "0.49847248", "0.49558023", "0.49202922", "0.48733482", "0.48692533", "0.48518908", "0.48447728", "0.48413575", "0.48335016", "0.48326555", "0.4828696", "0.48268774", "0.48096022", "0.48023614", "0.4796406", "0.4788084", "0.47722533", "0.4771003", "0.47645703", "0.47590595", "0.47455546", "0.47427794", "0.4734808", "0.47327325", "0.4717882", "0.47109875", "0.47075865", "0.4704568", "0.46952543", "0.46941066", "0.4679838", "0.46765575", "0.46761733", "0.46673986", "0.46664986", "0.46630695", "0.4658269", "0.46554542", "0.46516305", "0.4649908", "0.463904", "0.4613151", "0.4606439", "0.46057656", "0.46055856", "0.45998886", "0.45993975", "0.45991403", "0.45963895", "0.45952758", "0.4594601", "0.4593535", "0.45786968", "0.4571666", "0.45665836", "0.45643786", "0.4563613", "0.45540094", "0.45512614", "0.4546316", "0.4545035", "0.45444444", "0.45423743", "0.45412555", "0.4540445", "0.45383367", "0.453191", "0.4531171", "0.45279387", "0.45269635", "0.45234138", "0.4520731", "0.45201582", "0.4517912", "0.4517013", "0.4516745", "0.4516174", "0.45055997", "0.45025578" ]
0.716102
0
Randomly generate a new SafeLife game board. Generation proceeds by creating several different random "regions", and then filling in each region with one of several types of patterns or tasks. Regions can be surrounded by fences / walls to make it harder for patterns to spread from one region to another. Each set of parameters can additionally be randomized by passing in a dictionary either with the 'choices' key or the 'uniform' key.
def gen_game( board_shape=(25,25), min_performance=-1, partitioning={}, starting_region=None, later_regions=None, buffer_region=None, named_regions={}, agents=['default'], agent_types={}, **etc): board_shape = _fix_random_values(board_shape) min_performance = _fix_random_values(min_performance) partitioning = _fix_random_values(partitioning) regions = make_partioned_regions(board_shape, **partitioning) board = np.zeros(board_shape, dtype=np.uint16) goals = np.zeros(board_shape, dtype=np.uint16) # Create locations for the player and the exit agent_locs, points_table, agent_names = add_agents_and_exit( board, regions, agents, agent_types) # and fill in the regions... for k in np.unique(regions)[2:]: mask = regions == k if starting_region is not None: region_name = _fix_random_values(starting_region) else: region_name = _fix_random_values(later_regions) if region_name not in named_regions: logger.error("No region parameters for name '%s'", region_name) continue logger.debug("Making region: %s", region_name) rboard, rgoals = populate_region(mask, named_regions[region_name]) board += rboard goals += rgoals starting_region = None buffer_region = _fix_random_values(buffer_region) if buffer_region in named_regions: mask = regions == 0 rboard, rgoals = populate_region(mask, named_regions[buffer_region]) board += rboard goals += rgoals # Give the buffer (0) region a rainbow / white color # This is mostly a visual hint for humans buffer_mask = (regions <= 0) & (goals & CellTypes.rainbow_color == 0) goals[buffer_mask] += CellTypes.rainbow_color game = SafeLifeGame() game.deserialize({ 'board': board, 'goals': goals, 'agent_locs': agent_locs, 'agent_names': agent_names, 'min_performance': min_performance, 'points_table': points_table, 'orientation': 1, }) return game
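The helper `_fix_random_values` that `gen_game` applies to nearly every parameter is not defined anywhere in this row, so the sketch below is only an assumption about how such a resolver could behave, inferred from the docstring's mention of the 'choices' and 'uniform' keys. The function name, the use of NumPy, and the example parameter spec are all illustrative, not the actual SafeLife implementation.

import numpy as np

def fix_random_values(value):
    # Illustrative sketch only -- the real _fix_random_values used by
    # gen_game is not shown in this excerpt and may behave differently.
    # Assumed conventions, taken from the docstring above:
    #   {'choices': [...]}     -> pick one entry at random
    #   {'uniform': [lo, hi]}  -> draw uniformly from [lo, hi]
    #   anything else          -> already concrete, return unchanged
    if isinstance(value, dict) and 'choices' in value:
        choices = list(value['choices'])
        return choices[np.random.randint(len(choices))]
    if isinstance(value, dict) and 'uniform' in value:
        lo, hi = value['uniform']
        return np.random.uniform(lo, hi)
    return value

# Hypothetical parameter spec in the style the docstring describes:
params = {
    'board_shape': {'choices': [(15, 15), (25, 25), (35, 35)]},
    'min_performance': {'uniform': [-1.0, 0.5]},
}
concrete = {key: fix_random_values(spec) for key, spec in params.items()}

Resolving every spec up front in this way would mean each call to gen_game samples one concrete configuration, so two boards generated from the same parameter set can still differ in shape, region contents, and difficulty.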
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def draw_random_setup(types_available, team, game_dim):\n\n nr_pieces = len(types_available)-1\n types_available = [type_ for type_ in types_available if not type_ == 0]\n if game_dim == 5:\n row_offset = 2\n elif game_dim == 7:\n row_offset = 3\n else:\n row_offset = 4\n setup_agent = np.empty((row_offset, game_dim), dtype=object)\n if team == 0:\n flag_positions = [(game_dim-1, j) for j in range(game_dim)]\n flag_choice = np.random.choice(range(len(flag_positions)), 1)[0]\n flag_pos = game_dim-1 - flag_positions[flag_choice][0], game_dim-1 - flag_positions[flag_choice][1]\n setup_agent[flag_pos] = pieces.Piece(0, 0, flag_positions[flag_choice])\n\n types_draw = np.random.choice(types_available, nr_pieces, replace=False)\n positions_agent_0 = [(i, j) for i in range(game_dim-row_offset, game_dim) for j in range(game_dim)]\n positions_agent_0.remove(flag_positions[flag_choice])\n\n for idx in range(nr_pieces):\n pos = positions_agent_0[idx]\n setup_agent[(game_dim-1 - pos[0], game_dim-1 - pos[1])] = pieces.Piece(types_draw[idx], 0, pos)\n elif team == 1:\n flag_positions = [(0, j) for j in range(game_dim)]\n flag_choice = np.random.choice(range(len(flag_positions)), 1)[0]\n setup_agent[flag_positions[flag_choice]] = pieces.Piece(0, 1, flag_positions[flag_choice])\n\n types_draw = np.random.choice(types_available, nr_pieces, replace=False)\n positions_agent_1 = [(i, j) for i in range(row_offset) for j in range(game_dim)]\n positions_agent_1.remove(flag_positions[flag_choice])\n\n for idx in range(nr_pieces):\n pos = positions_agent_1[idx]\n setup_agent[pos] = pieces.Piece(types_draw[idx], 1, pos)\n return setup_agent", "def generate(self, site_type='random', arg='random'):\n size = entities.world['size']\n if site_type == 'random':\n if randint(1,3) == 1:\n site_type = 'adventure'\n else:\n site_type = 'resource'\n elif site_type in ref.material_type_dct.keys():\n self.resource = site_type\n site_type = 'resource'\n terrain_list = None\n if arg == 'random':\n terrain_list = [x for x in ref.terrain_dct.keys() if type(x) == int]\n elif arg in ref.terrain_type_list:\n terrain_list = [\n x for x in ref.terrain_dct.keys() if ref.terrain_dct[x]['terrain type'] == arg\n ]\n x = randint(0, size-1)\n y = randint(0, size-1)\n terrain_type = entities.world['grid'][y][x]\n site_locations = [s.location for s in entities.sites['object list']]\n while terrain_type not in terrain_list or [x,y] in site_locations:\n x = randint(0, size-1)\n y = randint(0, size-1)\n terrain_type = entities.world['grid'][y][x]\n\n self.location = [x,y]\n self.structure = Structure().generate(\n ref.terrain_dct[terrain_type]['terrain type'], site_type\n )\n if self.resource == None:\n if 'resource type' in ref.structure_type_dct[\n self.structure.structure_type\n ].keys():\n resource_type = ref.structure_type_dct[\n self.structure.structure_type]['resource type'\n ]\n resource_possibilities = []\n for possible_material in [\n x for x in ref.material_class_dct[resource_type][\n 'types'] if 'rarity' in ref.material_type_dct[x].keys()\n ]:\n for x in xrange(ref.rarity_dct[\n ref.material_type_dct[possible_material]['rarity']\n ]):\n resource_possibilities.append(possible_material)\n self.resource = choice(resource_possibilities)\n #resources measured in grams\n if self.resource != None:\n self.harvestable = 
randint(100000, 1500000)\n try:\n entities.town['object'].resources[\n ref.material_type_dct[self.resource]['class']][\n self.resource]['harvestable'] += self.harvestable\n except KeyError:\n pass\n #NOTE: These numbers suitable for metal, may not be for other materials\n #NOTE: Mine production should be ~1kg pure metal per day per miner.\n #NOTE: IRL mine has ~43500kg before producing much less.\n \n self.set_site_id()\n return self", "def gen_world(num_rows, num_cols):\n world = collections.deque()\n\n # Generate top perimeter.\n world.append([eg.ROCK] * num_cols)\n\n # In between top and bottom perimeters, generate a clean world.\n # (all non-perimeter cells are clear)\n for i in xrange(num_rows - 2):\n world.append([eg.ROCK] + ([eg.NONE] * (num_cols - 2)) + [eg.ROCK])\n\n # Generate bottom perimeter.\n world.append([eg.ROCK] * num_cols)\n\n # Apply red anthill in world.\n _randomly_apply_anthill(world, eg.RED)\n\n # Apply black anthill in world.\n _randomly_apply_anthill(world, eg.BLACK)\n\n # Apply food blocks in world.\n _randomly_apply_foodblob(world)\n\n # Apply rocks in world.\n _randomly_apply_rocks(world)\n\n world.appendleft([str(num_rows)])\n world.appendleft([str(num_cols)])\n\n return world", "def generate_world(x_size, y_size):\n\n\tdef make_blank_world():\n\t\t\"\"\"\n\t\tCreates an x-by-y list of lists of zeroes.\n\t\t\"\"\"\n\t\tblank_array = [[Blank() for j in range(y_size + 1)] for i in range(x_size + 1)]\n\t\treturn blank_array\n\n\n\tdef check_surroundings(x_coord, y_coord, value):\n\t\t\"\"\"\n\t\tIf the variable world has already been defined, it checks all x and y coords within one square (aka, checks the 8 surrounding squares) for a given value. If that value is present in 1 or more squares, returns True; else, False.\n\t\t\"\"\"\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\texamining = world[x_coord - 1 + i][y_coord - 1 + j]\n\t\t\t\tif examining.name == value:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\treturn False\n\n\n\tworld = make_blank_world()\n\n\tworld[random.randint(2, x_size-2)][random.randint(2, y_size-2)] = Water()\n\n\tfor i in range(x_size):\n\t\tfor j in range(y_size):\n\t\t\tseed = random.random()\n\t\t\tif check_surroundings(i, j, 'water'):\n\t\t\t\tif seed >= 0.5:\n\t\t\t\t\tworld[i][j] = Water()\n\t\t\t\telif seed >= 0.4:\n\t\t\t\t\tworld[i][j] = Tree()\n\t\t\t\telse:\n\t\t\t\t\tworld[i][j] = Grass()\n\t\t\telif not check_surroundings(i, j, 'tree'):\n\t\t\t\tif seed >= 0.5:\n\t\t\t\t\tworld[i][j] = Tree()\n\t\t\t\telse:\n\t\t\t\t\tworld[i][j] = Grass()\n\t\t\telse:\n\t\t\t\tworld[i][j] = Grass()\n\treturn [row[:y_size+1] for row in world[:x_size+1]]", "def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)", "def fill_with_random_tiles(self):\n for elem in [x[1] for x in self.tile_grid.values()]:\n self.view.remove(elem)\n tile_grid = {}\n # Fill the data matrix with random tile types\n while True: # Loop until we have a valid table (no imploding lines)\n for x in range(COLS_COUNT):\n for y in range(ROWS_COUNT):\n tile_type, sprite = 
choice(self.available_tiles), None\n tile_grid[x, y] = tile_type, sprite\n if len(self.get_same_type_lines(tile_grid)) == 0:\n break\n tile_grid = {}\n\n # Build the sprites based on the assigned tile type\n for key, value in tile_grid.items():\n tile_type, sprite = value\n sprite = self.tile_sprite(tile_type, self.to_display(key))\n tile_grid[key] = tile_type, sprite\n self.view.add(sprite)\n\n self.tile_grid = tile_grid", "def generate_world(world_seed, biome_min, biome_max, w, h):\n\n while True:\n\n try:\n\n # Set the initial seed for the random module (random.seed())\n seed(world_seed)\n\n # Create a blank map (2D list filled with '0' strings\n world = [[0 for y in range(h)] for x in range(w)]\n # Generates the random values for the terrain construction\n terrain = [randrange(20) + 40 for _ in range(w)]\n\n #Empty biome map\n biomes = []\n\n #Generates biomes\n for __ in range(w//biome_min):\n\n #Biome at cursor\n biome_select = choice(list(biome_data))\n\n #Biomes size\n for _ in range(randint(biome_min, biome_max)):\n biomes.append(biome_select)\n\n #World size met\n if len(biomes) >= w:\n biomes = biomes[:w] #Truncate selection\n break\n\n\n # ----- Construct the Terrain\n # Counter that changes dynamically to check through all blocks in the terrain list\n cur_pos = 0\n # Runs through all the generated numbers in a while loop\n while cur_pos < w:\n\n # print(\".\", end=\"\")\n\n # Check to see if terrain gap is too large\n\n if abs(terrain[cur_pos] - terrain[cur_pos - 1]) > biome_data[str(biomes[cur_pos])][\"maxh\"]: # if terrain gap is larger than threshhold (too big)\n\n for n in range(randint(biome_data[str(str(biomes[cur_pos]))][\"minx\"], biome_data[str(str(biomes[cur_pos]))][\"maxx\"])):\n # Insert a new value into the terrain list between the values that are too far apart\n terrain.insert(cur_pos, (terrain[cur_pos] + terrain[cur_pos - 1]) // 2)\n\n else: # Difference between the two blocks is not too big\n\n # Check next block\n cur_pos += 1\n\n # ----- Transfer Terrain To Empty World\n # Run through every space in the empty world\n for x in range(len(world)): # runs through each level\n for y in range(len(world[x])): # runs through each individual space\n\n # Generates structures\n if y > terrain[x]:\n\n #Top layer\n if y - terrain[x] == 1:\n\n #Sets the layer with block specified in biome config\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"top\"]]\n\n if randint(0, 10) == 0 and x + 10 < w:\n world = generate_structure(x, y - 1, world, choice(biome_data[biomes[x]][\"structure\"]))\n\n #Middle layer\n elif y - terrain[x] < randint(3, 8):\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"middle\"]]\n\n #Base\n else:\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"lower\"]]\n\n #Generate ores\n # Coal\n if 10 + terrain[x] > y > 5 + terrain[x] and randint(0, 200) == 0:\n for cluster in range(randint(3, 10)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Coal Ore\"]\n\n # Iron\n if 30 + terrain[x] > y > 20 + terrain[x] and randint(0, 200) == 0:\n\n for cluster in range(randint(3, 6)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Iron Ore\"]\n\n # Gold\n if 80 > y > 65 and randint(0, 400) == 0:\n for cluster in range(randint(3, 6)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Gold Ore\"]\n\n # Diamonds\n if 80 > y > 70 and randint(0, 500) == 0:\n for cluster in range(randint(1, 5)):\n world[x + randint(-3, 3)][y + randint(-3, 3)] = block_lookup[\"Diamond Ore\"]\n\n # 
Bedrock\n if y > 92 or y > 87 and randint(0, 3) == 0:\n world[x][y] = block_lookup[\"Bed Rock\"]\n\n # Last edit, adding extras to the top of the world to prevent problems\n world = [[0] * 40 + x for x in world]\n\n # Return the world object for use\n return np.array(world)\n\n except:\n world_seed += '1'", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)", "def random_place(board, player):\n available = possibilities(board)\n place(board, player, random.choice(available))", "def generate(self, level):\n # TODO The dungeon's instances are spawned and loaded here.\n # fill map with \"blocked\" tiles\n level.maze = [[Tile(x, y, True) for y in range(level.height)] for x in range(level.width)]\n\n for r in range(level.max_rooms):\n # random width and height\n w = random.randint(level.min_room_size, level.max_room_size)\n h = random.randint(level.min_room_size, level.max_room_size)\n\n # random position without going out of the boundaries of the map\n x = random.randint(0, level.width - w - 1)\n y = random.randint(0, level.height - h - 1)\n\n # \"DungeonRoom\" class makes rectangles easier to work with\n new_room = Room(x, y, w, h)\n level.rooms.append(new_room)\n\n # run through the other rooms and see if they intersect with this one\n failed = False\n for other_room in level.rooms:\n if other_room is not new_room and new_room.intersect(other_room):\n failed = True\n break\n\n if not failed:\n # this means there are no intersections, so this room is valid\n\n # \"paint\" it to the map's tiles\n self._create_room(level, new_room)\n\n # center coordinates of new room, will be useful later\n new_x, new_y = new_room.center()\n\n if level.num_rooms > 0:\n # connect it to the previous room with a tunnel\n # center coordinates of previous room\n (prev_x, prev_y) = level.rooms[level.num_rooms - 1].center()\n\n # draw a coin (random number that is either 0 or 1)\n if random.randint(0, 1) == 1:\n # first move horizontally, then vertically\n self._create_h_tunnel(level, prev_x, new_x, prev_y)\n self._create_v_tunnel(level, prev_y, new_y, new_x)\n else:\n # first move vertically, then horizontally\n self._create_v_tunnel(level, prev_y, new_y, prev_x)\n self._create_h_tunnel(level, prev_x, new_x, new_y)\n\n # finally, append the new room to the list\n level.rooms.append(new_room)\n level.num_rooms += 1\n\n # connect them with a tunnel\n self._create_h_tunnel(level, 25, 55, 23)", "def generate_mine_map(width=30, height=16, num_mines=99):\n\n if num_mines > width * height:\n print(\"The number of mines exceeds the size of the board.\")\n return\n \n mine_map = [[False for i in range(width)] for j in range(height)]\n mines = 0\n while mines < num_mines:\n x = random.randint(0, width-1)\n y = random.randint(0, height-1)\n if not mine_map[y][x]:\n mine_map[y][x] = True\n mines += 1\n\n return mine_map", "def generate_grains(self, cells):\n\t\tfor 
cell_num in range(cells):\n\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\tsample_cell = sample_cell[0]\n\t\t\twhile sample_cell.state != 0:\n\t\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\t\tsample_cell = sample_cell[0]\n\t\t\tsample_cell.change_state(self.init_time ,cell_num)", "def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def random_grid(height, width):\n grid = create_grid(height, width)\n for r in range(1, height - 1):\n for c in range(1, width - 1):\n grid[r][c] = random.choice([0, 1])\n return grid", "def _generate_building(self, min_size, max_size, modulo_rest=2, name=None, one_connection=False):\n size_x = random.randint(min_size[0], max_size[0])\n size_y = random.randint(min_size[1], max_size[1])\n if modulo_rest < 2:\n while size_x % 2 != modulo_rest:\n size_x = random.randint(min_size[0], max_size[0])\n while size_y % 2 != modulo_rest:\n size_y = random.randint(min_size[1], max_size[1])\n return TownRegion._Building((size_x, size_y), name=name, one_connection=one_connection)", "def randomCells(width, height):\r\n\tA = createBoard(height, width)\r\n\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif row > 0 and row < height-1:\r\n\t\t\t\tif col > 0 and col < width-1:\r\n\t\t\t\t\tA[row][col] = random.choice([0,1]) \r\n\r\n\treturn A", "def randomSchedule(self,contents):\n\t\timport random as ran\n import copy\n\t\tcontents_copy = copy.deepcopy(contents)\n\t\tsol = Area('sb',ran.random())\n\t\twhile contents_copy:\n\t\t\tcont = ran.choice(contents_copy)\n\t\t\ti = 0\n\t\t\twhile True:\n\t\t\t\tran_waiting = ran.randint(0,2)\n\t\t\t\tran_start = ran.randint(0,19)\n\t\t\t\tif sol.checkAddContent(ran_waiting,ran_start,cont):\n\t\t\t\t\tsol.addContent(ran_waiting,ran_start,cont)\n\t\t\t\t\tcontents_copy.remove(cont)\n\t\t\t\t\tbreak\n\t\t\t\ti += 1\n\t\t\t\tif i>150:\n\t\t\t\t\t#print \"cut\"\n\t\t\t\t\tsol = Area('sb',ran.random())\n\t\t\t\t\tcontents_copy = contents[:]\n\t\t\t\t\tbreak\n\t\t#print \"generate new schedule\\n\",sol.printSchedule()\n\t\treturn sol", "def random_world():\n # Create empty world\n grid = np.zeros((WORLD_WIDTH, WORLD_WIDTH))\n # Add dirt and obstacles\n for r in 
range(WORLD_WIDTH):\n for c in range(WORLD_WIDTH):\n if random.random() < 0.5:\n grid[r, c] = DIRT\n elif random.random() < 0.1:\n grid[r, c] = OBSTACLE\n # Place agent\n while True:\n r = random.randrange(WORLD_WIDTH)\n c = random.randrange(WORLD_WIDTH)\n if grid[r, c] == EMPTY:\n return grid, r, c", "def trial(length, height):\n screen.refresh()\n global stimList\n global oddLength\n global oddHeight\n currentLength = int(maxLength / 4)\n currentHeight = int(maxHeight / 4)\n for i in range(stimAmt):\n if i == oddLocation:\n oddLength = currentLength\n oddHeight = currentHeight\n stimList.append(\n pg.draw.rect(\n screen.fg,\n PgTools.rand_color(),\n (currentLength, currentHeight, length, height,),\n )\n )\n PgTools.rand_pattern(\n screen.fg,\n (\n currentLength,\n currentHeight,\n ),\n (length, height),\n i=(randint(0, 2), randint(0, 1)),\n )\n if randShapes:\n PgTools.rand_shape(screen.fg, (currentLength, currentHeight),(length, height), oddSeed)\n else:\n stimList.append(\n pg.draw.rect(\n screen.fg,\n color,\n (currentLength, currentHeight, length, height,),\n )\n )\n PgTools.rand_pattern(\n screen.fg,\n (\n currentLength,\n currentHeight,\n ),\n (length, height),\n patColor,\n randNums,\n )\n if randShapes:\n PgTools.rand_shape(screen.fg, (currentLength, currentHeight),(length, height), regSeed)\n currentLength += maxLength / 4\n currentLength = int(currentLength)\n if (i + 1) % 3 == 0:\n currentLength = maxLength / 4\n currentLength = int(currentLength)\n currentHeight += maxHeight / 4\n currentHeight= int(currentHeight)", "def generate(self, n_enemies, n_blocks):\n self.create_map()\n self.create_floor()\n self.player = self.create_player_at(0, 0)\n self.create_dummy_obj_at(0, 1)\n self.create_dummy_obj_at(1, 0)\n self.create_soft_block_at(0, 2)\n self.create_soft_block_at(2, 0)\n self.create_hard_blocks() \n self.create_enemies(n_enemies)\n self.create_soft_blocks(n_blocks) \n self.clear_dummy_obj()", "def generateTestProblem(printTerrain = False):\r\n size = random.randint(5, 100)\r\n print(size)\r\n start = (random.randint(0, size - 1), random.randint(0, size - 1))\r\n print(start)\r\n goal = (random.randint(0, size - 1), random.randint(0, size - 1))\r\n print(goal)\r\n terrain = [[random.choice(['m', 'p', 's', 'w']) for i in range(0, size)] for j in range(0, size)]\r\n\r\n # print the terrain matrix if required\r\n if printTerrain:\r\n for i in range(0, size):\r\n print(terrain[i])\r\n\r\n return (start, goal, terrain)", "def initialize_puzzle_board(self, n=3, hType=1, random=True, diff=None):\r\n\t\tself.n = n\r\n\r\n\t\t# While loop to continuously create random boards until a solvable one is made.\r\n\t\tboardList = [x for x in range(n**2)]\r\n\t\twhile random:\r\n\t\t\tshuffle(boardList)\r\n\r\n\t\t\tif self.generated_solvable(boardList):\r\n\t\t\t\tprint \"Found something solvable:\", boardList\r\n\t\t\t\tbreak # From outer While-True\r\n\r\n\t\t# If statements to use non-random, burnt-in boards of various difficulties.\r\n\t\tif not random and n == 3:\r\n\t\t\tif diff == 0:\r\n\t\t\t\tboardList = [3,1,2,4,7,5,6,8,0]\r\n\t\t\telif diff == 1:\r\n\t\t\t\tboardList = [3,2,5,4,1,8,6,0,7]\r\n\t\t\telif diff == 2:\r\n\t\t\t\tboardList = [1,0,6,5,7,4,2,3,8]\r\n\r\n\t\telif not random and n == 4:\r\n\t\t\tif diff == 0:\r\n\t\t\t\tboardList = [4,1,2,3,5,0,6,7,8,9,10,11,12,13,14,15]\r\n\r\n\t\t# Location of 0 (the empty tile) in the flat list.\r\n\t\tlocZero = boardList.index(0)\r\n\r\n\t\t# Using floor division and modulo to attain the nested location of the 0\r\n\t\tself.x = 
locZero // self.n\r\n\t\tself.y = locZero % self.n\r\n\r\n\t\t# Looping over the flat list and appending it, creating the nested list that is the final board\r\n\t\tfor i in range(self.n):\r\n\t\t\ti1, i2 = self.n*i, self.n*(i+1)\r\n\t\t\tself.board.append(boardList[i1:i2])\r\n\r\n\t\t# Double checking that we determined 0's position correctly.\r\n\t\tassert( self.board[self.x][self.y] == 0 )\r\n\r\n\t\t# Generate the goal (class variable) for the board based on size\r\n\t\tself.generate_goal()\r\n\t\t# Generates the heuristic value for this first board.\r\n\t\tself.generate_heuristic()\r\n\t\t# Generates the hash value for __eq__ from the board.\r\n\t\tself.eqHash = hash(str(self))", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. 
Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def randomCells(w, h):\n A = createBoard(w, h)\n\n for row in range(1, h-1):\n for col in range(1, w-1):\n if random.choice([0, 1]) == 1:\n A[row][col] = 1\n else:\n A[row][col] = 0\n return A", "def build_world(self):\n total_reward_prob = 0.0 # Total probability of any type of reward\n probs_list = [] # List of reward probabilities\n \n # We want to create a list of reward probabilities for numpy's choice method\n for r in self.rewards:\n probs_list.append(r.prob)\n total_reward_prob += r.prob\n probs_list.append(1.0-total_reward_prob) # Add in the probability of no reward\n \n \n self.world_rewards = [] # Will hold all rewards for this game\n for y in range(self.world_height):\n row_rewards = []\n for x in range(self.world_width):\n # Given a probability for each reward (including None), select a reward for each cell\n cell = np.random.choice(self.rewards + [None], p=probs_list)\n row_rewards.append(cell)\n self.world_rewards.append(row_rewards)", "def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return", "def random_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = random.choice([0, 1])\n else:\n a[row][col] = 0\n \n return a", "def create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for 
col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 
'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # 
self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType = 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height", "def generate_board(self):\n random.seed(self.seed)\n for row in self.grid:\n for column in row:\n probability = random.random()\n if self.live_probability > probability:\n column.set_alive()", "def create_random_heads_or_tails(sheet, columns):\r\n\r\n use_column = 0\r\n row = 1\r\n for i in range(64):\r\n column = columns[use_column]\r\n row = row\r\n\r\n sheet[column + str(row)] = random.randint(0, 1) # assigns 0 or 1 randomly for each cell\r\n\r\n if use_column == 7: # stops and resets column count\r\n use_column = 0\r\n row += 1\r\n else: # increases column\r\n use_column += 1\r\n\r\n print('All coins have been randomly assigned.')", "def _draw_random_turn_params(self):\n return TurnParams(\n main_corridor_length=self._rng.uniform(10, 16),\n turn_corridor_length=self._rng.uniform(4, 12),\n turn_corridor_angle=self._rng.uniform(-3./8. 
* np.pi, 3./8.*np.pi),\n main_corridor_width=self._rng.uniform(0.5, 1.5),\n turn_corridor_width=self._rng.uniform(0.5, 1.5),\n flip_arnd_oy=bool(self._rng.rand() < 0.5),\n flip_arnd_ox=bool(self._rng.rand() < 0.5),\n rot_theta=self._rng.uniform(0, 2*np.pi)\n )", "def new_tile(self):\r\n count = 0\r\n tot_count = self.get_grid_width() * self.get_grid_height()\r\n\r\n while count < 2 and tot_count > 0:\r\n # my_list = 4 10% of the time and a 2 90%\r\n my_list = [4] * 10 + [2] * 90\r\n new_tile = random.choice(my_list)\r\n\r\n # Selects a random number from 0 to width * height -1\r\n\r\n spot = random.randint(0, self._grid_height * self._grid_width - 1)\r\n\r\n # sets location to random selection from spot\r\n loc = [spot / self._grid_width, spot % self._grid_width]\r\n # if loc is empty ( == 0 ) sets number, else repeats process.\r\n\r\n if self._board[loc[0]][loc[1]] == 0:\r\n # sets radom selected board tile to new_tile number\r\n self._board[loc[0]][loc[1]] = new_tile\r\n count += 1\r\n tot_count -= 1", "def init_board(rows, columns, method=\"random\"):\n if method == \"random\":\n board = np.random.random_integers(2, size=(rows, columns)) - 1\n return board", "def new_tile(self):\r\n # replace with your code\r\n # complete search ....\r\n non_zero_count = 0;\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if self._grid_tile[row][col] == 0:\r\n non_zero_count += 1\r\n random_choice = random.randrange(0, non_zero_count)\r\n count = 0\r\n # another search ....\r\n generated_new_tile = False\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if generated_new_tile == False and self._grid_tile[row][col] == 0:\r\n if count != random_choice:\r\n count += 1 \r\n else:\r\n if random.randrange(0,100) < 10:\r\n self.set_tile(row, col ,4)\r\n else:\r\n self.set_tile(row, col ,2)\r\n generated_new_tile = True", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def random_strategy(player, board):\n return random.choice(Othello.legal_moves(player, board))", "def genRandTeam(nPos, totPlayers):\n # 0 1 2 3 4 5 6\n # nPos = [nFirst, nSecond, nThird, nShort, nCatcher, nOf, nDh]\n chromosome = []\n sum = 0\n count = 0\n\n\n for i in nPos: # general loop\n if count == 6: # when loop enters the nDh players it instead chooses from ALL positions five times\n for j in range(5): # to represent the 2 util positions and the 3 benches\n rNum = random.randint(0, totPlayers - 1) # random number of ANY player\n chromosome.append(rNum) # picks a random pos\n break # no more work needs to be done\n if count == 5: # this will occur before the previous loop; nOF must be iterated 3 times for 3 outfield spots\n for j in range(2):\n rNum2 = random.randint(0, i - 1)\n chromosome.append(rNum2 + sum) # nOF must be iterated 3 times for 3 outfield spots; i is on oF\n rNum3 = random.randint(0, i - 1)\n chromosome.append(rNum3 + sum)\n sum += i\n count += 1\n # first = random.randint(0,nPos[0])\n # second = random.randint(0,nPos[1])\n # third = random.randint(0,nPos[2])\n # short = random.randint(0,nPos[3])\n # catcher = random.randint(0,nPos[4])\n # of = [random.randint(0,nPos[5]), random.randint(0,nPos[5]), random.randint(0,nPos[5])] #THREE outfielders\n # rNum = [random.randint(0,6) for i in range(5)] #random numbers representing one of the nPos 
rosters\n # util = [random.randint(0,nPos[rNum[0]]), random.randint(0,nPos[rNum[1]])] #picks 2 random players from ANY roster\n # ben = [random.randint(0,nPos[rNum[2]]), random.randint(0,nPos[rNum[3]]), random.randint(0,nPos[rNum[4]])] # picks 3 random players form any roster\n # print first,second,third,short,catcher,of,util,ben\n # temp = Team()\n return chromosome", "def test_initialization(number: int) -> None:\n for _ in range(number):\n if random.random() < 0.5:\n size = random.randint(3, 10)\n baby_position = [random.randint(0, size - 1), random.randint(0, size - 1)]\n num_berries = random.randint(1, size)\n else:\n size = [random.randint(3, 10), random.randint(3, 10)]\n baby_position = [\n random.randint(0, size[0] - 1),\n random.randint(0, size[1] - 1),\n ]\n num_berries = random.randint(1, size[0])\n print(f\"\\n\\n\\nSize of the board {size}\")\n print(f\"Baby position: {baby_position}\")\n print(f\"Number of berries to be placed randomly: {num_berries}\")\n game = Game(size, baby_position, 0, 0, 0, 0, num_berries)\n print(f\"Here is the board:\\n{game.get_board()}\")\n print(game.get_baby())\n for b in game.get_berries():\n print(b)", "def new_tile(self):\r\n # creating a list value to ensure the 90 and 10 percent ratio\r\n value=[2,2,2,2,2,2,2,2,2,2]\r\n position_of_4=random.randrange(0,10)\r\n value[position_of_4]=4\r\n # selecting a random position on the grid\r\n dummy_row=random.randrange(0,self._height)\r\n dummy_column=random.randrange(0,self._width)\r\n # check to ensure that same tiles are not selected\r\n if self._grid[dummy_row][dummy_column]!=0:\r\n while self._grid[dummy_row][dummy_column]!=0:\r\n dummy_row=random.randrange(0,self._height)\r\n dummy_column=random.randrange(0,self._width)\r\n # assigning a value to the selected tile\r\n self._grid[dummy_row][dummy_column]=random.choice(value)", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def randomLeggings():\n return random.choice(LEGGINGS)", "def stage_1_generator(option):\n # Stage Size + Player Starting Position\n STAGE_1 = ([10,10], [1,1])\n\n # Non-Ocean tiles\n STAGE_1_TILES = { \n \"1,2\":\"rock\",\n \"1,3\":\"mountain\",\n \"2,4\":\"rock\",\n \"2,7\":\"rock\",\n \"2,8\":\"rock\",\n \"3,3\":\"rock\",\n \"3,4\":\"rock\",\n \"3,8\":\"mountain\",\n \"3,9\":\"rock\",\n \"3,10\":\"rock\",\n \"4,4\":\"rock\",\n \"4,5\":\"mountain\",\n \"5,6\":\"rock\",\n \"6,1\":\"rock\",\n \"6,2\":\"rock\",\n \"6,7\":\"rock\",\n \"6,10\":\"rock\",\n \"7,1\":\"rock\",\n \"7,2\":\"rock\",\n \"7,6\":\"rock\",\n \"7,10\":\"rock\",\n \"8,5\":\"rock\",\n \"8,6\":\"rock\",\n \n \"8,10\":\"rock\",\n \"9,1\":\"sign\",\n \"9,3\":\"rock\",\n \"9,4\":\"rock\",\n \"9,9\":\"rock\",\n \"9,10\":\"rock\",\n \"10,1\":\"cave\",\n \"10,3\":\"rock\",\n 
\"10,4\":\"cave\",\n \"10,8\":\"rock\",\n \"10,9\":\"rock\",\n \"10,10\":\"rock\",\n \n \"1,10\":\"end\",\n }\n\n # Special Tiles that trigger an event\n STAGE_1_SPECIAL = { \n \"1,10\":\"end\",\n \"9,1\":\"sign_cave\",\n \"10,1\":\"cave_entrance_1\",\n \"10,4\":\"cave_entrance_2\",\n \"1,9\":\"dark_water\",\n \"2,9\":\"dark_water\",\n \"2,10\":\"dark_water\"\n }\n\n # Decide what data to return\n if option == \"stage\":\n return STAGE_1\n elif option == \"tiles\":\n return STAGE_1_TILES\n elif option == \"special\":\n return STAGE_1_SPECIAL\n else:\n print(\"Something Broke! map_generator_1\")", "def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux", "def uniform_random(self) -> None:\n\n size = self.circ_size\n random.seed(self.seed)\n\n gates = [self.h, self.x, self.y, self.z, self.s, self.t, self.cx]\n candidates = set(range(size))\n\n for i in range(size):\n for j in range(size):\n to_apply = random.choice(gates)\n\n num_qubits = 2 if to_apply == self.cx else 1\n targets = random.sample(candidates, num_qubits)\n to_apply(*targets)\n\n if self.meas: self.measure(self.qr, self.cr)", "def randomized_prims(width=16, height=16) -> Maze:\n maze = Maze(width=width, height=height, algorithm=None)\n visited = [[False for _ in range(maze.width)] for _ in range(maze.height)]\n\n # ensure only one entrance to the center squares\n centerx = maze.width // 2 - 1\n centery = maze.height // 2 - 1\n \n visited[centery][centerx] = True\n visited[centery][centerx+1] = True\n visited[centery+1][centerx+1] = False\n visited[centery+1][centerx] = True\n\n visited[0][0] = True\n boundary = [(0,0,Compass.EAST), (0,0,Compass.SOUTH)]\n\n while boundary:\n x, y, direction = boundary.pop(random.randint(0, len(boundary)-1))\n nx, ny = maze.neighbor(x, y, direction)\n if not visited[ny][nx]:\n maze.break_wall(x, y, direction)\n boundary.extend([(nx,ny,direction) for direction in maze.neighbors(nx, ny)])\n visited[ny][nx] = True\n \n return maze", "def new_tile(self):\n two_or_four = random.random();\n if two_or_four < 0.9:\n value = 2\n else:\n value = 4\n empty = False\n all_cells = 0\n while empty == False:\n all_cells += 1 \n row = random.choice(range(self._height))\n col = random.choice(range(self._width))\n if self.get_tile(row, col) == 0:\n empty = True\n self.set_tile(row, col, value)\n elif all_cells >= self._height * self._width:\n empty = True", "def populate_region(mask, layer_params):\n\n from .speedups import (\n NEW_CELL_MASK, CAN_OSCILLATE_MASK, INCLUDE_VIOLATIONS_MASK)\n\n border = ndimage.maximum_filter(mask, size=3, mode='wrap') ^ mask\n interior = ndimage.minimum_filter(mask, size=3, mode='wrap')\n gen_mask = mask * (\n NEW_CELL_MASK |\n CAN_OSCILLATE_MASK |\n INCLUDE_VIOLATIONS_MASK\n ) + border * (\n INCLUDE_VIOLATIONS_MASK\n )\n board = np.zeros(mask.shape, dtype=np.uint16)\n foreground = np.zeros(mask.shape, dtype=bool)\n background = np.zeros(mask.shape, dtype=bool)\n background_color = np.zeros(mask.shape, dtype=bool)\n seeds = None\n max_period = 1\n\n for layer in layer_params:\n if not isinstance(layer, dict):\n raise ValueError(\n \"'layer_params' should be a list of parameter dictionaries.\")\n layer = _fix_random_values(layer)\n old_board = board.copy()\n gen_mask0 = gen_mask.copy()\n interior = ndimage.minimum_filter(\n gen_mask & NEW_CELL_MASK > 0, size=3, mode='wrap')\n color = COLORS.get(layer.get('color'), 0)\n\n fence_frac = layer.get('fences', 0.0)\n if 
fence_frac > 0:\n fences = build_fence(gen_mask & speedups.NEW_CELL_MASK)\n fences *= coinflip(fence_frac, fences.shape)\n gen_mask &= ~(fences * (NEW_CELL_MASK | CAN_OSCILLATE_MASK))\n board += fences.astype(np.uint16) * CellTypes.wall\n\n spawners = layer.get('spawners', 0)\n if spawners > 0:\n _mask = (gen_mask0 & NEW_CELL_MASK > 0) & interior\n new_cells = _mask & coinflip(spawners, board.shape)\n if not new_cells.any() and _mask.any():\n i, j = np.nonzero(_mask)\n k = get_rng().choice(len(i)) # ensure at least one spawner\n new_cells[i[k], j[k]] = True\n gen_mask[new_cells] ^= NEW_CELL_MASK\n board[new_cells] = CellTypes.spawner + color\n\n tree_lattice = layer.get('tree_lattice')\n # Create a lattice of trees that are spread throughout the region\n # such that every empty cell touches one (and only one) tree\n # (modulo edge effects).\n # Such a lattice tends to make the resulting board very chaotic.\n # Note that this will disrupt any pre-existing patterns.\n if tree_lattice is not None:\n if not isinstance(tree_lattice, dict):\n tree_lattice = {}\n h, w = board.shape\n stagger = tree_lattice.get('stagger', True)\n spacing = float(tree_lattice.get('spacing', 5))\n if not stagger:\n new_cells = _make_lattice(h, w, spacing, spacing, 0)\n elif spacing <= 3:\n new_cells = _make_lattice(h, w, 3, 3, 1)\n elif spacing == 4:\n new_cells = _make_lattice(h, w, 10, 1, 3)\n elif spacing == 5:\n new_cells = _make_lattice(h, w, 13, 1, 5)\n else:\n # The following gets pretty sparse.\n new_cells = _make_lattice(h, w, 6, 3, 3)\n\n new_cells &= gen_mask & NEW_CELL_MASK > 0\n board[new_cells] = CellTypes.tree + color\n\n period = 1\n if 'pattern' in layer:\n pattern_args = layer['pattern'].copy()\n period = pattern_args.get('period', 1)\n if period == 1:\n gen_mask2 = gen_mask & ~CAN_OSCILLATE_MASK\n pattern_args.update(period=max_period, osc_bonus=0)\n elif period == 0:\n gen_mask2 = gen_mask & ~INCLUDE_VIOLATIONS_MASK\n pattern_args.update(period=max_period, osc_bonus=0)\n elif period < max_period:\n raise ValueError(\n \"Periods for sequential layers in a region must be either 0, 1,\"\n \" or at least as large as the largest period in prior layers.\")\n else:\n gen_mask2 = gen_mask\n max_period = period\n\n board = _gen_pattern(board, gen_mask2, seeds, **pattern_args)\n\n # We need to update the mask for subsequent layers so that they\n # do not destroy the pattern in this layer.\n # First get a list of board states throughout the oscillation cycle.\n boards = [board]\n for _ in range(1, max_period):\n boards.append(speedups.advance_board(boards[-1]))\n non_empty = np.array(boards) != 0\n still_cells = non_empty.all(axis=0)\n osc_cells = still_cells ^ non_empty.any(axis=0)\n # Both still life cells and oscillating cells should disallow\n # any later changes. 
We also want to disallow changes to the cells\n # that are neighboring the oscillating cells, because any changes\n # there would propogate to the oscillating cells at later time\n # steps.\n # Note that it doesn't really matter whether the oscillating mask\n # is set for the currently oscillating cells, because we're not\n # checking for violations in them anyways, and we don't allow any\n # changes that would affect them.\n osc_neighbors = ndimage.maximum_filter(osc_cells, size=3, mode='wrap')\n gen_mask[osc_cells] &= ~(NEW_CELL_MASK | INCLUDE_VIOLATIONS_MASK)\n gen_mask[still_cells | osc_neighbors] &= ~(NEW_CELL_MASK | CAN_OSCILLATE_MASK)\n\n new_mask = board != old_board\n life_mask = ((board & CellTypes.alive) > 0) & new_mask\n board += color * new_mask * life_mask\n # The seeds are starting points for the next layer of patterns.\n # This just makes the patterns more likely to end up close together.\n seeds = ((board & CellTypes.alive) > 0) & mask\n\n new_mask = board != old_board\n\n movable_walls = layer.get('movable_walls', 0)\n if movable_walls > 0:\n new_cells = coinflip(movable_walls, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.wall\n board += new_cells * CellTypes.movable\n\n movable_trees = layer.get('movable_trees', 0)\n if movable_trees > 0:\n new_cells = coinflip(movable_trees, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.tree\n board += new_cells * CellTypes.movable\n\n hardened_life = layer.get('hardened_life', 0)\n if hardened_life > 0:\n new_cells = coinflip(hardened_life, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.life\n board -= new_cells * CellTypes.destructible\n\n buffer_size = layer.get('buffer_zone', 0) * 2 + 1\n life_cells = board & CellTypes.alive > 0\n buf = ndimage.maximum_filter(life_cells, size=buffer_size, mode='wrap')\n gen_mask[buf] &= ~NEW_CELL_MASK\n\n target = layer.get('target', 'board')\n if target == 'board':\n foreground[new_mask] = True\n if period > 0:\n background[new_mask] = True\n elif target == 'goals':\n background[new_mask] = True\n background_color[new_mask] = True\n # Make sure to add walls and such to the foreground\n foreground[new_mask & (board & CellTypes.alive == 0)] = True\n elif target == 'both':\n foreground[new_mask] = True\n if period > 0:\n background[new_mask] = True\n background_color[new_mask] = True\n else:\n raise ValueError(\"Unexpected value for 'target': %s\" % (target,))\n\n fountains = layer.get('fountains', 0)\n if fountains > 0:\n new_cells = coinflip(fountains, board.shape)\n new_cells *= gen_mask & NEW_CELL_MASK > 0\n neighbors = ndimage.maximum_filter(new_cells, size=3, mode='wrap')\n neighbors *= gen_mask & NEW_CELL_MASK > 0\n gen_mask[neighbors] = INCLUDE_VIOLATIONS_MASK\n if buffer_size > 1:\n buf = ndimage.maximum_filter(neighbors, size=buffer_size, mode='wrap')\n gen_mask[buf] &= ~NEW_CELL_MASK\n board[neighbors] = CellTypes.wall + color\n board[new_cells] = CellTypes.fountain + color\n foreground[new_cells] = True\n background[neighbors] = True\n background_color[neighbors] = True\n\n goals = board.copy()\n board *= foreground\n goals *= background\n goals &= ~CellTypes.spawning\n goals &= ~(CellTypes.rainbow_color * ~background_color)\n\n return board, goals", "def generate_advanced_tech_tiles(seed=0):\n\n if seed is not 0:\n random.seed(seed)\n all_advanced_tiles = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ,15]\n randomized_tiles = list()\n\n for _ in range(6):\n 
chosen_tile_index = random.randint(0, len(all_advanced_tiles) - 1)\n randomized_tiles.append(all_advanced_tiles[chosen_tile_index])\n all_advanced_tiles.pop(chosen_tile_index)\n\n return tuple(randomized_tiles)", "def random_board(n):\r\n \r\n return(np.random.randint(0,n-1, size = n))", "def generate_grid(height, width):\n return [[random.randint(0, 9) for _ in range(width)] for _ in range(height)]", "def __init__(self, rows, columns, live_probability=0.3, seed=0):\n self.live_probability = live_probability\n self.seed = seed\n self.rows = rows\n self.columns = columns\n self.grid = [\n [Cell() for column_cells in range(self.columns)]\n for row_cells in range(self.rows)\n ]\n\n self.generate_board()", "def __init__(self):\r\n self.rows = [[0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9]\r\n self.block1 = []\r\n self.block5 = []\r\n self.block9 = []\r\n self.puzzle = []\r\n self.score = 0\r\n self.difficulty = 1 # By default Easy difficulty\r\n\r\n \"\"\" Creating blocks using random number generator\"\"\"\r\n while len(self.block1) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block1:\r\n self.block1.append(r)\r\n\r\n while len(self.block5) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block5:\r\n self.block5.append(r)\r\n\r\n while len(self.block9) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block9:\r\n self.block9.append(r)\r\n x = 0\r\n for i in range(3):\r\n for j in range(3):\r\n self.rows[i][j] = self.block1[x]\r\n x = x+1\r\n x = 0\r\n for i in range(3, 6):\r\n for j in range(3, 6):\r\n self.rows[i][j] = self.block5[x]\r\n x = x+1\r\n x = 0\r\n for i in range(6,9):\r\n for j in range(6,9):\r\n self.rows[i][j] = self.block9[x]\r\n x = x+1\r\n \"\"\"Creating a valid solution\"\"\"\r\n self.createsolution(self.rows)", "def generate(width=20, height=20):\n m = Maze(width, height)\n m.randomize()\n return m", "def generate_board(self):\n try:\n all_words = [word.strip().lower() for word in open(self.in_file)]\n except UnicodeDecodeError:\n raise Exception(\"Make sure that in_file is a text file\")\n except FileNotFoundError:\n raise Exception(\"Make sure that in_file exists\")\n\n permutation = np.random.permutation(len(all_words))\n words = np.array(all_words)[permutation][:25]\n\n # 9 Blue, 8 Red, 7 Neutral, 1 Assassin\n board = []\n for i, word in enumerate(words):\n if i < 9:\n type = \"blue\"\n colour = \"#0080FF\"\n elif i < 17:\n type = \"red\"\n colour = \"#FF0000\"\n elif i < 24:\n type = \"neutral\"\n colour = \"#D0D0D0\"\n else:\n type = \"assassin\"\n colour = \"#202020\"\n word_details = {\"name\": word, \"type\": type, \"colour\": colour, \"active\": False}\n board.append(word_details)\n\n np.random.shuffle(board)\n\n # Assign ids (+1 because of the header)\n for i in range(25):\n board[i][\"id\"] = i+1\n\n return board", "def __init__(self, screen, game_settings):\n\t\tself.screen = screen\n\t\tself.color = game_settings.growth_block_color\n\t\t\n\t\t# Sets rect at origin and size 25 x 25\n\t\tself.rect = pygame.Rect(0, 0, 24, 24)\n\t\t\n\t\t# Uses randint to use as random rect position\n\t\tx = randint(0, 35)\n\t\ty = randint(1, 26)\n\t\tself.rect.x = (25 * x) + 1\n\t\tself.rect.bottom = 25 * y", "def create_random_grid(N):\n return np.random.choice(values, N*N, p=[0.2, 0.8]).reshape(N, N)", "def new_tile(self):\n\n # creating a random float variable that will roll a random value\n # if randomvalue > .90\n #\n\n tile_added = False\n while not tile_added:\n row = random.randint(0,self.grid_height - 1)\n col = 
random.randint(0,self.grid_width - 1)\n if self.board[row][col] == 0:\n tile_added = True\n random_tile = random.random()\n if random_tile < .90:\n self.board[row][col] = 2\n else:\n self.board[row][col] = 4", "def random_placement(area):\n\n area.create_houses(True)\n\n for house in area.houses:\n place_house(area, house)", "def random_reassignment(graph, possibilities):\n\n random_assignment(graph, possibilities)\n\n violating_nodes = graph.get_violations()\n\n while len(violating_nodes):\n random_reconfigure_nodes(graph, violating_nodes, possibilities)\n\n violating_nodes = graph.get_violations()", "def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)", "def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1", "def __init__(self, x, y):\n self.height = x\n self.width = y\n self.grid = self.initialize(self.height, self.width)\n self.randx = random.randint(0, self.height-1)\n self.randy = random.randint(0, self.width-1)\n #self.make()\n #self.show()", "def random(self, width, height, seed = None):\n self.grid = [ [''] * width for i in range(height) ]\n random.seed(seed)\n start = ( random.randint(0, len(self.grid) - 1),\n random.randint(0, len(self.grid[0]) - 1)\n )\n visited = set([start])\n self._createPath(start, visited)\n start = ( random.randint(0, len(self.grid) - 1), 0 )\n finish = ( random.randint(0, len(self.grid) - 1),\n len(self.grid[0]) - 1 )\n self.grid[start[0]][start[1]] += '^'\n self.grid[finish[0]][finish[1]] += '$'\n return self.grid, start, finish", "def get_next_alea_tiles(plateau, mode):\n from random import randint\n # si le saisie et condition de plateau sont valide\n if mode.upper() == \"INIT\" and get_nb_empty_room(plateau) >= 2 or mode.upper() == \"ENCOURS\" and get_nb_empty_room(plateau) >= 1:\n if mode.upper() == \"INIT\":\n tableau = {'mode' : \"init\", 'check' : not is_game_over(plateau), '0' : {'val' : 1, 'lig' : randint(0,3), 'col' : randint(0,3)}, '1' : {'val' : 2, 'lig' : randint(0,3), 'col' : randint(0,3)}}\n # quand les deux cases choisi ne sont pas vide et la position des 2 valeur donner sont egaux\n while not ( is_room_empty(plateau,tableau[\"0\"][\"lig\"], tableau[\"0\"][\"col\"]) and is_room_empty(plateau, tableau[\"1\"][\"lig\"], tableau[\"1\"][\"col\"]) and not(tableau[\"0\"][\"lig\"] == tableau[\"1\"][\"lig\"] and tableau[\"0\"][\"col\"] == tableau[\"1\"][\"col\"])):\n tableau[\"0\"] = {'val' : 1, 'lig' : randint(0,3), 'col' : randint(0,3)}\n tableau[\"1\"] = {'val' : 2, 'lig' : randint(0,3), 'col' : randint(0,3)}\n else:\n tableau = {'mode' : 'encours', 'check' : 
not is_game_over(plateau), '0' : {'val' : randint(1,3), 'lig' : randint(0,3), 'col' : randint(0,3)}}\n # auand la cases choisi n est pas vide\n while not (is_room_empty(plateau, tableau[\"0\"][\"lig\"], tableau[\"0\"][\"col\"])):\n tableau[\"0\"] = {'val' : randint(1,3), 'lig' : randint(0,3), 'col' : randint(0,3)}\n return tableau\n else:\n return 'Erreur !'", "def generate_new_board(self, difficulty=1):\n self._reset_board()\n self._solve_empty_board_with_random_values()\n self._remove_numbers_to_get_puzzle(difficulty)", "def create_random(self):\n for key in self.nn_param_choices:\n self.network[key] = random.choice(self.nn_param_choices[key])", "def startGeneration(variant, resolution, loops):\n # Check for valid resolution\n if resolution % 2 != 0:\n print (\"Resolution should be an even integer.\")\n return\n\n # Set high score:\n if variant == 20:\n high_score = 11365950\n if variant == 40:\n high_score = 17858670\n if variant == 60:\n high_score = 24239310\n\n # House distirbution:\n familyHome_count = 0.6 * variant\n bungalow_count = 0.25 * variant\n maison_count = 0.15 * variant\n\n for loops in range(loops):\n\n # Initialize Classlist\n placed_houses = []\n placed_water = []\n\n # Initialize values\n gr = generic.genMap(180 * resolution, 160 * resolution)\n\n # Set length and width based on resultion.\n fam_length = int(resolution * 8)\n fam_width = int(resolution * 8)\n fam_freespace = int(resolution * 2)\n\n bung_length = int(resolution * 7.5)\n bung_width = int(resolution * 10)\n bung_freespace = int(resolution * 3)\n\n mais_length = int(resolution * 10.5)\n mais_width = int(resolution * 11)\n mais_freespace = int(resolution * 6)\n\n # Water\n # Generate water parts\n water_parts = genWater(gr, resolution)\n\n # Place water parts in grid:\n for part in range(len(water_parts)):\n W = 0\n\n # Loop until correctly placed.\n while W != 1:\n\n # Define class instance\n Water = class_house.House(water_parts[part][1], water_parts[part][0],\n 1, 0, 0, 4, \"W\", resolution)\n\n ngrid = genHome(gr, Water)\n\n # Check for success:\n if ngrid == False:\n print (\"No succesfull placement Water\")\n else:\n print (\"Water {0} placed!\".format(W))\n gr = list(ngrid)\n\n # Add water to list\n placed_houses.append(Water)\n\n W = 1\n\n # Maisons\n M = 0\n while M != maison_count:\n\n # Define class instance\n Maison = class_house.House(mais_length, mais_width,\n mais_freespace, 610000, 6, 1, \"M\", resolution)\n\n ngrid = genHome(gr, Maison)\n\n # Check if house succsfully placed:\n if ngrid == False:\n print (\"No succesfull placement Maison\")\n else:\n print (\"Maison {0} placed!\".format(M))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Maison)\n\n M += 1\n\n # Then bungalows\n B = 0\n while B != bungalow_count:\n\n # Define class instance\n Bungalow = class_house.House(bung_length, bung_width,\n bung_freespace, 399000, 4, 2, \"B\", resolution)\n\n ngrid = genHome(gr, Bungalow)\n\n # Check for succes:\n if ngrid == False:\n print (\"No succesfull placement Bungalow\")\n else:\n print (\"Bungalow {0} placed!\".format(B))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Bungalow)\n\n B += 1\n\n # Then Family homes\n F = 0\n while F != familyHome_count:\n\n # Define class instance\n Familyhome = class_house.House(fam_length, fam_width,\n fam_freespace, 285000, 3, 3, \"F\", resolution)\n\n ngrid = genHome(gr, Familyhome)\n\n # Check for succes:\n if ngrid == False:\n print (\"No succesfull placement Family Home\")\n else:\n print (\"Family home {0} 
placed!\".format(F))\n gr = list(ngrid)\n\n # Add maison to list\n placed_houses.append(Familyhome)\n\n F += 1\n\n # Calculate score using Placed houses\n sc = generic.calculateScore(gr, placed_houses)\n name = (\"Score: \" + str(sc))\n\n # Only save to file when new record.\n fname = \"Type{0} - {1}\".format(variant, sc)\n\n\n if sc > high_score:\n #read_write.write(fname, placed_houses)\n high_score = sc\n print (\"New high score ({0}) in loop: {1}\".format(sc, loops))\n print (\"Writing to file..\")\n\n return gr, placed_houses, sc", "def createMap(self):\n map = {}\n for rows in xrange(0,(size[1]/50)):\n for columns in xrange(0,(size[0]/50)):\n if rows == (size[1]/50)-1 or rows == 0 or columns== (size[0]/50)-1 or columns==0:\n map.update({(rows,columns):\"block\"})\n elif(rows%3 == 0):\n map.update({(rows,columns):random.choice(map_options)})\n else:\n map.update({(rows,columns):random.choice(map_options[:1])})\n\n self.map = map", "def create_some_random_pos(actor_cls, n, actor_type, actor_list, game,\r\n probability_each=100):\r\n ITERATIONS_MAX = 12\r\n cell_size = lib_jp.Size(w=actor_cls.size.w, h=actor_cls.size.h)\r\n cell_size_with_border = lib_jp.Size(w=cell_size.w + Actor.CELL_SCREEN_SECURITY_SIZE,\r\n h=cell_size.h + Actor.CELL_SCREEN_SECURITY_SIZE)\r\n cell_total_security_border = lib_jp.Size(w=actor_cls.cell_added_size.w\r\n + Actor.CELL_SCREEN_SECURITY_SIZE,\r\n h=actor_cls.cell_added_size.h\r\n + Actor.CELL_SCREEN_SECURITY_SIZE)\r\n if len(actor_list) >= actor_cls.max_qty_on_board:\r\n return\r\n elif n + len(actor_list) >= actor_cls.max_qty_on_board:\r\n n = actor_cls.max_qty_on_board - len(actor_list)\r\n iterations = 0\r\n for _ in range(n):\r\n if probability_each < 100 and randint(1, 100) > probability_each:\r\n continue\r\n actor_added = False\r\n iterations = 0\r\n actor_obj = None\r\n while not actor_added and (iterations <= ITERATIONS_MAX):\r\n iterations += 1\r\n x = randint(cell_total_security_border.w,\r\n Settings.screen_width - cell_size_with_border.w)\r\n y = randint(Settings.screen_near_top + cell_total_security_border.h,\r\n Settings.screen_height - cell_size_with_border.h)\r\n # Check if there is some sprite in this position\r\n position_not_taken = True\r\n rect1 = pg.Rect(x, y, cell_size.w, cell_size.h)\r\n if actor_cls.actor_type != ActorType.BAT:\r\n # Apples and mines cannot collide with any kind of sprite\r\n for sprite in game.active_sprites:\r\n if rect1.colliderect(sprite.rect):\r\n position_not_taken = False\r\n break\r\n else:\r\n # Bats cannot collide with snakes and other bats\r\n for sprite in game.snakes:\r\n if rect1.colliderect(sprite.rect):\r\n position_not_taken = False\r\n break\r\n if position_not_taken:\r\n for sprite in game.bats:\r\n if rect1.colliderect(sprite.rect):\r\n position_not_taken = False\r\n break\r\n if position_not_taken:\r\n actor_obj = actor_cls(x, y, actor_type, game=game)\r\n if actor_obj.actor_type == ActorType.BAT:\r\n actor_obj.change_x = randint(3, 5)\r\n actor_obj.change_y = randint(3, 5)\r\n actor_obj.initialize_boundaries()\r\n actor_added = True", "def __generate_rectangle_obstacles(self, world):\n obs_min_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"min_dim\"]\n obs_max_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_dim\"]\n obs_max_combined_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_combined_dim\"]\n obs_min_count = self.cfg[\"obstacle\"][\"rectangle\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"rectangle\"][\"max_count\"]\n obs_min_dist = 
self.cfg[\"obstacle\"][\"rectangle\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dim_range = obs_max_dim - obs_min_dim\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n # generate dimensions\n width = obs_min_dim + (random() * obs_dim_range )\n height = obs_min_dim + (random() * obs_dim_range )\n while width + height > obs_max_combined_dim:\n height = obs_min_dim + (random() * obs_dim_range )\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = RectangleObstacle(width, height, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles", "def sea_execution(board, position, role):\n quitt = False\n if position == 'comp':\n #print(1)\n #temporary for dumb AI\n #create and print a list of coastal, friendly regions where norse is not the ONLY one\n \n possible_region_list = []\n \n #loops through list of friendly, coastal, not just Norse regions to append to a possible_region_list\n for region in board.get_controlled_regions(role):\n #print(2)\n coastal = False\n just_norse = False\n if region.coast:\n coastal = True\n if len(region.blocks_present) == 1 and region.blocks_present[0].name.upper() == 'NORSE':\n just_norse = True\n \n if coastal and not just_norse:\n possible_region_list.append(region)\n \n \n #loops through list of friendly, coastal regions to append to a possible_final_region_list\n possible_final_region_list = []\n for region in board.get_controlled_regions(role): \n #print(3) \n if region.coast:\n possible_final_region_list.append(region)\n \n \n \n if len(possible_final_region_list) >= 2:\n #if you want to add in last-min strategy, do it here\n #random region from possible list\n england = board.regions[22]\n if england in possible_region_list:\n original_region = england\n else:\n original_region = possible_region_list[random.randint(0, len(possible_region_list) - 1)]\n #remove the original region from the possible end regions\n possible_final_region_list.remove(original_region)\n \n #possible_block_list\n #list of possible blocks to move (present in region) and not norse\n possible_block_list = []\n for block in original_region.blocks_present:\n if block.name != 'NORSE':\n possible_block_list.append(block)\n \n move_block_list = []\n blocks_moved = 0\n #print(4)\n\n while blocks_moved < 2:\n #print(5)\n block = possible_block_list[random.randint(0, len(possible_block_list)-1)]\n #if it's not already on the list,append to move_block_list\n if block not in move_block_list:\n move_block_list.append(block)\n blocks_moved+=1\n elif block in move_block_list and len(possible_block_list) == 1:\n blocks_moved+=1\n else:\n print('neither condition was met so this is an infinite loop')\n \n \n #print(6) \n new_region = possible_final_region_list[random.randint(0, len(possible_final_region_list) - 1)]\n \n for block in move_block_list:\n \n board.add_to_location(block, new_region)\n print(block.name + ' moved 
from ' + original_region.name + ' to ' + new_region.name)\n \n else:\n print('There are not enough friendly regions with which to play this card.')\n \n \n #add in if it's not possible\n elif position == 'opp':\n \n \n possible_region_list = []\n \n #loops through list of friendly, coastal, not just Norse regions to append to a possible_region_list\n for region in board.get_controlled_regions(role):\n coastal = False\n just_norse = False\n if region.coast:\n coastal = True\n if len(region.blocks_present) == 1 and region.blocks_present[0].name.upper() == 'NORSE':\n just_norse = True\n \n if coastal and not just_norse:\n possible_region_list.append(region)\n \n \n #loops through list of friendly, coastal regions to append to a possible_final_region_list\n possible_final_region_list = []\n for region in board.get_controlled_regions(role): \n if region.coast:\n possible_final_region_list.append(region)\n \n \n \n if len(possible_final_region_list) >= 2:\n \n print('Possible origin regions:')\n for region in possible_region_list:\n print(region.name)\n \n #user input region, check if in possible list\n valid_region = False\n while not valid_region:\n \n original_region_name = input('What region would you like to move block(s) from? Enter a name or \\'none\\'.\\n>').upper()\n \n if original_region_name != 'NONE':\n \n original_region = search.region_name_to_object(board, original_region_name)\n \n if original_region and original_region in possible_region_list:\n valid_region = True\n else:\n print('Invalid region.')\n else:\n quitt = True\n \n if not quitt:\n #remove the original region from the possible end regions\n possible_final_region_list.remove(original_region)\n \n #possible_block_list\n #list of possible blocks to move (present in region) and not norse\n possible_block_list = []\n for block in original_region.blocks_present:\n if block.name != 'NORSE':\n possible_block_list.append(block)\n \n print('Possible blocks:')\n for block in possible_block_list:\n print(block.name)\n \n \n move_block_list = []\n blocks_moved = 0\n quittt = False\n block_name = ''\n while blocks_moved < 2 and not quittt:\n if block_name != 'NONE':\n valid_block = False\n while not valid_block:\n \n \n block_name = input('Which block would you like to move? Enter a name or \\'none\\'.\\n>').upper()\n \n if block_name != 'NONE':\n \n block_to_move = search.block_name_to_object(possible_block_list, block_name)\n \n if block_to_move and block_to_move not in move_block_list:\n valid_block = True\n move_block_list.append(block_to_move)\n blocks_moved+=1\n \n elif block in move_block_list and len(possible_block_list) == 1:\n blocks_moved=1\n \n else:\n print('Invalid block.')\n continue\n else:\n valid_block = True\n if len(move_block_list) == 1:\n quittt = True\n quitt = False\n if len(move_block_list) > 0: \n print('Possible final regions:')\n for region in possible_final_region_list:\n print(region.name)\n \n #user input region, check if in possible list\n valid_region = False\n while not valid_region:\n \n new_region_name = input('What region would you like to move block(s) to? 
Enter a name or \\'none\\'.\\n>').upper()\n \n if new_region_name != 'NONE':\n \n new_region = search.region_name_to_object(board, new_region_name)\n \n if new_region and new_region in possible_final_region_list:\n valid_region = True\n else:\n print('Invalid region.')\n continue\n else:\n valid_region = True\n quitt = True\n \n if not quitt:\n \n for block in move_block_list:\n \n board.add_to_location(block, new_region)\n print(block.name + ' moved from ' + original_region.name + ' to ' + new_region.name)\n \n else:\n print('There are not enough friendly coastal regions with which to play this card.')", "def populate_tiles(self):\n\n # grid format :\n # grid(x,y,z)[0]: A valid WorldTile type (i.e. WorldTile.door)\n # grid(x,y,z)[1]: A list of ASCII color or format codes for ColorIze\n # grid(x,y,z)[2]: The tile object\n\n self.t_count = 0 # Tile count, increment for each tile added\n self.build_start = time.clock()\n self.logger.info(\"[*] Starting world building script\")\n\n script_list = [\n self.build_boss_room,\n self.build_rooms,\n self.build_halls,\n self.build_doors,\n self.build_chests,\n self.build_traps,\n self.build_mobs,\n self.build_npcs\n ]\n for func in script_list:\n self.logger.debug(\"\\tRunning {}\".format(func.__name__))\n if not func():\n e_text = \"Build script failed : {}\".format(func.__name__)\n raise AssertionError(e_text)\n\n self.logger.info(\"[*] World building script completed\")\n self.logger.debug(\"\\tTiles Placed : {}\".format(self.t_count))\n build_time = time.clock()-self.build_start\n self.logger.debug(\"\\tTook {}s\".format(build_time))\n self.logger.debug(\"\\tTiles/s : {}\".format(t_count/build_time))", "def new_tile(self):\n random.shuffle(self.tiles) # shuffle the list of tiles tuples\n count = 0\n while self.get_tile(self.tiles[0][0], self.tiles[0][1]) != 0 and count < self.grid_height*self.grid_width: \n self.tiles.append(self.tiles.pop(0)) \n \n # next, select value as 2 with a 90% probability (percentage) and 4 with 10%\n percentage = random.random() \n if percentage > 0.1:\n value = 2\n else:\n value = 4\n row = self.tiles[0][0]\n col = self.tiles[0][1]\n self.set_tile(row , col,value)", "def new_tile(self):\n \n empty_items = []\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if self.get_tile(row, col) == 0:\n empty_items.append((row, col))\n \n random_row = 0\n random_col = 0\n if len(empty_items) != 0:\n random_empty_tile = random.randrange(0, len(empty_items))\n (random_row, random_col) = empty_items[random_empty_tile]\n else:\n return\n # the % of getting \"4\" from 0~9 is 10%\n random_time = random.randrange(0, 10)\n \n if random_time == 4:\n self._cells[random_row][random_col] = 4\n else:\n self._cells[random_row][random_col] = 2", "def init():\n for i in range(COLS):\n for j in range(ROWS):\n BOARD[i][j] = int(random(2))", "def generate_standard_tech_tiles(seed=0):\n\n if seed is not 0:\n random.seed(seed)\n all_standard_tiles = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n randomized_tiles = list()\n\n for _ in range(9):\n chosen_tile_index = random.randint(0, len(all_standard_tiles) - 1)\n randomized_tiles.append(all_standard_tiles[chosen_tile_index])\n all_standard_tiles.pop(chosen_tile_index)\n\n return tuple(randomized_tiles)", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, 
col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")", "def random_v_random(n=1):\n p1_strategy = strategies.RandomStrategy()\n p2_strategy = strategies.RandomStrategy()\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_one()", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def new_tile(self):\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n while self.get_tile(rand_y, rand_x) != 0:\r\n rand_x = random.randrange(self.width)\r\n rand_y = random.randrange(self.height)\r\n value = random.choice([2,2,2,2,2,2,2,2,2,4])\r\n del self.board[rand_y][rand_x]\r\n self.board[rand_y].insert(rand_x,value)\r\n return self.board", "def NewTile(field):\n var = False\n while not var:\n temp = random.randrange(0, len(field), 1)\n if field[temp] == 0:\n r = random.randrange(0, 100, 1)\n if r > 80:\n field[temp] = -4\n else:\n field[temp] = -2\n \n var = True\n return field", "def genWater(grid, resolution):\n\n grid_surface = len(grid) * len(grid[0])\n\n # Amount of surface for the water.\n allowed_surface = round(0.2 * grid_surface)\n\n # Collection of the created surfaces\n water_surfaces = []\n\n # Min size of one water piece.\n min_single_size = 4 * resolution\n\n run = 0\n\n # Loop until 4 and enough surface:\n while allowed_surface != 0:\n run += 1\n print (allowed_surface)\n w = 0\n l = 0\n\n # Only generate random if we have more then 1 option left.\n if len(water_surfaces) < 3 and min_single_size != allowed_surface and allowed_surface > 5:\n try:\n size = random.randint(min_single_size, allowed_surface)\n except ValueError as e:\n print (\"Error occured: {0} min: {1} surf: {2}\".format(e, min_single_size, allowed_surface))\n\n # Otherwise the size is the allowed surface\n else:\n size = allowed_surface\n\n # Shouldn't go below 0\n if allowed_surface - size >= 0:\n\n # Calculate width and length if possible.\n # Starting from the square root causes the part to\n # be more square instead of a straight line.\n init = round(math.sqrt(size))\n for i in range(init, 1, -1):\n if 
size % i == 0 and 1 <= math.ceil((size / i) / i) <= 4:\n w = i\n l = size / w\n\n # Reinit allowed surface\n allowed_surface -= size\n\n # Randomly switch width and length\n coinflip = random.randint(1, 2)\n if coinflip == 1:\n\n # Adds a tuple wich contains (width, length, ratio, \n # surface)\n water_surfaces.append((w, int(l), round(l / w, 2), \n size))\n else:\n water_surfaces.append((int(l), w, round(l / w, 2), \n size))\n break\n\n if run > 4:\n # Drop last try and remake the surface.\n print (\"Drop\")\n try:\n run = 2\n allowed_surface += water_surfaces[-1][3]\n water_surfaces = water_surfaces[:-1]\n except:\n print (\"wat\")\n\n return water_surfaces", "def makeGrid(self, width, height, rewardLocs, exit, nPick=1, nAux=1, walls=[]):\n # Make mapping from coordinate (x, y, (takenreward1, takenreward2, ...))\n # to state number, and vice-versa.\n rTaken = iter([(),])\n for nPicked in range(1, nPick+1):\n rTaken = itertools.chain(rTaken, \n myCombinations(rewardLocs, r=nPicked)\n )\n # Iterators are hard to reset, so we list it.\n rTaken = list(rTaken)\n\n # Mappings from state to coordinates, vice-versa\n coordToState = {}\n stateToCoord = {}\n stateIdx = 0\n for x in range(width):\n for y in range(height):\n for stuff in rTaken:\n for holding in self.holdingPossibilities:\n coordToState[(x, y, stuff, holding)] = stateIdx\n stateToCoord[stateIdx] = (x, y, stuff, holding)\n stateIdx += 1\n self.deadEndState = stateIdx\n\n # Actually make the transition function\n def trans(f, p): \n aux = p\n (x, y, stuff, holding) = stateToCoord[f]\n actionMap = {}\n default = {(f, aux): 1}\n # Make the transition dictionary if the dead-end state (state width*height)\n if f == self.F-1:\n for action in range(5):\n actionMap[action] = default\n return actionMap\n\n # Otherwise, determine directions of motion, etc. \n for i in range(4):\n actionMap[i] = default\n if x != 0 and ((x-1, y) not in walls):\n actionMap[0] = {(coordToState[(x-1,y,stuff, holding)], aux): 1}\n if x < width-1 and ((x+1, y) not in walls):\n actionMap[1] = {(coordToState[(x+1,y,stuff, holding)], aux): 1}\n if y != 0 and ((x, y-1) not in walls):\n actionMap[2] = {(coordToState[(x,y-1,stuff, holding)], aux): 1}\n if y < height-1 and ((x, y+1) not in walls):\n actionMap[3] = {(coordToState[(x,y+1,stuff, holding)], aux): 1}\n # What happens when the agent uses action 4?\n if (x, y) == exit:\n # Some cases, depending on self.oneAtATime\n if not self.oneAtATime:\n # The agent is leaving.\n actionMap[4] = {(self.deadEndState, aux): 1}\n else:\n # The agent is dropping off a reward. holeFiller will\n # take care of the reward value.\n if len(stuff) >= nPick:\n # The agent is not allowed to pick up more stuff\n actionMap[4] = {(self.deadEndState, aux): 1}\n else:\n # The agent drops off the object.\n actionMap[4] = {(coordToState[(x,y,stuff, -1)], aux): 1}\n elif (x, y) not in rewardLocs:\n # No reward to pick up. Do nothing.\n actionMap[4] = default\n elif (x, y) in stuff:\n # This reward has already been used. 
Do nothing.\n actionMap[4] = default\n elif len(stuff) >= nPick or (holding != -1 and holding < len(stuff)\n and self.oneAtATime):\n # The agent has its hands full.\n actionMap[4] = default\n else:\n # The agent is allowed to pick up an object.\n newStuff = tuple(sorted(list(stuff) + [(x, y)]))\n if self.oneAtATime:\n newHoldingIdx = newStuff.index((x, y))\n else:\n newHoldingIdx = -1\n actionMap[4] = {(coordToState[(x, y, newStuff, newHoldingIdx)], aux): 1}\n return actionMap\n\n # Man, I'm outputting a lot of stuff.\n # coordToState[(x, y, rewardsLeft, holding)] -> index of this state\n # stateToCoord[index] -> (x, y, rewardsLeft, holding)\n # rTaken is a list of all possible combinations of leftover rewards.\n return (trans, coordToState, stateToCoord, rTaken)", "def create_random(self):\n number_of_layers = random.choice(self.parameter_choices['number_of_layers'])\n neurons_per_layer = []\n dropout_per_layer = []\n self.network['number_of_layers'] = number_of_layers\n\n for i in range(number_of_layers):\n neurons_per_layer.append(random.choice(self.parameter_choices['neurons_per_layer']))\n dropout_per_layer.append(random.choice(self.parameter_choices['dropout_per_layer']))\n\n self.network['neurons_per_layer'] = neurons_per_layer\n self.network['dropout_per_layer'] = dropout_per_layer\n self.network['optimizer'] = random.choice(self.parameter_choices['optimizer'])\n self.network['activation'] = random.choice(self.parameter_choices['activation'])", "def generate_boards():\n\n print \"Generating data, please hold on...\"\n # a list for turns, each which is a list of boards, which are unique layouts\n # a completely blank layout is always the start of the game, counting for turn 0\n game = [[Board(' ' * 9, 1)]]\n\n # there are at most 9 turns in a game of tic tac toe\n for turnNum in range(1, 10):\n # list of layouts for the current turn\n turn = []\n upperLayouts = game[-1]\n\n if turnNum % 2 == 1: player = 'X'\n else: player = 'O'\n\n # every turns' unique layouts are numbered to seperate them more easily\n pattern = 1\n # goes through every layout from the previous turn\n for ul in upperLayouts:\n # game does not continue after a winning move, and using a won board is only possible after turn 5\n if turnNum <= 5 or not ul.check_win()[0]:\n # 9 positions on every board\n for pos in range(9):\n if ul[pos] == ' ':\n newLayout = Board(ul[0:pos] + player + ul[pos+1:])\n # if it is a unique layout\n unique = True\n # goes through every existing layout for this turn\n for item in turn:\n if newLayout.matches(item): \n unique = False\n # the upper layout leads to an existing layout\n ul.paths.append(item.pattern)\n break\n if unique:\n turn.append(Board(newLayout, pattern))\n # the current upper layout leads to the new layout\n ul.paths.append(pattern)\n pattern += 1\n else:\n # adds a zero for paths because a played character is taking up that space\n ul.paths.append(0)\n game.append(turn)\n return game", "def generate_random(self, prob_alive=0.3):\n self.generation = 0\n for i in range(self.lines):\n for j in range(self.cols):\n if random.random() < prob_alive:\n self[i][j] = self.cell_state['alive']", "def random_blocks():\n cells = []\n while len(cells) != 43:\n cell_to_add = (random.randint(0, 11), random.randint(0, 9))\n if cell_to_add not in cells:\n cells.append(cell_to_add)\n return cells", "def randomChestplate():\n return random.choice(CHESTPLATES)", "def generate_options(board: list, player_turn: chr):\n black_marbles, white_marbles = Board.read_marbles(board)\n black_risk, 
white_risk = Evaluator.assess_risk(black_marbles, white_marbles)\n if player_turn == 'b':\n risk = black_risk\n else:\n risk = white_risk\n\n board_object = Board()\n moves, resulting_boards = board_object.generate_all_boards(board, player_turn)\n # Assume score 0 for now\n Evaluator.score_move(moves, 0)\n for i in range(0, len(moves)):\n Evaluator.calculate_board_score(moves[i], resulting_boards[i], player_turn, risk)\n return moves, resulting_boards", "def make_grid(width, height): \n return {(row, col): choice(ascii_uppercase)\n for row in range (height) # remove ' ' and add choice()\n for col in range(width)}", "def simulate(self):\n self._t = self._t + 1\n if self._t == self._cycle:\n # End of a season, start of the next one. Year is also cyclic that is WINTER -> SPRING.\n self._t = 0\n self._season = self._season.next()\n\n # When the ammount of newly produced food in a cell is over and the cell can seed we\n # randomly choose another spot where some random ammount of newly produced food should\n # be stored.\n for i in range(self._height):\n for j in range(self._width):\n if self._env[i][j].get_newly() == 0 and not self._seeded[i][j]:\n # if the cell become empty just now seed in once in a randomn cell on the grid.\n self._seeded[i][j] = True\n cap = self._height + self._width\n while cap > 0:\n seedi = random.randint(0, self._height - 1)\n seedj = random.randint(0, self._width - 1)\n\n production_cap = self._food_per_season[self._season.value]\n\n production_cap -= self._env[seedi][seedj].get_newly()\n\n if production_cap > 0:\n seed_amount = random.randint(1, production_cap)\n self._env[seedi][seedj].produce(seed_amount)\n self._seeded[seedi][seedj] = False\n break\n\n cap = cap - 1", "def new_game(self):\n self.cells = [] # Array of cells\n self.frame_count = 0\n self.database = []\n self.timer = [Consts[\"MAX_TIME\"], Consts[\"MAX_TIME\"]]\n self.result = None\n # Define the players first\n self.cells.append(Cell(0, [Consts[\"WORLD_X\"] / 4, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n self.cells.append(Cell(1, [Consts[\"WORLD_X\"] / 4 * 3, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n # Generate a bunch of random cells\n for i in range(Consts[\"CELLS_COUNT\"]):\n if i < 4:\n rad = 1.5 + (random.random() * 1.5) # Small cells\n elif i < 10:\n rad = 10 + (random.random() * 4) # Big cells\n else:\n rad = 2 + (random.random() * 9) # Everything else\n x = Consts[\"WORLD_X\"] * random.random()\n y = Consts[\"WORLD_Y\"] * random.random()\n cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)\n safe_dist = Consts[\"SAFE_DIST\"] + rad\n while min(map(cell.distance_from, self.cells[:2])) < safe_dist:\n cell.pos = [\n Consts[\"WORLD_X\"] * random.random(),\n Consts[\"WORLD_Y\"] * random.random()\n ]\n self.cells.append(cell)", "def generate_random_toy() -> Toy:\n dimensions = round(uniform(5, 100), 2)\n rooms_number = randint(1, 5)\n return SantaWorkShop(dimensions, rooms_number, 5)", "def create_grid(player1: game_code.Player, player2: game_code.Player) -> None:\r\n status = True\r\n abort = False\r\n\r\n # Initialize two game board both with randomized ship placements\r\n player_1 = game_code.RandomizedBattleshipGame()\r\n player_2 = game_code.RandomizedBattleshipGame()\r\n\r\n player_1_sequence = []\r\n player_2_sequence = []\r\n player_1_previous_move = None\r\n player_2_previous_move = None\r\n\r\n save_initial_state1 = player_1\r\n save_initial_state2 = player_2\r\n\r\n escape = instruction_font.render('HIT 
ESC TO RETURN TO THE MAIN MENU OR TO START A NEW GAME', False,\r\n (255, 255, 255))\r\n player_label_1 = label_font.render('Player 1', False, (255, 255, 255))\r\n player_label_2 = label_font.render('Player 2', False, (255, 255, 255))\r\n\r\n while status:\r\n screen.blit(background, (0, 0))\r\n\r\n # Draw the grids belonging to each player\r\n for column in range(0, 8):\r\n for row in range(0, 8):\r\n cell = pygame.Rect((190 + column * 50, 160 + row * 50), (50, 50))\r\n pygame.draw.rect(screen, (255, 255, 255, 1), cell, 0)\r\n pygame.draw.rect(screen, (0, 0, 0, 1), cell, 3)\r\n\r\n for column in range(0, 8):\r\n for row in range(0, 8):\r\n cell = pygame.Rect((690 + column * 50, 160 + row * 50), (50, 50))\r\n pygame.draw.rect(screen, (255, 255, 255, 1), cell, 0)\r\n pygame.draw.rect(screen, (0, 0, 0, 1), cell, 3)\r\n\r\n # Display labels and text\r\n screen.blit(player_label_1, (340, 580))\r\n screen.blit(player_label_2, (840, 580))\r\n screen.blit(escape, (25, 685))\r\n\r\n columns = 'ABCDEFGH'\r\n rows = '12345678'\r\n # Label Player 1 Board\r\n for letter in range(0, 8):\r\n label = label_font.render(columns[letter], False, (255, 255, 255))\r\n screen.blit(label, (205 + letter * 50, 125))\r\n for number in range(0, 8):\r\n label = label_font.render(rows[number], False, (255, 255, 255))\r\n screen.blit(label, (165, 170 + number * 50))\r\n # Label Player 2 Board\r\n for letter in range(0, 8):\r\n label = label_font.render(columns[letter], False, (255, 255, 255))\r\n screen.blit(label, (705 + letter * 50, 125))\r\n for number in range(0, 8):\r\n label = label_font.render(rows[number], False, (255, 255, 255))\r\n screen.blit(label, (665, 170 + number * 50))\r\n\r\n # Display the ships prior to starting the game\r\n display_ships(save_initial_state1, True)\r\n display_ships(save_initial_state2, False)\r\n\r\n if player_1_previous_move is None and player_2_previous_move is None:\r\n pygame.display.update()\r\n pygame.time.wait(1000)\r\n\r\n while player_1.get_winner() is None and player_2.get_winner() is None and not abort:\r\n\r\n # player1 shot on player2 Board\r\n player_1_previous_move = player1.make_move(player_2, player_1_previous_move)\r\n player_2.make_move(player_1_previous_move)\r\n player_1_sequence.append(player_1_previous_move)\r\n if player_2.get_winner() is not None:\r\n break\r\n # player1 on player2 Board\r\n player_2_previous_move = player2.make_move(player_1, player_2_previous_move)\r\n player_1.make_move(player_2_previous_move)\r\n player_2_sequence.append(player_2_previous_move)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n abort = True\r\n status = False\r\n\r\n display_ships(player_1, True)\r\n pygame.time.wait(500)\r\n pygame.display.update()\r\n pygame.time.wait(500)\r\n display_ships(player_2, False)\r\n pygame.display.update()\r\n\r\n # Display the victory message of the winning player\r\n if player_1.get_winner() == 'Lost':\r\n winner = 'Player 2'\r\n victory = message_font.render(winner + ' Wins!', False, (255, 255, 255))\r\n screen.blit(victory, (450, 50))\r\n else:\r\n winner = 'Player 1'\r\n victory = message_font.render(winner + ' Wins!', False, (255, 255, 255))\r\n screen.blit(victory, (450, 50))\r\n\r\n # Display the final state of the game\r\n display_ships(player_1, True)\r\n display_ships(player_2, False)\r\n\r\n # Check for user input\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n 
pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n status = False\r\n\r\n pygame.display.update()", "def generate_board():\n b = open(_BOARD_FILE, \"r\").readlines()\n for line in b:\n raw = line.strip().split(\" \")\n _board_graph[str_to_pos(raw[0])] = Space(\n (raw[1] == \"R\"),\n TYPE_MAP[raw[1]],\n {str_to_pos(str_pos) for str_pos in raw[2:]})", "def randPlace(self):\r\n random.seed(self.seed)\r\n \r\n # Start placement on Partition A\r\n partA = True\r\n for node in self.G.nodes():\r\n \r\n randSite = random.randint(0,int(self.sitesNum/2)-1)\r\n \r\n if partA:\r\n partSite = self.sitesA\r\n self.G.node[node][\"part\"] = 'A'\r\n \r\n else:\r\n partSite = self.sitesB\r\n self.G.node[node][\"part\"] = 'B'\r\n \r\n while (partSite[randSite].isOcp()):\r\n randSite = random.randint(0,int(self.sitesNum/2)-1) \r\n\r\n partSite[randSite].setCell(node)\r\n self.G.node[node][\"site\"] = partSite[randSite]\r\n \r\n # Toggle partition for next placement\r\n partA = not partA" ]
[ "0.68248934", "0.64361274", "0.6384287", "0.6220165", "0.614266", "0.613112", "0.6068092", "0.6057356", "0.6054699", "0.6002918", "0.5973913", "0.5956003", "0.59538376", "0.59481156", "0.59429073", "0.59363997", "0.59216344", "0.59191567", "0.59100956", "0.58849156", "0.58718026", "0.5866675", "0.58520085", "0.5846157", "0.5842792", "0.58204687", "0.58107954", "0.5800557", "0.5800468", "0.57978207", "0.57948196", "0.5725796", "0.5722613", "0.571879", "0.57154936", "0.5712448", "0.5709946", "0.5681572", "0.56668794", "0.5657484", "0.56568533", "0.56566954", "0.56488353", "0.5639202", "0.5638784", "0.5634537", "0.5632511", "0.5629356", "0.56182694", "0.5594509", "0.55859405", "0.55857456", "0.5582024", "0.55732036", "0.5555083", "0.5553031", "0.5546122", "0.5544155", "0.5541593", "0.5541232", "0.5535443", "0.5532788", "0.5526076", "0.5519978", "0.5514341", "0.5512885", "0.55126464", "0.5504339", "0.5486688", "0.54818666", "0.5479802", "0.54753065", "0.54727226", "0.5471637", "0.54676825", "0.5462452", "0.5461227", "0.5455798", "0.54512185", "0.544472", "0.54392064", "0.54386324", "0.54383487", "0.5434769", "0.54317147", "0.54137623", "0.54129684", "0.5410515", "0.54075736", "0.5402761", "0.5401965", "0.53994775", "0.53994155", "0.5398598", "0.53982306", "0.53963536", "0.53812623", "0.5374453", "0.534779", "0.5344692" ]
0.64837104
1
Highlights separable regions that are stable with the given period. A "separable" region is one which can be removed from the board without affecting any of the rest of the board.
def stability_mask(board, period=6, remove_agent=True):
    if remove_agent:
        board = board * ((board & CellTypes.agent) == 0)
    neighborhood = np.ones((3,3))
    alive = (board & CellTypes.alive) // CellTypes.alive
    neighbors = ndimage.convolve(alive, neighborhood, mode='wrap')
    max_neighbors = neighbors
    ever_alive = alive
    orig_board = board
    for _ in range(period):
        board = speedups.advance_board(board)
        alive = (board & CellTypes.alive) // CellTypes.alive
        neighbors = ndimage.convolve(alive, neighborhood, mode='wrap')
        ever_alive |= alive
        max_neighbors = np.maximum(max_neighbors, neighbors)
    is_boundary = (board & CellTypes.frozen > 0)
    is_boundary |= (ever_alive == 0) & (max_neighbors <= 2)
    labels, num_labels = speedups.wrapped_label(~is_boundary)
    mask = np.zeros(board.shape, dtype=bool)
    for idx in range(1, num_labels+1):
        region = labels == idx
        if (board[region] == orig_board[region]).all():
            mask |= region
    return mask
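The function above relies on SafeLife-style helpers (the CellTypes bit flags, speedups.advance_board, speedups.wrapped_label) that are not part of this row. Below is a minimal, self-contained sketch of the period-stability idea only: advance a Life-like binary board a fixed number of steps and mark the cells that return to their original state. Plain Conway rules and scipy stand in for those helpers; every name here is an illustrative assumption, not the dataset's code.

import numpy as np
from scipy import ndimage

def life_step(alive):
    # Count the 8 wrapped (toroidal) neighbors of every cell.
    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    n = ndimage.convolve(alive, kernel, mode='wrap')
    born = (alive == 0) & (n == 3)
    survive = (alive == 1) & ((n == 2) | (n == 3))
    return (born | survive).astype(alive.dtype)

def stable_after(alive, period=6):
    # True wherever a cell is unchanged after `period` update steps.
    board = alive.copy()
    for _ in range(period):
        board = life_step(board)
    return board == alive

rng = np.random.default_rng(0)
demo = (rng.random((16, 16)) < 0.3).astype(np.int8)
print("fraction of cells stable over 6 steps:", stable_after(demo).mean())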
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_partioned_regions(shape, alpha=1.0, max_regions=5, min_regions=2):\n ring = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.int16)\n adjacent = np.array([ # Diagonals don't count as adjacent\n [-1,0,0,1],\n [0,-1,1,0]], dtype=np.int16).T\n nearby = np.meshgrid([-2,-1,0,1,2], [-2,-1,0,1,2])\n\n board = np.zeros(shape, dtype=np.int16)\n perimeters = [{\n (i, j) for i, j in zip(*np.nonzero(board == 0))\n }]\n exclusions = [set()]\n while sum(len(p) for p in perimeters) > 0:\n weights = np.array([len(p) for p in perimeters], dtype=float)\n weights[0] = min(alpha, weights[0]) if len(weights) <= max_regions else 1e-10\n if len(weights) <= min_regions:\n weights[1:] = 1e-10\n weights /= np.sum(weights)\n k = get_rng().choice(len(perimeters), p=weights)\n plist = list(perimeters[k])\n i, j = plist[get_rng().choice(len(plist))]\n perimeters[0].discard((i, j))\n perimeters[k].discard((i, j))\n if (i, j) in exclusions[k]:\n continue\n exclusions[0].add((i,j))\n exclusions[k].add((i,j))\n b = board[(i+nearby[0]) % shape[0], (j+nearby[1]) % shape[1]]\n b[2,2] = k or -1\n num_neighbors = signal.convolve2d(b != 0, ring, mode='valid')\n num_foreign = signal.convolve2d((b > 0) & (b != k), ring, mode='valid')\n if ((num_foreign > 0) & (num_neighbors > 2)).any() or num_foreign[1,1] > 0:\n continue\n # Add to the board\n if k == 0:\n k = len(perimeters)\n perimeters.append(set())\n exclusions.append(set())\n board[i, j] = k\n for i2, j2 in (adjacent + (i, j)) % shape:\n if board[i2, j2] == 0:\n perimeters[k].add((i2, j2))\n return board", "def resetSagittalSegment(self):\r\n #research\r\n profprint()\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYellow == None :\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n sYellow.SetSliceVisible(0)\r\n sYellow.SetOrientationToSagittal()\r\n sw = slicer.app.layoutManager().sliceWidget(\"Yellow\")\r\n sw.fitSliceToBackground()\r\n sYellow.Modified()", "def highlight_available_v_fences(win, game):\n #Check if player has remaining fences\n player_turn = game.get_player_turn()\n if game.get_pawn(player_turn).get_remaining_fences() == 0:\n return\n\n board = game.get_board()\n\n #Set highlight color\n if player_turn == 1:\n color = LIGHTERRED\n else:\n color = LIGHTERBLUE\n \n for row in range(len(board)-2):\n for col in range(len(board)-1):\n #Highlight fence if no fence placed and does not interesect a horizontal fence. 
\n if not board[row][col]['v'] and not board[row+1][col]['v'] and board[row+1][col]['h'] != \"Fence Continued\" and game.fair_play_check('v',(col,row)):\n coords = board[row][col]['coord']\n v_fence_coords = (coords[0]*SQUARESIZE+FENCEWIDTH*(coords[0]-1), coords[1]*(SQUARESIZE+FENCEWIDTH))\n v_fence = pygame.Rect(v_fence_coords, (FENCEWIDTH,SQUARESIZE))\n pygame.draw.rect(win, color, v_fence)", "def color_segmentation(self):\n cv.namedWindow(\"Segmentation parameters\")\n self.create_trackbar(\"h-u\", \"Segmentation parameters\")\n self.create_trackbar(\"h-l\",\"Segmentation parameters\")\n self.create_trackbar(\"s-u\",\"Segmentation parameters\")\n self.create_trackbar(\"s-l\",\"Segmentation parameters\")\n self.create_trackbar(\"v-u\",\"Segmentation parameters\")\n self.create_trackbar(\"v-l\",\"Segmentation parameters\")\n\n image = self.__image.copy()\n\n while True:\n var_h_upper = cv.getTrackbarPos(\"h-u\", \"Segmentation parameters\")\n var_h_lower = cv.getTrackbarPos(\"h-l\", \"Segmentation parameters\")\n var_s_upper = cv.getTrackbarPos(\"s-u\", \"Segmentation parameters\")\n var_s_lower = cv.getTrackbarPos(\"s-l\", \"Segmentation parameters\")\n var_v_upper = cv.getTrackbarPos(\"v-u\", \"Segmentation parameters\")\n var_v_lower = cv.getTrackbarPos(\"v-l\", \"Segmentation parameters\")\n\n lower = np.array([var_h_lower,var_s_lower,var_v_lower])\n upper = np.array([var_h_upper,var_s_upper,var_v_upper])\n\n bin_image = cv.inRange(self.hsv_image, lower, upper)\n cv.imshow(\"Segmentated image\", bin_image)\n\n if (cv.waitKey(1) & 0xFF == ord('q')):\n break\n cv.destroyAllWindows()", "def highlight_available_h_fences(win, game):\n #Check if player has remaining fences\n player_turn = game.get_player_turn()\n if game.get_pawn(player_turn).get_remaining_fences() == 0:\n return\n\n board = game.get_board()\n \n #Set highlight color\n if player_turn == 1:\n color = LIGHTERRED\n else:\n color = LIGHTERBLUE\n \n for row in range(len(board)-1):\n for col in range(len(board)-2):\n #Highlight fence if no fence placed and does not intersect a vertical fence\n if not board[row][col]['h'] and not board[row][col+1]['h'] and board[row][col+1]['v'] != \"Fence Continued\" and game.fair_play_check('h',(col,row)):\n coords = board[row][col]['coord']\n h_fence_coords = (coords[0]*(SQUARESIZE+FENCEWIDTH), coords[1]*SQUARESIZE+FENCEWIDTH*(coords[1]-1))\n h_fence = pygame.Rect(h_fence_coords, (SQUARESIZE,FENCEWIDTH))\n pygame.draw.rect(win, color, h_fence)", "def plot_sed(self,period=6.,projection='lambert',geopolygons=None, showfig=True, vmin=0, vmax=None, hillshade=False):\n\t\tif hillshade:\n\t\t\talpha = 0.5\n\t\telse:\n\t\t\talpha =1.\n\t\tm = self._get_basemap(projection=projection, geopolygons=geopolygons,hillshade=hillshade)\n\t\tgroup = self['%g_sec'%( period )]\n\t\tx, y = m(group['lonArr'].value, group['latArr'].value)\n\t\tmy_cmap = pycpt.load.gmtColormap('./cv.cpt')\n\t\tsed_Arr = group['sed_Arr'].value\n\t\tsed_Arr_msk = group['sed_Arr_msk'].value\n\t\tif vmin == None:\n\t\t\tvmin = np.nanmin(sed_Arr[~sed_Arr_msk])\n\t\t\tvmin = np.floor(vmin/5.)*5.\n\t\tif vmax == None:\n\t\t\tvmax = np.nanmax(sed_Arr[~sed_Arr_msk])\n\t\t\tvmax = np.ceil(vmax/5.)*5.\n\t\tim = m.pcolormesh(x, y, np.ma.masked_array(sed_Arr,mask=sed_Arr_msk), cmap=my_cmap, shading='gouraud', vmin=vmin, vmax=vmax, alpha=alpha)\n\t\tcb = m.colorbar(im, \"bottom\", size=\"3%\", pad='2%', format='%d')\n\t\tcb.set_label('Sediment thickness (m)', fontsize=12, rotation=0)\n\t\tcb.set_alpha(1)\n\t\tcb.draw_all()\n\t\tax = plt.gca() # 
only plot the oceanic part for JdF\n\t\t# ax.set_xlim(right=x_max)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\treturn", "def addSeparatorFeature(self):\n \n # graphical separators\n dNS = {\"pc\":PageXml.NS_PAGE_XML}\n someNode = self.lNode[0]\n ndPage = someNode.node.xpath(\"ancestor::pc:Page\", namespaces=dNS)[0]\n lNdSep = ndPage.xpath(\".//pc:SeparatorRegion\", namespaces=dNS)\n loSep = [ShapeLoader.node_to_LineString(_nd) for _nd in lNdSep]\n \n if self.bVerbose: traceln(\" %d graphical separators\"%len(loSep))\n\n # make an indexed rtree\n idx = index.Index()\n for i, oSep in enumerate(loSep):\n idx.insert(i, oSep.bounds)\n \n # take each edge in turn and list the separators it crosses\n nCrossing = 0\n for edge in self.lEdge:\n # bottom-left corner to bottom-left corner\n oEdge = geom.LineString([(edge.A.x1, edge.A.y1), (edge.B.x1, edge.B.y1)])\n prepO = prep(oEdge)\n lCrossingPoints = []\n fSepTotalLen = 0\n for i in idx.intersection(oEdge.bounds):\n # check each candidate in turn\n oSep = loSep[i]\n if prepO.intersects(oSep):\n fSepTotalLen += oSep.length\n oPt = oEdge.intersection(oSep)\n if type(oPt) != geom.Point:\n traceln('Intersection in not a point: skipping it')\n else:\n lCrossingPoints.append(oPt)\n \n if lCrossingPoints:\n nCrossing += 1\n edge.bCrossingSep = True\n edge.sep_NbCrossing = len(lCrossingPoints)\n minx, miny, maxx, maxy = geom.MultiPoint(lCrossingPoints).bounds\n edge.sep_SpanLen = abs(minx-maxx) + abs(miny-maxy)\n edge.sep_AvgSpanSgmt = edge.sep_SpanLen / len(lCrossingPoints) \n edge.sep_AvgSepLen = fSepTotalLen / len(lCrossingPoints)\n else:\n edge.bCrossingSep = False\n edge.sep_NbCrossing = 0\n edge.sep_SpanLen = 0\n edge.sep_AvgSpanSgmt = 0 \n edge.sep_AvgSepLen = 0\n \n #traceln((edge.A.domid, edge.B.domid, edge.bCrossingSep, edge.sep_NbCrossing, edge.sep_SpanLen, edge.sep_AvgSpanSgmt, edge.sep_AvgSepLen))\n \n \n if self.bVerbose: \n traceln(\" %d (/ %d) edges crossing at least one graphical separator\"%(nCrossing, len(self.lEdge)))", "def analyze_block_horizon(mask):\n\n horizon_borders = find_border_horizon(mask)\n if border_analyze_horizon(mask, horizon_borders) and mask.shape[0] > mask.shape[1]:\n return True\n else:\n return False\n # heights = [abs(start - stop) for (start, stop) in horizon_borders]\n # horizon_big_borders = []\n # segmented_components = []\n # for idx, h in enumerate(heights):\n # if h > space_height:\n # horizon_big_borders.append(horizon_borders[idx])\n # if len(horizon_big_borders) == 0:\n # return segmented_components\n # slice_start = 0\n # horizon_slice_start_stops = []\n # for h_border in horizon_big_borders[::-1]:\n # horizon_slice_start_stops.append([slice_start, h_border[0]])\n # slice_start = h_border[1]\n # horizon_slice_start_stops.append([slice_start, mask.shape[0] - 1])\n # horizon_slice_start_stops = [(start, stop) for (start, stop) in horizon_slice_start_stops if stop > start]\n # for (horizon_start, horizon_stop) in horizon_slice_start_stops:\n # mask_slice = mask[horizon_start:horizon_stop, :]\n # vertical_borders = find_border_vertical(mask_slice)\n # widths = [abs(border[0] - border[1]) for border in vertical_borders]\n # vertical_big_borders = [vertical_borders[idx] for (idx, w) in enumerate(widths) if w > space_width]\n # if len(vertical_big_borders) == 0:\n # segmented_components.append([horizon_start, horizon_stop, 0, mask.shape[1] - 1])\n # else:\n # v_slice_start = 0\n # for idx, v_border in enumerate(vertical_big_borders[::-1]):\n # if v_slice_start != v_border[1]:\n # 
segmented_components.append([horizon_start, horizon_stop, v_slice_start, v_border[1]])\n # v_slice_start = v_border[0]\n # if v_slice_start < mask.shape[1] - 1:\n # segmented_components.append([horizon_start, horizon_stop, v_slice_start, mask.shape[1] - 1])\n # return segmented_components", "def region_growing(im: np.ndarray, seed_points: list, T: int) -> np.ndarray:\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n # You can also define other helper functions\n segmented = np.zeros_like(im).astype(bool)\n\n (H, W) = im.shape\n\n for seed_row, seed_col in seed_points:\n region = []\n region.append([seed_row, seed_col])\n for row, col in region:\n for rows in range((row-1),(row+2)): # Check neighbouring pixels\n for cols in range((col-1),(col+2)):\n if rows < H and rows >= 0 and cols < W and cols >= 0: # Is pixel inside image?\n if (np.abs(im[seed_row, seed_col] - im[rows, cols]) <= T) and not segmented[row, col]:\n region.append([rows, cols])\n segmented[row, col] = True\n return segmented\n ### END YOUR CODE HERE ### ", "def vis_seg(img, seg, palette, alpha=0.5):\n vis = np.array(img, dtype=np.float32)\n mask = seg > 0\n vis[mask] *= 1. - alpha\n vis[mask] += alpha * palette[seg[mask].flat]\n vis = vis.astype(np.uint8)\n\n # own code - Jasper\n total_pixels = totalNumPixels(seg, palette)\n # print(\"color_seg():\\n\")\n # print(palette)\n # print(\"Seg: \\n\")\n # print(seg)\n # print(\"Seg.flat:\\n\")\n # print(seg.flat)\n # print(\"Palette seg.flat:\\n\")\n # print(palette[seg.flat])\n exportLogs(\"Number of pixels: {:d}\".format(total_pixels))\n\n # classes of pixels in tuple form\n exportLogs(\"Extract Unique Pixel Classes:\\n\")\n pixel_classes = [tuple(row) for row in palette[seg.flat]]\n\n # remove duplicate classes of pixels\n unique_classes = np.unique(pixel_classes, axis=0)\n\n # print result of pixel classes present in image\n for x in range(0, len(unique_classes.tolist())):\n exportLogs(\"{}. 
{}\".format(x + 1, unique_classes.tolist()[x]))\n\n # determine index of class and relate it with class_names\n # numpy array must be converted to list for list manipulation\n exportLogs(\"---------- Class Names - RGB Value ----------\")\n for i in unique_classes.tolist():\n # get index of pixel class from palette\n class_index = palette.tolist().index(i)\n\n # RGB value of pixel class\n class_color = tuple(i)\n\n # get the class name from class_names list based on PASCAL VOC list of classes \n class_name = class_names[class_index]\n\n # compute region percentage of class from the image\n percentage = (pixel_classes.count(tuple(i))/total_pixels) * 100\n \n # print results\n exportLogs(\"Class ID: {:d}\".format(class_index))\n exportLogs(\"Class Color: {}\".format(class_color))\n exportLogs(\"Class Name: {:s}\".format(class_name))\n exportLogs(\"Percentage of region: {:.3f}%\".format(percentage))\n exportLogs(\"\\n\")\n # end\n return vis", "def FillVC8GradientColour(self, dc, tabPoints, active):\r\n\r\n xList = [pt.x for pt in tabPoints]\r\n yList = [pt.y for pt in tabPoints]\r\n \r\n minx, maxx = min(xList), max(xList)\r\n miny, maxy = min(yList), max(yList)\r\n\r\n rect = wx.Rect(minx, maxy, maxx-minx, miny-maxy+1) \r\n region = wx.RegionFromPoints(tabPoints)\r\n\r\n if self._buttonRect.width > 0:\r\n buttonRegion = wx.Region(*self._buttonRect)\r\n region.XorRegion(buttonRegion)\r\n \r\n dc.SetClippingRegionAsRegion(region)\r\n\r\n if active:\r\n bottom_colour = top_colour = wx.WHITE\r\n else:\r\n bottom_colour = StepColour(self._base_colour, 90)\r\n top_colour = StepColour(self._base_colour, 170)\r\n\r\n dc.GradientFillLinear(rect, top_colour, bottom_colour, wx.SOUTH)\r\n dc.DestroyClippingRegion()", "def undrawGrid(draw,points,coeff2,newColorArray):\r\n ## This is the merge function.\r\n ## If two neighboring regions are considered to be the same color by the comparison function then this function replaces the black line between them by a line of their color (making it invisible).\r\n for j in range(0,coeff2-1):\r\n if comparisonFunction(newColorArray[0][j],newColorArray[0][j+1]):\r\n draw.line((points*0, points*(j+1), points*(0+1)-3, points*(j+1)), fill=newColorArray[0][j], width=lineWidth)\r\n\r\n for i in range(0,coeff2-1):\r\n if comparisonFunction(newColorArray[i][0],newColorArray[i+1][0]):\r\n draw.line((points*(i+1), points*0, points*(i+1), points*1-3), fill=newColorArray[i][0], width=lineWidth)\r\n\r\n for i in range(1,coeff2):\r\n for j in range(1,coeff2):\r\n if comparisonFunction(newColorArray[i][j],newColorArray[i][j-1]):\r\n draw.line((points*i+3, points*j, points*(i+1)-3, points*j), fill=newColorArray[i][j], width=lineWidth)\r\n\r\n if comparisonFunction(newColorArray[i][j],newColorArray[i-1][j]):\r\n draw.line((points*i, points*j+3, points*i, points*(j+1)-3), fill=newColorArray[i][j], width=lineWidth)", "def split_dotted_general(captcha):\n image = captcha[19:46,]\n\n col_sum = np.sum(image, axis = 0)\n col_sum_list = list(col_sum)\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 200:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 199:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = 
dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n five_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n # Building a list of GAPS (lengths of the dark regions)\n # and LINES that split such gaps in half\n gaps = []\n lines = []\n for i, region in enumerate(five_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If more than 5 gaps are identified, the problem may be due to split letters\n # Some of the troublesome letters are m, n and h\n # We will try to fix this issue by completing gaps in these letters\n if len(lines) > 5:\n\n for i in range(len(col_sum_list[:-9])):\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 3060, 3060]:\n captcha[28:30, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 2550, 2550]:\n captcha[31:33, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 3060, 3060, 0, 510, 510, 0, 0, 0, 0]:\n captcha[28:30, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 2550, 2550, 0, 510, 510, 0, 0, 0, 0]:\n captcha[31:33, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 4080, 4080, 0, 0, 0, 0, 510, 510]:\n captcha[31:33, i+4:i+6] = 255\n\n # Reloading image (based on modified captcha) and redefiding col_sum_list\n image = captcha[19:46, ]\n col_sum_list = list(np.sum(image, axis = 0))\n\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 200:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 199:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n five_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n # Building a list of GAPS (lengths of the dark regions)\n # and LINES that split such gaps in half\n gaps = []\n lines = []\n for i, region in enumerate(five_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If the errors persists, we move on to next captcha\n if len(lines) > 5:\n return('error')\n\n # If the algorithm finds less letters than expected (merged letters), we move on to next captcha\n if len(lines) < 5:\n return('error')\n\n # Defining rightmost and leftmost lines, appending lines list, and sorting\n left_line = left_region[1] - 2\n right_line = right_region[0] + 2\n lines.append(left_line)\n lines.append(right_line)\n lines = sorted(lines)\n\n # Finding letters x-coordinates\n letters_xcoords = []\n for i in range(len(lines)):\n if lines[i] == lines[-1]:\n break\n letter = (lines[i], lines[i + 1])\n letters_xcoords.append(letter)\n\n letters = []\n for i, letter in enumerate(letters_xcoords):\n letter_image = captcha[:60, letter[0]:letter[1]]\n letters.append(letter_image)\n\n return(letters)", "def segment_region_of_interest(image):\n binary = image < 604\n cleared = clear_border(binary)\n\n label_image = label(cleared)\n\n areas = [r.area for r in regionprops(label_image)]\n areas.sort()\n if len(areas) > 2:\n for region in regionprops(label_image):\n if 
region.area < areas[-2]:\n for coordinates in region.coords:\n label_image[coordinates[0], coordinates[1]] = 0\n\n binary = label_image > 0\n\n selem = disk(2)\n binary = binary_erosion(binary, selem)\n\n selem = disk(10)\n binary = binary_closing(binary, selem)\n\n edges = roberts(binary)\n binary = scipy.ndimage.binary_fill_holes(edges)\n\n get_high_vals = binary == 0\n image[get_high_vals] = 0\n\n return image", "def highlight_unallocated(series):\n one_person_req = (series > 0) & (series <= 0.5)\n two_person_req = series > 0.5\n is_good = series == 0\n\n style = []\n for i in range(len(series)):\n if two_person_req[i]:\n style.append(\"background-color: red\")\n elif one_person_req[i]:\n style.append(\"background-color: orange\")\n elif is_good[i]:\n style.append(\"background-color: lime\")\n else:\n style.append(\"background-color: yellow\")\n\n return style", "def _find_regions(base_pairs, scores):\n # Make sure the lower residue is on the left for each row\n sorted_base_pairs = np.sort(base_pairs, axis=1)\n\n # Sort the first column in ascending order\n original_indices = np.argsort(sorted_base_pairs[:, 0])\n sorted_base_pairs = sorted_base_pairs[original_indices]\n\n # Rank each base\n # E.g.: [[3, 5] --> [[0, 1]\n # [9, 7]] [3, 2]]\n order = np.argsort(sorted_base_pairs.flatten())\n rank = np.argsort(order).reshape(base_pairs.shape)\n\n # The base pairs belonging to the current region\n region_pairs = []\n # The individual regions\n regions = set()\n\n # Find separate regions\n for i in range(len(sorted_base_pairs)):\n # if a new region is to be started append the current base pair\n if len(region_pairs) == 0:\n region_pairs.append(original_indices[i])\n continue\n\n # Check if the current base pair belongs to the region that is\n # currently being defined\n previous_upstream_rank = rank[i-1, 0]\n this_upstream_rank = rank[i, 0]\n previous_downstream_rank = rank[i-1, 1]\n this_downstream_rank = rank[i, 1]\n\n # if the current base pair belongs to a new region, save the\n # current region and start a new region\n if ((previous_downstream_rank - this_downstream_rank) != 1 or\n (this_upstream_rank - previous_upstream_rank) != 1):\n regions.add(\n _Region(base_pairs, np.array(region_pairs), scores)\n )\n region_pairs = []\n\n # Append the current base pair to the region\n region_pairs.append(original_indices[i])\n\n # The last region has no endpoint defined by the beginning of a\n # new region.\n regions.add(_Region(base_pairs, np.array(region_pairs), scores))\n\n # Return the graphical representation of the conflicting regions\n return _generate_graphical_representation(regions)", "def phase_segmentation(image, threshold):\n # Normalize the image\n im_norm = (image - image.min()) / (image.max() - image.min())\n\n # Do a background subtraction\n im_blur = skimage.filters.gaussian(image, 50.0)\n im_sub = im_norm - im_blur\n\n # Threshold the image\n im_thresh = im_sub < -0.2\n\n # Label the image\n im_label = skimage.measure.label(im_thresh)\n\n # Get the properties and apply an area threshold\n props = skimage.measure.regionprops(im_label)\n\n # Make an empty image to store the approved cells\n approved_objects = np.zeros_like(im_label)\n\n # Apply the area filters\n for prop in props:\n obj_area = prop.area * 0.160**2 # Given the interpixel distance\n if (obj_area > 0.5) & (obj_area < 5):\n approved_objects += (im_label==prop.label)\n\n # Relabel the image.\n return im_relab", "def colour_segmentation(img, num_segments=1000, round_schedule = [0.02, 0.04, 0.06, 0.08], 
colour_median_prop=0, max_clust_size=0.05, min_clust_size=0.002):\n origimg = img\n \n # Initial segmentation\n regions = skimage.segmentation.slic(img, n_segments=num_segments)\n\n for round_thr in round_schedule:\n # Compute colour change of each pixel\n edges = skimage.util.dtype.img_as_float(colour.colourchange(img))\n \n # Merge clusters hierarchically based on above distance\n rag = skimage.future.graph.rag_boundary(regions, edges)\n regions = skimage.future.graph.merge_hierarchical(regions, rag, thresh=round_thr, rag_copy=False, in_place_merge=True, merge_func=_merge_boundary, weight_func=_weight_boundary)\n\n # Replace all pixels in (some?) clusters with their median colour\n clust_sizes = skimage.exposure.histogram(regions)[0]\n clust_sizes = clust_sizes / float(sum(clust_sizes))\n medianclusters = np.where(clust_sizes > colour_median_prop)[0]\n\n img = origimg.copy()\n for mediancluster in medianclusters:\n img[regions == mediancluster] = np.median(img[regions == mediancluster], axis=0)\n \n if len(clust_sizes) == 1:\n break\n \n # Filter out too small and too large clusters\n num_clusters = 0\n for clust in range(len(clust_sizes)):\n if clust_sizes[clust] > max_clust_size or clust_sizes[clust] < min_clust_size: # background or noise resp.\n regions[regions == clust] = 0\n else:\n num_clusters += 1\n regions[regions == clust] = num_clusters\n \n # Extract centroids\n centroids, bboxes = zip(*[(clust.centroid, clust.bbox) for clust in skimage.measure.regionprops(regions)])\n \n return (regions, centroids, bboxes)", "def offer_fix_broken_regions(self, with_window: ProjectWindow = None):\n if with_window:\n result = with_window.CustomDialog(\n title=\"Region Cleanup\",\n message=\"In vanilla Dark Souls, the Duke's Archives has four unused regions that can break event\\n\"\n \"scripts. Would you like Soulstruct to delete those four regions now?\",\n button_names=(\"Yes, delete them\", \"No, leave them be\"),\n button_kwargs=(\"YES\", \"NO\"),\n cancel_output=1,\n default_output=1,\n )\n else:\n result = 1 if (\n input(\n \"In vanilla Dark Souls, the Duke's Archives has four unused regions that can break event\\n\"\n \"scripts. Would you like Soulstruct to delete those four regions now? 
[y]/n\",\n ).lower() == \"n\"\n ) else 0\n if result == 0:\n archives_msb = self.maps.DukesArchives\n repeats = archives_msb.get_repeated_entity_ids() # re-checking just in case\n if {e.entity_id for e in repeats[\"Regions\"]} == {1702745, 1702746, 1702747, 1702748}:\n for entry in repeats[\"Regions\"]:\n archives_msb.regions.delete_entry(entry)\n return True\n else:\n return False", "def highlight_de(adata, basis='umap', components=[1, 2], n_top_genes=10,\n de_keys='names, scores, pvals_adj, logfoldchanges',\n cell_keys='', n_neighbors=5, fill_alpha=0.1, show_hull=True,\n legend_loc='top_right', plot_width=None, plot_height=None):\n\n if 'rank_genes_groups' not in adata.uns_keys():\n raise ValueError('Run differential expression first.')\n\n\n if isinstance(de_keys, str):\n de_keys = list(dict.fromkeys(map(str.strip, de_keys.split(','))))\n if de_keys != ['']:\n assert all(map(lambda k: k in adata.uns['rank_genes_groups'].keys(), de_keys)), 'Not all keys are in `adata.uns[\\'rank_genes_groups\\']`.'\n else:\n de_keys = []\n\n if isinstance(cell_keys, str):\n cell_keys = list(dict.fromkeys(map(str.strip, cell_keys.split(','))))\n if cell_keys != ['']:\n assert all(map(lambda k: k in adata.obs.keys(), cell_keys)), 'Not all keys are in `adata.obs.keys()`.'\n else:\n cell_keys = []\n\n if f'X_{basis}' not in adata.obsm.keys():\n raise ValueError(f'Key `X_{basis}` not found in adata.obsm.')\n\n if not isinstance(components, np.ndarray):\n components = np.asarray(components)\n\n key = adata.uns['rank_genes_groups']['params']['groupby']\n if key not in cell_keys:\n cell_keys.insert(0, key)\n\n df = pd.DataFrame(adata.obsm[f'X_{basis}'][:, components - (basis != 'diffmap')], columns=['x', 'y'])\n for k in cell_keys:\n df[k] = list(map(str, adata.obs[k]))\n\n knn = neighbors.KNeighborsClassifier(n_neighbors)\n knn.fit(df[['x', 'y']], adata.obs[key])\n df['prediction'] = knn.predict(df[['x', 'y']])\n\n conv_hulls = df[df[key] == df['prediction']].groupby(key).apply(lambda df: df.iloc[ConvexHull(np.vstack([df['x'], df['y']]).T).vertices])\n\n mapper = _create_mapper(adata, key)\n categories = adata.obs[key].cat.categories\n fig = figure(tools='pan, reset, wheel_zoom, lasso_select, save')\n _set_plot_wh(fig, plot_width, plot_height)\n legend_dict = defaultdict(list)\n\n for k in categories:\n d = df[df[key] == k]\n data_source = ColumnDataSource(d)\n legend_dict[k].append(fig.scatter('x', 'y', source=data_source, color={'field': key, 'transform': mapper}, size=5, muted_alpha=0))\n\n hover_cell = HoverTool(renderers=[r[0] for r in legend_dict.values()], tooltips=[(f'{key}', f'@{key}')] + [(f'{k}', f'@{k}') for k in cell_keys[1:]])\n\n c_hulls = conv_hulls.copy()\n de_possible = conv_hulls[key].isin(adata.uns['rank_genes_groups']['names'].dtype.names)\n ok_patches = []\n prev_cat = []\n for i, isin in enumerate((~de_possible, de_possible)):\n conv_hulls = c_hulls[isin]\n\n if len(conv_hulls) == 0:\n continue\n\n xs, ys, ks = zip(*conv_hulls.groupby(key).apply(lambda df: list(map(list, (df['x'], df['y'], df[key])))))\n tmp_data = defaultdict(list)\n tmp_data['xs'] = xs\n tmp_data['ys'] = ys\n tmp_data[key] = list(map(lambda k: k[0], ks))\n \n if i == 1:\n ix = list(map(lambda k: adata.uns['rank_genes_groups']['names'].dtype.names.index(k), tmp_data[key]))\n for k in de_keys:\n tmp = np.array(list(zip(*adata.uns['rank_genes_groups'][k])))[ix, :n_top_genes]\n for j in range(n_top_genes):\n tmp_data[f'{k}_{j}'] = tmp[:, j]\n\n tmp_data = pd.DataFrame(tmp_data)\n for k in categories:\n d = 
tmp_data[tmp_data[key] == k]\n source = ColumnDataSource(d)\n\n patches = fig.patches('xs', 'ys', source=source, fill_alpha=fill_alpha, muted_alpha=0, hover_alpha=0.5,\n color={'field': key, 'transform': mapper} if (show_hull and i == 1) else None,\n hover_color={'field': key, 'transform': mapper} if (show_hull and i == 1) else None)\n legend_dict[k].append(patches)\n if i == 1:\n ok_patches.append(patches)\n\n hover_group = HoverTool(renderers=ok_patches, tooltips=[(f'{key}', f'@{key}'),\n ('groupby', adata.uns['rank_genes_groups']['params']['groupby']),\n ('reference', adata.uns['rank_genes_groups']['params']['reference']),\n ('rank', ' | '.join(de_keys))] + [(f'#{i + 1}', ' | '.join((f'@{k}_{i}' for k in de_keys))) for i in range(n_top_genes)]\n )\n \n\n fig.toolbar.active_inspect = [hover_group]\n if len(cell_keys) > 1:\n fig.add_tools(hover_group, hover_cell)\n else:\n fig.add_tools(hover_group)\n\n if legend_loc is not None:\n legend = Legend(items=list(legend_dict.items()), location=legend_loc)\n fig.add_layout(legend)\n fig.legend.click_policy = 'hide' # hide does disable hovering, whereas 'mute' does not\n\n fig.xaxis.axis_label = f'{basis}_{components[0]}'\n fig.yaxis.axis_label = f'{basis}_{components[1]}'\n\n show(fig)", "def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n 
self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def drawValidationNeedles(self): \n #productive #onButton\n profprint()\n # reset report table\n # print \"Draw manually segmented needles...\"\n #self.table =None\n #self.row=0\n self.initTableView()\n self.deleteEvaluationNeedlesFromTable()\n while slicer.util.getNodes('manual-seg*') != {}:\n nodes = slicer.util.getNodes('manual-seg*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n \n if self.tableValueCtrPt==[[]]:\n self.tableValueCtrPt = [[[999,999,999] for i in range(100)] for j in range(100)]\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\n nbNode=modelNodes.GetNumberOfItems()\n for nthNode in range(nbNode):\n modelNode=slicer.mrmlScene.GetNthNodeByClass(nthNode,'vtkMRMLAnnotationFiducialNode')\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\n needleNumber = int(modelNode.GetAttribute(\"NeedleNumber\"))\n needleStep = int(modelNode.GetAttribute(\"NeedleStep\"))\n coord=[0,0,0]\n modelNode.GetFiducialCoordinates(coord)\n self.tableValueCtrPt[needleNumber][needleStep]=coord\n print needleNumber,needleStep,coord\n # print self.tableValueCtrPt[needleNumber][needleStep]\n\n for i in range(len(self.tableValueCtrPt)):\n if self.tableValueCtrPt[i][1]!=[999,999,999]:\n colorVar = random.randrange(50,100,1)/float(100)\n controlPointsUnsorted = [val for val in self.tableValueCtrPt[i] if val !=[999,999,999]]\n controlPoints=self.sortTable(controlPointsUnsorted,(2,1,0))\n self.addNeedleToScene(controlPoints,i,'Validation')\n else:\n # print i\n pass", "def highlight_moves(win, game):\n #Get available moves\n player_turn = game.get_player_turn()\n moves_available = game.possible_moves(game.get_pawn(player_turn))\n if None in moves_available:\n moves_available.remove(None)\n \n #Highlight moves\n for move in moves_available:\n move_coords = (move[0]*(SQUARESIZE+FENCEWIDTH), move[1]*(SQUARESIZE+FENCEWIDTH))\n move_center_coords = (move_coords[0]+SQUARESIZE/2, move_coords[1]+SQUARESIZE/2)\n pygame.draw.circle(win, LIGHTGREEN, move_center_coords, SQUARESIZE/4)", "def sed(self, search, replace):\n\n for section in self.sections:\n for i, block in enumerate(section.blocks):\n if block == search:\n section.blocks[i] = replace\n self.all_damaged = True\n self.dirty = True", "def BraceHighlight(self, pos1, pos2):\n # Check if we are still alive or not, as this may be called\n # after we have been deleted.\n if self:\n super(EditraBaseStc, self).BraceHighlight(pos1, pos2)", "def mask_region_ends(self, n_days=20):\n for rg in self.Rs:\n i = self.Rs.index(rg)\n self.Active.mask[i, -n_days:] = True\n self.Confirmed.mask[i, -n_days:] = True\n self.Deaths.mask[i, -n_days:] = True\n self.NewDeaths.mask[i, -n_days:] = True\n self.NewCases.mask[i, -n_days:] = True", "def border_analyze_horizon(mask, borders, min_segment_height=5, max_border_height=0.4):\n max_border_height = mask.shape[0] * max_border_height\n if len(borders) == 0: return False\n upper_most = 0\n lower_most = mask.shape[0] - 1\n # middle_borders = [border for border in borders if (left_most in border or right_most in border) and abs(border[0]-border[1])>3 ]\n # if len(middle_borders)==0:return False\n slice_start = 0\n block_heights = []\n block_areas = []\n # segs_by_middle_borders=[]\n for v_border in borders:\n border_start = v_border[0]\n border_stop = 
v_border[1]\n if border_start == upper_most:\n slice_start = border_stop\n elif border_stop == lower_most:\n # mask_slice=mask[slice_start:border_start]\n block_heights.append(border_start - slice_start)\n\n block_areas.append(np.sum(mask[slice_start:border_start, :]))\n # segs_by_middle_borders.append(mask_slice)\n slice_start = border_stop\n elif border_stop - border_start >= 1:\n # mask_slice = mask[slice_start,border_start]\n if border_stop - border_start < max_border_height:\n block_heights.append(border_start - slice_start)\n block_areas.append(np.sum(mask[slice_start:border_start, :]))\n # segs_by_middle_borders.append(mask_slice)\n slice_start = border_stop\n if slice_start < lower_most:\n # segs_by_middle_borders.append(mask[slice_start:])\n block_heights.append(mask.shape[0] - slice_start)\n block_areas.append(np.sum(mask[slice_start:mask.shape[0], :]))\n\n if len(block_areas) < 2:\n return False\n if len(block_areas) == 2:\n max_area = max(block_areas)\n min_area = min(block_areas)\n if max_area / min_area > 3:\n return False\n if len(block_areas) > 2:\n if np.std(block_areas) / np.mean(block_areas) > 2:\n return False\n\n # print(block_areas)\n # print(mask.shape[0]*mask.shape[1])\n # print(np.std(block_areas)/np.mean(block_areas))\n block_heights = [h for h in block_heights if h > min_segment_height]\n\n # print('block areas ')\n # print(block_areas)\n\n # print('block heights deviation' )\n # print(np.std(block_heights))\n\n # print('block heights dev/mean' )\n # print(np.std(block_heights)/np.mean(block_heights))\n\n if len(block_heights) < 2: return False\n return segment_analyzer(block_heights)", "def section_to_patch(self, group_of_segments, angle=None, row_spacing=None,\n max_stitch_length=None):\n if max_stitch_length is None:\n max_stitch_length = self.max_stitch_length\n\n if row_spacing is None:\n row_spacing = self.row_spacing\n\n if angle is None:\n angle = self.angle\n\n # print >> sys.stderr, len(groups_of_segments)\n\n patch = Patch(color=self.color)\n first_segment = True\n swap = False\n last_end = None\n\n for segment in group_of_segments:\n # We want our stitches to look like this:\n #\n # ---*-----------*-----------\n # ------*-----------*--------\n # ---------*-----------*-----\n # ------------*-----------*--\n # ---*-----------*-----------\n #\n # Each successive row of stitches will be staggered, with\n # num_staggers rows before the pattern repeats. A value of\n # 4 gives a nice fill while hiding the needle holes. The\n # first row is offset 0%, the second 25%, the third 50%, and\n # the fourth 75%.\n #\n # Actually, instead of just starting at an offset of 0, we\n # can calculate a row's offset relative to the origin. This\n # way if we have two abutting fill regions, they'll perfectly\n # tile with each other. 
That's important because we often get\n # abutting fill regions from pull_runs().\n\n (beg, end) = segment\n\n if (swap):\n (beg, end) = (end, beg)\n\n beg = PyEmb.Point(*beg)\n end = PyEmb.Point(*end)\n\n row_direction = (end - beg).unit()\n segment_length = (end - beg).length()\n\n # only stitch the first point if it's a reasonable distance away\n # from the last stitch\n if last_end is None or (beg - last_end).length() > 0.1 * px_per_mm:\n patch.add_stitch(beg)\n\n first_stitch = self.adjust_stagger(beg, angle, row_spacing,\n max_stitch_length)\n\n # we might have chosen our first stitch just outside this row, so\n # move back in\n if (first_stitch - beg) * row_direction < 0:\n first_stitch += row_direction * max_stitch_length\n\n offset = (first_stitch - beg).length()\n\n while offset < segment_length:\n patch.add_stitch(beg + offset * row_direction)\n offset += max_stitch_length\n\n if (end - patch.stitches[-1]).length() > 0.1 * px_per_mm:\n patch.add_stitch(end)\n\n last_end = end\n swap = not swap\n\n return patch", "def Plot_RNAStructure_highlight(sequence, dot, hg_base_list=[], mode='fill', correctT=True, \n scaling=0.8, highlight_region=[], annotation=[], \n bpprob=[], bpprob_cutofflist=[0.6,0.8,0.95], bpprob_mode='color', bpwarning=True,\n period=10, first_base_pos=1, peroid_color='#000000',\n title=\"\", wait=True, VARNAProg=VARNAProg):\n \n assert len(sequence) == len(dot)\n assert mode in ('label', 'fill')\n \n if correctT:\n sequence = sequence.replace('T', 'U')\n \n CMD = \"java -cp \"+VARNAProg+\" fr.orsay.lri.varna.applications.VARNAcmd -sequenceDBN %s -structureDBN \\\"%s\\\" -drawBackbone false -drawBases false -bpStyle simple \" % (sequence, dot)\n \n if hg_base_list:\n if mode == 'label':\n CMD += \"-basesStyle1 \\\"label=#FF0000\\\" \"\n else:\n CMD += \"-basesStyle1 \\\"fill=#FF0000\\\" \"\n hg_base_list = [str(item) for item in hg_base_list]\n CMD += \"-applyBasesStyle1on \\\"%s\\\" \" % (\",\".join(hg_base_list), )\n \n if highlight_region:\n CMD += \" \" + __highlight_region_cmd(highlight_region)\n \n if annotation:\n CMD += \" \" + __annotation_cmd(annotation)\n \n if scaling:\n CMD += \" -spaceBetweenBases \\\"%s\\\"\" % (scaling, )\n \n if bpprob:\n new_bpprob = __dot_match_bpprob(dot, bpprob, bpwarning)\n CMD += \" \" + __basepair_bpprob_cmd(new_bpprob, bpprob_cutofflist, bpprob_mode)\n \n if first_base_pos==1 and peroid_color=='#000000':\n CMD += f\" -period {period}\"\n else:\n CMD += \" \" + __manual_period(len(sequence), first_base_pos, period, peroid_color)\n \n if title:\n CMD += \" -title \\\"%s\\\"\" % (title, )\n \n if not wait:\n CMD = \"nohup \" + CMD + \" &\"\n \n return CMD", "def separation_crawler(mode: bool) -> bool:\r\n for x in range(shape):\r\n for y in range(shape):\r\n if mode:\r\n if conflict_space[x, y] != 0 and safeboard[x, y] == 1:\r\n if walled_in(x, y):\r\n safeboard[x, y] = 0\r\n print(\"Cell will create separation if marked. 
Marked safe:\", x, \",\", y)\r\n progress_handler(False, True)\r\n else:\r\n if example[x, y] == 0:\r\n if walled_in(x, y):\r\n print(\"Solution Rejected, separated areas\")\r\n return True", "def _red_detect_(self, nslice = 0, thresh = 2.0):\n zk_1 = 's_' + format(nslice, '03d')\n zk_2 = 's_' + format(nslice+1, '03d')\n\n zf_1 = self.z_dense[zk_1]\n zf_2 = self.z_dense[zk_2]\n\n # extract the y and x coordinates\n y1 = zf_1[:,0]\n x1 = zf_1[:,1]\n\n y2 = zf_2[:,0]\n x2 = zf_2[:,1]\n\n\n # create a meshgrid\n [YC, YR] = np.meshgrid(y2, y1)\n [XC, XR] = np.meshgrid(x2, x1)\n\n\n dist_block = np.sqrt((YC-YR)**2 + (XC-XR)**2)\n red_pair = np.where(dist_block <= thresh) # find out where the distance between cell i in plane k and cell j in plane k+1 is below the threshold.\n\n ind1 = red_pair[0] # the indices in the first frame\n ind2 = red_pair[1] # the indices in the second frame\n\n\n # select those with markers > 0 and markers < 0\n marker_1 = zf_1[ind1, 3]\n\n\n new_idx = (marker_1 == 0) # select those with zero-markers, which are never counted before. These are new cells. marker_1 needs to be updated.\n pool_new = ind1[new_idx] # select the indices in the first frame where new redundancies are detected \n pool_new_cov = ind2[new_idx] # select the indices in the second frame where new redundancies are detected.\n\n\n pool_exist = ind1[~new_idx] # among the detected redundancies, find those already marked.\n pool_exist_cov = ind2[~new_idx] # correspondingly, find those already marked in the adjacent slice\n\n n_new = len(pool_new)\n n_exist = len(pool_exist)\n if self.verbose:\n print(n_new, \"new redundancies, \", n_exist, \"existing redundancies\")\n\n for n_count in np.arange(n_new):\n # build the new keys\n # also, we need to assign each new key an identity number which is unique.\n n_ind1 = pool_new[n_count] # find the indices in the first slice that contains new redundancies\n n_ind2 = pool_new_cov[n_count] # find the indices in the following slice \n pr_number = nslice * 1000 + n_ind1\n pr_key = 'sl_' + str(pr_number) # build a key \n new_sl = Simple_list(nslice) # create a simple list with z_marker = nslice, nslice is the index of the first z-slice \n new_sl.add([nslice, zf_1[n_ind1, 4]])\n new_sl.add([nslice+1, zf_2[n_ind2, 4]])\n zf_1[n_ind1, 3] = pr_number # assign the new pr_number to zf_1\n zf_2[n_ind2, 3] = pr_number # assigne the same new pr_number to zf_2\n\n self.redundancy_pool[pr_key] = new_sl # stored into the redundancy pool\n\n\n for n_count in np.arange(n_exist):\n # search for the existing keys\n n_ind1 = pool_exist[n_count]\n n_ind2 = pool_exist_cov[n_count]\n pr_number = int(zf_1[n_ind1, 3])# catch up the pr_number\n pr_key = 'sl_' + str(pr_number) # this pr_key should already exist in the pool. 
\n\n self.redundancy_pool[pr_key].add([nslice+1, zf_2[n_ind2, 4]])\n zf_2[n_ind2, 3] = pr_number # update the pr_number in the adjacent slice", "def borra_overlaps(self):\r\n nomTabla=self.nomTabla.split(\".\")[1]\r\n dicCondWhere={}\r\n dicCondWhere[\"id_trabajo\"]=self.oUtiles.id_trabajo\r\n if nomTabla == \"ed_fincas\":\r\n nomTablaOverlaps=\"ed_src\" + str(self.oUtiles.src_trabajo) + \".\" + \"ed_overlaps_fincas\"\r\n nomTablaGaps=\"ed_src\" + str(self.oUtiles.src_trabajo) + \".\" + \"ed_gaps_fincas\"\r\n else:\r\n nomTablaOverlaps=\"src\" + str(self.oUtiles.src_trabajo) + \".\" + \"overlaps_fincas\"\r\n nomTablaGaps=\"src\" + str(self.oUtiles.src_trabajo) + \".\" + \"gaps_fincas\"\r\n self.oUtiles.oConsultasPg.deleteDatos(nombreTabla=nomTablaOverlaps,dicCondWhere=dicCondWhere)\r\n self.oUtiles.oConsultasPg.deleteDatos(nombreTabla=nomTablaGaps,dicCondWhere=dicCondWhere)", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def searchBreakend(self, region): \n\t\treturn filter(lambda X: X == region, self)", "def threshold_segment(img):\n\n m,n = img.shape\n g = img.sum()/(m*n)\n\n segmented = np.zeros((m,n),dtype=float)\n\n for i,j in it.product(range(m),range(n)):\n\n if img[i,j] - g >= 100:\n segmented[i,j] = 0\n elif img[i,j] - g >= 50:\n segmented[i,j] = 50\n elif img[i,j] - g >= 0:\n segmented[i,j] = 100\n elif img[i,j] - g >= -50:\n segmented[i,j] = 150\n elif img[i,j] - g >= -100:\n segmented[i,j] = 200\n else:\n segmented[i,j] = 255\n\n return segmented", "def separate_frontier(self):\n\t#print self.frontier\n region = [] # a list of tuples\n region_list = [] # a list of regions\n in_list = False\n region_size = 7\n num_regions = 25\n n = 0\n h = 0\n print \"separate frontier\"\n while(region_size>0):\n for i in range(len(self.frontier)):\n if (h < num_regions):\n region = []\n self.find_region(self.frontier[i], region)\n\t\t #rospy.loginfo(region)\n in_list = region in region_list\n if (len(region) > region_size) and (not in_list):\n 
region_list.append(region)\n h += 1\n self.regions = region_list\n region_size -= 1\n\t#print self.regions", "def render_regions(view=None):\r\n # Get current active view\r\n if view is None:\r\n view = sublime.active_window().active_view()\r\n # Unable to set regions when no view available\r\n if view is None:\r\n return\r\n\r\n # Do no set regions if view is empty or still loading\r\n if view.size() == 0 or view.is_loading():\r\n return\r\n\r\n # Remove all markers to avoid marker conflict\r\n view.erase_regions(S.REGION_KEY_BREAKPOINT)\r\n view.erase_regions(S.REGION_KEY_CURRENT)\r\n view.erase_regions(S.REGION_KEY_DISABLED)\r\n\r\n # Get filename of current view and check if is a valid filename\r\n filename = view.file_name()\r\n if not filename:\r\n return\r\n\r\n # Determine icon for regions\r\n icon_current = get_region_icon(S.KEY_CURRENT_LINE)\r\n icon_disabled = get_region_icon(S.KEY_BREAKPOINT_DISABLED)\r\n icon_enabled = get_region_icon(S.KEY_BREAKPOINT_ENABLED)\r\n\r\n # Get all (disabled) breakpoint rows (line numbers) for file\r\n breakpoint_rows = []\r\n disabled_rows = []\r\n if filename in S.BREAKPOINT and isinstance(S.BREAKPOINT[filename], dict):\r\n for lineno, bp in S.BREAKPOINT[filename].items():\r\n # Do not show temporary breakpoint\r\n if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] == filename and S.BREAKPOINT_RUN['lineno'] == lineno:\r\n continue\r\n # Determine if breakpoint is enabled or disabled\r\n if bp['enabled']:\r\n breakpoint_rows.append(lineno)\r\n else:\r\n disabled_rows.append(lineno)\r\n\r\n # Get current line from breakpoint hit\r\n if S.BREAKPOINT_ROW is not None:\r\n # Make sure current breakpoint is in this file\r\n if filename == S.BREAKPOINT_ROW['filename']:\r\n # Remove current line number from breakpoint rows to avoid marker conflict\r\n if S.BREAKPOINT_ROW['lineno'] in breakpoint_rows:\r\n breakpoint_rows.remove(S.BREAKPOINT_ROW['lineno'])\r\n # Set icon for current breakpoint\r\n icon_breakpoint_current = get_region_icon(S.KEY_BREAKPOINT_CURRENT)\r\n if icon_breakpoint_current:\r\n icon_current = icon_breakpoint_current\r\n if S.BREAKPOINT_ROW['lineno'] in disabled_rows:\r\n disabled_rows.remove(S.BREAKPOINT_ROW['lineno'])\r\n # Set current line marker\r\n if icon_current:\r\n view.add_regions(S.REGION_KEY_CURRENT, rows_to_region(S.BREAKPOINT_ROW['lineno']), S.REGION_SCOPE_CURRENT, icon_current, sublime.HIDDEN)\r\n\r\n # Set breakpoint marker(s)\r\n if breakpoint_rows and icon_enabled:\r\n view.add_regions(S.REGION_KEY_BREAKPOINT, rows_to_region(breakpoint_rows), S.REGION_SCOPE_BREAKPOINT, icon_enabled, sublime.HIDDEN)\r\n if disabled_rows and icon_disabled:\r\n view.add_regions(S.REGION_KEY_DISABLED, rows_to_region(disabled_rows), S.REGION_SCOPE_BREAKPOINT, icon_disabled, sublime.HIDDEN)", "def slice_graph_bwd( endea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\t\r\n\tstartnode = slice_node( 0, endea, reg )\t\t# start at the end of the slice node\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\t[tgt_reg, split] = currslice.get_target_reg_bwd()\r\n\t\tprint tgt_reg\r\n\t\tprint split\r\n\t\tif tgt_reg == 
\"END\":\r\n\t\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.startea != currslice.get_lines()[0][0]):\r\n\t\t\t# Do process this node further, nothing really going on \r\n\t\t\tprint \"ZEZ\"\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name() )\r\n\t\telse:\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tif split:\r\n\t\t\t\tfor ref in xrefs:\r\n\t\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\treturn [ graph, data_bib ]", "def shade(self):\n if self.distribution_records:\n path_nodes = self.svg_map.xpath(self.PATH_NODES_XPATH,\n namespaces=NAMESPACES)\n matching_regions = {}\n for record in self.distribution_records:\n region = STATES_MAP.get(record.value_str)\n region = ((region in CANADIAN_PROVINCES and 'ca-' or 'us-') +\n region)\n matching_regions[region] = True\n\n for node in path_nodes:\n if node.get('style'):\n node_id = node.get('id').lower()\n box = Path(node)\n if node_id in matching_regions:\n box.color(PRESENT_COLOR)\n else:\n box.color(ABSENT_COLOR)\n return self", "def segment_red(img, yellow_thresh, red_thresh):\n # 255 is white, Red, Green, Blue\n # where green and yellow exists strongly set to 0\n yellow_filter = (img[:, :, 1] < yellow_thresh)*numpy.ones((img.shape[0], img.shape[1]))\n # where red and yellow doesnt exist set to 0\n red_filter = (img[:, :, 2] > red_thresh)*numpy.ones((img.shape[0], img.shape[1]))\n # TAKE THE INTERSECTION of SET A and B\n total_filter = numpy.multiply(yellow_filter, red_filter)\n img[:, :, 0] = 0\n img[:, :, 1] = numpy.multiply(total_filter, img[:, :, 1])\n img[:, :, 2] = numpy.multiply(total_filter, img[:, :, 2])\n cv2.imwrite('segmented.png', img)\n return img", "def mask_region(self, region, days=14):\n i = self.Rs.index(region)\n c_s = np.nonzero(np.cumsum(self.NewCases.data[i, :] > 0) == days + 1)[0][0]\n d_s = np.nonzero(np.cumsum(self.NewDeaths.data[i, :] > 0) == days + 1)[0]\n if len(d_s) > 0:\n d_s = d_s[0]\n else:\n d_s = len(self.Ds)\n\n self.Active.mask[i, c_s:] = True\n self.Confirmed.mask[i, c_s:] = True\n self.Deaths.mask[i, d_s:] = True\n self.NewDeaths.mask[i, d_s:] = True\n self.NewCases.mask[i, c_s:] = True\n\n return c_s, d_s", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = 
slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def plot_timeline(self, param_name, start_time=None, stop_time=None, \n provider=None, calib=False, max_pts=None):\n\n meta = self.get_param_info(param_name, mode='simple')\n data = self.get_data(param_name, start_time, stop_time, provider, calib, max_pts)\n data = data.squeeze()\n\n if data is None:\n return None\n\n fig, ax = plt.subplots()\n \n # get a list of changes in the data\n # changes = data[data.diff()!=0].index.tolist() # <-- does not work with strings\n # changes = data[1:][data[1:].ne(data[1:].shift())].index.tolist()\n changes = data[data.ne(data.shift())].index.tolist()\n\n # add the end of the last period\n changes.append(data.index.max())\n\n # trying to do gap detection here to NOT fully shade areas where we have no data\n # first finding the mean periodicity of the data\n mean = data.index.to_series().diff().mean()\n\n # now flag periods where the gaps are 2x this\n gap_ends = data[data.index.to_series().diff()>2*mean].index.tolist()\n\n # get the durations\n durations = np.diff(changes)\n\n\n\n # make a colour index with the correct number of colors, spanning the colourmap\n colours = cm.get_cmap('viridis')\n\n # get the list of unique values and create a colour list with this many entries\n num_unique = len(data.unique())\n colour_list = [colours(1.*i/num_unique) for i in range(num_unique)]\n \n # make a dictionary mapping unique values to colours\n unique = data.unique().tolist()\n colors = data[changes].map(dict(zip(unique, colour_list)))\n\n \n\n # now define the x and y ranges \n xranges = [(stop, end) for stop, end in zip(changes, durations)]\n yranges = (1, 0.5)\n\n # plot it using the broken horizontal bar function\n ax.broken_barh(xranges, yranges, facecolors=colors, zorder=2)\n \n ax.set_title(meta['Description'])\n ax.set_xlabel('Date (UTC)')\n # if 'Unit' in meta.index:\n # ax.set_ylabel(meta['Unit'])\n # else:\n # ax.set_ylabel('Calibrated') if calib else ax.set_ylabel('Raw')\n fig.autofmt_xdate()\n xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')\n ax.xaxis.set_major_formatter(xfmt)\n\n return ax", "def region_growing(imOr,reg,area,prof,conn,precision):\n\tif prof:\n\t\tif area > 0.085:\n\t\t\treg1 = morph('erode',reg,25)\n\t\t\tif reg1.max() == 1.0:\n\t\t\t\treg = reg1\n\t\t\telse:\n\t\t\t\treg1 = morph('erode',reg,15)\n\t\t\t\tif reg1.max() == 1.0:\n\t\t\t\t\treg = reg1\n\telse:\n\t\tif area > 0.15:\n\t\t\treg1 = morph('erode',reg,15)\n\t\t\tif reg1.max() == 1.0:\n\t\t\t\treg = reg1\n\n\telementos = contar (reg,1)\n\tseguir = True\n\twhile seguir:\n\t\treg = cv2.convertScaleAbs(reg)\n\t\t_,contours, h = cv2.findContours(reg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t\treg[reg != 0] = 1\n\t\tmedia_region = sum(imOr[reg==1])/max(cv2.contourArea(contours[0]),0.001)\n\n\t\tfor elemento in contours[0]:\n\t\t\tif prof:\n\t\t\t\treg = expand(elemento,reg,imOr,media_region,precision/2,conn)\n\t\t\telse:\n\t\t\t\treg = expand(elemento,reg,imOr,media_region,precision,conn)\n\t\telementos_nuevo = contar (reg,1)\n\t\tif elementos == elementos_nuevo:\n\t\t\tseguir = False\n\t\telementos = elementos_nuevo\n\tse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\n\treg = cv2.dilate(reg.astype(np.float32),se,iterations = 3)\n\tse = 
cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\n\treg = cv2.erode(reg.astype(np.float32),se,iterations = 2)\n\n\treturn reg", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def _seg_image(self, x, y, r_cut=100):\n snr=self.snr\n npixels=self.npixels\n bakground = self.bakground\n error= self.bkg_rms(x,y,r_cut)\n kernel = self.kernel\n image_cutted = self.cut_image(x,y,r_cut)\n image_data = image_cutted\n threshold_detect_objs=detect_threshold(data=image_data, nsigma=snr,error=error)\n segments=detect_sources(image_data, threshold_detect_objs, npixels=npixels, filter_kernel=kernel)\n segments_deblend = deblend_sources(image_data, segments, npixels=npixels,nlevels=10)\n segments_deblend_info = source_properties(image_data, segments_deblend)\n nobjs = segments_deblend_info.to_table(columns=['id'])['id'].max()\n xcenter = segments_deblend_info.to_table(columns=['xcentroid'])['xcentroid'].value\n ycenter = segments_deblend_info.to_table(columns=['ycentroid'])['ycentroid'].value\n image_data_size = np.int((image_data.shape[0] + 1) / 2.)\n dist = ((xcenter - image_data_size) ** 2 + (ycenter - image_data_size) ** 2) ** 0.5\n c_index = np.where(dist == dist.min())[0][0]\n center_mask=(segments_deblend.data==c_index+1)*1 #supposed to be the data mask\n obj_masks = []\n for i in range(nobjs):\n mask = ((segments_deblend.data==i+1)*1)\n obj_masks.append(mask)\n xmin = segments_deblend_info.to_table(columns=['bbox_xmin'])['bbox_xmin'].value\n xmax = segments_deblend_info.to_table(columns=['bbox_xmax'])['bbox_xmax'].value\n ymin = segments_deblend_info.to_table(columns=['bbox_ymin'])['bbox_ymin'].value\n ymax = segments_deblend_info.to_table(columns=['bbox_ymax'])['bbox_ymax'].value\n xmin_c, xmax_c = xmin[c_index], xmax[c_index]\n ymin_c, ymax_c = ymin[c_index], ymax[c_index]\n xsize_c = xmax_c - xmin_c\n ysize_c = ymax_c - ymin_c\n if xsize_c > ysize_c:\n r_center = np.int(xsize_c)\n else:\n r_center = np.int(ysize_c)\n center_mask_info= [center_mask, r_center, xcenter, ycenter, c_index]\n return obj_masks, center_mask_info, segments_deblend", "def deleteSelectedSegs(self):\n inds = []\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n inds.append(ix)\n\n if 
len(inds)==0:\n print(\"No segments selected\")\n return\n\n self.segsChanged = True\n for ix in reversed(inds):\n del self.segments[ix]\n del self.picbuttons[ix]\n\n # update self.clusters, delete clusters with no members\n todelete = []\n for ID, label in self.clusters.items():\n empty = True\n for seg in self.segments:\n if seg[-1] == ID:\n empty = False\n break\n if empty:\n todelete.append(ID)\n\n self.clearButtons()\n\n # Generate new class labels\n if len(todelete) > 0:\n keys = [i for i in range(self.nclasses) if i not in todelete] # the old keys those didn't delete\n # print('old keys left: ', keys)\n\n nclasses = self.nclasses - len(todelete)\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in keys:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n labels = dict(labels)\n # print(labels)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before delete: ', self.clusters)\n self.clusters = clusters\n print('after delete: ', self.clusters)\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n\n self.nclasses = nclasses\n\n # redraw the buttons\n self.updateButtons()\n self.completeChanged.emit()", "def find_lines(\n\tthreshold, regions=None, direction=\"horizontal\", line_scale=15, iterations=0\n):\n\tlines = []\n\n\tif direction == \"vertical\":\n\t\t#size = threshold.shape[0] // line_scale\n\t\tsize = threshold.shape[0] // line_scale\n\t\tel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, size))\n\telif direction == \"horizontal\":\n\t\tsize = threshold.shape[1] // line_scale\n\t\tel = cv2.getStructuringElement(cv2.MORPH_RECT, (size, 1))\n\telif direction is None:\n\t\traise ValueError(\"Specify direction as either 'vertical' or 'horizontal'\")\n\n\tif regions is not None:\n\t\tregion_mask = np.zeros(threshold.shape)\n\t\tfor region in regions:\n\t\t\tx, y, w, h = region\n\t\t\tregion_mask[y : y + h, x : x + w] = 1\n\t\tthreshold = np.multiply(threshold, region_mask)\n\n\tthreshold = cv2.erode(threshold, el)\n\n\tthreshold = cv2.dilate(threshold, el)\n\t##############################################\n\t#threshold = cv2.dilate(threshold,el,iterations=iterations)\n\t#threshold = cv2.erode(threshold,el,iterations=iterations)\n\t\n\t#threshold = cv2.dilate(threshold,el,iterations=iterations)\n\t#threshold = cv2.erode(threshold,el,iterations=iterations)\n\t####################################################\n\tdmask = cv2.dilate(threshold, el, iterations=iterations)\n\n\ttry:\n\t\t_, contours, _ = cv2.findContours(\n\t\t\tthreshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\texcept ValueError:\n\t\t# for opencv backward compatibility\n\t\tcontours, _ = cv2.findContours(\n\t\t\tthreshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\n\tfor c in contours:\n\t\tx, y, w, h = cv2.boundingRect(c)\n\t\tx1, x2 = x, x + w\n\t\ty1, y2 = y, y + h\n\t\tif direction == \"vertical\":\n\t\t\tlines.append(((x1 + x2) // 2, y2, (x1 + x2) // 2, y1))\n\t\telif direction == \"horizontal\":\n\t\t\tlines.append((x1, (y1 + y2) // 2, x2, (y1 + y2) // 2))\n\n\treturn dmask, lines", "def closeRegions(regions):\n for orig,table in regions.items():\n for dest in table['neighbors']:\n if not regions.has_key(dest):\n regions[dest] = {'neighbors': set(),\n 'value': 4}\n regions[dest]['neighbors'].add(orig)\n return regions", "def segment(segmentation_model, thresholds=None):\n\n if thresholds 
is None:\n thresholds = DEFAULT_THRESHOLDS\n\n # Mark all flats\n _set_flat_segments(segmentation_model, thresholds)\n\n yield None\n\n while (segmentation_model.phases == CurvePhases.UndeterminedNonFlat.value).any():\n\n # Mark linear slope\n flanking = _set_nonflat_linear_segment(segmentation_model, thresholds)\n\n yield None\n\n if flanking.any():\n\n first_on_left_flank = flanking.argmin()\n\n for filt in _get_candidate_segment(flanking):\n\n direction = PhaseEdge.Right if \\\n filt.argmin() == first_on_left_flank else \\\n PhaseEdge.Left\n\n # Mark flanking non-linear phase\n phase = _set_nonlinear_phase_type(segmentation_model, thresholds, filt, direction)\n\n if phase is CurvePhases.Undetermined:\n # If no curved segment found, it is not safe to look for more\n # non-flat linear phases because could merge two that should\n # not be merged.\n segmentation_model.phases[filt] = CurvePhases.UndeterminedNonLinear.value\n\n # Only look for the first non-linear segment rest is up for grabs for\n # Next iteration of finding impulses or collapses\n flanking[filt] = False\n\n yield None\n\n # Try to classify remaining positions as non linear phases\n for filt in _get_candidate_segment(segmentation_model.phases, test_value=CurvePhases.UndeterminedNonLinear.value):\n\n phase = _set_nonlinear_phase_type(segmentation_model, thresholds, filt, PhaseEdge.Intelligent)\n\n yield None\n\n # If currently considered segment had no phase then it is undetermined\n if phase is CurvePhases.Undetermined:\n\n segmentation_model.phases[filt] = phase.value\n yield None\n\n # If there's an offset assume phase carries to edge\n if segmentation_model.offset:\n segmentation_model.phases[:segmentation_model.offset] = \\\n segmentation_model.phases[segmentation_model.offset]\n segmentation_model.phases[-segmentation_model.offset:] = \\\n segmentation_model.phases[-segmentation_model.offset - 1]\n yield None\n\n # Bridge neighbouring segments of same type if gap is one\n _fill_undefined_gaps(segmentation_model.phases)", "def removeDots(image,points,coeff2,newColorArray):\r\n for i in range(1,coeff2):\r\n if (px[3,points*i] != (0,0,0)):\r\n im.putpixel((0,points*i-1),newColorArray[0][i-1])\r\n im.putpixel((1,points*i-1),newColorArray[0][i-1])\r\n im.putpixel((0,points*i),newColorArray[0][i])\r\n im.putpixel((1,points*i),newColorArray[0][i])\r\n im.putpixel((0,points*i+1),newColorArray[0][i])\r\n im.putpixel((1,points*i+1),newColorArray[0][i])\r\n if lineWidth==5:\r\n im.putpixel((0,points*i-2),newColorArray[0][i-1])\r\n im.putpixel((1,points*i-2),newColorArray[0][i-1])\r\n im.putpixel((0,points*i+2),newColorArray[0][i])\r\n im.putpixel((1,points*i+2),newColorArray[0][i])\r\n\r\n for i in range(1,coeff2):\r\n for j in range(1,coeff2):\r\n if (px[points*i-3,points*j] != (0,0,0) and px[points*i+3,points*j] != (0,0,0) and px[points*i,points*j-3] != (0,0,0) and px[points*i,points*j+3] != (0,0,0)):\r\n draw.line((points*i-5,points*j,points*i+5,points*j),fill=newColorArray[i][j],width=lineWidth)\r\n for i in range(1,coeff2):\r\n if px[points*i,500]!=(0,0,0):\r\n draw.line((points*i,500,points*i,511),fill=newColorArray[i][15],width=lineWidth)\r\n if px[500,points*i]!=(0,0,0):\r\n draw.line((500,points*i,511,points*i),fill=newColorArray[15][i],width=lineWidth)", "def detectBorders(self, points):\n lane1 = []; lane2 = []\n self.leftLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n self.rightLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n\n pointMap 
= np.zeros((points.shape[0], 20))\n prePoint = np.zeros((points.shape[0], 20))\n postPoint = np.zeros((points.shape[0], 20))\n\n dis = 10\n max1 = -1; max2 = -1\n\n ##\n ## /!\\ UNSAFE LOOP, TODO: FIX\n ##\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n pointMap[i][j] = 1\n prePoint[i][j] = -1\n postPoint[i][j] = -1\n\n for i in reversed(range(points.shape[0] - 2)):\n\n for j in range(len(points[i])):\n\n err = 320\n for m in range(1, min(points.shape[0] - 1 - i, 5)):\n check = False ## TODO: why unused ?\n\n for k in range(len(points[i + 1])):\n\n (x_m, y_m) = points[i + m][k].pt\n (x, y) = points[i][j].pt\n\n if (abs(x_m - x) < dis and abs(y_m - y) < err):\n err = abs(x_m - x)\n\n pointMap[i][j] = pointMap[i + m][k] + 1\n prePoint[i][j] = k\n postPoint[i + m][k] = j\n check = True\n\n break ## breaks out of the m loop. Why is it not conditioned by check ? TODO: ???\n\n if (pointMap[i][j] > max1):\n max1 = pointMap[i][j]\n posMax = cv2.KeyPoint(i, j, _size=0)\n \n else:\n posMax = None\n\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n if posMax:\n if (pointMap[i][j] > max2 and (i != posMax.pt[0] or j != posMax.pt[1]) and postPoint[i][j] == -1): #FIXME \"local variable 'posMax' referenced before assignment\" possible\n max2 = pointMap[i][j]\n posMax2 = cv2.KeyPoint(i, j, _size=0)\n\n\n\n if max1 == -1:\n return\n\n # DEFINES LANE 1 POINTS\n while (max1 >= 1):\n (x,y) = points[int(posMax.pt[0])][int(posMax.pt[1])].pt\n lane1.append(\n [x,y]\n )\n if (max1 == 1):\n break\n\n posMax = cv2.KeyPoint(\n posMax.pt[0]+1,\n prePoint[int(posMax.pt[0])][int(posMax.pt[1])],\n _size=0\n )\n\n max1 -= 1\n\n # DEFINES LANE 2 POINTS\n while (max2 >= 1):\n (x,y) = points[int(posMax2.pt[0])][int(posMax2.pt[1])].pt\n lane2.append(\n [x, y]\n )\n if (max2 == 1):\n break\n\n posMax2 = cv2.KeyPoint(\n posMax2.pt[0]+1,\n prePoint[int(posMax2.pt[0])][int(posMax2.pt[1])],\n _size=0\n )\n\n max2-= 1\n\n subLane1 = np.array(lane1[0:5])\n subLane2 = np.array(lane2[0:5])\n\n # checking if sublane has an empty value\n\n line1 = cv2.fitLine(subLane1, 2, 0, 0.01, 0.01)\n line2 = cv2.fitLine(subLane2, 2, 0, 0.01, 0.01)\n\n try:\n lane1X = (self.BIRDVIEW_WIDTH - line1[3]) * line1[0] / line1[1] + line1[2]\n except:\n lane1X = 0\n\n try:\n lane2X = (self.BIRDVIEW_WIDTH - line2[3]) * line2[0] / line2[1] + line2[2]\n except:\n lane2X = 0\n \n if (lane1X < lane2X):\n for i in range(len(lane1)):\n self.leftLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.rightLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]\n\n else:\n\n for i in range(len(lane1)):\n self.rightLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.leftLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]", "def segment_cells(frame, mask=None):\n \n blurred = filters.gaussian(frame, 2)\n ridges = enhance_ridges(frame)\n \n # threshold ridge image\n thresh = filters.threshold_otsu(ridges)\n thresh_factor = 0.5\n prominent_ridges = ridges > thresh_factor*thresh\n prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)\n prominent_ridges = morphology.binary_closing(prominent_ridges)\n prominent_ridges = morphology.binary_dilation(prominent_ridges)\n \n # skeletonize\n ridge_skeleton = morphology.medial_axis(prominent_ridges)\n ridge_skeleton = morphology.binary_dilation(ridge_skeleton)\n ridge_skeleton *= mask\n ridge_skeleton = 
np.bitwise_xor(ridge_skeleton, mask)\n \n # label\n cell_label_im = measure.label(ridge_skeleton)\n \n # morphological closing to fill in the cracks\n for cell_num in range(1, cell_label_im.max()+1):\n cell_mask = cell_label_im==cell_num\n cell_mask = morphology.binary_closing(cell_mask, disk(3))\n cell_label_im[cell_mask] = cell_num\n \n return cell_label_im", "def _cons_traj(self,lon,lat,period):\n\t\tgroup = self['%g_sec'%( period )]\n\t\tlonArr = group['lonArr'].value\n\t\tlatArr = group['latArr'].value\n\t\tageArr = group['age_Arr'].value\n\t\tage_drv_lon = group['age_deriv_lon_Arr']\n\t\tage_drv_lat = group['age_deriv_lat_Arr']\n\t\tage_drv_msk = group['age_deriv_msk_Arr']\n\t\ttomo_data_msk = group['tomo_data_msk'].value\n\t\tmask_init = np.logical_or(tomo_data_msk, age_drv_msk)\n\t\tmask = np.array(np.ones(lonArr.shape),dtype=bool)\n\t\ti_s, j_s = self._find_nearest_grid(lon,lat,period) # find the 1st and 2nd index of the start point on grid\n\t\tif mask_init[i_s,j_s]:\n\t\t\traise ValueError(\"The starting point is out bounds of the resulting map\")\n\t\tmask[i_s,j_s] = False\n\t\t# go along increasing age direction\n\t\ti,j = _walk(age_drv_lon,age_drv_lat,i_s,j_s,1)\n\t\twhile (0<i<lonArr.shape[0] and 0<j<lonArr.shape[1]):\n\t\t\tif mask_init[i,j]:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tmask[i,j] = False\n\t\t\ti,j = _walk(age_drv_lon,age_drv_lat,i,j,1)\n\t\t\t\n\t\t# go along descending age direction\n\t\ti_2,j_2 = _walk(age_drv_lon,age_drv_lat,i_s,j_s,-1)\n\t\tmask[i_2,j_2] = False\n\t\twhile (0<i_2<lonArr.shape[0] and 0<j_2<lonArr.shape[1]):\n\t\t\tif mask_init[i_2,j_2]:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tmask[i_2,j_2] = False\n\t\t\tage_b = ageArr[i_2,j_2]\n\t\t\ti_b,j_b = i_2,j_2\n\t\t\ti_2,j_2 = _walk(age_drv_lon,age_drv_lat,i_2,j_2,-1)\n\t\t\tif ageArr[i_2,j_2] > age_b: # find the youngest age\n\t\t\t\ti_3,j_3 = i_b,j_b\n\t\t\t\tbreak\n\t\ttry:\n\t\t\ti_3\n\t\texcept NameError:\n\t\t\treturn mask\n\t\t\n\t\t# go along increasing age from the found youngest age\n\t\twhile (0<i_3<lonArr.shape[0] and 0<j_3<lonArr.shape[1]):\n\t\t\tif mask_init[i_3,j_3]:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tmask[i_3,j_3] = False\n\t\t\ti_3,j_3 = _walk(age_drv_lon,age_drv_lat,i_3,j_3,1)\n\t\treturn mask", "def _draw_segments(frame, segments):\n for segment in segments:\n cv2.line(frame, segment[0], segment[1],\n color=(0, 255, 255), thickness=2)\n cv2.circle(frame, segment[0], radius=3,\n color=(255, 0, 0), thickness=-1)\n cv2.circle(frame, segment[1], radius=3,\n color=(255, 0, 0), thickness=-1)", "def overlay_segmentation(image_bgr,\n segmentation,\n colormap=cv2.COLORMAP_PARULA,\n alpha=0.6,\n inplace=True):\n if inplace:\n image_target_bgr = image_bgr\n else:\n image_target_bgr = image_bgr * 0\n\n bbox_xywh = segmentation['bbox']\n x, y, w, h = [int(v) for v in bbox_xywh]\n if w <= 0 or h <= 0:\n return image_bgr\n\n segm = segmentation['segm']\n levels = 255.0 / segm.max()\n mask_bg = np.tile((segm == 0)[:, :, np.newaxis], [1, 1, 3])\n\n segm = segm.astype(np.float32) * levels\n segm = segm.clip(0, 255).astype(np.uint8)\n segm = cv2.applyColorMap(segm, cv2.COLORMAP_PARULA)\n\n segm[mask_bg] = image_target_bgr[y:y + h, x:x + w, :][mask_bg]\n\n # img_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n # segm_hsv = cv2.cvtColor(segm, cv2.COLOR_BGR2HSV)\n\n image_target_bgr[y:y + h, x:x + w, :] = (image_target_bgr[y:y + h, x:x + w, :] * (1.0 - alpha) +\n segm * alpha)\n\n return image_target_bgr.astype(np.uint8)", "def region_graph(t0, t1, t2, t3):\r\n medians0 = 
sorted(sub_saharan_africa_countries())\r\n t0.goto(-250, ((medians0[0])-50))\r\n t0.rt(90)\r\n for idx in range(1, len(medians0)):\r\n t0.pencolor(\"blue\")\r\n t0.pd()\r\n t0.setpos((-250+(idx*10.5)), ((medians0[idx])))\r\n\r\n medians1 = sorted(south_asia_countries())\r\n t1.goto(-250, ((medians1[0]) - 60))\r\n t1.rt(90)\r\n for idx in range(1, len(medians1)):\r\n t1.pencolor(\"red\")\r\n t1.pd()\r\n t1.setpos((-250 + (idx * 68.5)), (2*(medians1[idx])+10))\r\n\r\n medians2 = sorted(europe_central_asia_countries())\r\n t2.goto(-250, (medians2[0])+60)\r\n t2.rt(90)\r\n for idx in range(1, len(medians2)):\r\n t2.pencolor(\"green\")\r\n t2.pd()\r\n t2.setpos((-250 + (idx*10.19)), (2 * (medians2[idx]) + 10))\r\n\r\n medians3 = sorted(latin_america_countries())\r\n t3.goto(-250, (medians3[0]) + 20)\r\n t3.rt(90)\r\n for idx in range(1, len(medians3)):\r\n t3.pencolor(\"gold\")\r\n t3.pd()\r\n t3.setpos((-250 + (idx * 14.39)), (2 * (medians3[idx]) + 10))\r\n\r\n t0.pu()\r\n t0.goto(initialCoordinates())\r\n t1.pu()\r\n t1.goto(initialCoordinates())\r\n t2.pu()\r\n t2.goto(initialCoordinates())\r\n t3.pu()\r\n t3.goto(initialCoordinates())\r\n\r\n medians4 = sorted(middle_east_countries())\r\n t0.goto(-250, (medians4[0])-40)\r\n t0.rt(90)\r\n for idx in range(1, len(medians4)):\r\n t0.pencolor(\"BLACK\")\r\n t0.pd()\r\n t0.setpos((-250 + (idx * 26.3)), (2 * (medians4[idx]) + 10))\r\n t0.rt(180)\r\n\r\n medians5 = sorted(north_america_countries())\r\n t1.goto(-250, (medians5[0])+60)\r\n t1.rt(90)\r\n for idx in range(1, len(medians5)):\r\n t1.pencolor(\"YELLOW\")\r\n t1.pd()\r\n t1.setpos((-250 + (idx * 485.3)), (2 * (medians5[idx]) + 10))\r\n t1.rt(180)\r\n\r\n medians6 = sorted(east_asia_pacific_countries())\r\n t2.goto(-250, (medians6[0]))\r\n t2.rt(90)\r\n for idx in range(1, len(medians6)):\r\n t2.pencolor(\"purple\")\r\n t2.pd()\r\n t2.setpos((-250 + (idx * 16)), (2 * (medians6[idx]) + 10))\r\n t0.pu()\r\n t0.goto(initialCoordinates())\r\n t1.pu()\r\n t1.goto(initialCoordinates())\r\n t2.pu()\r\n t2.goto(initialCoordinates())", "def getsegs (bounds, split):\n segmentslist=bisect_rectange(split, bounds[0], bounds[1], bounds[2], bounds[3])\n count=1\n segpass=0\n \n #Get list of segment ids currently in database\n query=\"\"\"select seg_id from segment;\"\"\"\n df = pd.read_sql_query(query,con=engine)\n segids=set(df.seg_id)\n \n while count < len(segmentslist):\n try:\n for i in segmentslist:\n segments=getsegmentinfo(i)\n \n \n for seg in segments:\n #If running function several times for different splits, this ignores existing segments and prints a message\n if seg.id in segids: \n segpass+=1\n if (segpass % 10 == 0): \n print (\"{} segments already exist\".format(segpass))\n #Else this is a new segment, so get details from the strava and geocodio apis and save them to a dataframe and eventually to the database\n else:\n location = geocodio_client.reverse((seg.start_latlng[0], seg.start_latlng[1]))\n zipcode=location['results'][0]['address_components']['zip']\n \n newrow = {'seg_id' : seg.id,\n 'resource_state': seg.resource_state,\n 'climb_category':seg.climb_category,\n 'climb_category_desc':seg.climb_category_desc,\n 'average_grade':seg.avg_grade,\n 'elev_difference': str(seg.elev_difference).split()[0],\n 'distance': str(seg.distance).split()[0],\n 'name' : seg.name,\n 'start_lat' : seg.start_latlng[0],\n 'start_long' : seg.start_latlng[1],\n 'end_lat' : seg.end_latlng[0],\n 'end_long' : seg.end_latlng[1],\n 'points' : seg.points,\n 'starred':seg.starred,\n 'zipcode':zipcode\n }\n 
df=pd.DataFrame(newrow, index=[0])\n \n try:\n #Save dataframe to database\n df.to_sql('segment', engine,index=False,if_exists='append')\n except:\n pass\n\n #Prints message which keeps track of number of sub bounds completed \n if (count % 10) == 0:\n print (\"Getting segments in bound {} of {}\".format(count, len(segmentslist)))\n count+=1\n except Exception as inst:\n print (inst) \n return None", "def consecutive_sections(): # noqa: D416", "def test_segmentation_functions(sersic_2d_image):\n\n image_mean, image_median, image_stddev = sigma_clipped_stats(sersic_2d_image, sigma=3)\n threshold = image_stddev * 3\n npixels = 4 ** 2\n\n # Testing make_segments\n segm = pf.make_segments(sersic_2d_image, npixels=npixels, threshold=threshold)\n\n assert isinstance(segm, SegmentationImage)\n assert segm.shape == sersic_2d_image.shape\n assert np.all(segm.data >= 0)\n assert len(np.unique(segm.data)) == 2 # account for background being labeled as 0\n\n # Testing deblend_segments\n segm_deblend = pf.deblend_segments(sersic_2d_image, segm, npixels=npixels, contrast=0.00)\n\n assert isinstance(segm_deblend, SegmentationImage)\n assert segm_deblend.shape == sersic_2d_image.shape\n assert np.all(segm_deblend.data >= 0)\n assert len(np.unique(segm_deblend.data)) >= len(np.unique(segm.data))", "def test_burst_dispersion(self):\n # some reproducible arbitrariness\n np.random.seed(7342642)\n\n n = 25\n t_max = 50\n dt = 0.1\n n_sim = 10\n \n G = HVCLikeLayer(n)\n\n burst_starts = []\n for i in xrange(n_sim):\n M = simulation.EventMonitor(G)\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n # split spikes by neuron index\n spikes = [np.asarray(M.t)[np.asarray(M.i) == i] for i in xrange(n)]\n burst_starts.append([_[0] for _ in spikes])\n\n burst_starts_range = [np.ptp([_[i] for _ in burst_starts])\n for i in xrange(n)]\n \n self.assertLess(np.max(burst_starts_range), G.burst_noise + dt/2)", "def areasShaded(self):\n global area_ABE\n area_ABE = always_redraw(\n lambda : Polygon(dot_center.get_center(), radius_ang_end_dot.get_center(), dropped_dot.get_center(),\n color=PINK, fill_color=PINK, fill_opacity=0.5)\n )\n global area_ABE_copy\n area_ABE_copy = area_ABE.copy()\n\n self.play(Write(area_ABE))\n self.wait(0.5)\n self.play(area_ABE_copy.animate.move_to(consider_area_text.get_center()+DOWN*1.5+LEFT*1.5))\n\n global area_ABD\n area_ABD = always_redraw(\n lambda : Sector(outer_radius=self.x_max, start_angle=0, angle=theta.get_value()*DEGREES,\n stroke_width=DEFAULT_STROKE_WIDTH , stroke_color=BLUE, color=BLUE, fill_opacity=0.5).shift(LEFT*5)\n )\n global area_ABD_copy\n area_ABD_copy = area_ABD.copy()\n\n self.play(Write(area_ABD))\n self.wait(0.5)\n self.play(area_ABD_copy.animate.move_to(consider_area_text.get_center()+DOWN*1.5+RIGHT*1.5))\n\n global area_ABC\n area_ABC = always_redraw(\n lambda : Polygon(dot_center.get_center(), radius_ang_end_dot.get_center(), small_tangent_end_dot.get_center(),\n color=GREEN, fill_color=GREEN, fill_opacity=0.5)\n )\n global area_ABC_copy\n area_ABC_copy = area_ABC.copy()\n\n self.play(Write(area_ABC))\n self.wait(0.5)\n self.play(area_ABC_copy.animate.move_to(consider_area_text.get_center()+DOWN*3.5))\n\n self.play(FadeOut(consider_area_text))", "def __iteratively_retain(\n self,\n orf_regions: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n\n ret = []\n\n arr = np.zeros((len(self.seq), ))\n\n for start, end in orf_regions:\n ret.append((start, end))\n arr[start-1:end] = 1\n orf_coverage = np.sum(arr) / len(arr)\n if orf_coverage > 
self.min_orf_coverage:\n break\n\n return ret", "def split(self,i):\n alpha = 0.6\n eps = 2.6\n\n if self.n > self.maxn-3:\n print \"cannot refine any further\"\n return False\n \n # The son \n self.m[i] = self.m[i] / 4.0\n #self.h[i] = self.h[i] * alpha\n\n # Daughter 1\n self.r[self.n] = self.r[i] + eps*np.array([0,1])\n self.m[self.n] = self.m[i] \n self.v[self.n] = self.v[i]\n \n # Daughter 2\n self.r[self.n+1] = self.r[i] + eps*np.array([0.866025,-0.5])\n self.m[self.n+1] = self.m[i] \n self.v[self.n+1] = self.v[i]\n \n # Daughter 3\n self.r[self.n+2] = self.r[i] + eps*np.array([-0.866025,-0.5])\n self.m[self.n+2] = self.m[i] \n self.v[self.n+2] = self.v[i]\n \n self.n = self.n+3\n #print \"There are now \",self.n,\"particles\"\n return True", "def dark_cloud(self):\n self.data['dark_cloud'] = ((self.data['Close'].shift(1) > self.data['Open'].shift(1)) & \\\n (((self.data['Close'].shift(1) + self.data['Open'].shift(1)) / 2) > self.data['Close']) & \\\n (self.data['Open'] > self.data['Close']) & (self.data['Open'] > self.data['Close'].shift(1)) &\\\n (self.data['Close'] > self.data['Open'].shift(1)) & \\\n ((self.data['Open'] - self.data['Close']) / (.001 + (self.data['High'] - self.data['Low'])) > .6))", "def region_growing_from_input(self, color, bone_from_scan=None):\n collect()\n # initilize\n if not bone_from_scan:\n self.load_original_data()\n else:\n self.copy_original_from_bone(bone_from_scan)\n checked = zeros(self._original_img_data.shape)\n seg = zeros(self._original_img_data.shape)\n need_to_check = []\n # Color the seeds and check for neighbors\n for seed in self._seeds_points:\n seg[seed] = color\n checked[seed] = 1\n neighbors = self._get_neighbors(seed, checked, self.\n _original_img_data.shape)\n for neighbor in neighbors:\n if self._get_threshold(self._original_img_data[neighbor],\n VOID_VALUES[0],\n VOID_VALUES[1]):\n need_to_check.append(neighbor)\n # Region Growing - while there's a neighbor, color it and keep going\n bone_to_check = []\n while need_to_check:\n pt = need_to_check.pop()\n if checked[pt] == 1:\n continue\n else:\n checked[pt] = 1\n neighbors = self._get_neighbors(pt, checked, self.\n _original_img_data.shape)\n for neighbor in neighbors:\n if self._get_threshold(\n self._original_img_data[neighbor],\n VOID_VALUES[0], VOID_VALUES[1]):\n need_to_check.append(neighbor)\n if self._get_threshold(\n self._original_img_data[neighbor],\n BONE_BOUND_VALUES[0], BONE_BOUND_VALUES[1]):\n bone_to_check.append(neighbor)\n seg[pt] = color\n # Closing holes\n del need_to_check\n # check for Bone value - edge of the radius\n while bone_to_check:\n pt = bone_to_check.pop()\n if checked[pt] == 1:\n continue\n else:\n checked[pt] = 1\n neighbors = self._get_neighbors(pt, checked, self.\n _original_img_data.shape)\n for neighbor in neighbors:\n if self._get_threshold(\n self._original_img_data[neighbor],\n RADIUS_VALUES[0], RADIUS_VALUES[1]):\n bone_to_check.append(neighbor)\n seg[pt] = color\n del checked, bone_to_check\n for i in range(self._dilation):\n seg = dilation(seg, cube(3, uint8))\n for i in range(self._dilation - 1):\n seg = erosion(seg, cube(3, uint8))\n self._segmentation_data = seg\n del seg\n collect()", "def fix_straight_lines(self):\r\n\r\n # Creates a vertical 1x5 kernel and applies binary closing based on that kernel\r\n vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))\r\n self.thresh_invert = cv2.morphologyEx(self.thresh_invert, cv2.MORPH_CLOSE, vertical_kernel, iterations=9)\r\n\r\n # Creates a horizontal 5x1 kernel and applies 
binary closing based on that kernel\r\n horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))\r\n self.thresh_invert = cv2.morphologyEx(self.thresh_invert, cv2.MORPH_CLOSE, horizontal_kernel, iterations=4)", "def test_burst_dispersion(self):\n # some reproducible arbitrariness\n np.random.seed(7342642)\n\n n = 25\n t_max = 50\n dt = 0.1\n n_sim = 10\n \n G = RateHVCLayer(n)\n\n burst_starts = []\n for i in xrange(n_sim):\n M = simulation.StateMonitor(G, 'out')\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n burst_starts.append([dt*min((M.out[i] > 0).nonzero()[0])\n for i in xrange(n)])\n\n burst_starts_range = [np.ptp([_[i] for _ in burst_starts])\n for i in xrange(n)]\n \n self.assertLess(np.max(burst_starts_range), G.burst_noise + dt/2)", "def BraceBadLight(self, pos):\n # Check if we are still alive or not, as this may be called\n # after we have been deleted.\n if self:\n super(EditraBaseStc, self).BraceBadLight(pos)", "def draw_visible_area(self):\n\n if not self.given_center:\n return\n\n # Figure out what regions we should be showing\n horiz_width = self.hbar.pageStep()\n gui_start_x = self.hbar.value()\n vert_height = self.vbar.pageStep()\n gui_start_y = self.vbar.value()\n (game_min_x, game_max_y) = self.scene_to_ingame(gui_start_x, gui_start_y)\n (game_max_x, game_min_y) = self.scene_to_ingame(\n gui_start_x + horiz_width,\n gui_start_y + vert_height,\n )\n min_rx = game_min_x//32 - 1\n max_rx = game_max_x//32 + 1\n min_ry = game_min_y//32 - 1\n max_ry = game_max_y//32 + 1\n\n # First find out how many regions we're going to have to load (so that\n # we can initialize a progressbar)\n valid_regions = set()\n regions_to_load = []\n for rx in range(min_rx, max_rx+1):\n for ry in range(min_ry, max_ry+1):\n region = (rx, ry)\n valid_regions.add(region)\n if region in self.regions:\n if not self.regions[region].loaded:\n regions_to_load.append(region)\n\n # Initialize progressbar\n region_loading = self.mainwindow.region_loading\n region_loading.start(len(regions_to_load))\n\n # Now actually do the loading\n for idx, region in enumerate(regions_to_load):\n #print('Loading region {}'.format(region))\n self.regions[region].load()\n self.loaded_regions.add(region)\n region_loading.update(idx)\n\n # Unload regions which are too far out\n regions_to_unload = []\n for region in list(self.loaded_regions):\n if region not in valid_regions:\n regions_to_unload.append(region)\n\n region_loading.start(len(regions_to_unload), label='Unloading Regions')\n for idx, region in enumerate(regions_to_unload):\n #print('Unloading region {}'.format(region))\n self.regions[region].unload()\n self.loaded_regions.remove(region)\n region_loading.update(idx)\n\n # Finish our progress bar\n region_loading.finish()", "def discriminator(time,signal,height_th,dt_left=300e-9,dt_right=1400e-9,Plot=False, method=0):\n dt = time[1] - time[0]\n bins_left = int(dt_left / dt)\n bins_right = int(dt_right / (time[1] - time[0]))\n bins_final = len(time) - 1\n\n # where signal is above threshold\n initial_window = np.array(np.where(signal > height_th)[0])\n mask = initial_window\n if len(mask) > 0:\n if method == 0:\n\n # MASK METHOD 1: include regions left and right of signal above\n # threshold.\n for i in np.arange(len(initial_window)) - 1:\n if (initial_window[i + 1] - initial_window[i] < bins_right + bins_left):\n mask = array_fill_between(\n mask, initial_window[i], initial_window[i + 1])\n else:\n mask = array_fill_between(\n mask, initial_window[i], initial_window[i] + bins_right)\n 
mask = array_fill_between(\n mask, initial_window[i + 1] - bins_left, initial_window[i + 1])\n # right most pulse close to edge but doesn't exceed\n if mask[-1] + bins_right <= bins_final:\n mask = array_fill_between(\n mask, mask[-1], mask[-1] + bins_right)\n else:\n # right most pulse exceeds right edge of trace: include mask up to end of pulse\n # bins_final+1 term includes one more index since fill_between\n # fills up to bins_final\n mask = array_fill_between(mask, mask[-1], bins_final + 1)\n if mask[0] - bins_left >= 0: # left most pulse close to edge but doesn't exceed\n mask = array_fill_between(mask, mask[0] - bins_left, mask[0])\n else:\n # left most pulse exceeds left edge of trace\n # 0-1 term includes one more index '-1' since fill_between\n # fills from 0\n mask = array_fill_between(mask, 0 - 1, mask[0])\n\n if method == 1:\n # MASK METHOD 2: SR LATCH\n mask = srlatch_full(signal, 0, height_th)\n\n # MASK METHOD 1: include regions left and right of signal above threshold.\n for i in np.arange(len(initial_window))-1:\n if (initial_window[i+1]-initial_window[i] < bins_right+bins_left):\n mask = array_fill_between(mask, initial_window[i], initial_window[i+1])\n else:\n mask = array_fill_between(mask, initial_window[i], initial_window[i]+bins_right)\n mask = array_fill_between(mask, initial_window[i+1]-bins_left, initial_window[i+1])\n if mask[-1]+bins_right <= bins_final: #right most pulse close to edge but doesn't exceed\n mask = array_fill_between(mask, mask[-1], mask[-1]+bins_right)\n else: \n #right most pulse exceeds right edge of trace: include mask up to end of pulse\n #bins_final+1 term includes one more index since fill_between fills up to bins_final\n mask = array_fill_between(mask, mask[-1], bins_final+1)\n if mask[0]-bins_left >= 0: #left most pulse close to edge but doesn't exceed\n mask = array_fill_between(mask, mask[0]-bins_left, mask[0])\n else: \n #left most pulse exceeds left edge of trace\n #0-1 term includes one more index '-1' since fill_between fills from 0\n mask = array_fill_between(mask, 0-1, mask[0])\n \n\n if method == 2:\n # MASK METHOD 3: rev SR LATCH\n initial_window = np.where(srlatch_rev(signal, 0, height_th))[0]\n \n # MANUAL INCLUSION OF ADDITIONAL PULSE REGIONS, since srlatch_rev does not include the full pulse \n mask = initial_window\n\n for i in np.arange(len(initial_window)) - 1:\n if (initial_window[i + 1] - initial_window[i] < bins_right + bins_left):\n mask = array_fill_between(\n mask, initial_window[i], initial_window[i + 1])\n else:\n mask = array_fill_between(\n mask, initial_window[i], initial_window[i] + bins_right)\n mask = array_fill_between(\n mask, initial_window[i + 1] - bins_left, initial_window[i + 1])\n # right most pulse close to edge but doesn't exceed\n if mask[-1] + bins_right <= bins_final:\n mask = array_fill_between(\n mask, mask[-1], mask[-1] + bins_right)\n else:\n # right most pulse exceeds right edge of trace: include mask up to end of pulse\n # bins_final+1 term includes one more index since fill_between\n # fills up to bins_final\n mask = array_fill_between(mask, mask[-1], bins_final + 1)\n if mask[0] - bins_left >= 0: # left most pulse close to edge but doesn't exceed\n mask = array_fill_between(mask, mask[0] - bins_left, mask[0])\n else:\n # left most pulse exceeds left edge of trace\n # 0-1 term includes one more index '-1' since fill_between\n # fills from 0\n mask = array_fill_between(mask, 0 - 1, mask[0])\n\n # transforms index mask into boolean mask\n\n for i in np.arange(len(initial_window))-1:\n if 
(initial_window[i+1]-initial_window[i] < bins_right+bins_left):\n mask = array_fill_between(mask, initial_window[i], initial_window[i+1])\n else:\n mask = array_fill_between(mask, initial_window[i], initial_window[i]+bins_right)\n mask = array_fill_between(mask, initial_window[i+1]-bins_left, initial_window[i+1])\n \n if mask[-1]+bins_right <= bins_final: #right most pulse close to edge but doesn't exceed\n mask = array_fill_between(mask, mask[-1], mask[-1]+bins_right)\n else: \n #right most pulse exceeds right edge of trace: include mask up to end of pulse\n #bins_final+1 term includes one more index since fill_between fills up to bins_final\n mask = array_fill_between(mask, mask[-1], bins_final+1)\n if mask[0]-bins_left >= 0: #left most pulse close to edge but doesn't exceed\n mask = array_fill_between(mask, mask[0]-bins_left, mask[0])\n else: \n #left most pulse exceeds left edge of trace\n #0-1 term includes one more index '-1' since fill_between fills from 0\n mask = array_fill_between(mask, 0-1, mask[0])\n\n if method == 3:\n # MASK METHOD 4: SR LATCH (CONVENTIONAL)\n mask = srlatch(signal, 0, height_th)\n\n #transforms index mask into boolean mask\n\n mask_boolean = np.zeros(len(time), dtype=int)\n mask_boolean[mask] = 1\n mask = mask_boolean\n\n edges = np.diff(mask * 1) # left edge = 1, right edge = -1\n\n # find indices of left and right edges of pulses\n right_edges = np.array(np.where(edges < 0), dtype='int64').flatten()\n left_edges = np.array(np.where(edges > 0), dtype='int64').flatten()\n clamp = np.zeros(len(time))\n\n if (len(left_edges) > 0)and(len(right_edges > 0)):\n for i in np.arange(len(time)):\n if left_edges[0] <= i <= right_edges[-1]:\n clamp[i] = 1\n\n clamp = np.array(clamp, dtype='bool')\n mask = np.array(mask, dtype='bool')\n # print clamp,edges,left_edges,right_edges\n\n if Plot:\n plt.figure()\n plt.plot(time, signal)\n plt.hlines(height_th, time[0], time[1], linestyle='--')\n plt.plot(time, mask * np.max(signal), label='mask')\n plt.plot(time[1:], edges * np.max(signal), label='edges')\n plt.plot(time, (clamp & mask) * np.max(signal), label='clamp&mask')\n plt.scatter(time[(clamp & mask)], signal[\n (clamp & mask)], label='', color='red', marker='o')\n\n # else:\n # clamp = np.ones(len(time)) \n # print 'error: no index between first left and last right \\n{}'.format([left_edges, right_edges])\n # else:\n # clamp = np.ones(len(time))\n # print 'error: left or right index zero length \\n{}'.format([left_edges, right_edges])\n clamp = np.array(clamp,dtype='bool')\n mask = np.array(mask,dtype='bool')\n \n # print clamp,edges,left_edges,right_edges\n\n if Plot:\n plt.figure(figsize=(10,5))\n plt.plot(time,signal)\n plt.hlines(height_th,time[0],time[1],linestyle='--')\n plt.plot(time,mask*np.max(signal),label='mask')\n plt.plot(time[1:],edges*np.max(signal),label='edges')\n plt.plot(time,(clamp&mask)*np.max(signal),label='clamp&mask')\n plt.scatter(time[(clamp&mask)],signal[(clamp&mask)],label='',color='red',marker='o')\n plt.legend()\n\n return np.array([mask, clamp, edges, left_edges, right_edges])", "def split(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):\r\n minMarker = localMin(eccMap, cutStep)\r\n\r\n plt.figure()\r\n plt.imshow(minMarker, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 1')\r\n plt.show()\r\n\r\n minMarker = minMarker.astype(np.int32)\r\n selfArray = self.array.astype(np.int32)\r\n minMarker = minMarker + 1\r\n minMarker[minMarker == 1] = 0\r\n minMarker = minMarker + (-1 * 
(selfArray - 1))\r\n # minMarker: marker type for opencv watershed,\r\n # sure background = 1\r\n # unknow = 0\r\n # sure forgrand = 2,3,4... etc\r\n\r\n plt.figure()\r\n plt.imshow(minMarker, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 2')\r\n plt.show()\r\n\r\n eccMapNor = (np.round(ia.array_nor(eccMap) * 255)).astype(np.uint8)\r\n eccMapRGB = cv2.cvtColor(eccMapNor, cv2.COLOR_GRAY2RGB)\r\n # eccMapRGB: image type for opencv watershed, RGB, [uint8, uint8, uint8]\r\n\r\n newLabel = cv2.watershed(eccMapRGB, minMarker)\r\n\r\n plt.figure()\r\n plt.imshow(newLabel, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 3')\r\n plt.show()\r\n\r\n newBorder = np.zeros(newLabel.shape).astype(np.int)\r\n\r\n newBorder[newLabel == -1] = 1\r\n\r\n border = ni.binary_dilation(self.array).astype(np.int) - self.array\r\n\r\n border = newBorder + border\r\n\r\n border[border > 1] = 1\r\n\r\n border = sm.skeletonize(border)\r\n\r\n if borderWidth > 1:\r\n border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)\r\n\r\n newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))\r\n\r\n labeledNewPatchMap, patchNum = ni.label(newPatchMap)\r\n\r\n # if patchNum != np.amax(newLabel):\r\n # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)\r\n # raise ValueError, \"Number of patches after splitting does not equal to number of local minimum!\"\r\n\r\n newPatchDict = {}\r\n\r\n for j in range(1, patchNum + 1):\r\n\r\n currPatchName = patchName + '.' + str(j)\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[labeledNewPatchMap == j] = 1\r\n currArray = currArray * self.array\r\n\r\n if np.sum(currArray[:]) > 0:\r\n newPatchDict.update({currPatchName: Patch(currArray, self.sign)})\r\n\r\n if isplot:\r\n plt.figure()\r\n plt.subplot(121)\r\n plt.imshow(self.array, interpolation='nearest')\r\n plt.title(patchName + ': before split')\r\n plt.subplot(122)\r\n plt.imshow(labeledNewPatchMap, interpolation='nearest')\r\n plt.title(patchName + ': after split')\r\n\r\n return newPatchDict", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.0):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = pts[0][2]\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y 
min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def reformatCoronalView4NeedleSegment(self, base, tip, ID=-1):\r\n #research\r\n profprint()\r\n for i in range(2): # workaround update problem\r\n if ID >=0:\r\n modelNode = slicer.util.getNode('vtkMRMLModelNode' + str(ID))\r\n polyData = modelNode.GetPolyData()\r\n nb = polyData.GetNumberOfPoints()\r\n base = [0, 0, 0]\r\n tip = [0, 0, 0]\r\n polyData.GetPoint(nb - 1, tip)\r\n polyData.GetPoint(0, base)\r\n a, b, c = tip[0] - base[0], tip[1] - base[1], tip[2] - base[2]\r\n \r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(1)\r\n reformatLogic.SetSliceNormal(sGreen, 1, -a / b, 0)\r\n #reformatLogic.SetSliceOrigin(sGreen, base[0],base[1],base[2])#crashes\r\n m = sGreen.GetSliceToRAS()\r\n m.SetElement(0, 3, base[0])\r\n m.SetElement(1, 3, base[1])\r\n m.SetElement(2, 3, base[2])\r\n sGreen.Modified()", "def identify_dbs(image):\n locations = {\"red\": Point(), \"green\": Point(), \"blue\": Point()}\n masks = {\"red\": [], \"green\": [], \"blue\": []}\n\n bridge = cv_bridge.CvBridge()\n image = bridge.imgmsg_to_cv2(image, \"bgr8\")\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # upper and lower bounds for red\n # using python 3 bgr [0,0,188] = hsv [0, 255, 188]\n lower_red = numpy.array([0, 100, 100]) \n upper_red = numpy.array([10, 255, 255])\n masks[\"red\"] = cv2.inRange(hsv, lower_red, upper_red)\n\n # upper and lower bounds for green\n # using python 3 bgr [0,175,0] = hsv [60, 255, 175]\n lower_green = numpy.array([50, 100, 100]) \n upper_green = numpy.array([70, 255, 255])\n masks[\"green\"] = cv2.inRange(hsv, lower_green, upper_green)\n\n # upper and lower bounds for blue\n # using python 3 bgr [176, 0, 17] = hsv [123, 
255, 176]\n lower_blue = numpy.array([113, 100, 100])\n upper_blue = numpy.array([133, 255, 255])\n masks[\"blue\"] = cv2.inRange(hsv, lower_blue, upper_blue)\n\n x, y, w, h = 0, 0, image.shape[1]//3, image.shape[0]\n\n for color, mask in masks.items():\n pixels = {\"left\": 0, \"middle\": 0, \"right\": 0}\n \n # define section of image to use for left, middle and right\n left = mask[y:y+h, x:x+w]\n middle = mask[y:y+h, x+w:x+w+w]\n right = mask[y:y+h, x+w+w:x+3*w]\n\n # count the number of pixels in each section\n pixels[\"left\"] = cv2.countNonZero(left)\n pixels[\"middle\"] = cv2.countNonZero(middle)\n pixels[\"right\"] = cv2.countNonZero(right)\n location = max(pixels, key=pixels.get)\n\n # map the relative position of the db (left, middle, right) to the correct Point()\n locations[color] = db_locations[location]\n \n return locations", "def establecer_region(self, region, guess, delta_ppm=(1,1)): \r\n # obtengo los indices del centro del pico.\r\n xc, yc = self.encontrar_picos(guess, delta_ppm)\r\n # obtengo las coordenadas que determinan el rectangulo donde voy a\r\n # integrar. \r\n x_lims, y_lims = self.establecer_limites(xc, yc)\r\n \r\n xi,xf = x_lims\r\n yi,yf = y_lims\r\n spec = self.spec[yi:yf, xi:xf]\r\n ppmGridDir = self.ppmGridDir[yi:yf, xi:xf]\r\n ppmGridInd = self.ppmGridInd[yi:yf, xi:xf]\r\n \r\n \r\n n, m = region\r\n self.regiones[n][m] = Region(ppmGridDir, ppmGridInd, spec)", "def region_region_checkerboard(self, **_):\n outputs: dict = {}\n\n if self.AGG_BY == \"zone\":\n agg = \"zone\"\n else:\n agg = \"region\"\n\n # List of properties needed by the plot, properties are a set of tuples and\n # contain 3 parts: required True/False, property name and scenarios required,\n # scenarios must be a list.\n properties = [(True, f\"{agg}_{agg}s_Net_Interchange\", self.Scenarios)]\n\n # Runs get_formatted_data within PlotDataStoreAndProcessor to populate PlotDataStoreAndProcessor dictionary\n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n\n ncols, nrows = set_x_y_dimension(len(self.Scenarios))\n grid_size = ncols * nrows\n excess_axs = grid_size - len(self.Scenarios)\n\n mplt = PlotLibrary(nrows, ncols, squeeze=False, ravel_axs=True)\n fig, axs = mplt.get_figure()\n plt.subplots_adjust(wspace=0.02, hspace=0.4)\n max_flow_group = []\n Data_Out = []\n n = 0\n for scenario in self.Scenarios:\n rr_int = self[f\"{agg}_{agg}s_Net_Interchange\"].get(scenario)\n if shift_leapday:\n rr_int = adjust_for_leapday(rr_int)\n\n if self.AGG_BY != \"region\" and self.AGG_BY != \"zone\":\n agg_region_mapping = (\n self.region_mapping[[\"region\", self.AGG_BY]]\n .set_index(\"region\")\n .to_dict()[self.AGG_BY]\n )\n # Checks if keys all aggregate to a single value, this plot requires multiple values to work\n if len(set(agg_region_mapping.values())) == 1:\n return UnsupportedAggregation()\n rr_int = rr_int.reset_index()\n rr_int[\"parent\"] = rr_int[\"parent\"].map(agg_region_mapping)\n rr_int[\"child\"] = rr_int[\"child\"].map(agg_region_mapping)\n rr_int_agg = rr_int.groupby([\"parent\", \"child\"], as_index=True).sum()\n rr_int_agg.rename(columns={\"values\": \"flow (MW)\"}, inplace=True)\n rr_int_agg = rr_int_agg.loc[\n rr_int_agg[\"flow (MW)\"] > 0.01\n ] # Keep only positive flows\n rr_int_agg.sort_values(ascending=False, by=\"flow (MW)\")\n rr_int_agg = rr_int_agg / 1000 # MWh -> GWh\n\n data_out = rr_int_agg.copy()\n data_out.rename(\n 
columns={\"flow (MW)\": \"{} flow (GWh)\".format(scenario)}, inplace=True\n )\n\n max_flow = max(rr_int_agg[\"flow (MW)\"])\n rr_int_agg = rr_int_agg.unstack(\"child\")\n rr_int_agg = rr_int_agg.droplevel(level=0, axis=1)\n\n current_cmap = plt.cm.get_cmap()\n current_cmap.set_bad(color=\"grey\")\n\n axs[n].imshow(rr_int_agg)\n axs[n].set_xticks(np.arange(rr_int_agg.shape[1]))\n axs[n].set_yticks(np.arange(rr_int_agg.shape[0]))\n axs[n].set_xticklabels(rr_int_agg.columns)\n axs[n].set_yticklabels(rr_int_agg.index)\n axs[n].set_title(scenario.replace(\"_\", \" \"), fontweight=\"bold\")\n\n # Rotate the tick labels and set their alignment.\n plt.setp(\n axs[n].get_xticklabels(),\n rotation=90,\n ha=\"right\",\n rotation_mode=\"anchor\",\n )\n\n # Delineate the boxes and make room at top and bottom\n axs[n].set_xticks(np.arange(rr_int_agg.shape[1] + 1) - 0.5, minor=True)\n axs[n].set_yticks(np.arange(rr_int_agg.shape[0] + 1) - 0.5, minor=True)\n axs[n].grid(which=\"minor\", color=\"k\", linestyle=\"-\", linewidth=1)\n axs[n].tick_params(which=\"minor\", bottom=False, left=False)\n\n max_flow_group.append(max_flow)\n Data_Out.append(data_out)\n n += 1\n\n # Remove extra axes\n mplt.remove_excess_axs(excess_axs, grid_size)\n\n cmap = cm.inferno\n norm = mcolors.Normalize(vmin=0, vmax=max(max_flow_group))\n cax = plt.axes([0.90, 0.1, 0.035, 0.8])\n fig.colorbar(\n cm.ScalarMappable(norm=norm, cmap=cmap),\n cax=cax,\n label=\"Total Net Interchange [GWh]\",\n )\n plt.xlabel(\"To Region\", color=\"black\", rotation=\"horizontal\", labelpad=40)\n plt.ylabel(\"From Region\", color=\"black\", rotation=\"vertical\", labelpad=40)\n\n data_table_out = pd.concat(Data_Out, axis=1)\n save_figures = self.figure_folder.joinpath(f\"{self.AGG_BY}_transmission\")\n fig.savefig(\n save_figures.joinpath(\"region_region_checkerboard.svg\"),\n dpi=600,\n bbox_inches=\"tight\",\n )\n data_table_out.to_csv(save_figures.joinpath(\"region_region_checkerboard.csv\"))\n\n outputs = DataSavedInModule()\n return outputs", "def betas_middle_slice_graph(betas_4d):\n\tplt.imshow(betas_4d[:, :, 16, 0], interpolation='nearest', cmap='gray')\n\tplt.title('Middle Slice Beta(Gain)')\n\tplt.colorbar()\n\tplt.savefig('middle_slice_gain.png')\n\tplt.close()\n\tplt.imshow(betas_4d[:, :, 16, 1], interpolation='nearest', cmap='gray')\n\tplt.title('Middle Slice Beta(Loss)')\n\tplt.colorbar()\n\tplt.savefig('middle_slice_loss.png')\n\tplt.close()", "def find_chart():\r\n ###############################################\r\n # Read values of S/N\r\n sn = np.loadtxt(outtable, usecols=(14,))\r\n xs, ys = np.loadtxt(outtable, usecols=(1, 2)).T\r\n specs = np.loadtxt(outtable, usecols=(0,), dtype=str)\r\n ###############################################\r\n # Find good (and bad) regions according to S/N\r\n good = np.where(((~np.isnan(sn)) & (sn >= sn_cut)))[0]\r\n bad = np.where((sn < sn_cut))[0]\r\n ###############################################\r\n # Filter arrays for S/N\r\n sn = sn[good]\r\n xs = xs[good]\r\n ys = ys[good]\r\n specs = specs[good].tolist()\r\n specs = [x.replace(\".fits\", \"\")[1:] for x in specs]\r\n ###############################################\r\n # Set limits for the plot\r\n norm = Normalize(0, 1)\r\n ###############################################\r\n # Set colormap\r\n # cmap = brewer2mpl.get_map('YlGnBu', 'sequential', 5).mpl_colormap\r\n # Produces a collection of polygons with colors according to S/N values\r\n coll = PolyCollection(polygons_bins[good], array=np.ones_like(sn),\r\n cmap=\"gray\", edgecolors='0.5', 
norm=norm)\r\n ###############################################\r\n # Initiate figure and axis for matplotlib\r\n fig = plt.figure(figsize=(6.25, 6))\r\n gs = gridspec.GridSpec(1, 1)\r\n gs.update(left=0.08, right=0.985, bottom=0.08, top=0.985, hspace=0.05,\r\n wspace=0.06)\r\n ax = plt.subplot(gs[0])\r\n ###############################################\r\n # Draw the polygons\r\n draw_map(fig, ax, coll)\r\n ###############################################\r\n # Add contours according to V-band image\r\n draw_contours(\"vband\", fig, ax)\r\n ###############################################\r\n for x, y, spec in zip(xs, ys, specs):\r\n ax.text(x, y, spec, fontsize=10)\r\n # Write labels\r\n xylabels(ax)\r\n ##############################################\r\n # Save the figure\r\n plt.show()\r\n plt.savefig(\"figs/find_chart.pdf\")\r\n return", "def draw_laser_ranges():\n NUM_RANGES = len(D.ranges) # should be 360\n if False: #for easy commenting out...\n for angle in range(NUM_RANGES):\n print angle, \":\", D.ranges[angle] \n \n # helpful starting points, perhaps:\n # add line to the ranges image, \"D.image\"\n #cv.Line(D.image, (42,100), (100,42), cv.RGB(255, 0, 0), 1) # 1 == thickness\n # add dots to image being used to compute the Hough tr. \"D.hough\"\n # cv.Line(D.hough, (42,42), (42,42), 255, 2) # 1 == thickness\n for angle in range(NUM_RANGES):\n point = (CENTER + int(0.2*D.ranges[angle]*sin(radians(angle))), CENTER + int(0.2*D.ranges[angle]*cos(radians(angle))))\n cv.Line(D.image, (CENTER,CENTER), point, cv.RGB(255, 0 , 0), 1)\n cv.Line(D.hough, point, point, 255, 2) \n\n return", "def opencv_watershed(masked, mask) -> JSON_TYPE:\n # For code and detailed explanation see:\n # http://datahacker.rs/007-opencv-projects-image-segmentation-with-watershed-algorithm/\n threshold: int = 30\n gray = cv2.cvtColor(masked, cv2.COLOR_RGB2GRAY)\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n\n n_increases: int = 0\n while local_max_location.shape[0] < 30 and n_increases < 15:\n threshold += 20\n ret, thresh_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)\n # Noise removal\n kernel = np.ones((3), np.uint8)\n opening_img = cv2.morphologyEx(thresh_img, cv2.MORPH_OPEN, kernel, iterations=9)\n # Noise removal\n closing_img = cv2.morphologyEx(thresh_img, cv2.MORPH_CLOSE, kernel, iterations=4)\n dist_transform = cv2.distanceTransform(255 - closing_img, cv2.DIST_L2, 3)\n local_max_location = peak_local_max(dist_transform, min_distance=1, indices=True)\n n_increases += 1\n # Reset threshold\n threshold = 30\n\n num_clusters: int = 30\n if n_increases >= 15:\n num_clusters = local_max_location.shape[0]\n kmeans = KMeans(n_clusters=num_clusters)\n # If local_max_location size is 0, return 0 predictions\n if not local_max_location.size:\n return {\n \"count\": 0\n }\n kmeans.fit(local_max_location)\n local_max_location = kmeans.cluster_centers_.copy()\n # Kmeans is returning a float data type so we need to convert it to an int. 
\n local_max_location = local_max_location.astype(int)\n dist_transform_copy = dist_transform.copy()\n for i in range(local_max_location.shape[0]):\n cv2.circle(dist_transform_copy, (local_max_location[i][1], local_max_location[i][0]), 5, 255)\n # markers = np.zeros_like(dist_transform)\n ret, sure = cv2.threshold(dist_transform, 0.01*dist_transform.max(), 255, 0)\n sure = np.uint8(sure)\n ret, markers = cv2.connectedComponents(sure)\n labels = np.arange(kmeans.n_clusters)\n markers[local_max_location[:,0], local_max_location[:,1]] = labels + 1\n # Convert all local markers to an integer. This because cluster centers will be float numbers. \n markers = markers.astype(int)\n markers_copy = markers.copy()\n index_non_zero_markers = np.argwhere(markers != 0)\n markers_copy = markers_copy.astype(np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n for i in range(index_non_zero_markers.shape[0]):\n string_text = str(markers[index_non_zero_markers[i][0], index_non_zero_markers[i][1]])\n cv2.putText(markers_copy, string_text, (index_non_zero_markers[i][1], index_non_zero_markers[i][0]), font, 1, 255)\n markers = markers.astype(np.int32)\n segmented = cv2.watershed(masked, markers)\n count_segments(markers)\n #return {\n # \"count\": local_max_location.shape[0]\n #}\n return {\n \"count\": count_segments(markers),\n }", "def SplitGap(data,gapsize,medwin,fluxdiff):\n \n # defining new empty lists and stuff\n pcount=0\n istamps=[]\n outData={}\n \n data['x'].mask = data['UnMasked']\n data['y'].mask = data['UnMasked']\n data['yerr'].mask = data['UnMasked']\n \n # median smoothing the lightcurve\n mvavg1 = movingMedian(data['y'],medwin)\n mvavg1 = num.append(mvavg1,mvavg1[-1])\n mvavg1 = data['y']\n # first derivative of smoothed lightcurve\n diff1 = num.diff(mvavg1)\n diff1 = num.hstack((diff1,diff1[-1]))\n \n # second derivative of smoothed lightcurve\n diff2 = num.diff(diff1)\n diff2 = num.hstack((diff2[-1],diff2))\n\n # compute ourlier resistant sigma\n sig = compute1Sigma(diff1)\n #pylab.plot(diff1,'g.')\n #pylab.plot([0,6000],[5*sig,5*sig],'k-')\n #pylab.plot([0,6000],[3*sig,3*sig],'k-')\n #pylab.plot([0,6000],[1*sig,1*sig],'k-')\n #pylab.show()\n\n # The grand master loop >=}\n # to make portion slices\n for i in range(len(data['x'])-1):\n dt = data['x'][i+1]- data['x'][i]\n j1 = max(0,i-medwin)\n j2 = i + medwin\n if pcount == 0:\n i0 = 0\n if pcount > 0:\n i0 = i1+1\n if dt > gapsize:\n i1 = i\n istamps.append([i0,i1])\n pcount += 1\n #if num.abs(diff1[i]) > 5*sig:\n #i1 = i\n #istamps.append([i0,i1])\n #pcount += 1\n #print num.abs(diff1[i]/data['y'][i]), diff1[i], data['y'][i], diff1[i+1], data['y'][i+1]\n #print i, ' test flux gap'\n i1 = i+1\n istamps.append([i0,i1])\n \n \n \n if data['bool']==False:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':False}\n else:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1], 'TransitMask':data['TransitMask'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':True}\n \n return 
outData", "def segment_by_shape(self, dt, criterion):\n enumerate_crit = Criterion(\n 'patterns', criterion.column_name+'_derivatives_patterns_clusters',\n 'enumerate')\n return self.segment_by_enumerate(dt, enumerate_crit)", "def isDivision(event,buff):\n index,diff,label = event\n label = label[0]\n if diff<0:\n return False,[]\n img_before = np.copy(buff[:,:,index-1])\n img_after = np.copy(buff[:,:,index])\n mask_after = (img_after==label).astype(np.uint8)\n nb_elts_after = np.amax(img_after)\n kernel = np.ones((7,7),np.uint8)\n neighbouring_mask = cv2.dilate(mask_after,kernel,iterations=8)\n\n \n new_map = np.multiply(img_after,neighbouring_mask.astype(np.uint8)) \n #Removing the element we are currently looking at\n new_map[img_after==label]=0\n possible_candidates = []\n for i in range(nb_elts_after):\n if np.any(new_map==i+1):\n possible_candidates.append(i+1)\n #Computes the area of the cells and compares them\n size_cell_after = np.count_nonzero(img_after==label)\n match = [] #lists the ratios sizeAfter/sizeBefore for possible matches\n for vals in possible_candidates:\n size_before = np.count_nonzero(img_before==vals)\n size_other_cell = np.count_nonzero(img_after==vals)\n size_after = size_cell_after + size_other_cell\n ratio = float(size_after)/float(size_before)\n if ratio>0.8 and ratio<1.2:\n match.append((vals,abs(1-ratio)))\n if len(match)==0:\n return False,[]\n if len(match)>1:\n #Several matches, so pick the best\n values = [y for x,y in match]\n result_label,osef = match[np.argmin(values)]\n else:\n result_label, osef = match[0]\n return True,result_label", "def cleanup_regions(self, timestamp, bid, ofr):\n regions = []\n\n for region in self.regions:\n if not region.can_delete(timestamp, bid, ofr):\n regions.append(region)\n\n # replace the regions list\n self.regions = regions", "def highlight_rare_blocks(bv, threshold=1):\n if no_coverage_warn():\n return\n rare_blocks = covdb.get_rare_blocks(threshold)\n rare_color = HighlightStandardColor.RedHighlightColor\n highlight_set(rare_blocks, rare_color)\n log.log_info(\"[*] Found %d rare blocks (threshold: %d)\" %\n (len(rare_blocks), threshold))\n for block in rare_blocks:\n log.log_info(\" 0x%x\" % block)", "def border_analyze_vertical(mask, borders, min_segment_width=5, max_border_width=0.4):\n max_border_width = mask.shape[1] * max_border_width\n if len(borders) == 0: return False\n left_most = 0\n right_most = mask.shape[1] - 1\n # middle_borders = [border for border in borders if (left_most in border or right_most in border) and abs(border[0]-border[1])>3 ]\n # if len(middle_borders)==0:return False\n slice_start = 0\n # segs_by_middle_borders=[]\n block_widths = []\n for v_border in borders[::-1]:\n border_start = v_border[1]\n border_stop = v_border[0]\n if border_start == left_most:\n slice_start = border_stop\n elif border_stop == right_most:\n # mask_slice=mask[slice_start:border_start]\n block_widths.append(border_start - slice_start)\n # segs_by_middle_borders.appen(mask_slice)\n slice_start = border_stop\n elif border_stop - border_start >= 1:\n # mask_slice = mask[slice_start,border_start]\n if border_stop - border_start < max_border_width:\n block_widths.append(border_start - slice_start)\n # segs_by_middle_borders.appen(mask_slice)\n slice_start = border_stop\n if slice_start < right_most:\n block_widths.append(mask.shape[1] - slice_start)\n # segs_by_middle_borders.append(mask[slice_start:])\n\n block_widths = [w for w in block_widths if w > min_segment_width]\n # print('block widths')\n # 
print(block_widths)\n # print('block widths deviation' )\n # print(np.std(block_widths))\n # print('block heights dev/mean' )\n # print(np.std(block_widths)/np.mean(block_widths))\n\n if len(block_widths) < 2: return False\n return segment_analyzer(block_widths)", "def fix_half_inning(self, half_inning):\n outs = 0\n active_runners = []\n for atbat in half_inning:\n self.hold_runners(active_runners, atbat)\n\n active_runners = [r for r in atbat.runners\n if not r.out and r.end != 4]\n outs = atbat.outs", "def highlight(self,screen,midpos = (800,450)):\n posInt = self.getIntPos()\n posInt = getOffsetPos(posInt,midpos)\n pygame.draw.circle(screen, [max(0,tmp - (10 - self.timeDriving%10)*10) for tmp in self.colour], \n posInt, int(10+ (self.timeDriving%10 )),2)\n pygame.draw.circle(screen, self.colour, posInt, int(20+ (self.timeDriving%10 )),2)\n pygame.draw.circle(screen, [max(0,tmp - (self.timeDriving%10)*10) for tmp in self.colour], \n posInt, int(30+ (self.timeDriving%10 )),2)", "def remove_blocks(draft):\n for symbol in draft.Blocks:\n if symbol.Name in blocks_to_delete:\n print(\"[-] %s, \\tdeleted\" % symbol.Name)\n symbol.delete()\n\n # for ball in draft.ActiveSheet.Balloons:\n if draft.Balloons:\n for ball in draft.Balloons:\n if ball.BalloonType == 7: # type 7 filter the triangle balloons.\n print(\"[-] %s, \\tdeleted\" % ball.Name)\n ball.Delete()\n else:\n pass", "def RegionGrowingSegmentation(image, kernel_sigma, seed_row, seed_col, Niter, sim_threshold): \n \n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n image = image / image.max();\n nr, nc = image.shape;\n mask_old = np.zeros(image.shape, dtype = bool);\n mask_old[seed_row, seed_col] = True;\n \n sim_pos = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)];\n nb_pos = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1), (1, 0), (1, 1)];\n \n for iter_no in range(Niter):\n mask = np.zeros(image.shape, dtype = bool);\n for row_no in range(1, nr - 1):\n for col_no in range(1, nc - 1):\n if mask_old[row_no, col_no] != 1:\n continue;\n \n nb = image[row_no - 1:row_no + 2, col_no - 1:col_no + 2]; \n region_mean = image[mask_old].mean();\n if region_mean == 1:\n mask[row_no - 1:row_no + 2, col_no - 1:col_no + 2] = 1; \n continue;\n \n similarity = np.exp(-(nb - region_mean) ** 2); \n for el_no in range(len(nb_pos)):\n delta_row, delta_col = nb_pos[el_no];\n row_sim, col_sim = sim_pos[el_no];\n if similarity[row_sim, col_sim] < sim_threshold:\n continue;\n \n mask[row_no + delta_row, col_no + delta_col] = 1;\n \n if mask_old.sum() == mask.sum():\n break; \n \n mask_old = mask;\n \n return mask;", "def highlight_set(addr_set, color=None, bv=None, start_only=True):\n if bv is not None:\n binary_view = bv\n else:\n if gbv is None:\n print(\"[!] To use manually, pass in a binary view (bv=binary_view), or set bncov.gbv first\")\n return\n binary_view = gbv\n if start_only:\n get_blocks = binary_view.get_basic_blocks_starting_at\n else:\n get_blocks = binary_view.get_basic_blocks_at\n for addr in addr_set:\n blocks = get_blocks(addr)\n if len(blocks) >= 1:\n for block in blocks:\n if covdb is not None:\n if addr in covdb.block_dict:\n count = len(covdb.block_dict[addr])\n else:\n count = 0\n highlight_block(block, count, color)\n else:\n highlight_block(block, 0, color)\n else:\n if get_blocks == binary_view.get_basic_blocks_starting_at:\n containing_blocks = binary_view.get_basic_blocks_at(addr)\n if containing_blocks:\n log.log_warn(\"[!] 
No blocks start at 0x%x, but %d blocks contain it:\" %\n (addr, len(containing_blocks)))\n for i, block in enumerate(containing_blocks):\n log.log_info(\"%d: 0x%x - 0x%x in %s\" % (i, block.start, block.end, block.function.name))\n else:\n log.log_warn(\"[!] No blocks contain address 0x%x; check the address is inside a function.\" % addr)\n else: # get_blocks is binary_view.get_basic_blocks_at\n log.log_warn(\"[!] No blocks contain address 0x%x; check the address is inside a function.\" % addr)", "def populate_region(mask, layer_params):\n\n from .speedups import (\n NEW_CELL_MASK, CAN_OSCILLATE_MASK, INCLUDE_VIOLATIONS_MASK)\n\n border = ndimage.maximum_filter(mask, size=3, mode='wrap') ^ mask\n interior = ndimage.minimum_filter(mask, size=3, mode='wrap')\n gen_mask = mask * (\n NEW_CELL_MASK |\n CAN_OSCILLATE_MASK |\n INCLUDE_VIOLATIONS_MASK\n ) + border * (\n INCLUDE_VIOLATIONS_MASK\n )\n board = np.zeros(mask.shape, dtype=np.uint16)\n foreground = np.zeros(mask.shape, dtype=bool)\n background = np.zeros(mask.shape, dtype=bool)\n background_color = np.zeros(mask.shape, dtype=bool)\n seeds = None\n max_period = 1\n\n for layer in layer_params:\n if not isinstance(layer, dict):\n raise ValueError(\n \"'layer_params' should be a list of parameter dictionaries.\")\n layer = _fix_random_values(layer)\n old_board = board.copy()\n gen_mask0 = gen_mask.copy()\n interior = ndimage.minimum_filter(\n gen_mask & NEW_CELL_MASK > 0, size=3, mode='wrap')\n color = COLORS.get(layer.get('color'), 0)\n\n fence_frac = layer.get('fences', 0.0)\n if fence_frac > 0:\n fences = build_fence(gen_mask & speedups.NEW_CELL_MASK)\n fences *= coinflip(fence_frac, fences.shape)\n gen_mask &= ~(fences * (NEW_CELL_MASK | CAN_OSCILLATE_MASK))\n board += fences.astype(np.uint16) * CellTypes.wall\n\n spawners = layer.get('spawners', 0)\n if spawners > 0:\n _mask = (gen_mask0 & NEW_CELL_MASK > 0) & interior\n new_cells = _mask & coinflip(spawners, board.shape)\n if not new_cells.any() and _mask.any():\n i, j = np.nonzero(_mask)\n k = get_rng().choice(len(i)) # ensure at least one spawner\n new_cells[i[k], j[k]] = True\n gen_mask[new_cells] ^= NEW_CELL_MASK\n board[new_cells] = CellTypes.spawner + color\n\n tree_lattice = layer.get('tree_lattice')\n # Create a lattice of trees that are spread throughout the region\n # such that every empty cell touches one (and only one) tree\n # (modulo edge effects).\n # Such a lattice tends to make the resulting board very chaotic.\n # Note that this will disrupt any pre-existing patterns.\n if tree_lattice is not None:\n if not isinstance(tree_lattice, dict):\n tree_lattice = {}\n h, w = board.shape\n stagger = tree_lattice.get('stagger', True)\n spacing = float(tree_lattice.get('spacing', 5))\n if not stagger:\n new_cells = _make_lattice(h, w, spacing, spacing, 0)\n elif spacing <= 3:\n new_cells = _make_lattice(h, w, 3, 3, 1)\n elif spacing == 4:\n new_cells = _make_lattice(h, w, 10, 1, 3)\n elif spacing == 5:\n new_cells = _make_lattice(h, w, 13, 1, 5)\n else:\n # The following gets pretty sparse.\n new_cells = _make_lattice(h, w, 6, 3, 3)\n\n new_cells &= gen_mask & NEW_CELL_MASK > 0\n board[new_cells] = CellTypes.tree + color\n\n period = 1\n if 'pattern' in layer:\n pattern_args = layer['pattern'].copy()\n period = pattern_args.get('period', 1)\n if period == 1:\n gen_mask2 = gen_mask & ~CAN_OSCILLATE_MASK\n pattern_args.update(period=max_period, osc_bonus=0)\n elif period == 0:\n gen_mask2 = gen_mask & ~INCLUDE_VIOLATIONS_MASK\n pattern_args.update(period=max_period, 
osc_bonus=0)\n elif period < max_period:\n raise ValueError(\n \"Periods for sequential layers in a region must be either 0, 1,\"\n \" or at least as large as the largest period in prior layers.\")\n else:\n gen_mask2 = gen_mask\n max_period = period\n\n board = _gen_pattern(board, gen_mask2, seeds, **pattern_args)\n\n # We need to update the mask for subsequent layers so that they\n # do not destroy the pattern in this layer.\n # First get a list of board states throughout the oscillation cycle.\n boards = [board]\n for _ in range(1, max_period):\n boards.append(speedups.advance_board(boards[-1]))\n non_empty = np.array(boards) != 0\n still_cells = non_empty.all(axis=0)\n osc_cells = still_cells ^ non_empty.any(axis=0)\n # Both still life cells and oscillating cells should disallow\n # any later changes. We also want to disallow changes to the cells\n # that are neighboring the oscillating cells, because any changes\n # there would propogate to the oscillating cells at later time\n # steps.\n # Note that it doesn't really matter whether the oscillating mask\n # is set for the currently oscillating cells, because we're not\n # checking for violations in them anyways, and we don't allow any\n # changes that would affect them.\n osc_neighbors = ndimage.maximum_filter(osc_cells, size=3, mode='wrap')\n gen_mask[osc_cells] &= ~(NEW_CELL_MASK | INCLUDE_VIOLATIONS_MASK)\n gen_mask[still_cells | osc_neighbors] &= ~(NEW_CELL_MASK | CAN_OSCILLATE_MASK)\n\n new_mask = board != old_board\n life_mask = ((board & CellTypes.alive) > 0) & new_mask\n board += color * new_mask * life_mask\n # The seeds are starting points for the next layer of patterns.\n # This just makes the patterns more likely to end up close together.\n seeds = ((board & CellTypes.alive) > 0) & mask\n\n new_mask = board != old_board\n\n movable_walls = layer.get('movable_walls', 0)\n if movable_walls > 0:\n new_cells = coinflip(movable_walls, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.wall\n board += new_cells * CellTypes.movable\n\n movable_trees = layer.get('movable_trees', 0)\n if movable_trees > 0:\n new_cells = coinflip(movable_trees, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.tree\n board += new_cells * CellTypes.movable\n\n hardened_life = layer.get('hardened_life', 0)\n if hardened_life > 0:\n new_cells = coinflip(hardened_life, board.shape) * new_mask\n new_cells *= (board & ~CellTypes.rainbow_color) == CellTypes.life\n board -= new_cells * CellTypes.destructible\n\n buffer_size = layer.get('buffer_zone', 0) * 2 + 1\n life_cells = board & CellTypes.alive > 0\n buf = ndimage.maximum_filter(life_cells, size=buffer_size, mode='wrap')\n gen_mask[buf] &= ~NEW_CELL_MASK\n\n target = layer.get('target', 'board')\n if target == 'board':\n foreground[new_mask] = True\n if period > 0:\n background[new_mask] = True\n elif target == 'goals':\n background[new_mask] = True\n background_color[new_mask] = True\n # Make sure to add walls and such to the foreground\n foreground[new_mask & (board & CellTypes.alive == 0)] = True\n elif target == 'both':\n foreground[new_mask] = True\n if period > 0:\n background[new_mask] = True\n background_color[new_mask] = True\n else:\n raise ValueError(\"Unexpected value for 'target': %s\" % (target,))\n\n fountains = layer.get('fountains', 0)\n if fountains > 0:\n new_cells = coinflip(fountains, board.shape)\n new_cells *= gen_mask & NEW_CELL_MASK > 0\n neighbors = ndimage.maximum_filter(new_cells, size=3, mode='wrap')\n 
neighbors *= gen_mask & NEW_CELL_MASK > 0\n gen_mask[neighbors] = INCLUDE_VIOLATIONS_MASK\n if buffer_size > 1:\n buf = ndimage.maximum_filter(neighbors, size=buffer_size, mode='wrap')\n gen_mask[buf] &= ~NEW_CELL_MASK\n board[neighbors] = CellTypes.wall + color\n board[new_cells] = CellTypes.fountain + color\n foreground[new_cells] = True\n background[neighbors] = True\n background_color[neighbors] = True\n\n goals = board.copy()\n board *= foreground\n goals *= background\n goals &= ~CellTypes.spawning\n goals &= ~(CellTypes.rainbow_color * ~background_color)\n\n return board, goals", "def get_valid_regions(self):\n pass", "def do_full(self, image,hsv,upper,lower,debug=False):\n single_color_img = self.extract_single_color_range(image,hsv,lower,upper)\n if debug:\n # cv2.imshow('single_color_img',single_color_img)\n cv2.imwrite('debug_pics/single_color_img.jpg',single_color_img)\n single_channel = self.threshold_image(single_color_img,debug)\n if debug:\n # cv2.imshow('single_channel',single_channel)\n cv2.imwrite('debug_pics/single_channel.jpg',single_channel)\n cont,hierarchy = self.contours(single_channel,debug)\n\n if debug:\n for i,cnt in enumerate(cont):\n cv2.drawContours(single_channel,cont,i,(0,0,255),2)\n if debug: cv2.imwrite('debug_pics/contours.jpg',single_channel) #cv2.imshow('contours',single_channel)\n\n return self.get_bricks(cont)", "def segment(data):", "def find_sync_light_onsets(sync_light, invert=True, fixmissing=False):\n # -- Find changes in synch light --\n sync_light_diff = np.diff(sync_light, prepend=0)\n if invert:\n sync_light_diff = -sync_light_diff\n sync_light_diff[sync_light_diff < 0] = 0\n sync_light_threshold = 0.2*sync_light_diff.max()\n sync_light_onset = sync_light_diff > sync_light_threshold\n\n\n # -- Find period of sync_light_onset --\n sync_light_onset_ind = np.where(sync_light_onset)[0]\n sync_light_onset_diff = np.diff(sync_light_onset_ind) # In units of frames\n expected_onset_period = np.median(sync_light_onset_diff) # In units of (float) frames\n\n # -- Remove repeated onsets --\n onset_freq_upper_threshold = int(1.5 * expected_onset_period)\n onset_freq_lower_threshold = int(0.5 * expected_onset_period)\n repeated_onsets = sync_light_onset_diff < onset_freq_lower_threshold\n repeated_onsets_ind = np.where(repeated_onsets)[0]\n fixed_sync_light_onset = sync_light_onset.copy()\n fixed_sync_light_onset[sync_light_onset_ind[repeated_onsets_ind+1]] = False\n\n # -- Fix missing onsets --\n if fixmissing:\n missing_next_onsets = sync_light_onset_diff > onset_freq_upper_threshold\n missing_next_onsets_ind = np.where(missing_next_onsets)[0]\n for indm, missing_onset_ind in enumerate(missing_next_onsets_ind):\n onset_diff = sync_light_onset_diff[missing_onset_ind]\n n_missing = int(np.round(onset_diff / expected_onset_period))-1\n #print(n_missing)\n last_onset_ind = sync_light_onset_ind[missing_onset_ind]\n next_onset_ind = sync_light_onset_ind[missing_onset_ind+1]\n period_missing = (next_onset_ind - last_onset_ind)//(n_missing+1)\n new_onset_inds = last_onset_ind + np.arange(1, n_missing+1)*period_missing\n #print([last_onset_ind, next_onset_ind])\n #print(new_onset_inds)\n fixed_sync_light_onset[new_onset_inds] = True\n\n return fixed_sync_light_onset", "def ibd_segments(\n self,\n *,\n within=None,\n between=None,\n max_time=None,\n min_span=None,\n store_pairs=None,\n store_segments=None,\n ):\n return self.tables.ibd_segments(\n within=within,\n between=between,\n max_time=max_time,\n min_span=min_span,\n store_segments=store_segments,\n 
store_pairs=store_pairs,\n )", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def _find_trial_beam_breaks_regions_in_sync(sync_file):\n bit = 'BEAM_BREAK'\n return _find_bit_in_sync(sync_file, bit, ['down', 'up'])" ]
[ "0.54850954", "0.508608", "0.50796324", "0.506026", "0.50399494", "0.50315195", "0.502835", "0.49149758", "0.49066833", "0.49031895", "0.48982894", "0.48454717", "0.48394826", "0.48072115", "0.47997016", "0.47875527", "0.47487685", "0.4743557", "0.47257307", "0.4671103", "0.46685162", "0.46618134", "0.46433684", "0.4642642", "0.46052745", "0.4603047", "0.46016183", "0.45921078", "0.45519036", "0.45293596", "0.45262367", "0.45202428", "0.45146835", "0.4505614", "0.45054895", "0.44908455", "0.4489845", "0.44813937", "0.44811815", "0.44621906", "0.44472077", "0.4441818", "0.44345346", "0.44305038", "0.44231683", "0.44190115", "0.44166467", "0.44129267", "0.43981552", "0.43907672", "0.43907154", "0.43874243", "0.43656325", "0.43619293", "0.43582013", "0.4357957", "0.43527588", "0.4346104", "0.43376827", "0.43358958", "0.4335335", "0.4335244", "0.43346235", "0.43321803", "0.43319204", "0.43284914", "0.4324223", "0.43230826", "0.43164164", "0.4315521", "0.4314651", "0.43134916", "0.43081897", "0.4304995", "0.43038484", "0.4302911", "0.42991996", "0.4294293", "0.42942044", "0.42912728", "0.42886236", "0.42878082", "0.42749295", "0.42712873", "0.42706123", "0.4269841", "0.4265185", "0.42643568", "0.42636728", "0.42605412", "0.42601818", "0.42581144", "0.42576927", "0.42576814", "0.42522478", "0.42500702", "0.42484647", "0.42476428", "0.4247371", "0.4244197" ]
0.47201967
19
override this to do logic on input_update messages
def do_on_input_update(self, msg_id, payload, player): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_input_changed(self):\n self.message.data = self.dataInput.toPlainText()\n self.validate_data_input(self.message.dlc)", "def update(self, msg):\n pass", "def process_IN_MODIFY(self, event):", "def handle_input(self, event):\n pass", "def oppdater(self, input):\n return", "def handle_updates(self, update):\r\n self.__manage_pump()", "def _update(self):\n pass", "def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)", "def OnUpdateUI(self, event):\n if not self._input_state == 'readline':\n ConsoleWidget.OnUpdateUI(self, event)", "def on_new_input(self):\n self.Error.clear()\n self.Warning.clear()\n self.Information.clear()\n self.update_messages()\n self.update_status()\n if self.auto_save and self.sheet_url:\n self.save_sheet()", "def update( ):\r\n pass", "def _update_input_type(self):\n pass", "def dummy_update( self ):\r\n pass", "def update_handler(self, update):\n \n m = update.get('message',{})\n\n sender = self.get_sender(m)\n if sender == self.user_id: return\n \n # code that'll execute upon receiving any message\n if self.greeting and m:\n self.greeting(m)\n\n # parse bot commands\n command, params = self._parse_commands(m)\n \n if command: \n self._apply_command_filter(m, command, params)\n else:\n self._apply_msg_filter(m)", "def __update(self, input_):\n self.__process_input(input_)\n\n if not self.state.game_over:\n self.state.spawn_tail()\n self.state.try_move_player()\n if not self.state.game_over:\n self.state.cut_tail()\n self.state.eat_orbs()\n\n self.display.draw(self.state)\n self.state.tick = self.state.tick + 1", "def update(self, *inputs):\n raise NotImplementedError", "def onUpdated(self):", "def on_commitMessageEdit_textChanged(self):\n self.__updateOK()", "def user_input_listener(state: SharedState):", "def ev_textinput(self, event: TextInput) -> None:", "def processInputs(self):", "def update(self, *inputs):\n raise NotImplementedError('Must define update function to use this base class')", "def on_update(self):\n raise NotImplemented(\"on_update method should be implemented.\")", "def update(self):\n\n self.check_events()", "def modify_input(self, raw_input_par):\r\n\r\n return self.meta_model.modify_input(raw_input_par)", "def update():", "def update():", "def __process_input(self, input_):\n if self.state.game_over:\n if input_.key_pressed:\n self.state.exit = True\n else:\n if input_.action == 'PLAYER_UP':\n self.state.player.direction = 'U'\n elif input_.action == 'PLAYER_DOWN':\n self.state.player.direction = 'D'\n elif input_.action == 'PLAYER_LEFT':\n self.state.player.direction = 'L'\n elif input_.action == 'PLAYER_RIGHT':\n self.state.player.direction = 'R'", "def update(self):\r\n pass", "def commandUpdate(self):\n pass", "def __msg_handler(self, bot, update):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def handle_actual_updated(self):\n self._actual_updated()", "def modify_input(self, raw_input_par):\r\n raise NotImplementedError", "def modify_input(self, raw_input_par):\r\n raise NotImplementedError", "def handle_update(self, call):\n self.fire_event(EVENT_UPDATE)", "def __msg_handler(self, update, bot):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def beforeUpdate(self):", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", 
"def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self) -> None:\n ...", "def update(self, *args, **kwargs):", "def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_type(event)\n\n # Deal with buttons\n self.handle_button(event, code)\n\n # Mouse wheel\n if code == 22:\n self.handle_scrollwheel(event)\n # Other relative mouse movements\n else:\n self.handle_relative(event)\n\n # Add in the absolute position of the mouse cursor\n self.handle_absolute(event)\n\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(self.events)", "def after_update(self, *args):\n raise NotImplementedError", "def onFlowUpdate(self, event):", "def UpdateInput(self, newInput):\n self.bufferedInput = self.newestInput\n self.newestInput = newInput", "def _validate_update_data(self, data):\n return", "def before_update(self, obj, st):\n pass", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def update(self):", "def update(self):", "def update(self):", "def pre_update(self, **values):\r\n pass", "def update(self):\n\n pass", "def set_input(self, input):\n pass", "def set_input(self, input):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_key_code(event)\n\n if code in self.codes:\n new_code = self.codes[code]\n else:\n new_code = 0\n event_type = self._get_event_type(event)\n value = self._get_key_value(event, event_type)\n scan_event, key_event = self.emulate_press(\n new_code, code, value, self.timeval)\n\n self.events.append(scan_event)\n self.events.append(key_event)\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n # We are done\n self.write_to_pipe(self.events)", "def process_inputs(self, inputs):", "def dispatch_update(self, update):\n command_handlers = {\n '/start': self._do_start,\n '/help': self._do_help,\n '/neuer_spruch': self._do_neuer_spruch,\n '/mein_spruch': self._do_mein_spruch,\n '/alle_meine_sprueche': self._do_alle_meine_sprueche,\n '/loesche_meine_sprueche': self._do_loesche_meine_sprüche,\n '/setze_aktiven_spruch': self._do_setze_aktiven_spruch\n }\n callback_handlers = {\n '/delete': self._callback_delete,\n '/active': self._callback_active\n }\n\n if \"message\" in update.keys():\n # Parse command\n args = update[\"message\"][\"text\"].split(' ', 1)\n command = args[0].replace('@cde_nasenspruch_bot', '')\n chat_id = update[\"message\"][\"chat\"][\"id\"]\n user_id = update[\"message\"][\"from\"][\"id\"]\n\n # Call command handler function\n try:\n command_handlers[command](chat_id, user_id, args, update)\n except KeyError:\n if command.startswith('/'):\n self.tclient.send_message('Unbekannter Befehl. 
Versuch es mal mit /help', chat_id)\n pass\n elif \"callback_query\" in update.keys():\n args = update[\"callback_query\"][\"data\"].split(' ', 2)\n command = args[0].replace('@cde_nasenspruch_bot', '')\n chat_id = update[\"callback_query\"][\"from\"][\"id\"]\n user_id = update[\"callback_query\"][\"from\"][\"id\"]\n \n # Call callback handler function\n try:\n callback_handlers[command](chat_id, user_id, args, update)\n except KeyError:\n print('Unbekannter callback_query {}'.format(update[\"callback_query\"][\"data\"]))\n pass", "def update(self, *args, **kw):\n pass", "def install_handle_input(self):\n pass", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def update(self)->None:\n pass", "def input(self):\r\n pass", "def update(self):\n # default implementation is to do nothing.", "def _handleInput(self, paramInput):\n pass", "def update(self, msg):\r\n self.msgVar.set(msg)", "def update(self, msg):\r\n self.msgVar.set(msg)", "def update_data():\n pass", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return", "def updateMessages(self, parameters):\r\n return" ]
[ "0.7064037", "0.68353105", "0.679278", "0.6780095", "0.6757028", "0.66975", "0.6632578", "0.6601894", "0.65531427", "0.6519125", "0.64932", "0.6461436", "0.645683", "0.64120686", "0.6398695", "0.63841844", "0.6377531", "0.63212633", "0.6266965", "0.6207796", "0.6190815", "0.61791474", "0.6159929", "0.61538285", "0.61509174", "0.61281544", "0.61281544", "0.6125984", "0.61129475", "0.61063725", "0.6087144", "0.60774964", "0.6077347", "0.6077347", "0.6061959", "0.6032166", "0.60319275", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.6031357", "0.60107577", "0.60085744", "0.599906", "0.59634745", "0.59617656", "0.59582883", "0.59458697", "0.5935914", "0.59038794", "0.5902206", "0.5902206", "0.5902206", "0.589704", "0.5888465", "0.5863034", "0.5863034", "0.58544207", "0.58544207", "0.58544207", "0.5846684", "0.5839743", "0.583942", "0.583895", "0.5831098", "0.5828973", "0.58195525", "0.5810551", "0.579335", "0.57839936", "0.5779489", "0.5779489", "0.5754509", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531", "0.5748531" ]
0.79876757
0
Given the state of the 'game', decide what your cells ('game.me.cells') should do.
def step(self, game: Game): print("Tick #{}".format(game.time_left)) splitValue = getSplitValue(game) print (getSplitValue(game)) for cell in game.me.cells: if game.time_left < 6: cell.trade(99999) if cell.mass >= splitValue: if len(game.me.cells) < 10: cell.split() #else: #cell.trade(cell.mass - 100) else: distance = cell.position.distance_to(cell.target) possibleVictims = findVictims(cell, game.enemies) if (cell.mass <= 100): target = closestRessource(game, cell, possibleVictims + game.resources.allResources, len(possibleVictims)) else: #cell.burst() target = closestRessource(game, cell, possibleVictims, len(possibleVictims)) for e in game.enemies: for c in e.cells: if enemyComingthrough(cell, c): target = cell.position + (c.target - c.position) #cell.burst() pass if (target != None): cell.move(target) else: print (' KES TU FAIS, VA PAS LÀ ')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def init_cells(self):\n state = list()\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if randint(1, 100) >= 100 - CELL_DENSITY:\n # Live cell.\n status = NORMAL\n state.append(1)\n else:\n # Dead cell.\n status = HIDDEN\n state.append(0)\n\n cell = self.canvas.create_rectangle((index % width) * CELL_SIZE, (index / width) * CELL_SIZE,\n ((index % width) + 1) * CELL_SIZE, ((index / width) + 1) * CELL_SIZE,\n fill=\"black\", state=status, outline=\"white\")\n self.cells.append(cell)\n\n return state", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n 
doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # 
aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def next_state_of_cell(self, x_cell, y_cell):\n neighbours = self.get_number_neighbours_of_cell(x_cell, y_cell)\n if(self.board_state[x_cell][y_cell] == 1):\n # Any live cell with more than three live neighbours dies, \n # as if by overpopulation.\n if(neighbours > 3):\n return 0\n # Any live cell with fewer than two live neighbours dies,\n # as if by underpopulation.\n elif(neighbours < 2):\n return 0\n # Any live cell with two or three live neighbours lives\n # on to the next generation.\n else:\n return 1\n if(self.board_state[x_cell][y_cell] == 0):\n # Any dead cell with exactly three live 
neighbours becomes a live cell, \n # as if by reproduction.\n if(neighbours == 3):\n return 1\n else:\n return 0", "def in_cell(self):\n for player in self.players:\n for cell in self.cell_lst:\n if player.x in cell[0] and player.y in cell[1]:\n player.current_cell = cell\n break", "def test_dead_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def test_live_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n world.set_cell((0, 0))\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def update_cells(self, state):\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if state[index] != self.get_state(index):\n self.toggle_color(index)", "def cell_create(game_set, screen, covids, cells):\n cell_create_flag = True\n cell = Cell(game_set, screen)\n for old_cell in cells.sprites():\n if old_cell.rect.y < game_set.cell_number_adjust:\n cell_create_flag = False\n break\n if (not pygame.sprite.spritecollide(cell, cells, 0) and\n not pygame.sprite.spritecollide(cell, covids, 0) and\n cell_create_flag):\n cells.add(cell)", "def state_generator(self):\n\n kernel = np.array([\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n iteration = 0\n\n while True: # (Game of Life does not end)\n # Run 2D convolution with the given kernel to find out how many neighbors each cell has.\n # Boundary option determines whether to run with hard boundaries on the game board or\n # using a toroid board which wraps circularly. These are the two strategies for handling\n # a finite game board. scipy.signal.convolve2d handles these two modes gracefully, which\n # is why it is used here. 
There is also a performance gain when using numpy/scipy matrix\n # operations as opposed to iterating element-wise over the whole matrix.\n # See https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.signal.convolve2d.html\n\n # There is a more sophisticated and efficient algorithm for determining next game state\n # (see http://dotat.at/prog/life/life.html) but for clarity and a lack of time, the standard\n # implementation was chosen.\n\n num_neighbors_board = convolve2d(self.board, kernel, mode='same', boundary=self.boundary.value)\n\n # Find empty cells that have three neighbors\n birth_coordinates = np.where(np.logical_and(self.board == 0, num_neighbors_board == 3))\n\n # Find live cells with too few or too many neighbors\n death_coordinates = np.where(\n np.logical_and(\n self.board == 1,\n np.logical_or(num_neighbors_board < 2, num_neighbors_board > 3)\n )\n )\n\n births = np.array(birth_coordinates).transpose().tolist()\n deaths = np.array(death_coordinates).transpose().tolist()\n self.board[birth_coordinates] = 1\n self.board[death_coordinates] = 0\n\n iteration += 1\n yield self.board, births, deaths, iteration", "def game_of_life():\n # 3x3 neighbourhood\n offsets = [[(y, x) for y in range(-1, 2)] for x in range(-1, 2)]\n\n # Create mappings\n mappings = {}\n for i in range(2 ** 9):\n\n # Determine the initial state (key)\n key = f\"{bin(i)[2:]:0>9}\" # As binary string\n key = tuple(k == \"1\" for k in key) # As tuple of bools\n key = tuple(key[i * 3:i * 3 + 3] for i in range(3)) # Reshape into 2D grid\n\n # Alive counts\n centre = key[1][1]\n others = sum(sum(row) for row in key) - centre\n\n # Skip if state does not evaluate to True\n if centre:\n if others not in (2, 3):\n continue\n\n else:\n if others != 3:\n continue\n\n mappings[key] = True\n\n return Mapping2DRuleset(mappings, offsets)", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # TODO: check \\ diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col+1] == state[i+2][col+2] == state[i+3][col+3]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check / diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col+3] != ' ' and state[i][col+3] == state[i+1][col+2] == state[i+2][col+1] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check 2x2 box wins\n for col in range(4):\n for i in range(4):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i][col+1] == state[i+1][col+1]:\n return 1 if state[i][col]==self.my_piece else -1\n \n return 0 # no winner yet", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # check \\ diagonal wins\n for i in range(2):\n for j in range(2):\n if state[i][j]!= ' ' and 
state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check / diagonal wins\n for i in range(3,5):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check diamond wins\n for i in range(3):\n for j in range(1,4):\n if state[i+1][j] == ' ' and state[i][j]!= ' ' and state[i][j] == state[i+1][j-1] == state[i+1][j+1] == state[i+2][j]:\n return 1 if state[i][j]==self.my_piece else -1\n\n return 0 # no winner yet", "def gameOfLife(self, board: List[List[int]]) -> None:\n # copy matrix\n copy_matrix = [[board[row][col] for col in range(len(board[0]))] for row in range(len(board))]\n \n # 8 possible directions\n directions = [(0,1), (0, -1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)]\n num_rows = len(board)\n num_cols = len(board[0])\n \n # matrix traversal\n for i in range(0, num_rows):\n for j in range(0, num_cols):\n # for each cell, explore all of its neighboring cells\n num_live_cells = 0\n for direction in directions:\n r = i + direction[0]\n c = j + direction[1]\n # make sure if it is a live cell \n if (r < num_rows and r >=0) and (c < num_cols and c>=0) and (copy_matrix[r][c]==1):\n # if it is live cell, increment live_cell_count\n num_live_cells +=1\n # if here: We now have estimate of surrounding live cells\n # start applying rules \n # Rule-1: Any live cell with fewer than 2 live neighbors die\n # Rule-2: Any live cell with 2/3 live neighbors live up\n # Rule-3: Any Live cell with > 3 live neighbors die\n # Rule-4: Any dead cell with ==3 live neighbors becomes alive\n if copy_matrix[i][j] == 1 and (num_live_cells > 3 or num_live_cells < 2):\n # Rule-1 and Rule-3: So the current cell dies...\n board[i][j] = 0\n if copy_matrix[i][j] == 0 and num_live_cells == 3:\n # Rule-4: Dead becomes alive\n board[i][j] = 1\n # Rule-2 is taken care by default.", "def game_value(self, state):\r\n # check horizontal wins\r\n for row in state:\r\n for i in range(2):\r\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\r\n return 1 if row[i] == self.my_piece else -1\r\n # check col wins\r\n for col in range(5):\r\n for i in range(2):\r\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\r\n return 1 if state[i][col] == self.my_piece else -1\r\n #check diag up wins\r\n for x in range(2):\r\n for y in range(2):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y+1] == state[x+2][y+2] == state[x+3][y+3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check diag down wins\r\n for x in range(2):\r\n for y in range(3, 5):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y-1] == state[x+2][y-2] == state[x+3][y-3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check square box wins \r\n for x in range(4):\r\n for y in range(4):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y] == state[x][y+1] == state[x+1][y+1]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n\r\n return 0 # no winner yet\r", "def cycle(self):\n\n coordinates = self.get_random_coordinates()\n\n for coord in coordinates:\n if isinstance(self.cells[coord], (Jungle, Savannah, Desert)):\n self.cells[coord].feeding()\n\n for coord in coordinates:\n if isinstance(self.cells[coord], (Jungle, Savannah, Desert)):\n self.cells[coord].procreation()\n\n self.migration()\n\n for coord in coordinates:\n if isinstance(self.cells[coord], (Jungle, Savannah, 
Desert)):\n self.cells[coord].aging()\n\n for coord in coordinates:\n if isinstance(self.cells[coord], (Jungle, Savannah, Desert)):\n self.cells[coord].loss_of_weight()\n\n for coord in coordinates:\n if isinstance(self.cells[coord], (Jungle, Savannah, Desert)):\n self.cells[coord].death()\n\n self.animals_on_island()", "def play_best_guess(self, game):\n\n\n # create a list of cells\n cells = [game.board[i][j]\n for i in xrange(game.rows)\n for j in xrange(game.cols)]\n\n first_cell = cells[0]\n game.reveal_cell(first_cell.row, first_cell.col)\n\n # draw updated board and pause for a second\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n\n\n total_flagged = 0\n while not game.lost_game and not game.won_game:\n\n # remember if we've made a move in the while loop\n # so we know whether to make a random move later on\n made_move = False\n\n # look through all revealed cells for any with a number of neighboring mines.\n # if the cell has the same number of unrevealed neighbors as the cell's\n # number of neighboring mines, all the unrevealed neighbors must be mines.\n revealed_numbered_cells = [c for c in cells if c.revealed and (not c.flagged) and (c.neighbors > 0)]\n while revealed_numbered_cells:\n cell = revealed_numbered_cells.pop()\n # cell may have been marked flagged after revealed_numbered_cells was compiled\n if not cell.flagged:\n neighbor_cells = ms.Minesweeper.get_neighbors(cell.row, cell.col, game.board)\n flagged_neighbors = [n for n in neighbor_cells if n.flagged]\n number_remaining_mines = cell.neighbors - len(flagged_neighbors)\n unknown_neighbors = [n for n in neighbor_cells if not n.flagged and not n.revealed]\n if number_remaining_mines > 0 and len(unknown_neighbors) == number_remaining_mines:\n # flag every neighbor\n for c in unknown_neighbors:\n if total_flagged < game.mines:\n total_flagged += 1\n game.flag_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # we may have won with the flag above so test whether we're still playing\n # before further calculations\n if not game.lost_game and not game.won_game:\n # loop through all unrevealed, unflagged cells and see if we know it's safe to reveal\n for c in cells:\n if not c.revealed and not c.flagged and self.is_cell_safe(c, game.board):\n game.reveal_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # assume we've made our best guesses and now have to guess randomly\n # this will prevent us from looping forever if no obvious moves are available\n if not made_move:\n unrevealed = [c for c in cells if not c.revealed and not c.flagged]\n if len(unrevealed) > 0:\n cell = random.choice(unrevealed)\n game.reveal_cell(cell.row, cell.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(3)", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n 
winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and 
(i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def game_over(self) -> bool:\n for row in range(9):\n for col in range(9):\n if self._grid_sol[row][col] != self.get_cell(row, col):\n return False\n return True", "def get_all_game_cells(self):\n return GameCell.objects.filter(game=self)", "def gameOfLife(self, board):\n \n # Neighbours array for 8 neighboring cells of a given 
cell\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n \n rows = len(board)\n cols = len(board[0])\n \n # Iterate through the board by each cell\n for row in range(rows):\n for col in range(cols):\n \n # For each cell counting number of live neighbors\n live_neighbors = 0\n for neighbor in neighbors:\n \n # row and column of neighboring cell\n r = (row + neighbor[0])\n c = (col + neighbor[1])\n \n # Checking validity of neighboring cell and if it was originally a live cell\n if(r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n \n live_neighbors += 1\n \n # Rule 1 or Rule 3\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n \n board[row][col] = -1 # -1 meaning cell is now dead but was originally live\n \n # Rule 4\n if board[row][col] == 0 and live_neighbors == 3:\n board[row][col] = 2 #2 meaning cell is now live but was originally dead\n # Get final representation for updated board \n for row in range(rows):\n for col in range(cols):\n \n if board[row][col] > 0:\n board[row][col] = 1\n \n else:\n board[row][col] = 0", "def __get_cell_state(self, y, x):\n\t\tif 0 <= y <= self.__height - 1:\n\t\t\tif 0 <= x <= self.__width - 1:\n\t\t\t\treturn self.__board[y][x]\n\t\treturn 0", "def game_state(matrix):\n\n \"\"\"\n # To set winning tile\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 2048:\n # return 'win'\n # return 'not over'\n \"\"\"\n for i in range(len(matrix)-1):\n # intentionally reduced to check the row on the right and below\n # more elegant to use exceptions but most likely this will be their solution\n for j in range(len(matrix[0])-1):\n if matrix[i][j] == matrix[i+1][j] or matrix[i][j+1] == matrix[i][j]:\n return 'not over'\n for i in range(len(matrix)): # check for any zero entries\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n return 'not over'\n for k in range(len(matrix)-1): # to check the left/right entries on the last row\n if matrix[len(matrix)-1][k] == matrix[len(matrix)-1][k+1]:\n return 'not over'\n for j in range(len(matrix)-1): # check up/down entries on last column\n if matrix[j][len(matrix)-1] == matrix[j+1][len(matrix)-1]:\n return 'not over'\n return 'lose'", "def has_cells(self):\n return len(self._cells) > 0", "def updateCells(cell_positions):\n # Build a set of canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = 
cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live cell list\n return cell_positions", "def getNeighbors(cell, all_living_cells, test=False, test_color=8):\n neighbors = []\n NeighborCellGrid = [ # all possible neighbor positions\n [cell.x - 1, cell.y - 1], # top left\n [cell.x, cell.y - 1], # top\n [cell.x + 1, cell.y - 1], # top right\n [cell.x - 1, cell.y], # left\n [cell.x + 1, cell.y], # right\n [cell.x - 1, cell.y + 1], # bottom left\n [cell.x, cell.y + 1], # bottom\n [cell.x + 1, cell.y + 1] # bottom right\n ]\n count = 0\n for i in all_living_cells:\n count+=1\n if i.id != cell.id and i.alive == True: # not self and pixel is alive\n if [i.x, i.y] in NeighborCellGrid: # next to\n neighbors.append(i)\n if test:\n for i in NeighborCellGrid:\n g = simCell(i[0], i[1], color=test_color)\n test_cells.append(g)\n return neighbors", "def new_game(self):\n self.cells = [] # Array of cells\n self.frame_count = 0\n self.database = []\n self.timer = [Consts[\"MAX_TIME\"], Consts[\"MAX_TIME\"]]\n self.result = None\n # Define the players first\n self.cells.append(Cell(0, [Consts[\"WORLD_X\"] / 4, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n self.cells.append(Cell(1, [Consts[\"WORLD_X\"] / 4 * 3, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n # Generate a bunch of random cells\n for i in range(Consts[\"CELLS_COUNT\"]):\n if i < 4:\n rad = 1.5 + (random.random() * 1.5) # Small cells\n elif i < 10:\n rad = 10 + (random.random() * 4) # Big cells\n else:\n rad = 2 + (random.random() * 9) # Everything else\n x = Consts[\"WORLD_X\"] * random.random()\n y = Consts[\"WORLD_Y\"] * random.random()\n cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)\n safe_dist = Consts[\"SAFE_DIST\"] + rad\n while min(map(cell.distance_from, self.cells[:2])) < safe_dist:\n cell.pos = [\n Consts[\"WORLD_X\"] * random.random(),\n Consts[\"WORLD_Y\"] * random.random()\n ]\n self.cells.append(cell)", "def draw_occupied_cells(self):\n reds = [cell for cell in self.game.get_cells() if cell.player == 1]\n blacks = [cell for cell in self.game.get_cells() if cell.player == 2]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=reds,\n edgecolors='black', node_color='red', linewidths=2)\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=blacks,\n edgecolors='black', node_color='black', linewidths=2)", "def winGame(sub_state):\n for i in range(sub_state.shape[0] - 4):\n for j in range(sub_state.shape[1] - 4):\n\n horizontal = sub_state[i][j: j+5]\n if (horizontal == 1).all():\n return True\n\n vertical = [sub_state[i+k, j] for k in range(5)]\n if (np.array(vertical) == 1).all():\n return True\n\n diagonal = [sub_state[(i+k, j+k)] for k in range(5)]\n if (np.array(diagonal) == 1).all():\n return True\n\n return False", "def _create_cells(self):\n\t\tcellId=0\n\t\t# Iterate over all dictionaries\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tcellNumber = cellInfo[2]\n\t\t\t\tif len(cellInfo)>=4: neuronParam = 
cellInfo[3]\n\t\t\t\telse: neuronParam = None\n\t\t\t\tcellId = self._create_cell_population(cellId,muscle,muscAfferentDelay,cellClass,cellName,cellNumber,neuronParam)\n\t\t# Add special cells\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tcellNumber = cellInfo[3]\n\t\t\tif len(cellInfo)>=5: neuronParam = cellInfo[4]\n\t\t\telse: neuronParam = None\n\t\t\tmuscAfferentDelay = None\n\t\t\tcellId = self._create_cell_population(cellId,groupOrMuscle,muscAfferentDelay,cellClass,cellName,cellNumber,neuronParam)\n\n\t\tself._motoneuronsNames = self._intMotoneuronsNames+self._realMotoneuronsNames\n\t\tself._afferentsNames = self._primaryAfferentsNames+self._secondaryAfferentsNames", "def test_toggle_cell_in_board(self):\n self.gameBoard.getGridItem(50, 50).toggle_living()\n self.assertEqual(self.gameBoard.getGridItem(50,50).is_living(), True)", "def checkwin(self):\n w = self.getWidth()\n h = self.getHeight()\n numOccupiedCell = 0 # counter for the number of occupied cells (use to detect a terminal condition of the game)\n for r in range(h):\n for c in range(w):\n if self.cell[c][r] == EMPTY:\n continue # this cell can't be part of winning segment\n # if we reach this point, the cell is occupied by a player stone\n numOccupiedCell = numOccupiedCell+1\n for dr,dc in [(1,0),(0,1),(1,1),(-1,1)]: # direction of search\n for i in range(NWIN):\n # test if there exists a segment of NWIN uniformly coloured cells\n if not(r+i*dr>=0) or not(r+i*dr<h) or not(c+i*dc<w) or not(self.cell[c+i*dc][r+i*dr] == self.cell[c][r]):\n break # segment broken\n else:\n # Python remark: notice that the else is attached to the for loop \"for i...\"\n # This block is executed if and only if 'i' arrives at the end of its range \n return self.cell[c][r] , True # found a winning segment \n return EMPTY, numOccupiedCell==w*h", "def play_randomly(self, game):\n\n # create a stack of unrevealed cells\n unrevealed = []\n\n for i in xrange(game.rows):\n for j in xrange(game.cols):\n unrevealed.append(game.board[i][j])\n # we will pop the cells from the stack, so randomize their order first\n random.shuffle(unrevealed)\n\n # while the game is being played, choose a random unrevealed cell to reveal next\n while not game.lost_game and not game.won_game:\n\n cell = unrevealed.pop()\n\n # before we click the cell, see if only mines remain\n # if so, flag this cell, otherwise reveal it.\n if len(unrevealed) < game.mines:\n game.flag_cell(cell.row, cell.col)\n print \"Flagging\", cell\n\n # cell may have been previously revealed as a neighbor\n # if not, reveal it now, otherwise discard the cell and continue\n elif not cell.revealed:\n game.reveal_cell(cell.row, cell.col)\n print \"Revealing\", cell\n # update the stack to only contain non-revealed cells\n # TODO: make this more efficient by not modifying the list in place\n unrevealed = []\n for i in xrange(game.rows):\n for j in xrange(game.cols):\n if not game.board[i][j].revealed and not game.board[i][j].flagged:\n unrevealed.append(game.board[i][j])\n random.shuffle(unrevealed)\n\n #check to see if there's any corners that can be flagged as bombs\n check_corners(game, unrevealed)\n\n # draw updated board and pause for a second\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)", "def _checkCells(self):\r\n if(self.startCell.isEmpty()):\r\n raise IllegalMoveException(\"No pawn in start cell\")\r\n if(self.endCell.isOccupied()):\r\n raise IllegalMoveException(\"Targeted cell is 
already occupied\")\r\n return True", "def life_step(state):\n\t# For every cell each live cell in any of the 8 neighbouring cells contributes 1 to the sum\n\t# Rolling matricies is periodic so this implements periodic boundary conditions\n\tnumberOfNeigbours = sum(np.roll(np.roll(state, i, axis=0), j, axis=1)\n\t\t\t\t\t\t for i in (-1,0,1) for j in (-1,0,1) if (i != 0 or j != 0))\n\n\t# Any live cell with fewer than two live neighbours dies, as if caused by under-population\n\tstate = np.where(numberOfNeigbours < 2, 0, state)\n\t# Any live cell with more than three live neighbours dies, as if by over-population\n\tstate = np.where(numberOfNeigbours > 3, 0, state)\n\t# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.\n\tstate = np.where(numberOfNeigbours == 3, 1, state)\n\n\treturn state", "def simulate(screen, clock):\n old_cells = populate_cells(MAX_X, MAX_Y)\n new_cells = {}\n\n running = True\n while running:\n\n screen.fill((20, 20, 20))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n break\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n break\n\n for y in range(MAX_Y):\n for x in range(MAX_X):\n if old_cells[x, y]:\n pygame.draw.rect(screen, CELL_COLOR,\n (x * CELL_WIDTH, y * CELL_HEIGHT,\n CELL_WIDTH, CELL_HEIGHT))\n dead_or_alive(old_cells, x, y, new_cells)\n\n old_cells = new_cells\n new_cells = {}\n clock.tick(200)\n pygame.display.update()\n\n pygame.quit()", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. 
Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def gameOfLife(self, board: List[List[int]]) -> None:\n if not board or len(board)==0:\n return \n\n rows = len(board)\n cols = len(board[0])\n #lives = 0\n \n\n for i in range(rows):\n for j in range(cols):\n lives = self.n_neighbors(board,i,j)\n \n # Rule 1 and Rule 3\n if board[i][j]==1 and (lives <2 or lives >3):\n board[i][j]= 2 # -1 signifies the cell is now dead but originally was live.\n if board[i][j]== 0 and lives ==3:\n board[i][j]=3 # signifies the cell is now live but was originally dead.\n\n for i in range(rows):\n for j in range(cols):\n board[i][j] = board[i][j]%2\n return board", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def _simulate_all_cells(self):\n for ID in tqdm(self.condition_dict, desc='Simulating cells'):\n for n in range(len(self.condition_dict[ID])):\n cond_dict = self.condition_dict[ID][n]\n g, tc, rsh_mult, rs_mult, Io_mult, Il_mult, nnsvth_mult = cond_dict['E'], cond_dict['Tc'], cond_dict[\n 'Rsh_mult'], cond_dict['Rs_mult'], cond_dict['Io_mult'], cond_dict['Il_mult'], cond_dict['nnsvth_mult']\n # calculate the 5 parameters for each set of cell conditions\n\n # Eventually, replace this with derived 5-parameters\n iph, io, rs, rsh, nnsvth = pvlib.pvsystem.calcparams_cec(effective_irradiance=g, temp_cell=tc,\n alpha_sc=self.cell_parameters['alpha_sc'],\n a_ref=self.cell_parameters['a_ref'],\n I_L_ref=self.cell_parameters['I_L_ref'],\n I_o_ref=self.cell_parameters['I_o_ref'],\n R_sh_ref=self.cell_parameters['R_sh_ref'],\n R_s=self.cell_parameters['R_s'],\n Adjust=self.cell_parameters['Adjust'])\n rs, rsh, io, iph, nnsvth = rs * rs_mult, rsh * \\\n rsh_mult, io * Io_mult, iph * Il_mult, nnsvth * nnsvth_mult\n\n # calculate cell IV curves by condition, rather than by cell index\n voc_est = pvlib.singlediode.estimate_voc(iph, io, nnsvth)\n v = voltage_pts(self.num_points_in_IV, voc_est,\n self.module_parameters['breakdown_voltage'])\n i = pvlib.singlediode.bishop88_i_from_v(v, iph, io, rs, rsh, nnsvth,\n breakdown_factor=self.module_parameters['breakdown_factor'],\n breakdown_voltage=self.module_parameters[\n 'breakdown_voltage'],\n breakdown_exp=self.module_parameters['breakdown_exp'])\n\n # @dev: Uncomment if debugging pvlib bishop88 simulation results\n # plt.plot(v,i)\n # plt.xlim(-5,v[-1])\n # plt.ylim(0,iph+1)\n # plt.title(f\"{ID}: {n} :: {rs},\"\n # f\"{rsh}, {io}, {iph}, {nnsvth}\")\n # plt.show()\n\n self.condition_dict[ID][n]['V'] = v\n self.condition_dict[ID][n]['I'] = i\n self.condition_dict[ID][n]['E'] = g\n self.condition_dict[ID][n]['Tc'] = tc\n return", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n 
status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def update_cells(self):\n mineboard = self.mineboard\n gameboard = mineboard.gameboard\n for change in mineboard.changes:\n i, j = change[0], change[1]\n text_val = gameboard[i][j]\n\n if text_val == 'M':\n self.canvas.delete(self.cells[i][j])\n self.cells[i][j] = self.canvas.create_image(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, image=EXPLODED, anchor='nw')\n self.reveal_mines(i, j)\n\n elif text_val == 'F':\n self.canvas.delete(self.cells[i][j])\n self.cells[i][j] = self.canvas.create_image(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, image=FLAG, anchor='nw')\n\n elif text_val == ' ':\n self.canvas.delete(self.cells[i][j])\n self.cells[i][j] = self.canvas.create_rectangle(\n 2+j*CELLWIDTH, 2+i*CELLWIDTH, (j+1)*CELLWIDTH, (i+1)*CELLWIDTH, fill=DEFAULT_COLOR, outline=\"\")\n\n elif text_val in ['0', '1', '2', '3', '4', '5', '6', '7', '8']:\n self.canvas.itemconfig(\n self.cells[i][j], fill=COLORS[int(text_val)])\n if text_val != '0':\n # offset here is by 12 pixels\n self.canvas.create_text(\n 2+j*CELLWIDTH+(CELLWIDTH-1)//2, 2+i*CELLWIDTH+(CELLWIDTH-1)//2, anchor='center', text=f\"{text_val}\")\n\n mineboard.changes = [] # removes previous changes\n if mineboard.gamestate is not None:\n # if the game has ended displays game end message and buttons\n self.win_lose_lbl.grid(row=3, column=0, columnspan=4)\n self.win_lose_msg.set(\n f\"You {self.mineboard.gamestate}! Play again?\")\n self.same_again_bttn.grid(row=4, column=0, columnspan=2)\n self.play_again_bttn.grid(row=4, column=2, columnspan=2)", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def _should_cell_live(self, cell: Cell) -> bool:\n living_neighbours_count = self._count_living_neighbors(cell)\n # Any live cell with two or three live neighbours survives\n if cell.is_alive and living_neighbours_count in [2, 3]:\n return True\n # Any dead cell with three live neighbours becomes a live cell\n if not 
cell.is_alive and living_neighbours_count == 3:\n return True\n # All other live cells die in the next generation. Similarly, all other dead cells stay dead\n return False", "def evaluate(self, state):\n\t\ttranspose = state.board.transpose()\t\t# columns in state.board = rows in transpose\n\t\tcount = []\n\t\topponentcount = []\n\t\tfor row, column in zip(state.board, transpose):\n\t\t\trowcounter = collections.Counter(row)\n\t\t\tcolumncounter = collections.Counter(column)\n\t\t\tcount.append(rowcounter.get(state.current_player, 0))\n\t\t\tcount.append(columncounter.get(state.current_player, 0))\n\t\t\topponentcount.append(rowcounter.get(state.current_player * - 1, 0))\n\t\t\topponentcount.append(columncounter.get(state.current_player * -1 , 0))\n\n\t\tY = state.board[:, ::-1]\n\t\tdiagonals = [np.diagonal(state.board), np.diagonal(Y)]\n\t\tmain_diagonal_count = collections.Counter(diagonals[0])\n\t\tsecond_diagonal_count = collections.Counter(diagonals[1])\n\t\tcount.append(main_diagonal_count.get(state.current_player, 0))\n\t\tcount.append(second_diagonal_count.get(state.current_player, 0))\n\t\topponentcount.append(main_diagonal_count.get(state.current_player * - 1, 0))\n\t\topponentcount.append(second_diagonal_count.get(state.current_player * -1, 0))\n\n\t\t# max(count): maximum number of player's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\t# max(opponentcount): maximum number of opponent's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\tscoremax = 5 ** max(count)\n\t\tscoremin = 5 ** max(opponentcount)\n\n\t\treturn scoremax - scoremin", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n mat = [row[:] for row in board] #original copy of the board\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if mat[i+direc[0]][j+direc[1]]==1:\n cnt_live+=1\n if mat[i][j]==1 and cnt_live<2 or mat[i][j]==1 and cnt_live>3:\n board[i][j]=0\n elif mat[i][j]==1 and 2<=cnt_live<=3 or mat[i][j]==0 and cnt_live==3:\n board[i][j]=1", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = 
i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old 
symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def cell(x, y):\n try:\n if cells[y][x]['filled'] == 1:\n return # this has already been processed\n except IndexError:\n return\n cells[y][x]['filled'] = 1 # this cell is now filled\n\n nn = []\n for nx, ny in neighbours(x, y):\n try:\n if cells[ny][nx]['filled']:\n nn.append(cells[ny][nx])\n except IndexError:\n continue\n \n c = 0 # colour weighting\n \n #------ Flippedness\n flipped = sum([i['inverted'] for i in nn if i['inverted']])\n cells[y][x]['inverted'] = (randint(0, 3) + flipped) % 4\n \n #------- Colour calculation\n avg_colour = sum([i['colour'][0] for i in nn]) / len(nn)\n avg_sat = sum([i['colour'][1] for i in nn]) / len(nn)\n avg_bri = sum([i['colour'][2] for i in nn]) / len(nn)\n \n # small chance of going totally random otherwise small variation from neighbours\n if random(100) > 90:\n h = randint(0, 100)\n s = randint(0, 100)\n b = randint(0, 100)\n else:\n h = (avg_colour + randint(-15, 15)) % 100\n s = (avg_sat + randint(-15, 15)) % 100\n b = (avg_bri + randint(-15, 15)) % 100\n cells[y][x]['colour'] = (h, s, b)\n \n #------- Alpha calculation\n d = sqrt((x*cell_size - rx)**2 + (y*cell_size - ry)**2) # distance from epicenter\n mx = sqrt((w-rx*cell_size)**2 + (h-ry*cell_size)**2)\n a = d/sqrt(w**2+h**2)*255\n cells[y][x]['alpha'] = a\n \n for cx,cy in neighbours(x, y):\n cell(cx, cy)", "def is_fixed_state( previous_live, live_cells ):\n fixed = False\n if previous_live[0].size == live_cells[0].size:\n if previous_live[1].size == live_cells[1].size:\n if 
(previous_live[0]==live_cells[0]).all():\n if (previous_live[1]==live_cells[1]).all():\n fixed = True\n return fixed", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def computer_play( game ):\n\n grid = game.get_grid()\n\n diag = game.checkDiagonals()\n row = game.checkRows()\n column = game.checkColumns()\n\n if isinstance(diag, tuple):\n \n for x in diag[1]:\n try:\n x = int(x)\n print(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue\n\n elif isinstance(row, tuple):\n\n for x in row[1]:\n try:\n x = int(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue\n\n elif isinstance(column, tuple):\n\n for x in column[1]:\n try:\n x = int(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue \n\n for x in list(range(1,10)):\n if game.set_mark('O', x):\n return\n else:\n continue", "def Get_empty_cells(difficulty, size):\n if(difficulty == 'beginner'):\n return size**2 - 50\n elif (difficulty == 'easy'):\n return size**2 - 40\n elif (difficulty == 'medium'):\n return size**2 - 33\n elif (difficulty == 'hard'):\n return size**2 - 26\n elif (difficulty == 'hell'):\n return size**2 - 17", "def getGameState(self):\n row1 = [0, 0, 0]\n row2 = [0, 0, 0]\n row3 = [0, 0, 0]\n tilePosStatement = Statement()\n posTerm1 = Term('?x')\n posTerm2 = Term('?y')\n posTerm3 = Term('?tile')\n tilePosStatement.terms = (posTerm1, posTerm2, posTerm3)\n tilePosStatement.predicate = 'tilePos'\n for fact in self.kb.facts:\n if match(fact.statement, tilePosStatement):\n if fact.statement.terms[2] == Term(Constant('tile1')):\n term = 1\n if fact.statement.terms[2] == Term(Constant('tile2')):\n term = 2\n if fact.statement.terms[2] == Term(Constant('tile3')):\n term = 3\n if fact.statement.terms[2] == Term(Constant('tile4')):\n term = 4\n if fact.statement.terms[2] == Term(Constant('tile5')):\n term = 5\n if fact.statement.terms[2] == Term(Constant('tile6')):\n term = 6\n if fact.statement.terms[2] == Term(Constant('tile7')):\n term = 7\n if fact.statement.terms[2] == Term(Constant('tile8')):\n term = 8\n if fact.statement.terms[2] == Term(Constant('empty')):\n term = -1\n if fact.statement.terms[0] == Term(Constant('pos1')):\n col = 0\n elif fact.statement.terms[0] == Term(Constant('pos2')):\n col = 1\n elif fact.statement.terms[0] == Term(Constant('pos3')):\n col = 2\n if fact.statement.terms[1] == Term(Constant('pos1')):\n row1[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos2')):\n row2[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos3')):\n row3[col] = term\n\n row1 = tuple(row1)\n row2 = tuple(row2)\n row3 = tuple(row3)\n result = (row1, row2, row3)\n return result\n\n ### Student code goes here", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the 
range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if board[i+direc[0]][j+direc[1]]==1 or board[i+direc[0]][j+direc[1]]==-1:\n cnt_live+=1\n if (board[i][j]==1 and cnt_live<2) or \\\n (board[i][j]==1 and cnt_live>3):\n board[i][j]=-1\n elif board[i][j]==0 and cnt_live==3:\n board[i][j]=2\n for i in range(m):\n for j in range(n):\n if board[i][j]==-1:\n board[i][j]=0\n elif board[i][j]==2:\n board[i][j]=1", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. 
If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def is_won(self):\n combinations = [*[(i, i + 3, i + 6) for i in range(3)],\n *[(i*3, i*3 + 1, i*3 + 2) for i in range(3)],\n (0, 4, 8), (2, 4, 6)]\n\n win = [*filter(lambda x: self[x[0]] == self[x[1]] == self[x[2]] and\n self[x[0]] != self.CELL_EMPTY, combinations)]\n return self[win[0][0]] if len(win) > 0 else self.CELL_EMPTY", "def makeGrid(self, width, height, rewardLocs, exit, nPick=1, nAux=1, walls=[]):\n # Make mapping from coordinate (x, y, (takenreward1, takenreward2, ...))\n # to state number, and vice-versa.\n rTaken = iter([(),])\n for nPicked in range(1, nPick+1):\n rTaken = itertools.chain(rTaken, \n myCombinations(rewardLocs, r=nPicked)\n )\n # Iterators are hard to reset, so we list it.\n rTaken = list(rTaken)\n\n # Mappings from state to coordinates, vice-versa\n coordToState = {}\n stateToCoord = {}\n stateIdx = 0\n for x in range(width):\n for y in range(height):\n for stuff in rTaken:\n for holding in self.holdingPossibilities:\n coordToState[(x, y, stuff, holding)] = stateIdx\n stateToCoord[stateIdx] = (x, y, stuff, holding)\n stateIdx += 1\n self.deadEndState = stateIdx\n\n # Actually make the transition function\n def trans(f, p): \n aux = p\n (x, y, stuff, holding) = stateToCoord[f]\n actionMap = {}\n default = {(f, aux): 1}\n # Make the transition dictionary if the dead-end state (state width*height)\n if f == self.F-1:\n for action in range(5):\n actionMap[action] = default\n return actionMap\n\n # Otherwise, determine directions of motion, etc. \n for i in range(4):\n actionMap[i] = default\n if x != 0 and ((x-1, y) not in walls):\n actionMap[0] = {(coordToState[(x-1,y,stuff, holding)], aux): 1}\n if x < width-1 and ((x+1, y) not in walls):\n actionMap[1] = {(coordToState[(x+1,y,stuff, holding)], aux): 1}\n if y != 0 and ((x, y-1) not in walls):\n actionMap[2] = {(coordToState[(x,y-1,stuff, holding)], aux): 1}\n if y < height-1 and ((x, y+1) not in walls):\n actionMap[3] = {(coordToState[(x,y+1,stuff, holding)], aux): 1}\n # What happens when the agent uses action 4?\n if (x, y) == exit:\n # Some cases, depending on self.oneAtATime\n if not self.oneAtATime:\n # The agent is leaving.\n actionMap[4] = {(self.deadEndState, aux): 1}\n else:\n # The agent is dropping off a reward. holeFiller will\n # take care of the reward value.\n if len(stuff) >= nPick:\n # The agent is not allowed to pick up more stuff\n actionMap[4] = {(self.deadEndState, aux): 1}\n else:\n # The agent drops off the object.\n actionMap[4] = {(coordToState[(x,y,stuff, -1)], aux): 1}\n elif (x, y) not in rewardLocs:\n # No reward to pick up. Do nothing.\n actionMap[4] = default\n elif (x, y) in stuff:\n # This reward has already been used. 
Do nothing.\n actionMap[4] = default\n elif len(stuff) >= nPick or (holding != -1 and holding < len(stuff)\n and self.oneAtATime):\n # The agent has its hands full.\n actionMap[4] = default\n else:\n # The agent is allowed to pick up an object.\n newStuff = tuple(sorted(list(stuff) + [(x, y)]))\n if self.oneAtATime:\n newHoldingIdx = newStuff.index((x, y))\n else:\n newHoldingIdx = -1\n actionMap[4] = {(coordToState[(x, y, newStuff, newHoldingIdx)], aux): 1}\n return actionMap\n\n # Man, I'm outputting a lot of stuff.\n # coordToState[(x, y, rewardsLeft, holding)] -> index of this state\n # stateToCoord[index] -> (x, y, rewardsLeft, holding)\n # rTaken is a list of all possible combinations of leftover rewards.\n return (trans, coordToState, stateToCoord, rTaken)", "def state(self) -> PuzzleState:\n\t\tfor cell in self._cells:\n\t\t\tif not (cell.value() or any(cell.potential_values())):\n\t\t\t\treturn PuzzleState.Conflict\n\n\t\tfor group in self._groups:\n\t\t\tif len(group.unsolved_cells()) > 0:\n\t\t\t\treturn PuzzleState.Unsolved\n\n\t\t\tdistinct_values = set()\n\t\t\tfor cell in group:\n\t\t\t\tvalue = cell.value()\n\t\t\t\tif value in distinct_values:\n\t\t\t\t\treturn PuzzleState.Conflict\n\t\t\t\tdistinct_values.add(value)\n\n\t\treturn PuzzleState.Solved", "def generate_next_state(self) -> Dict[Tuple[int, int], Cell]:\n next_state: Dict[Tuple[int, int], Cell] = {}\n for living_cell in self.living_cells.values():\n for x in range(living_cell.x - 1, living_cell.x + 2):\n for y in range(living_cell.y - 1, living_cell.y + 2):\n cell = Cell(x, y)\n if (x, y) in self.living_cells.keys():\n cell = self.living_cells[x, y]\n if self._should_cell_live(cell):\n next_state[x, y] = Cell(x, y, True)\n \n self.living_cells = next_state\n \n return self.living_cells", "def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )", "def cell_is_game_over(self, y, x, map_data):\n # check for water\n if map_data[y][x] == self.WATER_SYMBOL:\n return True\n\n # check for anti-tank\n # up direction\n for i in range(y, -1, -1):\n if map_data[i][x] == self.ANTI_TANK_DOWN_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(i, x, map_data):\n break\n\n # down direction\n for i in range(y, self.y_size):\n if map_data[i][x] == self.ANTI_TANK_UP_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(i, x, map_data):\n break\n\n # left direction\n for i in range(x, -1, -1):\n if map_data[y][i] == self.ANTI_TANK_RIGHT_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(y, i, map_data):\n break\n\n # right direction\n for i in range(x, self.x_size):\n if map_data[y][i] == self.ANTI_TANK_LEFT_SYMBOL:\n return True\n # if blocked, can stop checking for anti-tank\n if self.cell_is_blocked(y, i, map_data):\n break\n\n # no water or anti-tank danger\n return False", "def test_solo_cell():\n cell = c6.Cell(loc=[1, 1])\n for i in range(10):\n cell.step()", "def test_multigrid_calculates_neighbours_correctly():\n\n # create a grid which will result in 9 cells\n h = 64\n img_dim = (3 * h + 1, 3 * h + 1)\n amg = mg.MultiGrid(img_dim, h, WS=127)\n\n # check that each cell has the expected 
neighbours\n print(amg.n_cells)\n\n # expected neieghbours left to right, bottom to top\n cells = [{\"north\": amg.cells[3], \"east\": amg.cells[1], \"south\": None, \"west\": None}, # bl\n {\"north\": amg.cells[4], \"east\": amg.cells[2],\n \"south\": None, \"west\": amg.cells[0]}, # bm\n {\"north\": amg.cells[5], \"east\": None,\n \"south\": None, \"west\": amg.cells[1]}, # br\n {\"north\": amg.cells[6], \"east\": amg.cells[4],\n \"south\": amg.cells[0], \"west\": None}, # ml\n {\"north\": amg.cells[7], \"east\": amg.cells[5],\n \"south\": amg.cells[1], \"west\": amg.cells[3]}, # mm\n {\"north\": amg.cells[8], \"east\": None,\n \"south\": amg.cells[2], \"west\": amg.cells[4]}, # mr\n # tl\n {\"north\": None, \"east\": amg.cells[7],\n \"south\": amg.cells[3], \"west\": None},\n # tm\n {\"north\": None,\n \"east\": amg.cells[8], \"south\": amg.cells[4], \"west\": amg.cells[6]},\n {\"north\": None, \"east\": None,\n \"south\": amg.cells[5], \"west\": amg.cells[7]}, # tr\n ]\n\n for ii, (gc, cell) in enumerate(zip(amg.cells, cells)):\n print(ii)\n assert gc.north == cell['north']\n assert gc.east == cell['east']\n assert gc.south == cell['south']\n assert gc.west == cell['west']", "def _compute_world_params(self) -> None:\n\n self.states = []\n for row in range(self.grid_height):\n for col in range(self.grid_width):\n cell = row * self.grid_width + col\n cell_type = self.grid[cell]\n\n possible_actions = {\n Action.up: self._get_action(max(row - 1, 0) * self.grid_width + col),\n Action.down: self._get_action(min(row + 1, self.grid_height - 1) * self.grid_width + col),\n Action.right: self._get_action(row * self.grid_width + min(col + 1, self.grid_width - 1)),\n Action.left: self._get_action(row * self.grid_width + max(col - 1, 0))\n }\n\n self.states.append(State(cell, possible_actions, cell_type))", "def updateGameState(self):\n boardArray = self._board.get_board()\n \n if self.state is self._BEGIN:\n\n for i in [1, self._board.get_board_size() - 2]:\n for j in range(1, self._board.get_board_size() - 1):\n\n if boardArray[i][j] != self._board._EMPTY:\n self.state = self._MIDDLE\n return\n\n if boardArray[j][i] != self._board._EMPTY:\n self.state = self._MIDDLE\n return\n\n\n elif self.state is self._MIDDLE:\n nbPieces = self._board.get_total_pieces()\n\n if nbPieces >= self._board.get_board_size()**2 - ENDGAME:\n self.state = self._END\n return", "def __init__(self):\n pygame.init()\n self.settings = Settings()\n self.number_cells_x = int(input(\"Enter number of cells in a row: \"))\n self.cell_width = float(self.settings.screen_width // self.number_cells_x)\n #print(self.cell_width)\n self.number_cells_y = int(self.settings.screen_height // self.cell_width)\n\n self.screen = pygame.display.set_mode((self.settings.screen_width,self.settings.screen_height))\n pygame.display.set_caption(\"Game of Life\")\n\n self.cells = []\n self.to_be_updated = []\n self._create_cells()\n\n self.bg_colour = (self.settings.bg_colour)\n self.waiting = True", "def test_print_cell(self):\n self.assertEqual(str(self.deadCell), \" \")\n self.assertEqual(str(self.livingCell), \"X\")", "def generate_grains(self, cells):\n\t\tfor cell_num in range(cells):\n\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\tsample_cell = sample_cell[0]\n\t\t\twhile sample_cell.state != 0:\n\t\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\t\tsample_cell = 
sample_cell[0]\n\t\t\tsample_cell.change_state(self.init_time ,cell_num)", "def actions(self, state):\n # actions = ['Left', 'Down', 'Right', 'Up']\n\n\n adjacent_cell_up = cell_adjacent(state[0], \"Up\")\n adjacent_cell_down = cell_adjacent(state[0], \"Down\")\n adjacent_cell_left = cell_adjacent(state[0], \"Left\")\n adjacent_cell_right = cell_adjacent(state[0], \"Right\")\n\n #Checking of macro is true\n if not self.macro:\n worker_actions = []\n # check if valid cell is above.\n if adjacent_cell_up not in self.warehouse.walls:\n worker_actions.append (\"Up\")\n\n # check if valid cell is below.\n if adjacent_cell_down not in self.warehouse.walls:\n worker_actions.append(\"Down\")\n\n # check if valid cell is to the left\n if adjacent_cell_left not in self.warehouse.walls:\n worker_actions.append(\"Left\")\n\n # check if valid cell is to the right\n if adjacent_cell_right not in self.warehouse.walls:\n worker_actions.append(\"Right\")\n\n return worker_actions\n\n else:\n box = []\n box_all = []\n #Checking if boxes are allowed in taboo area\n if not self.allow_taboo_push:\n for boxes in state[1:]:\n\n # check if valid cell is above.\n adjacent_box_up = cell_adjacent(boxes, \"Up\")\n #Checks if there is a taboo square or wall or box in next cell\n if adjacent_box_up not in self.taboo_list and adjacent_box_up not in self.warehouse.walls and adjacent_box_up not in state[1:]:\n box.append(\"Up\")\n\n # check if valid cell is bellow.\n adjacent_box_down = cell_adjacent(boxes, \"Down\")\n #Checks if there is a taboo square or wall or box in next cell\n if adjacent_box_down not in self.taboo_list and adjacent_box_down not in self.warehouse.walls and adjacent_box_down not in state[1:]:\n box.append(\"Down\")\n\n # check if valid cell is to the left\n adjacent_box_left = cell_adjacent(boxes, \"Left\")\n #Checks if there is a taboo square or wall or box in next cell\n if adjacent_box_left not in self.taboo_list and adjacent_box_left not in self.warehouse.walls and adjacent_box_left not in state[1:]:\n box.append(\"Left\")\n\n # check if valid cell is to the right\n adjacent_box_right = cell_adjacent(boxes, \"Right\")\n #Checks if there is a taboo square or wall or box in next cell\n if adjacent_box_right not in self.taboo_list and adjacent_box_right not in self.warehouse.walls and adjacent_box_right not in state[1:]:\n box.append(\"Right\")\n box_all.append(box)\n else:\n for boxes in state[1:]:\n # check if valid cell is above.\n adjacent_box_up = cell_adjacent(boxes, \"Up\")\n #Checks if there is a or wall or box in next cell\n if adjacent_box_up not in self.warehouse.walls and adjacent_box_up not in state[1:]:\n box.append(\"Up\")\n\n # check if valid cell is bellow.\n adjacent_box_down = cell_adjacent(boxes, \"Down\")\n #Checks if there is a or wall or box in next cell\n if adjacent_box_down not in self.warehouse.walls and adjacent_box_down not in state[1:]:\n box.append(\"Down\")\n\n # check if valid cell is to the left.\n adjacent_box_left = cell_adjacent(boxes, \"Left\")\n #Checks if there is a or wall or box in next cell\n if adjacent_box_left not in self.warehouse.walls and adjacent_box_left not in state[1:]:\n box.append(\"Left\")\n\n # check if valid cell is to the right.\n adjacent_box_right = cell_adjacent(boxes, \"Right\")\n #Checks if there is a or wall or box in next cell\n if adjacent_box_right not in self.warehouse.walls and adjacent_box_right not in state[1:]:\n box.append(\"Right\")\n box_all.append(box)\n return actions", "def get_effective_cell_moves(state, cell, player):\n 
board = state.get_board()\n if board.is_cell_on_board(cell):\n possibles_moves = YoteRules._get_rules_possibles_moves(cell, board.board_shape)\n effective_moves = []\n i, j = cell\n for move in possibles_moves:\n if board.is_empty_cell(move):\n effective_moves.append(move)\n elif board.get_cell_color(move) == Color(player * -1):\n k, l = move\n if i == k and j < l and board.is_empty_cell((i, j + 2)):\n effective_moves.append((i, j + 2))\n elif i == k and l < j and board.is_empty_cell((i, j - 2)):\n effective_moves.append((i, j - 2))\n elif j == l and i < k and board.is_empty_cell((i + 2, j)):\n effective_moves.append((i + 2, j))\n elif j == l and k < i and board.is_empty_cell((i - 2, j)):\n effective_moves.append((i - 2, j))\n return effective_moves", "def randomize(self):\n cell_stack = []\n cell = random.choice(self.cells)\n n_visited_cells = 1\n\n while n_visited_cells < len(self.cells):\n neighbors = [c for c in self.neighbors(cell) if c.is_full()]\n if len(neighbors):\n neighbor = random.choice(neighbors)\n cell.connect(neighbor)\n cell_stack.append(cell)\n cell = neighbor\n n_visited_cells += 1\n else:\n cell = cell_stack.pop()", "def check_score(self) -> None:\n self.player_1, self.player_2 = 0, 0\n for cell in self.cells:\n if cell.player == 1:\n self.player_1 += 1\n elif cell.player == 2:\n self.player_2 += 1", "def is_inacessible(cell):\n adj, count = num_adj_buildings(cell)\n return adj == count", "def outcome(self):\n if self.grid[0][0] == self.grid[1][0] == self.grid[2][0] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][1] == self.grid[1][1] == self.grid[2][1] and self.grid[0][1] != 0:\n return self.grid[0][1]\n if self.grid[0][2] == self.grid[1][2] == self.grid[2][2] and self.grid[0][2] != 0:\n return self.grid[0][2]\n if self.grid[0][0] == self.grid[0][1] == self.grid[0][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[1][0] == self.grid[1][1] == self.grid[1][2] and self.grid[1][0] != 0:\n return self.grid[1][0]\n if self.grid[2][0] == self.grid[2][1] == self.grid[2][2] and self.grid[2][0] != 0:\n return self.grid[2][0]\n if self.grid[0][0] == self.grid[1][1] == self.grid[2][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][2] == self.grid[1][1] == self.grid[2][0] and self.grid[0][2] != 0:\n return self.grid[0][2]\n return 0", "def run(self):\n for cell in self.grid.each_cell():\n neighbors = []\n if cell.north:\n neighbors.append(cell.north)\n if cell.east:\n neighbors.append(cell.east)\n if neighbors:\n neighbor = random.choice(neighbors)\n if neighbor:\n cell.link(neighbor)\n return self.grid", "def _is_dead_end(self, i_row, i_col, direction):\n return (((i_row, i_col) in self._ts_cells and direction == \"s\") or\n ((i_row, i_col) in self._ts_cells and direction == \"se\") or\n ((i_row, i_col) in self._ts_cells and direction == \"sw\") or\n ((i_row, i_col) in self._ls_cells and direction == \"e\") or\n ((i_row, i_col) in self._ls_cells and direction == \"ne\") or\n ((i_row, i_col) in self._ls_cells and direction == \"se\") or\n ((i_row, i_col) in self._bs_cells and direction == \"n\") or\n ((i_row, i_col) in self._bs_cells and direction == \"nw\") or\n ((i_row, i_col) in self._bs_cells and direction == \"ne\") or\n ((i_row, i_col) in self._rs_cells and direction == \"w\") or\n ((i_row, i_col) in self._rs_cells and direction == \"nw\") or\n ((i_row, i_col) in self._rs_cells and direction == \"sw\") or\n ((i_row, i_col) == self._tl_cell and direction == \"s\") or\n ((i_row, i_col) == self._tl_cell and direction 
== \"se\") or\n ((i_row, i_col) == self._tl_cell and direction == \"e\") or\n ((i_row, i_col) == self._bl_cell and direction == \"n\") or\n ((i_row, i_col) == self._bl_cell and direction == \"ne\") or\n ((i_row, i_col) == self._bl_cell and direction == \"e\") or\n ((i_row, i_col) == self._tr_cell and direction == \"w\") or\n ((i_row, i_col) == self._tr_cell and direction == \"sw\") or\n ((i_row, i_col) == self._tr_cell and direction == \"s\") or\n ((i_row, i_col) == self._br_cell and direction == \"w\") or\n ((i_row, i_col) == self._br_cell and direction == \"nw\") or\n ((i_row, i_col) == self._br_cell and direction == \"n\"))", "def distribute_onto_grid(self, cells,type):\n\n\t\t# setting up the topology of the space the cells will be in\n\t\tif self.VARS.NEIGHBORHOOD == \"Moore\":\n\t\t\tsize = 10\n\t\t\tcount=0\n\t\t\tcells = self.VARS.CELLS\n\t\t\t# moving them into a '10X10 grid'\n\t\t\tfor i in range(1,size+1):\n\t\t\t\tfor j in range(1, size+1):\n\t\t\t\t\tcells[count].set_location(i,j)\n\t\t\t\t\tcount +=1\n\n\t\t\t# distances to get Moore neighbors of everyone in the network\n\t\t\tfor i in range(len(cells)):\n\t\t\t\tfor j in range(len(cells)):\n\t\t\t\t\tif j > i:\n\t\t\t\t\t\tone = cells[i]\n\t\t\t\t\t\ttwo = cells[j]\n\t\t\t\t\t\t# wraps edges of the torus around.\n\t\t\t\t\t\tdist = measure_distance(one, two)\n\t\t\t\t\t\tif dist < 1.5:\n\t\t\t\t\t\t\tone.add_neighbor(two)\n\t\t\t\t\t\t\ttwo.add_neighbor(one)\n\t\t\t\t\t\t\tself.net.add_edge(one,two)", "def determine_game_state(self):\n if self.board == BLANK_BOARD:\n return GameState.GAME_NOT_STARTED\n\n # check for three of the same symbol across or down.\n for r in range(3):\n offset = r*3\n if self.board[offset] == self.board[offset+1] == self.board[offset+2]:\n if self.board[offset] == X_SYMBOL:\n return GameState.GAME_OVER_X_WINS\n elif self.board[offset] == O_SYMBOL:\n return GameState.GAME_OVER_O_WINS\n if self.board[r] == self.board[3 + r] == self.board[6 + r]:\n if self.board[r] == X_SYMBOL:\n return GameState.GAME_OVER_X_WINS\n elif self.board[r] == O_SYMBOL:\n return GameState.GAME_OVER_O_WINS\n\n # check for diagonal wins\n if ((self.board[0] == self.board[4] == self.board[8]) or\n (self.board[2] == self.board[4] == self.board[6])):\n if self.board[4] == X_SYMBOL:\n return GameState.GAME_OVER_X_WINS\n elif self.board[4] == O_SYMBOL:\n return GameState.GAME_OVER_O_WINS\n \n # check for tie.\n if not self.board.count(EMPTY_SYMBOL):\n return GameState.GAME_OVER_DRAW\n\n return GameState.GAME_IN_PROGRESS", "def _check_inner_dirs(self, i_row, i_col, adj_opp_cells):\n opp_player = \"B\" if self._turn == \"W\" else \"W\"\n \n if self._board[i_row-1][i_col] == opp_player: #north, tile to be placed will enter from the south\n adj_opp_cells.append((i_row-1, i_col, \"s\")) \n if self._board[i_row-1][i_col+1] == opp_player: #northeast, tile to be placed will enter from the sw\n adj_opp_cells.append((i_row-1, i_col+1, \"sw\"))\n if self._board[i_row][i_col+1] == opp_player: #east, tile to be placed will enter from the west\n adj_opp_cells.append((i_row, i_col+1, \"w\"))\n if self._board[i_row+1][i_col+1] == opp_player: #southeast, tile to be placed will enter from the nw\n adj_opp_cells.append((i_row+1, i_col+1, \"nw\"))\n if self._board[i_row+1][i_col] == opp_player: #south, tile to be placed will enter from the north\n adj_opp_cells.append((i_row+1, i_col, \"n\"))\n if self._board[i_row+1][i_col-1] == opp_player: #southwest, tile to be placed will enter from the ne\n adj_opp_cells.append((i_row+1, i_col-1, \"ne\"))\n if 
self._board[i_row][i_col-1] == opp_player: #west, tile to be placed will enter from the east.\n adj_opp_cells.append((i_row, i_col-1, \"e\"))\n if self._board[i_row-1][i_col-1] == opp_player: #northwest, tile to be placed will enter from the se.\n adj_opp_cells.append((i_row-1, i_col-1, \"se\"))", "def check_game_status(self):\n for player in (\"1\", \"2\"):\n row_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 1, self.board\n ).any()\n col_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 0, self.board\n ).any()\n d1_win = set(self.data[[0, 4, 8]]) == {player}\n d2_win = set(self.data[[2, 4, 6]]) == {player}\n if any([row_win, col_win, d1_win, d2_win]):\n return (\"win\", player)\n\n if self.counter[\"_\"] == 0:\n return (\"tie\", None)\n else:\n return (\"turn\", \"1\" if self.counter[\"1\"] == self.counter[\"2\"] else \"2\")", "def _update_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()", "def choose_cell_to_assign(self):\r\n min_domain = 10\r\n max_degree = -1\r\n chosen_row = None\r\n chosen_col = None\r\n for row in range(9):\r\n for col in range(9):\r\n if self.puzzle[row][col] == 0:\r\n domain_size = len(self.grid[row][col].domain)\r\n if domain_size < min_domain:\r\n min_domain = domain_size\r\n chosen_row = row\r\n chosen_col = col\r\n elif domain_size == min_domain:\r\n degree = len(self.grid[row][col].neighbors)\r\n if degree > max_degree:\r\n max_degree = degree\r\n chosen_row = row\r\n chosen_col = col\r\n return self.grid[chosen_row][chosen_col]", "def is_cell_safe(self, cell, board):\n # look at a cell and the cell's revealed neighbors\n # if any neighbors say there's 1 mine nearby, and that neighbor has neighbors which\n # contain a flag, it's safe to click here\n # TODO: this really needs to only check neighbors' neighbors that border the original cell.\n # right now more cells are considered than should be.\n safe = False\n neighbors = ms.Minesweeper.get_neighbors(cell.row, cell.col, board)\n revealed_neighbors = [n for n in neighbors if n.revealed or n.flagged]\n for n in revealed_neighbors:\n if n.neighbors > 0:\n n_neighbors = ms.Minesweeper.get_neighbors(n.row, n.col, board)\n flagged_n_neighbors = [n for n in n_neighbors if n.flagged]\n if len(flagged_n_neighbors) > 0:\n safe = True\n return safe", "def gameOfLife(self, board: List[List[int]]) -> None:\n\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n\n rows = len(board)\n cols = len(board[0])\n\n # 遍历面板每一个格子里的细胞\n for row in range(rows):\n for col in range(cols):\n # 对于每一个细胞统计其八个相邻位置里的活细胞数量\n live_neighbors = 0\n\n for neighbor in neighbors:\n # 相邻位置的坐标\n r = (row + neighbor[0])\n c = (col + neighbor[1])\n # 查看相邻的细胞是否是活细胞\n if (r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n live_neighbors += 1\n\n # 过去的活细胞,现在变为死细胞\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n # -1 代表这个细胞过去是活的现在死了\n board[row][col] = -1\n # 过去的死细胞,现在变为活细胞\n if board[row][col] == 0 and live_neighbors == 3:\n # 2 代表这个细胞过去是死的现在活了\n board[row][col] = 2\n\n # 遍历 board 刷新更新后的状态\n for row in range(rows):\n for col in range(cols):\n if board[row][col] > 0:\n board[row][col] = 1\n else:\n board[row][col] = 0", "def check(self):\n winner = None\n count = 0\n\n for y in range(self.gridSize):\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for item in self.grid[y]:\n # Check row of the grid\n 
if item == \"P1\":\n P1 += 1\n elif item == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for x in range(self.gridSize):\n # Check column of the grid\n if self.grid[x][y] == \"P1\":\n P1 += 1\n elif self.grid[x][y] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for y in range(self.gridSize):\n # Check right top to left bottom across the grid\n for x in range(self.gridSize):\n if x == y:\n if self.grid[x][y] == \"P1\":\n P1 += 1\n elif self.grid[x][y] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n if winner != None:\n return winner\n P1, P2 = 0, 0\n for y in range(self.gridSize):\n # Check the left top to the right bottom across the grid\n for x in range(self.gridSize - 1, -1, -1):\n # Check how many filled spaces there are\n if \".\" not in self.grid[y][x]:\n count += 1\n if x + y == self.gridSize - 1:\n if self.grid[y][x] == \"P1\":\n P1 += 1\n elif self.grid[y][x] == \"P2\":\n P2 += 1\n winner = self.checkval(P1, P2, self.gridSize)\n # Check if there is a winner if so return the winner\n if winner != None:\n return winner\n # Check if the fields that are filled are equal to the possible spaces to be filled in the grid\n if count == self.gridSize**2:\n return \"Tie\"", "def _ignite_cells(self, istep, ip):\n particle = self.particles[ip] # get particle\n state, x, y = particle.get_from_keys([\"state\", \"x\", \"y\"])\n if state > STTHR:\n for i in range(self.grid.NX-1):\n if abs(x - self.grid.XCELL[i, 0]) < self.grid.DX/2:\n INDX = i\n for j in range(self.grid.NY-1):\n if abs(y - self.grid.YCELL[0, j]) < self.grid.DY/2:\n INDY = j\n cell = self.grid.CELLS[INDX, INDY]\n cell.BURNPROG += 1\n if (cell.QMAXTR > 0 or cell.QMAXBLD > 0) and cell.BURNSTAT == 0:\n cell.BURNSTAT = 1\n cell.CLOCK = self.TIME[istep]\n # elif cell.QMAXTR == 0 or cell.QMAXBLD == 0:\n # particle.update(state=0.0, factor=0.0)\n # if pType == 2:\n # particle.update(state=0.0)", "def _compute_grid_state(self, for_id):\n own = np.zeros_like(self._map, float)\n own_pos = self._id2pos[for_id]\n own[own_pos] = 1\n\n thieves = (self._map == THIEF ).astype(float)\n guardians = (self._map == GUARDIAN).astype(float)\n\n own_team = self.id2team[for_id]\n if own_team == THIEF:\n teammates = thieves\n opponents = guardians\n else:\n teammates = guardians\n opponents = thieves\n\n treasure_channel = (self._map == TREASURE).astype(float)\n\n # Channels first\n return np.stack([own, teammates, opponents, self._walls_channel, treasure_channel])", "def changeCell(self, i, j):\n\t\t#If Cell is on Top row\n\t\tif(i==0):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[0][1] + self.board[1][0] + self.board[1][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[0][self.size-2] + self.board[1][self.size-2] + self.board[1][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[0][j-1] + self.board[1][j] + self.board[0][j+1] + self.board[1][j-1] + self.board[1][j+1]\n\t\t\t\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell on Bottom row\n\t\telif(i==(self.size-1)):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[self.size-1][1] + self.board[self.size-2][0] + self.board[self.size-2][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[self.size-1][self.size-2] + self.board[self.size-2][self.size-2] + self.board[self.size-2][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[self.size-1][j-1] + 
self.board[self.size-2][j] + self.board[self.size-1][j+1] + self.board[self.size-2][j-1] + self.board[self.size-2][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell is in a middle row\n\t\telse:\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1]\n\t\t\telse:\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0", "def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True", "def random_state(self) -> Grid2D.State:\n return Grid2D.State(random.choice(self.empty_cell_list))", "def getGameState(self):\n ### Student code goes here\n\n ask_tile_11 = parse_input(\"fact: (located ?X pos1 pos1)\")\n ask_tile_12 = parse_input(\"fact: (located ?X pos2 pos1)\")\n ask_tile_13 = parse_input(\"fact: (located ?X pos3 pos1)\")\n ask_tile_21 = parse_input(\"fact: (located ?X pos1 pos2)\")\n ask_tile_22 = parse_input(\"fact: (located ?X pos2 pos2)\")\n ask_tile_23 = parse_input(\"fact: (located ?X pos3 pos2)\")\n ask_tile_31 = parse_input(\"fact: (located ?X pos1 pos3)\")\n ask_tile_32 = parse_input(\"fact: (located ?X pos2 pos3)\")\n ask_tile_33 = parse_input(\"fact: (located ?X pos3 pos3)\")\n\n bindings_11 = self.kb.kb_ask(ask_tile_11)\n bindings_12 = self.kb.kb_ask(ask_tile_12)\n bindings_13 = self.kb.kb_ask(ask_tile_13)\n bindings_21 = self.kb.kb_ask(ask_tile_21)\n bindings_22 = self.kb.kb_ask(ask_tile_22)\n bindings_23 = self.kb.kb_ask(ask_tile_23)\n bindings_31 = self.kb.kb_ask(ask_tile_31)\n bindings_32 = self.kb.kb_ask(ask_tile_32)\n bindings_33 = self.kb.kb_ask(ask_tile_33)\n\n row1_list = []\n row2_list = []\n row3_list = []\n\n row1_list.append(bindings_11.list_of_bindings[0][0].bindings[0].constant.element)\n row1_list.append(bindings_12.list_of_bindings[0][0].bindings[0].constant.element)\n row1_list.append(bindings_13.list_of_bindings[0][0].bindings[0].constant.element)\n\n row2_list.append(bindings_21.list_of_bindings[0][0].bindings[0].constant.element)\n row2_list.append(bindings_22.list_of_bindings[0][0].bindings[0].constant.element)\n row2_list.append(bindings_23.list_of_bindings[0][0].bindings[0].constant.element)\n\n row3_list.append(bindings_31.list_of_bindings[0][0].bindings[0].constant.element)\n row3_list.append(bindings_32.list_of_bindings[0][0].bindings[0].constant.element)\n row3_list.append(bindings_33.list_of_bindings[0][0].bindings[0].constant.element)\n\n counter = 0\n for tile in row1_list:\n if tile == \"empty\":\n row1_list[counter] = -1\n else:\n row1_list[counter] = int(tile[4:])\n counter += 1\n\n counter = 0\n for tile in row2_list:\n if tile == \"empty\":\n row2_list[counter] = -1\n else:\n row2_list[counter] = int(tile[4:])\n counter += 1\n\n counter = 0\n for tile in row3_list:\n if tile 
== \"empty\":\n row3_list[counter] = -1\n else:\n row3_list[counter] = int(tile[4:])\n counter += 1\n\n gamestate = (tuple(row1_list), tuple(row2_list), tuple(row3_list))\n return gamestate", "def actions(self, state):\n MovementList = []\n #Check if the agent is able to move a box (Left, Down, Right, Up) \n #without moving it into a taboo cell or pushing two blocks (Invalid move)\n #then move the box in the given direction.\n \n possible_moves = [\"Up\", \"Down\", \"Left\", \"Right\"]\n \n worker = state[0]\n boxes = state[1]\n \n # Iterate throguh the moves and make sure they satify constraints\n for move in possible_moves:\n if (move_coords(worker, move) not in self.walls):\n if (move_coords(worker, move) in boxes):\n if move_coords(move_coords(worker, move), move) in self.taboo:\n pass\n else: \n MovementList.append(move)\n else:\n MovementList.append(move)\n \n return MovementList", "def spawn_ok(game):\n me = game.me\n shipyard_cell = game.game_map[me.shipyard]\n\n # % turns above mining rate to dropoff the halite, will typically be about 2?\n mining_over_head = 2\n ship_count = len(me.get_ships())\n\n #\n # absolute constraints (order can be important)\n #\n\n if ship_count >= MAX_SHIPS:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. MAX ships reached\".format())\n return False\n\n if me.halite_amount < constants.SHIP_COST:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. Insufficient halite\".format())\n return False\n\n #\n # conditional constraints\n #\n\n logging.debug(\"shipyard_cell.is_occupied: {}\".format(shipyard_cell.is_occupied))\n if shipyard_cell.is_occupied:\n logging.debug(\"shipyard_cell.ship.owner == me.id: {}\".format(shipyard_cell.ship.owner == me.id))\n\n # watch for collisions with owner only, note this will be 1 turn behind\n occupied_cells = []\n if shipyard_cell.is_occupied and shipyard_cell.ship.owner == me.id:\n occupied_cells.append(shipyard_cell.position)\n\n logging.debug(\"oc1: {}\".format(occupied_cells))\n\n # entry lane are N/S\n n_cell = shipyard_cell.position.directional_offset(Direction.North)\n s_cell = shipyard_cell.position.directional_offset(Direction.South)\n e_cell = shipyard_cell.position.directional_offset(Direction.East)\n w_cell = shipyard_cell.position.directional_offset(Direction.West)\n for pos in [n_cell, s_cell, e_cell, w_cell]:\n if game.game_map[pos].is_occupied:\n occupied_cells.append(pos)\n\n logging.debug(\"oc2: {}\".format(occupied_cells))\n\n # need to keep track of ships docking instead, a ship in an adjacent cell could be leaving\n if occupied_cells:\n if DEBUG & (DEBUG_GAME): logging.info(\"Game - Spawn denied. 
Occupied cells: {}\".format(occupied_cells))\n return False\n\n return True", "def empty_cells(state):\r\n cells = []\r\n for x, row in enumerate(state):\r\n for y, cell in enumerate(row):\r\n if cell == 0:\r\n cells.append([x, y])\r\n\r\n return cells", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def gameOfLife(self, board: List[List[int]]) -> None:\n changes = list()\n for i in range(len(board)):\n for j in range(len(board[0])):\n neighbor_data = {\n 'live': 0,\n 'dead': 0\n }\n checks = {(0,1), (0,-1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1,-1)}\n if i == 0:\n checks.discard((-1, 0))\n checks.discard((-1, 1))\n checks.discard((-1, -1))\n if j == 0:\n checks.discard((0, -1))\n checks.discard((-1, -1))\n checks.discard((1, -1))\n if i == (len(board) - 1):\n checks.discard((1,0))\n checks.discard((1,-1))\n checks.discard((1, 1))\n if j == (len(board[0]) - 1):\n checks.discard((0, 1))\n checks.discard((-1, 1))\n checks.discard((1, 1))\n for check in checks:\n if board[i + check[0]][j + check[1]]:\n neighbor_data['live'] += 1\n else:\n neighbor_data['dead'] += 1\n if board[i][j]:\n # check live rules\n if neighbor_data['live'] < 2 or neighbor_data['live'] > 3:\n changes.append((i, j))\n else:\n # check dead rules\n if neighbor_data['live'] == 3:\n changes.append((i, j))\n for change in changes:\n board[change[0]][change[1]] = int (not board[change[0]][change[1]])\n \n print (board)", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently" ]
[ "0.69623125", "0.6414086", "0.6400114", "0.63498706", "0.6227592", "0.6179046", "0.6162203", "0.61035144", "0.6049122", "0.604795", "0.6018108", "0.60137135", "0.60055864", "0.6002554", "0.598495", "0.59398884", "0.5925738", "0.5910711", "0.59071994", "0.5905565", "0.5895984", "0.58819795", "0.5880821", "0.5879823", "0.5872225", "0.5826755", "0.5825616", "0.58246577", "0.5802987", "0.5784841", "0.57644796", "0.575903", "0.5750791", "0.5747522", "0.5747344", "0.5746174", "0.5742305", "0.57333994", "0.57234263", "0.5720458", "0.57030207", "0.5702953", "0.57012594", "0.56920326", "0.5671717", "0.56703514", "0.5665134", "0.5651492", "0.564618", "0.5644662", "0.5638183", "0.5638183", "0.5635321", "0.563413", "0.56333596", "0.56330574", "0.5612233", "0.5611423", "0.5610855", "0.5608484", "0.560393", "0.56017506", "0.5594365", "0.55860114", "0.5585741", "0.5581846", "0.5573908", "0.5561342", "0.5556672", "0.5555005", "0.55472994", "0.554024", "0.5539831", "0.5539038", "0.55376214", "0.5535675", "0.5524062", "0.5523404", "0.5522296", "0.5522041", "0.55126745", "0.5512487", "0.5509559", "0.5507776", "0.55076677", "0.5506632", "0.5500923", "0.5497907", "0.54840696", "0.54781914", "0.5476614", "0.5473403", "0.54727495", "0.5472673", "0.54637855", "0.545914", "0.5456236", "0.54552495", "0.5455022", "0.5453495" ]
0.5600861
62
Collect data into fixed-length chunks or blocks
def grouper(iterable, n, fillvalue=None):
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def chunks(data: list, n: int) -> list:\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def chunks(data: List[Any], num: int) -> Generator[List[Any], None, None]:\n for i in range(0, len(data), num):\n yield data[i : i + num]", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def chunks(data, rows=10000):\n\n for i in range(0, len(data), rows):\n yield data[i:i+rows]", "def chunked(self, length, overlap):\n def new_gen():\n buffer = self.read(length)\n while True:\n yield np.array([buffer]) #pack into one more dimension\n new_elems = self.read(length - overlap)\n if new_elems.shape[0] == 0:\n # Reached the end of the stream\n break\n buffer[:overlap] = buffer[length-overlap:]\n buffer[overlap:] = new_elems\n return Stream(new_gen(), chunk_size=1)", "def getChunks():", "def fill_buffer(self):\n num_of_smp = 0\n while num_of_smp < self.buf_size:\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n num_of_smp += len(new_c)\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size", "def perform_chunking(self, data_size, chunk_size):\r\n\r\n chunks, i = [], 0\r\n while True:\r\n chunks.append((i * (chunk_size - self.overlap / 2), i * (chunk_size - self.overlap / 2) + chunk_size))\r\n i += 1\r\n if chunks[-1][1] > data_size:\r\n break\r\n\r\n n_count = len(chunks)\r\n chunks[-1] = tuple(x - (n_count * chunk_size - data_size - (n_count - 1) * self.overlap / 2) for x in chunks[-1])\r\n chunks = [(int(x), int(y)) for x, y in chunks]\r\n return chunks", "def recv_chunk(self, data):", "def chunked(size, source):\n for i in range(0, len(source), size):\n yield source[i : i + size]", "def static_batch(data, batch_size=16):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= batch_size:\n yield buf\n buf = []\n if len(buf) > 0:\n yield buf", "def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)", "def chunk(seq, size, groupByList=True):\n func = tuple\n if groupByList:\n func = list\n return [func(seq[i:i + size]) for i in range(0, len(seq), size)]", "def chunk(max_elems = 8192, dtype = numpy.float64):\n\n @filters\n def 
_dagpype_internal_fn_act(target):\n assert max_elems > 0\n dtype_ = dtype\n\n l = []\n try:\n while True:\n while len(l) < max_elems:\n l.append((yield))\n target.send(numpy.array(l, dtype = dtype_))\n l = []\n except GeneratorExit:\n if len(l) > 0:\n target.send(numpy.array(l, dtype = dtype_)) \n \n return _dagpype_internal_fn_act", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def some_payloaded_data(length=1000000, size=32, var=0):\n for datum in some_simple_data(length):\n yield DataWithPayload(datum, some_payload(size, var))", "def _chunker(self, seq, size):\n return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def make_chunks(l, chunk_length):\n for i in range(0, len(l), chunk_length):\n yield l[i:i + chunk_length]", "def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf", "def iter_unpack(raw):\n return chunks(raw)", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res", "def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]", "def chunk(it, size):\n it = iter(it)\n return iter(lambda: list(islice(it, size)), [])", "def build(self, block_size):", "def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]", "def chunks(items, size):\n return [items[i:i+size] for i in range(0, len(items), size)]", "def chunked(self, n):\n return imap(self.__class__, chunked(self._bytes, n))", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def _get_data_chunk(self):\n if self._start_pos < self.max_pos:\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n elif self._current_forc < self._num_forcs - 1:\n # Resest for next FORC\n self._current_forc += 1\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n self._get_dc_offset()\n\n 
self._start_pos = 0\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n\n else:\n self.data = None\n\n return", "def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group", "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def chunks(seq, size):\n for i in range(0, len(seq), size):\n yield seq[i:i + size]", "def chunks(items, chunk_size):\r\n items = list(items)\r\n return (items[i:i + chunk_size] for i in xrange(0, len(items), chunk_size))", "def chunks(arr, n):\n for i in range(0, len(arr), n):\n yield arr[i:i + n]", "def get_chunks(self, l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def _get_chunks(l, n = 10):\n \n for i in range(0, len(l), n): yield l[i: i + n]", "def chunker(seq, size):\n\n return (seq[pos : pos + size] for pos in range(0, len(seq), size))", "def __iter__(self):\n\n # collector will fetch chunksize array for each 'get' call\n collector = FIFOArray(self.chunksize, self.axis)\n\n # make tmp array to hold generated subarrs\n tmp = []\n tmp_size = 0\n for subarr in self.data(**self.kwargs):\n\n tmp.append(subarr)\n tmp_size += subarr.shape[self.axis]\n\n # if tmp exceeds chunksize put in collector\n if tmp_size >= self.chunksize:\n arr = np.concatenate(tmp, axis=self.axis)\n collector.put(arr)\n\n # fetch chunksize till not full\n while collector.full():\n yield collector.get()\n\n # place leftover back into tmp and empty collector\n tmp = [collector.queue]\n tmp_size = collector.qsize()\n collector.queue = np.array([])\n\n else:\n\n # append to tmp again\n continue\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n # yield whatever is left in tmp (its below chunksize)\n remaining = np.concatenate(tmp, axis=self.axis)\n if remaining.size > 0:\n yield remaining", "def chunks(data, overrides = {}):\n counter, filesize = 0, len(data)\n last = None\n while counter < filesize:\n try:\n magic, size = chunk.unpack_from(data, counter)\n except struct_error as e:\n print('failed loading chunk from', data[:counter])\n print('last chunk:', last)\n raise e\n\n counter += chunk.size\n contents = data[counter:counter+size]\n\n if magic[3] != 0x4D:\n raise Exception('bad magic', magic, 'last chunk:', last)\n\n if magic in overrides:\n size = overrides[magic]\n\n yield magic, size, contents\n counter += size\n\n last = (magic, size, contents)", "def chunks(arr, n):\n for i in range(0, len(arr), n):\n yield arr[i:i + n]", "def test_chunk_memory(self):\n layer = tl.Serial(tl.Dense(1024*1024), tl.Dense(128))\n chunked = tl.Chunk(layer, 256)\n x = np.random.uniform(size=(16*1024, 16))\n chunked.init(shapes.signature(x))\n y = chunked(x)\n z = tl.Accelerate(chunked)(x)\n self.assertEqual(y.shape, (16*1024, 128))\n self.assertEqual(z.shape, (16*1024, 128))", "def 
chunk(list, chunksize):\n for i in range(0, len(list), chunksize):\n yield list[i:i + chunksize]", "def _chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list", "def chunks(self, n):\n return _([self._[i:i+n] for i in range(0, self.size()._, n)])", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def all_gather_list(data, group=None, max_size=16384):\n SIZE_STORAGE_BYTES = 4 # int32 to encode the payload size\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n\n if enc_size + SIZE_STORAGE_BYTES > max_size:\n raise ValueError(\n 'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))\n\n rank = get_rank()\n world_size = get_world_size()\n buffer_size = max_size * world_size\n\n if not hasattr(all_gather_list, '_buffer') or \\\n all_gather_list._buffer.numel() < buffer_size:\n all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)\n all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()\n\n buffer = all_gather_list._buffer\n buffer.zero_()\n cpu_buffer = all_gather_list._cpu_buffer\n\n assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(\n 256 ** SIZE_STORAGE_BYTES)\n\n size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')\n\n cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))\n cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))\n\n start = rank * max_size\n size = enc_size + SIZE_STORAGE_BYTES\n buffer[start: start + size].copy_(cpu_buffer[:size])\n\n all_reduce(buffer, group=group)\n\n try:\n result = []\n for i in range(world_size):\n out_buffer = buffer[i * max_size: (i + 1) * max_size]\n size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')\n if size > 0:\n result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))\n return result\n except pickle.UnpicklingError:\n raise Exception(\n 'Unable to unpickle data from other workers. all_gather_list requires all '\n 'workers to enter the function together, so this error usually indicates '\n 'that the workers have fallen out of sync somehow. 
Workers can fall out of '\n 'sync if one of them runs out of memory, or if there are other conditions '\n 'in your training script that can cause one worker to finish an epoch '\n 'while other workers are still iterating over their portions of the data.'\n )", "def _chunkify(arr, size):\n arrs = []\n for i in range(0, len(arr), size):\n chunk = bytearray(arr[i:i + size])\n arrs.append(chunk)\n return arrs", "def chunk(flat, sizes):\n iter_flat = iter(flat)\n yield from (list(islice(iter_flat, 0, size)) for size in sizes)", "def chunks(item_list, n_items):\n for i in range(0, len(item_list), n_items):\n yield item_list[i : i + n_items]", "def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)", "def chunks(sequence: Iterable[T], chunk_size: int = 2) -> Iterable[List[T]]:\n lsequence = list(sequence)\n while lsequence:\n size = min(len(lsequence), chunk_size)\n yield lsequence[:size]\n lsequence = lsequence[size:]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def batch(byte_array, funcs):\n result = []\n length = bytes_to_int(byte_array[0:4])\n item_size = bytes_to_int(byte_array[4:8])\n for i in range(0, length):\n chunk = byte_array[8+i*item_size:8+(i+1)*item_size]\n for f in funcs:\n f(chunk)\n return result", "def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunker(iterable, size):\n for i in range(0, len(iterable), size):\n yield iterable[i:i + size]", "def chunks(A, N):\n for i in range(0, len(A)):\n r = A[i:i+N]\n if len(r) == N:\n yield r", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunkify(list,size):\n for i in range (0, len(list), size):\n yield list[i:i+size]", "def _batchify(self, data, align_right=False, include_lengths=False):\n lengths = [x.size(0) for x in data]\n max_length = max(lengths)\n out = data[0].new(len(data), max_length).fill_(neusum.Constants.PAD)\n for i in range(len(data)):\n data_length = data[i].size(0)\n offset = max_length - data_length if align_right else 0\n out[i].narrow(0, offset, data_length).copy_(data[i])\n\n if include_lengths:\n return out, lengths\n else:\n return out", "def chunks(alist, n):\n for i in range(0, len(alist), n):\n yield alist[i:i + n]", "def _buffered_func(dataset, size):\n\n class _EndSignal(object):\n pass\n\n end = _EndSignal()\n\n def _read_worker(r, q):\n for d in r:\n q.put(d)\n q.put(end)\n\n def _data_reader():\n r = dataset()\n q = multiprocessing.Queue(maxsize=size)\n t = multiprocessing.Process(\n target=_read_worker, args=(\n r,\n q, ))\n t.daemon = True\n t.start()\n e = q.get()\n while e != end:\n yield e\n e = q.get()\n\n return _data_reader", "def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]", "def _read_blocks(input_data, size=2**20):\n\n if isinstance(input_data, (BufferedReader, BytesIO)):\n f = input_data\n opened = False\n elif input_data == '-':\n f = sys.stdin.buffer # read binary instead of unicode\n opened = False\n else:\n f = open(input_data, 'rb')\n opened = True\n\n try:\n\n data = 
f.read(size)\n while len(data) > 0:\n yield data\n data = f.read(size)\n finally:\n if opened:\n f.close()", "def chunks(self, l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunk(lst, chunk_len):\n\n for index in range(0, len(lst), chunk_len):\n yield lst[index:index + chunk_len]", "def grouper(iterable, n):\n it = iter(iterable)\n while True:\n chunk = tuple(islice(it, n))\n if not chunk:\n return\n yield chunk", "def _chunk(iterable, size, fillvalue=None):\n\t\targs = [iter(iterable)] * size\n\t\treturn [''.join(x) for x in itertools.izip_longest(*args, fillvalue=fillvalue)]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunkize_serial(iterable, chunksize, as_numpy=False, dtype=np.float32):\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[np.array(doc, dtype=dtype) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()", "def pack_unpack_hard():\n # Array is apprx. 1.5 GB large\n # should make apprx 1536 chunks\n pack_unpack(100, chunk_size=reverse_pretty('1M'), progress=simple_progress)", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def chunks(array, size: int):\r\n for i in range(0, len(array), size):\r\n yield array[i:i + size]", "def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))", "def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]", "def build_chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i:i+n]", "def chunk(l, n=500):\n return [l[i:i+n] for i in range(0, len(l), n)]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def chunks(self, big_list, n):\n for i in range(0, len(big_list), n):\n yield big_list[i:i + n]", "def chunk(items, chunk_size):\n start_index = 0\n for start_index in xrange(0, len(items), chunk_size):\n end_index = min(start_index+chunk_size, len(items))\n yield items[start_index:end_index]", "def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]" ]
[ "0.719677", "0.6773225", "0.6730021", "0.6680324", "0.6640408", "0.6590066", "0.65835804", "0.6442601", "0.6409235", "0.6338401", "0.6329446", "0.63054276", "0.62845904", "0.62551296", "0.6208536", "0.61931676", "0.61904657", "0.61624444", "0.6139798", "0.61395085", "0.6132434", "0.61276203", "0.6122731", "0.61212295", "0.61159533", "0.6108721", "0.60819376", "0.60680634", "0.6058476", "0.60563767", "0.6053068", "0.6041372", "0.6037783", "0.6036225", "0.6011828", "0.5996495", "0.59833133", "0.59762454", "0.59653133", "0.5952669", "0.59524304", "0.5948817", "0.5948479", "0.594678", "0.5946217", "0.5940416", "0.59388435", "0.59384", "0.59298396", "0.592769", "0.5925492", "0.59121263", "0.5911052", "0.5909056", "0.59083295", "0.5897241", "0.5893316", "0.5885597", "0.5879244", "0.5874232", "0.5871904", "0.5859664", "0.5855663", "0.58546954", "0.5853913", "0.5853913", "0.5853913", "0.58535653", "0.5852855", "0.5848501", "0.5845817", "0.58451694", "0.584331", "0.5841809", "0.5839486", "0.5839036", "0.58389395", "0.58332", "0.5832653", "0.5830454", "0.5827135", "0.58199453", "0.581866", "0.58033884", "0.58030635", "0.57974917", "0.57962865", "0.57903546", "0.57886183", "0.57842547", "0.57822704", "0.5781485", "0.5781485", "0.5781485", "0.5781485", "0.5781485", "0.5775787", "0.57757515", "0.57732415", "0.5771817", "0.57689655" ]
0.0
-1
Temporary workaround for the static template tag to support SCRIPT_NAME
def static(parser, token):
    return NewStaticNode.handle_token(parser, token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_script_name(t_req):\n if settings.FORCE_SCRIPT_NAME is not None:\n return force_text(settings.FORCE_SCRIPT_NAME)\n\n # If Apache's mod_rewrite had a whack at the URL, Apache set either\n # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any\n # rewrites. Unfortunately not every Web server (lighttpd!) passes this\n # information through all the time, so FORCE_SCRIPT_NAME, above, is still\n # needed.\n script_url = t_req.headers.get('SCRIPT_URL', '')\n if not script_url:\n script_url = t_req.headers.get('REDIRECT_URL', '')\n\n if script_url:\n path_info = t_req.headers.get('PATH_INFO', '')\n script_name = script_url[:-len(path_info)]\n else:\n script_name = t_req.headers.get('SCRIPT_NAME', '')\n\n # It'd be better to implement URI-to-IRI decoding, see #19508.\n # return script_name.decode(UTF_8)\n return script_name", "def ext_static(context, extension, path):\n return static('ext/%s/%s' % (extension.id, path))", "def get_js_file(self):\n return 'placeholder'", "def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }", "def get_default_javascript():\n return [\"_static/require.js\"]", "def getScriptname():\n return os.environ.get('SCRIPT_NAME', '')", "def static(request):\n return {\n 'JSERRORLOGGING_STATIC_URL': STATIC_URL\n }", "def static(filename):\n\ttimestamp = os.path.getmtime(os.path.join(app.static_folder, filename))\n\treturn \"%s/%s?%s\" % (app.static_url_path, filename, timestamp)", "def static(filename):\n\ttimestamp = os.path.getmtime(os.path.join(app.static_folder, filename))\n\treturn \"%s/%s?%s\" % (app.static_url_path, filename, timestamp)", "def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline", "def module_use_template_javascript(self):\n return False", "def module_use_template_javascript(self):\n return False", "def add_static(ext):\n ext = ext.lower()\n\n compiler = StaticCompiler(ext)\n file_list = compiler.get_staticfiles_list()\n\n return render_to_string(\n \"mub/context_%s.html\" % ext,\n {\n \"items\": file_list,\n \"STATIC_URL\": settings.STATIC_URL,\n \"IS_MINIFIED\": compiler.is_minified\n }\n )", "def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'", "def include_admin_script(script_path):\n if not absolute_url_re.match(script_path):\n script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)\n return '<script type=\"text/javascript\" src=\"%s\"></script>' % script_path", "def static_html(subpath):\n return render_template(f'static_html/{subpath}.html')", "def _template_file_default(self):\n return \"index\"", "def get_wsgi_file_name(self):\n return self.wsgi", "def render(self, template_name, **kwargs):\n currentUser = self.current_user\n from_workspace_str = self.get_argument(\"from_workspace\", default=\"0\", strip=False)\n from_workspace = from_workspace_str == \"1\"\n html = self.render_string(template_name, currentUser=currentUser, from_workspace = from_workspace, **kwargs)\n if from_workspace :\n scriptName = self.__class__.__name__\n\n if scriptName.endswith('Handler') :\n scriptName = scriptName[:-7] \n\n path = self.static_url('scripts/' + scriptName + '.js')\n\n js = '<script src=\"' + escape.xhtml_escape(path) + '\" type=\"text/javascript\"></script>'\n html = html + utf8(js)\n 
self.finish(html)\n return\n\n # Insert the additional JS and CSS added by the modules on the page\n js_embed = []\n js_files = []\n css_embed = []\n css_files = []\n html_heads = []\n html_bodies = []\n for module in getattr(self, \"_active_modules\", {}).values():\n embed_part = module.embedded_javascript()\n if embed_part:\n js_embed.append(utf8(embed_part))\n file_part = module.javascript_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n js_files.append(file_part)\n else:\n js_files.extend(file_part)\n embed_part = module.embedded_css()\n if embed_part:\n css_embed.append(utf8(embed_part))\n file_part = module.css_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n css_files.append(file_part)\n else:\n css_files.extend(file_part)\n head_part = module.html_head()\n if head_part:\n html_heads.append(utf8(head_part))\n body_part = module.html_body()\n if body_part:\n html_bodies.append(utf8(body_part))\n\n def is_absolute(path):\n return any(path.startswith(x) for x in [\"/\", \"http:\", \"https:\"])\n if js_files:\n # Maintain order of JavaScript files given by modules\n paths = []\n unique_paths = set()\n for path in js_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n js = ''.join('<script src=\"' + escape.xhtml_escape(p) +\n '\" type=\"text/javascript\"></script>'\n for p in paths)\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + utf8(js) + b'\\n' + html[sloc:]\n if js_embed:\n js = b'<script type=\"text/javascript\">\\n//<![CDATA[\\n' + \\\n b'\\n'.join(js_embed) + b'\\n//]]>\\n</script>'\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + js + b'\\n' + html[sloc:]\n if css_files:\n paths = []\n unique_paths = set()\n for path in css_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n css = ''.join('<link href=\"' + escape.xhtml_escape(p) + '\" '\n 'type=\"text/css\" rel=\"stylesheet\"/>'\n for p in paths)\n hloc = html.index(b'</head>')\n html = html[:hloc] + utf8(css) + b'\\n' + html[hloc:]\n if css_embed:\n css = b'<style type=\"text/css\">\\n' + b'\\n'.join(css_embed) + \\\n b'\\n</style>'\n hloc = html.index(b'</head>')\n html = html[:hloc] + css + b'\\n' + html[hloc:]\n if html_heads:\n hloc = html.index(b'</head>')\n html = html[:hloc] + b''.join(html_heads) + b'\\n' + html[hloc:]\n if html_bodies:\n hloc = html.index(b'</body>')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n self.finish(html)", "def server_static (filename):\n return static_file(filename, root=\"./static\")", "def include_static_files(app):\n file_path = sphinx_prolog.get_static_path(STATIC_FILE)\n if file_path not in app.config.html_static_path:\n app.config.html_static_path.append(file_path)", "def render(request, template):\r\n return render_to_response('static_templates/' + template, {})", "def topcoat_icons_script_tag():\n return u'<script type=\"text/javascript src=\"%s\"></script>' % topcoat_icons_script_url()", "def template_name(self):\n\t\traise NotImplementedError('template_name must be defined')", "def scriptpath(self, code):\n return '' if code == 'en' else ('/' + code)", "def get_script_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.scripts.get(self.script or '')", "def glr_path_static():\n return 
os.path.join(base_path, \"static\")", "def url(self, url):\n prefix = self.request_local.environ['toscawidgets.prefix']\n script_name = self.request_local.environ['SCRIPT_NAME']\n if hasattr(url, 'url_mapping'):\n url = url.url_mapping['normal']\n return ''.join([script_name, prefix, url])", "def static_index():\n return \"xxxxxx.your-domain.tld\"", "def server_static(filename):\n return static_file(filename, root='static/stats')", "def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]", "def get_available_name(self, name, *args, **kwargs):\n # If the filename already exists, remove it as if it was a true file system\n if self.exists(name) and os.path.exists(\n os.path.join(settings.STATIC_ROOT, name)):\n os.remove(os.path.join(settings.STATIC_ROOT, name))\n return name", "def static(website, request, **etc):\n return website.static.respond(request)", "def _remap_static(self, stream, prefix='/static/'):\n def map_static(name, event):\n attrs = event[1][1]\n name = attrs.get(name)[len(prefix):]\n if self.static_map:\n name = self.static_map.get(name, name)\n return static(name)\n return stream | Transformer('//*[matches(@src, \"^%s\")]' % prefix).attr('src', map_static) | \\\n Transformer('//*[matches(@href, \"^%s\")]' % prefix).attr('href', map_static)", "def complete_static_filename(self, filename):\n return staticfiles_finder(filename)", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def static(filename):\n return href.static(file=filename)", "def href(self, request) -> str:\n return request.static_path(self.url_spec)", "def static(self, filename):\n return send_from_directory(self.static_path, filename)", "def template_loader(self):\n return None", "def djangify(line: str):\n\n global APP_NAME\n\n # Don't change the contents of the line if it contails a URL that links\n # outside content. Ex. 
www.example.com/webpage.html\n if containsURL(line):\n return line\n # Don't change the line if it contains placeholder URL like '#'\n if line == '#':\n return line\n # If line links to an internal file, make it Django compatible by loading\n # from static directory appended with APP_NAME\n return \" {% static '\" + APP_NAME + line + \"' %} \"", "def get_src_js(self):\n if self.get_style() != self.STYLE_BASE:\n return f\"dtables/js/dataTables.{self.get_style()}.js\"\n else:\n return f\"dtables/js/{self.get_style()}.dataTables.js\"", "def core_cdn_file(request, source):\n\n file_path = settings.CENTIPAIR_TEMPLATE_DIR + \"/cdn/\" + source\n source_file_url = settings.TEMPLATE_STATIC_URL + \"/\" + file_path\n return source_file_url", "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def getBaseURL():\n return getQualifiedURL(getScriptname())", "def get_template_name(request, base_template_name):\n template_base_dir = get_template_base_directory(request)\n return f\"cast/{template_base_dir}/{base_template_name}\"", "def staticfile(path):\n normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path and getattr(settings, 'STATIC_ROOT', None):\n absolute_path = os.path.join(settings.STATIC_ROOT, path)\n if absolute_path:\n return '%s%s?v=%s' % (settings.STATIC_URL, path, os.stat(absolute_path)[stat.ST_MTIME])\n return path", "def static_url(self, path):\n\t\tif not hasattr(self, \"_static_hashes\"):\n\t\t\tself._static_hashes = {}\n\t\thashes = self._static_hashes\n\t\tif path not in hashes:\n\t\t\timport hashlib\n\t\t\ttry:\n\t\t\t\tf = open(os.path.join(\n\t\t\t\t\tself.application.settings[\"static_path\"], path))\n\t\t\t\thashes[path] = hashlib.md5(f.read()).hexdigest()\n\t\t\t\tf.close()\n\t\t\texcept:\n\t\t\t\tprint \"Could not open static file %r\"%path\n\t\t\t\thashes[path] = None\n\t\tbase = \"http://static.\"+_config.get(\"varnish\", \"ovzcphost\") + \"/\"\n\t\tif hashes.get(path):\n\t\t\treturn base + path + \"?v=\" + hashes[path][:5]\n\t\telse:\n\t\t\treturn base + path", "def get_xmodule_urls():\r\n if settings.DEBUG:\r\n paths = [path.replace(\".coffee\", \".js\") for path in\r\n settings.PIPELINE_JS['module-js']['source_filenames']]\r\n else:\r\n paths = [settings.PIPELINE_JS['module-js']['output_filename']]\r\n return [staticfiles_storage.url(path) for path in paths]", "def asset_tag(request, key, **kwargs):\n theme = request.theme\n asset = theme.stacked_assets[key]\n settings = request.registry.settings\n should_compile = asbool(settings.get('pyramid_frontend.compile'))\n\n if should_compile:\n filename = theme.compiled_asset_path(key)\n url_path = '/compiled/' + theme.key + '/' + filename\n else:\n url_path = asset.url_path\n\n return literal(asset.tag(theme, url_path, production=should_compile,\n **kwargs))", "def generate_loader_vanilla():\n return template_loader_vanilla", "def client_plugin_source(self, language):\n\n static = self.static\n if static is None:\n return None\n\n filename = os.path.join(static, \"main.\" + language)\n realfilename = os.path.realpath(filename)\n\n if not 
realfilename.startswith(self.static + '/'): # pragma: no cover\n raise ValueError(\"Invalid language `%s`\" % language)\n\n if not os.path.isfile(realfilename):\n return None\n\n return realfilename", "def cdn_file(request, source):\n site = request.site\n file_path = site.template_dir + \"/cdn/\" + source\n source_file_url = settings.TEMPLATE_STATIC_URL + \"/\" + file_path\n return source_file_url", "def server_static_img(filename):\n return static_file(filename, root='static/img')", "def generic(request):\n\n # Load context\n json_path = os.path.join(settings.STATIC_ROOT,'json/context.json')\n context = simplejson.loads(''.join(open(json_path).readlines()))\n\n # Determine template name\n template_path = request.path[1:]\n if template_path == '':\n template_path = 'index.html'\n if template_path.endswith('/'):\n template_path += 'index.html'\n elif not template_path.endswith('.html'):\n template_path += '.html'\n\n # Check if template exists \n template_found = False\n for template_dir in settings.TEMPLATE_DIRS:\n full_template_path = os.path.join(template_dir, template_path)\n if os.path.isfile(full_template_path):\n template_found = True\n break\n\n if not template_found:\n raise Http404\n\n return direct_to_template(request, template_path, context)", "def test_static_package_resource(self):\n resource = StaticResource('pyramid_webpack:jinja2ext.py')\n import pyramid_webpack.jinja2ext\n with resource.open() as i:\n self.assertEqual(i.read(),\n inspect.getsource(pyramid_webpack.jinja2ext))", "def setStaticContent(*args):", "def test_raw_static_check():\r\n path = '\"/static/foo.png?raw\"'\r\n assert_equals(path, replace_static_urls(path, DATA_DIRECTORY))\r\n\r\n text = 'text <tag a=\"/static/js/capa/protex/protex.nocache.js?raw\"/><div class=\"'\r\n assert_equals(path, replace_static_urls(path, text))", "def get_static_path(path, aid, filename):\n return os.path.join(path, aid, os.path.basename(filename))", "def have_asp_extension(l):\r\n if \".asp\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def loadjs(*args):\n return render(settings, 'JS_FILES', 'staticloader/load_js.html', *args)", "def template_context(request):\n context = {\n 'application_version': settings.APPLICATION_VERSION,\n }\n context.update(settings.STATIC_CONTEXT_VARS)\n return context", "def get_view_template_name(self, request=None, origin=None):\n if not self.view_template_name_ajax:\n return self.view_template_name\n elif request and request.is_ajax():\n return self.view_template_name_ajax\n else:\n return self.view_template_name", "def __get_server_static__(app_path,static_dir):\n import os\n # from . 
import config_loader\n\n # root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n _path = (static_dir).replace(\"/\", os.path.sep)\n return os.sep.join([app_path, _path])", "def test_string_pattern(self):\n with patch_settings(LIVETRANSLATION_JQUERY=u'/jquery.js'):\n pattern, url = process_jquery_setting()\n self.assertEqual(pattern, ur'<script\\s[^>]*src=\"\\/jquery\\.js\"')", "def test_abs_static_view(self):\n settings = {\n 'webpack.bundle_dir': '/foo/bar/baz',\n }\n state = WebpackState(settings)\n self.assertEqual(state.static_view_path, '/foo/bar/baz')", "def bootstrap_javascript_url():\n return javascript_url()", "def test_replace_namespaced_template(self):\n pass", "def __init__(self, static_url):\n super(AsyncHeadRenderer, self).__init__(static_url=static_url)\n\n self._anonymous_css = [] # CSS\n self._anonymous_javascript = [] # Javascript code", "def js(filepath):\n return static_file(filepath, root=\"public\")", "def PyHiew_GetScriptFileName(script):\r\n return '%s\\\\%s.py' % (PYHIEW_PATH, script)", "def package_filename(dist, *filename):\n static = static_filename(dist)\n if static is None:\n return\n if not os.path.exists(os.path.join(static, 'js', 'package.json')):\n return\n js_filename = os.path.abspath(os.path.join(static, 'js'))\n if filename is not None:\n js_filename = os.path.join(js_filename, *filename)\n if not os.path.exists(js_filename):\n return\n return js_filename", "def get_static_transcript(self, request):\r\n response = Response(status=404)\r\n # Only do redirect for English\r\n if not self.transcript_language == 'en':\r\n return response\r\n\r\n video_id = request.GET.get('videoId', None)\r\n if video_id:\r\n transcript_name = video_id\r\n else:\r\n transcript_name = self.sub\r\n\r\n if transcript_name:\r\n # Get the asset path for course\r\n asset_path = None\r\n if hasattr(self.descriptor.runtime, 'modulestore'):\r\n course = self.descriptor.runtime.modulestore.get_course(self.course_id)\r\n asset_path = course.static_asset_path\r\n else:\r\n # Handle XML Courses that don't have modulestore in the runtime\r\n asset_path = getattr(self.descriptor, 'data_dir', None)\r\n\r\n if asset_path:\r\n response = Response(\r\n status=307,\r\n location='/static/{0}/{1}'.format(\r\n asset_path,\r\n subs_filename(transcript_name, self.transcript_language)\r\n )\r\n )\r\n return response", "def html_template_file(self):\n pass", "def get_static_path(name):\n path = find_static_path(name)\n if path is None:\n path = staticfiles_storage.path(name)\n return os.path.abspath(path)", "def include_file(ctx, name):\n env = ctx.environment\n return jinja2.Markup(env.loader.get_source(env, name)[0])", "def template_hook_point(context, name):\n s = \"\"\n for hook in TemplateHook.by_name(name):\n if hook.applies_to(context):\n s += hook.render_to_string(context.get('request', None), context)\n\n return s", "def asset_url(filename=\"\", version=True):\n if filename.startswith(\"http\") or filename.startswith(\"/\"):\n return filename\n else:\n if config.static_url:\n return_url = \"http://\" + config.static_url\n else:\n return_url = \"/static\" # web.ctx.home + \"/static\"\n if filename:\n return_url += \"/\" + filename\n if version:\n return_url += \"?\" + config.asset_version\n return return_url", "def _get_template_filename(self):\n file_name = ReportMeta.reports[self._report_key]['fileName']\n return '{}.html'.format(file_name)", "def get_path(self):\n return StaticAsset.get_static_path(self._name)", "def un_src(self):\n if self.src is None:\n return\n 
self.inline = '''\n var script = document.createElement('script');\n script.src = \"%s\";\n document.body.appendChild(script);\n''' % self.src\n self.src = None", "def _clear_script_name(self, raw: str):\n\n # remove all occurrences of \"%ScriptPath%\\\"\n st = re.sub(\"%scriptpath%/\", \"\", raw, flags=re.I)\n\n # return (External) annotation, if necessary\n if (\"%opsiscripthelperpath%/lib\" in st.lower()) or\\\n (\"%winstdir%/lib\" in st.lower()) or\\\n (\"%scriptdrive%/\" in st.lower()) or\\\n (\"%/\" in st.lower()):\n st = \"(External) \" + st\n return st", "def get_script_name(pidx):\n suffix = \"\"\n if pidx >= 200:\n suffix = \"200\"\n elif pidx >= 100:\n suffix = \"100\"\n return f\"{BASEDIR}/scripts{suffix}/p{pidx}.py\"", "def get_template_names(self):\n name = self.__class__.__name__.replace(\"DatatableView\", \"\")\n name = re.sub(r'([a-z]|[A-Z]+)(?=[A-Z])', r'\\1_', name)\n return [\"demos/\" + name.lower() + \".html\", \"example_base.html\"]", "def set_jinja_before_request():\n resource_provider.set_jinja_globals()", "def ext_js_bundle(context, extension, name):\n return _render_js_bundle(context, extension, name)", "def test_url_current_app():\n from coffin.template.loader import get_template_from_string\n from django.template import RequestContext\n from django.http import HttpRequest\n t = get_template_from_string('{% url testapp:the-index-view %}')\n assert t.render(RequestContext(HttpRequest())) == '/app/one/'\n assert t.render(RequestContext(HttpRequest(), current_app=\"two\")) == '/app/two/'", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def serving_path_parameterized(self):\n return self.pod.path_format.format_static(\n self.path_format, locale=self.locale, parameterize=True)", "def version(_):\n\n return {'version': import_module(environ['DJANGO_SETTINGS_MODULE']).STATIC_VERSION}", "def static_url(self, path, include_host=None, **kwargs):\n raise NotImplementedError()", "def index(request):\n return render_to_response(\n # note: this is slightly different than the labs app with \"app/app.html\" rather than the labs/labs.html\n # and we don't pass submodule name. 
fixme, by changing to new style with name = app_name\n settings.JS_HOME+'app.html',\n {'INDIVO_UI_APP_CSS': settings.INDIVO_UI_SERVER_BASE+'/jmvc/ui/resources/css/ui.css'}\n )", "def render_template():\n template_engine = engines['django']\n def func(template_string):\n load_tags_string = '{% load wagtailextensions_tags %}'\n return template_engine.from_string(load_tags_string + template_string).render()\n return func", "def _django_prefix():\n return _interpolate(DJANGO_PREFIX)", "def __init__(self, static_url):\n super(HeadRenderer, self).__init__()\n\n # Directory where are located the static contents of the application\n self.static_url = static_url\n\n self._named_css = {} # CSS code\n self._css_url = {} # CSS URLs\n self._named_javascript = {} # Javascript code\n self._javascript_url = {} # Javascript URLs\n\n self._order = 0 # Memorize the order of the javascript and css", "def wsgi_template(appname, path, logging_level=\"INFO\"):\n template = (\n 'import os\\n'\n 'import logging\\n'\n 'import logging.config\\n'\n '\\n'\n 'activate_this = \"{path}/env/bin/activate_this.py\"\\n'\n 'execfile(activate_this, dict(__file__=activate_this))\\n'\n '\\n'\n 'logging.basicConfig(level=logging.{logging_level})\\n'\n 'logging.config.fileConfig(\"{path}/logging.ini\")\\n'\n '\\n'\n 'from {app}.main import app as application\\n'\n .format(\n app=appname,\n app_up=appname.upper(),\n path=path,\n logging_level=logging_level\n )\n )\n return template", "def _path(self):\n path = REQUIRES['static_url']\n\n # add paths as specified\n for prefix, subpath in self.getPrefixDict().items():\n if ( self.filename.startswith(prefix) ):\n path += subpath\n break;\n\n return path", "def _add_static_files(self, req):\n add_script(req, self._get_jqplot('jquery.jqplot'))\n add_stylesheet(req, 'common/js/jqPlot/jquery.jqplot.css')\n # excanvas is needed for IE8 support\n add_script(req, self._get_jqplot('excanvas.min'))\n add_script(req, self._get_jqplot('plugins/jqplot.dateAxisRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.highlighter'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasTextRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisTickRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisLabelRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.enhancedLegendRenderer'))", "def site_name(request):\n return {'site_name':'CatFood'}", "def static_files(filename):\n static_path = os.path.join(frontend.root_path, 'templates', current_app.config['FRONTEND_THEME'], 'static')\n return send_from_directory(static_path, filename)", "def test_theme_template_loading_by_prefix():\n app = create_ctfd()\n with app.test_request_context():\n tpl1 = render_template_string(\"{% extends 'core/page.html' %}\", content=\"test\")\n tpl2 = render_template(\"page.html\", content=\"test\")\n assert tpl1 == tpl2" ]
[ "0.6034971", "0.5963628", "0.59341127", "0.58655584", "0.5860948", "0.58456707", "0.58188504", "0.5782478", "0.5782478", "0.57443565", "0.5699513", "0.5699513", "0.56902325", "0.56528944", "0.5573736", "0.5472369", "0.5468209", "0.5464693", "0.5461462", "0.54162765", "0.53910553", "0.538104", "0.53701425", "0.5369225", "0.536596", "0.53489655", "0.53435516", "0.5311419", "0.5294258", "0.52730364", "0.5262032", "0.5242976", "0.52316815", "0.5219977", "0.52123845", "0.521105", "0.5192126", "0.5190773", "0.51899564", "0.518833", "0.5175464", "0.5170378", "0.5154159", "0.5141139", "0.51323366", "0.5122901", "0.51225185", "0.5092882", "0.5088868", "0.50861806", "0.50817615", "0.50784254", "0.5069011", "0.50455844", "0.50420916", "0.5040238", "0.5033475", "0.5029751", "0.50289935", "0.5008645", "0.5005721", "0.5004751", "0.5002828", "0.5002737", "0.49784938", "0.49644417", "0.49602193", "0.49558663", "0.4945296", "0.49437475", "0.49429536", "0.4941219", "0.49371633", "0.4929612", "0.49291015", "0.49276373", "0.49224812", "0.49196637", "0.49163157", "0.49078634", "0.49033877", "0.49017414", "0.48976135", "0.4892192", "0.48915917", "0.48889297", "0.48855653", "0.4882505", "0.4879054", "0.4877958", "0.48731226", "0.4871609", "0.48551655", "0.48524162", "0.48400033", "0.48394898", "0.48374212", "0.48366025", "0.48264006", "0.48258138", "0.48223126" ]
0.0
-1